<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1" xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:video="http://www.google.com/schemas/sitemap-video/1.1">
  <url>
    <loc>https://www.andrew-silva.com/blog</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2025-02-18</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/learning-a-personalized-arxiv-feed</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-02-18</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/ce45ea45-0623-4ffc-94b1-fa3a0eb7a33e/embedding_movement.png</image:loc>
      <image:title>Blog - Learning a Personalized arXiv Feed</image:title>
      <image:caption>Simplified visualization of how a personalized embedding should move to make similarity a useful metric for finding papers that you want to see each day.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/e0c60b45-f16d-44ea-ae19-3749bb8732ab/arxiv_sanity_screenshot.png</image:loc>
      <image:title>Blog - Learning a Personalized arXiv Feed</image:title>
      <image:caption>Screenshot of the recommendation system working to surface relevant papers via a learned personal embedding, with examples of the like/dislike mechanism.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/reading-scaling-monosemanticity-extracting-interpretable-features-from-claude-3-sonnet</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2025-01-17</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/21ac6296-62c9-4330-abd8-97c3ffcae502/scaling_monosemanticity_loss</image:loc>
      <image:title>Blog - Reading: Scaling Monosemanticity -- Extracting Interpretable Features from Claude 3 Sonnet</image:title>
      <image:caption>Loss plot for the sparse autoencoder, taken from https://transformer-circuits.pub/2024/scaling-monosemanticity/index.html</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/b66c67c2-b0c0-45e5-b5ae-366ca2407e3f/scaling-monosemanticity-scaling-laws.png</image:loc>
      <image:title>Blog - Reading: Scaling Monosemanticity -- Extracting Interpretable Features from Claude 3 Sonnet</image:title>
      <image:caption>Scaling laws plots, taken from https://transformer-circuits.pub/2024/scaling-monosemanticity/index.html</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1826a28f-d036-4ecc-9899-69133e2c5095/scaling-monosemanticity-image-ex.png</image:loc>
      <image:title>Blog - Reading: Scaling Monosemanticity -- Extracting Interpretable Features from Claude 3 Sonnet</image:title>
      <image:caption>Top examples for activated "Golden Gate Bridge" Feature. Image cropped from https://transformer-circuits.pub/2024/scaling-monosemanticity/index.html</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/059445ba-99be-4de1-8980-1ef6aa590b0f/scaling-monosemanticity-aiscores.png</image:loc>
      <image:title>Blog - Reading: Scaling Monosemanticity -- Extracting Interpretable Features from Claude 3 Sonnet</image:title>
      <image:caption>Feature interpretability ratings from Claude 3, taken from https://transformer-circuits.pub/2024/scaling-monosemanticity/index.html</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/finding-your-thesis-statement</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-09-16</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/preparing-your-thesis-proposal-presentation</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-09-16</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/responding-when-you-dont-know-something</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-09-16</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/the-thesis-proposal-process-at-georgia-tech</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-09-16</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/choosing-your-thesis-committee</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-09-16</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/writing-your-proposal-document</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-09-16</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/get-early-feedback-on-your-thesis</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-09-16</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/delivering-your-proposal-presentation</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-09-16</lastmod>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tldr-artificial-intelligence-for-aging-and-longevity-research-recent-advances-and-perspective</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2021-01-08</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1610074220814-T008IQPFOFKV6M4GUMY2/medical_ml_pipeline.jpeg</image:loc>
      <image:title>Blog - TL;DR: Artificial intelligence for aging and longevity research: Recent advances and perspective</image:title>
      <image:caption>The end-goal of machine learning in aging and longevity research is to enable researchers to disentangle the overwhelming amount of available data, develop personalized interventions, and more accurately quantify the aging process.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1610071354227-RPS1DB3L5XKP28R02KL4/simulated_aging</image:loc>
      <image:title>Blog - TL;DR: Artificial intelligence for aging and longevity research: Recent advances and perspective</image:title>
      <image:caption>Being able to forecast or forward-and-backward simulate an individual’s age would help to predict the effects of different drugs, lifestyle changes, or vitals on a person’s aging process.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/simplifying-muzero-in-mastering-atari-go-chess-and-shogi-by-planning-with-a-learned-model</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-12-24</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1608762372899-80IQ0BC20EDR1F6DR7Z0/muzero_performance.jpg</image:loc>
      <image:title>Blog - Simplifying MuZero in "Mastering Atari, Go, Chess and Shogi by Planning with a Learned Model"</image:title>
      <image:caption>Orange represents AlphaZero for Go, Chess, and Shogi, and it represents the prior state of the art for Atari. In all cases, MuZero meets or exceeds the baselines, showing that MCTS with a learned, agent-specific simulator is a viable and successful approach to learning high-performing policies in domains without having access to simulators. Image taken from https://arxiv.org/abs/1911.08265</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tldr-imitating-interactive-intelligence</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-12-21</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1608499021092-I5CVTGU846D6532LBS44/playroom.jpg</image:loc>
      <image:title>Blog - TL;DR: Imitating Interactive Intelligence</image:title>
      <image:caption>Figure 1 from the paper itself. Panel A shows a closeup of a Playroom, with two agents looking at a toy helicopter. Panel B shows four examples of the dynamic, random configurations of the Playroom. Panel C shows examples of various objects which could be randomly distributed in a Playroom. Image source: https://arxiv.org/pdf/2012.05672.pdf</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1608505649012-WJFDPGKZBV3XLPP26JX9/imitating-interactive-intelligence-architecture.png</image:loc>
      <image:title>Blog - TL;DR: Imitating Interactive Intelligence</image:title>
      <image:caption>Figure 5 in the original paper. Images are embedded using ResNet, words are tokenized into a 500-word vocabulary, and then these embeddings all go to a transformer. Transformer tokens go into an LSTM, which produces the hidden states for the various policy heads. Image source: https://arxiv.org/pdf/2012.05672.pdf</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/learning-differentiable-decision-trees-for-reinforcement-learning</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-12-21</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1598644376789-N1K6DCRSRCNID42JQ4O4/mdp.png</image:loc>
      <image:title>Blog - Learning Differentiable Decision Trees for Reinforcement Learning: Q-Learning or Policy Gradient?</image:title>
      <image:caption>Our 4-state MDP for evaluating Q-Learning and Policy Gradient in DDTs. The agent receives reward for being in the middle states, so the optimal policy is one that moves left from S3 and right from R2.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1598644930946-APM101X8EIINRZSHTVMI/decision_tree.png</image:loc>
      <image:title>Blog - Learning Differentiable Decision Trees for Reinforcement Learning: Q-Learning or Policy Gradient?</image:title>
      <image:caption>The optimal decision tree for our 4-state MDP, where “True” corresponds with moving left down the tree, and “False” corresponds with moving right down the tree.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1598646656463-9KP1IPGM8XFLW4Y7PYXH/Q_Learning_CPs.png</image:loc>
      <image:title>Blog - Learning Differentiable Decision Trees for Reinforcement Learning: Q-Learning or Policy Gradient?</image:title>
      <image:caption>Critical points for Q-Learning applied to our 1-node DDT. As we can see, Q-Learning exhibits pretty substantial instability for learning this model’s parameters, presenting us with 5 zero-gradient options, only 1 of which is coincident with the optimal setting of 2.5.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1598646972214-QZ8WJ0K8REWX3TAFIVJS/Policy_Gradient_CPs.png</image:loc>
      <image:title>Blog - Learning Differentiable Decision Trees for Reinforcement Learning: Q-Learning or Policy Gradient?</image:title>
      <image:caption>Critical points for Policy Gradient applied to our 1-node DDT. As we can see here, Policy Gradient is significantly more stable for this problem, presenting with only one critical point which is nearly exactly on 2.5.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/interpretable-machine-learning</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-08-31</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1576203917719-OZWE8IU7HWMZYHJV0QLX/cartpole_upright.gif</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>One of my agents on the cart pole problem in the OpenAI Gym</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1582145412020-ZG1TAEIBSY6QJE3UE4PQ/usual+network+visualization</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>Here’s the usual drawing of a neural network. Our inputs are A, B, C, D, and they go to a single layer of hidden units H1, H2, H3, and H4, before being passed to the output units (Left or Right).</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1576204091631-B1YAB7IL1KMPWRBE6HGF/full_cartpole_mlp</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>A drawing of the weights for a two-layer multi-layer perceptron on the cart pole problem. For reference, A is the cart position, B is the cart velocity, C is the pole angle, and D is the pole’s angular velocity.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1576204657090-NT0NSUI2UUD0NYA9U1L7/one-hot+cart+pole+mlp</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>If we make the above problem more “one-hot” it still doesn’t end up in the friendliest of places…</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1582146663023-SPQVFL8F3179VVV2C9Z2/Screen+Shot+2020-02-19+at+4.10.48+PM.png</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>If A is our position, 0 is the center, and left is negative, and the left arrow is “True” and the right arrow is “False”, then this perfectly captures: “If left of center: move right. Otherwise: move left.”</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1582147423159-HJ6XZN1PCA37CWR37LRA/multi-layer+perceptron</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>A multi-layer perceptron which would be rather nightmarish to manually follow through every input to trace out how we got the actions we got.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1582150157473-OE1APMNXXRU1U1IWZF7Q/decision+tree+network+splits</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>Each layer in the network can be seen as a mini-network which makes a single choice: True or False?</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1582150707736-4FZUZB3T1J0XFBKZO1KR/simplifying+a+mini+network</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>We start with a whole single-layer network, then we simplify into a single-variable network, and then further into a single-operation network</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1582318404717-000RYM27CR2552MB0DDJ/simplified+cart+pole</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>Cleaning up the [A, B, C, D] notation, this is one of the decision trees extracted from our approach for the cart pole problem</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1582320140735-XUY43W3FVDJ7VUM4QRWD/simplified+decision+tree+sklearn</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Neural Networks and Differentiable Decision Trees</image:title>
      <image:caption>Suspiciously small decision tree for cart pole using sklearn over a set of demonstrations.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/what-is-interpretability-in-machine-learning</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-01-20</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1577049624980-TTA6SIDFEX9W3V1G0HY5/transparent_algorithm.png</image:loc>
      <image:title>Blog - Interpretable Machine Learning: What is Interpretability? - Transparent Algorithm</image:title>
      <image:caption>Looking at the process for loan approval on the left, we immediately know exactly how it works and what to expect for every new person.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1577049828502-6M8FSW9P9ZVR98GDBQTY/explainable_algorithm.png</image:loc>
      <image:title>Blog - Interpretable Machine Learning: What is Interpretability? - Explainable Algorithm</image:title>
      <image:caption>Explainable algorithms produce both a decision and an explanation for each input, describing how they make each decision, as in the two examples on the left.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/why-should-ml-be-interpretable</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-01-20</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1578498792008-7J7OM0JZQQ53LXXCAQ48/zoolander.gif</image:loc>
      <image:title>Blog - Interpretable Machine Learning: Why?</image:title>
      <image:caption>We often want our machine learning systems to cooperate with us and be more transparent about what’s going on inside.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/winning-brainhack-atl-2019</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2020-01-20</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1575426227009-4CR7LY60X971AFJ2WOAI/good_vs_bad_brains.png</image:loc>
      <image:title>Blog - Winning BrainHack ATL 2019</image:title>
      <image:caption>A well-processed brain</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1575426275031-OG1Y5HH3ITTTAA85T9SV/bad_brain.png</image:loc>
      <image:title>Blog - Winning BrainHack ATL 2019</image:title>
      <image:caption>A poorly-processed brain (note the chopped off edges and awkward zoom)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1575425605376-YWVJNSSUDBIBXVHU1112/resnet3d.jpg</image:loc>
      <image:title>Blog - Winning BrainHack ATL 2019</image:title>
      <image:caption>Our ResNet Architecture, thanks to Zac: http://www.letianchen.me/</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/category/Interpretability</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/category/Paper+Summary</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/category/PhD+Milestones</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/category/Side+Projects</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/tl%3Bdr</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/supervised+learning</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/neuroscience</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/neural+networks</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/brainhack+2019</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/machine+learning</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/Presentation</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/Thesis</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/reinforcement+learning</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/imitation+learning</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/interpretability</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/medical+machine+learning</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/blog/tag/Proposal</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
  </url>
  <url>
    <loc>https://www.andrew-silva.com/home</loc>
    <changefreq>daily</changefreq>
    <priority>1.0</priority>
    <lastmod>2025-08-30</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/1610074412148-YNH2DHFUXBMBOSUDRB4C/my_headshot.jpg</image:loc>
      <image:title>Home</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/36306f31-7a9d-4d2c-95cc-95764d2180be/sdm_high_level.png</image:loc>
      <image:title>Home - Dream2Assist</image:title>
      <image:caption>In Dream2Assist we present a framework that combines a rich world model with an assistive agent, resulting in a robot partner that can infer human intentions and then take actions to help satisfy human objectives. Published Link — Paper Site — Preprint Link</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/ce45ea45-0623-4ffc-94b1-fa3a0eb7a33e/embedding_movement.png</image:loc>
      <image:title>Home - Personalized arXiv Feed</image:title>
      <image:caption>A quick write-up on a personalized paper recommender that I wrote to help researchers keep up with the deluge of arXiv papers everyday! Link to write-up</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/61c227ba-b7b9-495f-ac1a-ed42c68676e9/model_diagram.png</image:loc>
      <image:title>Home - FedPerC</image:title>
      <image:caption>In FedPC, we present a new approach to personalization for federated deployment of large language models, using personal and context embeddings for each user. Published Link</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/711fced0-633b-4864-b56a-5d5695cf30d9/xai_condition.png</image:loc>
      <image:title>Home - Evaluating Explainability</image:title>
      <image:caption>Explainability is increasingly popular and important for machine learning research and deployment, but little work is evaluated with real humans. In this user study, we compare seven explainability conditions with real human users. Published Link — Preprint Link</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/33d3b934-c74b-4e8e-a9cf-234e5c4da5d0/mog_empirical_influence_0.png</image:loc>
      <image:title>Home - CLIF</image:title>
      <image:caption>Influence functions can help to show why a model makes certain decisions, but previously have only been proven for matched train/test objectives. We show that influence functions can work with unsupervised or self-supervised learning. Published Link</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/697fa6f0-aeaf-44d2-ac23-da2949e0d05a/fedembed_pipeline_whitebg.png</image:loc>
      <image:title>Home - FedEmbed</image:title>
      <image:caption>We present a new approach to private, personalized federated learning, leveraging personal embeddings and clustering of users with similar preferences. arXiv Link</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/26f8f7d3-0dd0-410e-8833-d40ef4fb74be/window_sim.gif</image:loc>
      <image:title>Home - LanCon-Learn</image:title>
      <image:caption>We present an approach to language-conditioned multi-task learning, using language-based command embeddings rather than conventional one-hot goal specifications. Published Link — Preprint Link</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/61da1a38-bf25-4b6b-bd57-b6225ee7894d/icassp_architecture.png</image:loc>
      <image:title>Home - Multimodal Punctuation Prediction</image:title>
      <image:caption>Speech-to-text systems often transcribe raw audio into text, but do not always consider the structure of text and how that might affect the meaning. In this work, we explore multi-modality and we introduce context-dropout to improve punctuation prediction from raw audio and text. Published Link — arXiv Link</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/a6265606-f33d-4fe6-91ab-486b875a1989/Screenshot+2022-11-24+at+00.16.41.png</image:loc>
      <image:title>Home - ProLoNets</image:title>
      <image:caption>Reinforcement learning agents waste hundreds of hours simply learning the rules of the world. With ProLoNets, we can hard-code heuristics directly into an RL agent’s neural network weights before training even begins, enabling faster learning in challenging domains. Published Link — arXiv Link — GitHub Link</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5ddb1ffae7b0381e755fcb2c/cc9a38bf-603c-483d-aab4-71e5ac379c4e/discrete_trees_rl.png</image:loc>
      <image:title>Home - Interpretable RL</image:title>
      <image:caption>After learning the weights of a differentiable decision tree for an RL task, it’s possible to convert the network into a discrete, ordinary decision tree while preserving performance. This offers an interpretable, small tree that can be used by the agent to improve human trust and efficiency. Published Link</image:caption>
    </image:image>
  </url>
</urlset>

