updates to publications and people
tobiasgerstenberg committed Feb 27, 2024
1 parent 21ad998 commit 155609e
Showing 22 changed files with 781 additions and 350 deletions.
26 changes: 13 additions & 13 deletions content/home/people.md
@@ -131,19 +131,6 @@ weight = 3
email = "[email protected]"
description = "I'm passionate with complex systems, human intellectuality, and world-building prospects. Majoring in computer science, I’m excited about AI and interdisciplinary research. Things I enjoy: poetry and guitar, hiking and dancing, and a sip of Earl Grey Tea."

[[member]]
id = "Cindy Xin"
position = "Research Assistant"
email = "[email protected]"
description = "I'm an undergraduate majoring in symbolic systems and philosophy. I am interested in epistemology, philosophy of mind, experimental philosophy, and learning more about how people reason. In my spare time, I enjoy knitting and writing poetry."

[[member]]
id = "Ayesha Khawaja"
position = "Research Assistant"
email = "[email protected]"
github = "ayeshakhawaja"
description = "I’m a senior at Stanford studying Symbolic Systems with a concentration in Artificial Intelligence. I’m interested in using computational methods to understand human and artificial cognition, with a focus on moral judgment and reasoning. Outside of school, I enjoy reading and watching soccer."

[[member]]
id = "Haoran Zhao"
position = "Research Assistant"
@@ -260,6 +247,19 @@ weight = 3
website = "http://kanishkgandhi.com/"
description = "I am interested in reasoning and social cognition; how we can build machines that interact with, learn from and communicate with others."

# [[member]]
# id = "Cindy Xin"
# position = "Research Assistant"
# email = "[email protected]"
# description = "I'm an undergraduate majoring in symbolic systems and philosophy. I am interested in epistemology, philosophy of mind, experimental philosophy, and learning more about how people reason. In my spare time, I enjoy knitting and writing poetry."

# [[member]]
# id = "Ayesha Khawaja"
# position = "Research Assistant"
# email = "[email protected]"
# github = "ayeshakhawaja"
# description = "I’m a senior at Stanford studying Symbolic Systems with a concentration in Artificial Intelligence. I’m interested in using computational methods to understand human and artificial cognition, with a focus on moral judgment and reasoning. Outside of school, I enjoy reading and watching soccer."

# [[member]]
# id = "Elyse Chase"
# position = "Lab Affiliate"
33 changes: 0 additions & 33 deletions content/publication/vodrahalli2021trust.md

This file was deleted.

33 changes: 33 additions & 0 deletions content/publication/vodrahalli2022humans.md
@@ -0,0 +1,33 @@
+++
# 0 -> 'Forthcoming',
# 1 -> 'Preprint',
# 2 -> 'Journal',
# 3 -> 'Conference Proceedings',
# 4 -> 'Book chapter',
# 5 -> 'Thesis'

title = "Do humans trust advice more if it comes from AI? an analysis of human-AI interactions"
date = "2022-01-01"
authors = ["K. Vodrahalli","R. Daneshjou","T. Gerstenberg","J. Zou"]
publication_types = ["3"]
publication_short = "_Proceedings of the 2022 AAAI/ACM Conference on AI, Ethics, and Society_"
publication = "Vodrahalli, K., Daneshjou, R., Gerstenberg, T., Zou, J. (2022). Do humans trust advice more if it comes from AI? An analysis of human-AI interactions. In _Proceedings of the 2022 AAAI/ACM Conference on AI, Ethics, and Society_, 2022 (pp. 763--777)."
abstract = "In decision support applications of AI, the AI algorithm's output is framed as a suggestion to a human user. The user may ignore this advice or take it into consideration to modify their decision. With the increasing prevalence of such human-AI interactions, it is important to understand how users react to AI advice. In this paper, we recruited over 1100 crowdworkers to characterize how humans use AI suggestions relative to equivalent suggestions from a group of peer humans across several experimental settings. We find that participants' beliefs about how human versus AI performance on a given task affects whether they heed the advice. When participants do heed the advice, they use it similarly for human and AI suggestions. Based on these results, we propose a two-stage, 'activation-integration' model for human behavior and use it to characterize the factors that affect human-AI interactions."
image_preview = ""
selected = false
projects = []
url_pdf = "papers/vodrahalli2022humans.pdf"
url_preprint = "https://arxiv.org/abs/2107.07015"
url_code = ""
url_dataset = ""
url_slides = ""
url_video = ""
url_poster = ""
url_source = ""
url_custom = [{name = "Github", url = "https://github.com/kailas-v/human-ai-interactions"}]
math = true
highlight = true
[header]
# image = "publications/vodrahalli2022humans.png"
caption = ""
+++
8 changes: 4 additions & 4 deletions content/publication/vodrahalli2022uncalibrated.md
@@ -6,12 +6,12 @@
# 4 -> 'Book chapter',
# 5 -> 'Thesis'

title = "Uncalibrated Models Can Improve Human-AI Collaboration"
date = "2022-03-08"
title = "Uncalibrated models can improve human-AI collaboration"
date = "2022-01-01"
authors = ["K. Vodrahalli","T. Gerstenberg","J. Zou"]
publication_types = ["3"]
publication_short = "_NeurIPS_ (oral presentation)"
publication = "Vodrahalli, K., Gerstenberg, T. & Zou, J. (2022). Uncalibrated Models Can Improve Human-AI Collaboration. NeurIPS."
publication_short = "_Advances in Neural Information Processing Systems_"
publication = "Vodrahalli, K., Gerstenberg, T., Zou, J. (2022). Uncalibrated models can improve human-AI collaboration. In _Advances in Neural Information Processing Systems_, 35, 4004--4016."
abstract = "In many practical applications of AI, an AI model is used as a decision aid for human users. The AI provides advice that a human (sometimes) incorporates into their decision-making process. The AI advice is often presented with some measure of 'confidence' that the human can use to calibrate how much they depend on or trust the advice. In this paper, we present an initial exploration that suggests showing AI models as more confident than they actually are, even when the original AI is well-calibrated, can improve human-AI performance (measured as the accuracy and confidence of the human's final prediction after seeing the AI advice). We first train a model to predict human incorporation of AI advice using data from thousands of human-AI interactions. This enables us to explicitly estimate how to transform the AI's prediction confidence, making the AI uncalibrated, in order to improve the final human prediction. We empirically validate our results across four different tasks---dealing with images, text and tabular data---involving hundreds of human participants. We further support our findings with simulation analysis. Our findings suggest the importance of jointly optimizing the human-AI system as opposed to the standard paradigm of optimizing the AI model alone."
image_preview = ""
selected = false
46 changes: 22 additions & 24 deletions docs/bibtex/cic_papers.bib
@@ -1,13 +1,34 @@
%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
%% Created for Tobias Gerstenberg at 2024-01-25 13:55:46 -0800
%% Created for Tobias Gerstenberg at 2024-02-26 20:03:42 -0800
%% Saved with string encoding Unicode (UTF-8)
@inproceedings{vodrahalli2022humans,
abstract = {In decision support applications of AI, the AI algorithm's output is framed as a suggestion to a human user. The user may ignore this advice or take it into consideration to modify their decision. With the increasing prevalence of such human-AI interactions, it is important to understand how users react to AI advice. In this paper, we recruited over 1100 crowdworkers to characterize how humans use AI suggestions relative to equivalent suggestions from a group of peer humans across several experimental settings. We find that participants' beliefs about how human versus AI performance on a given task affects whether they heed the advice. When participants do heed the advice, they use it similarly for human and AI suggestions. Based on these results, we propose a two-stage, 'activation-integration' model for human behavior and use it to characterize the factors that affect human-AI interactions.},
author = {Vodrahalli, Kailas and Daneshjou, Roxana and Gerstenberg, Tobias and Zou, James},
booktitle = {Proceedings of the 2022 AAAI/ACM Conference on AI, Ethics, and Society},
date-added = {2024-02-26 20:00:46 -0800},
date-modified = {2024-02-26 20:02:18 -0800},
pages = {763--777},
title = {{Do humans trust advice more if it comes from AI? an analysis of human-AI interactions}},
year = {2022}}

@article{vodrahalli2022uncalibrated,
abstract = {In many practical applications of AI, an AI model is used as a decision aid for human users. The AI provides advice that a human (sometimes) incorporates into their decision-making process. The AI advice is often presented with some measure of 'confidence' that the human can use to calibrate how much they depend on or trust the advice. In this paper, we present an initial exploration that suggests showing AI models as more confident than they actually are, even when the original AI is well-calibrated, can improve human-AI performance (measured as the accuracy and confidence of the human's final prediction after seeing the AI advice). We first train a model to predict human incorporation of AI advice using data from thousands of human-AI interactions. This enables us to explicitly estimate how to transform the AI's prediction confidence, making the AI uncalibrated, in order to improve the final human prediction. We empirically validate our results across four different tasks---dealing with images, text and tabular data---involving hundreds of human participants. We further support our findings with simulation analysis. Our findings suggest the importance of jointly optimizing the human-AI system as opposed to the standard paradigm of optimizing the AI model alone.},
author = {Vodrahalli, Kailas and Gerstenberg, Tobias and Zou, James Y},
date-added = {2024-02-26 20:00:46 -0800},
date-modified = {2024-02-26 20:03:41 -0800},
journal = {Advances in Neural Information Processing Systems},
pages = {4004--4016},
title = {{Uncalibrated models can improve human-AI collaboration}},
volume = {35},
year = {2022}}

@article{prinzing2024purpose,
abstract = {People attribute purposes to all kinds of things, from artifacts to body parts to human lives. This invites the question of whether the cognitive processes underlying purpose attributions are domain-general or domain-specific. In three studies (total N = 13,720 observations from N = 3,430 participants), we examined the effects of four factors on purpose attributions in six domains: artifacts, social institutions, animals, body parts, sacred objects, and human lives. Study 1 found that original design (i.e., what something was originally created for) and present practice (i.e., how people currently use it) each influence purpose attributions in all six domains, though their relative importance differs substantially across domains. Study 2 found that effectiveness (i.e., whether something is good at achieving a goal) and morality (i.e., whether the goal is good) each influences purpose attributions, and in the same way across domains. Finally, Study 3 revealed that, within domains, the impacts of original design and present practice depend on which entity plays the role of original designer versus present user, suggesting that the apparent inter-domain differences in the impacts of these two factors might have been illusory. Overall, there are at least some respects in which purpose attributions are strikingly similar across what might seem to be very different domains.},
author = {Michael Prinzing and David Rose and Siying Zhang and Eric Tu and Abigail Concha and Michael Rea and Jonathan Schaffer and Tobias Gerstenberg and Joshua Knobe},
@@ -340,17 +361,6 @@ @incollection{smith2023probabilistic
title = {Probabilistic models of physical reasoning},
year = {{in press}}}

@article{vodrahalli2022uncalibrated,
abstract = {In many practical applications of AI, an AI model is used as a decision aid for human users. The AI provides advice that a human (sometimes) incorporates into their decision-making process. The AI advice is often presented with some measure of "confidence" that the human can use to calibrate how much they depend on or trust the advice. In this paper, we demonstrate that presenting AI models as more confident than they actually are, even when the original AI is well-calibrated, can improve human-AI performance (measured as the accuracy and confidence of the human's final prediction after seeing the AI advice). We first learn a model for how humans incorporate AI advice using data from thousands of human interactions. This enables us to explicitly estimate how to transform the AI's prediction confidence, making the AI uncalibrated, in order to improve the final human prediction. We empirically validate our results across four different tasks -- dealing with images, text and tabular data -- involving hundreds of human participants. We further support our findings with simulation analysis. Our findings suggest the importance of and a framework for jointly optimizing the human-AI system as opposed to the standard paradigm of optimizing the AI model alone.},
author = {Kailas Vodrahalli and Tobias Gerstenberg and James Zou},
date-added = {2022-03-08 23:02:38 -0800},
date-modified = {2022-03-08 23:02:38 -0800},
journal = {arXiv},
title = {Uncalibrated Models Can Improve Human-AI Collaboration},
url = {https://arxiv.org/abs/2202.05983},
year = {2022},
bdsk-url-1 = {https://arxiv.org/abs/2202.05983}}

@article{zhou2023jenga,
abstract = {From building towers to picking an orange from a stack of fruit, assessing support is critical for successfully interacting with the physical world. But how do people determine whether one object supports another? In this paper we develop the Counterfactual Simulation Model (CSM) of physical support. The CSM predicts that people judge physical support by mentally simulating what would happen to a scene if the object of interest were removed. Three experiments test the model by asking one group of participants to judge what would happen to a tower if one of the blocks were removed, and another group of participants how responsible that block was for the tower's stability. The CSM accurately captures participants' predictions about what would happen by running noisy simulations that incorporate different sources of uncertainty. Participants' responsibility judgments are closely related to counterfactual predictions: the more likely the tower would be predicted to fall if a block were removed, the more responsible this block was judged for the tower's stability. By construing physical support as preventing from falling, the CSM provides a unified account across dynamic and static physical scenes of how causal judgments arise from the process of counterfactual simulation.},
author = {Liang Zhou and Kevin A. Smith and Joshua B. Tenenbaum and Tobias Gerstenberg},
@@ -362,18 +372,6 @@ @article{zhou2023jenga
year = {2023},
bdsk-url-1 = {https://psyarxiv.com/4a5uh}}

@article{vodrahalli2021trust,
abstract = {In many applications of AI, the algorithm's output is framed as a suggestion to a human user. The user may ignore the advice or take it into consideration to modify his/her decisions. With the increasing prevalence of such human-AI interactions, it is important to understand how users act (or do not act) upon AI advice, and how users regard advice differently if they believe the advice come from an ``AI'' versus another human. In this paper, we characterize how humans use AI suggestions relative to equivalent suggestions from a group of peer humans across several experimental settings. We find that participants' beliefs about the human versus AI performance on a given task affects whether or not they heed the advice. When participants decide to use the advice, they do so similarly for human and AI suggestions. These results provide insights into factors that affect human-AI interactions.},
author = {Vodrahalli, Kailas and Gerstenberg, Tobias and Zou, James},
date-added = {2021-07-21 21:46:09 -0700},
date-modified = {2021-07-21 21:48:05 -0700},
journal = {arXiv},
title = {{Do humans trust advice more if it comes from AI? An analysis of Human-AI interactions}},
url = {http://arxiv.org/abs/2107.07015},
urldate = {2021-07-22},
year = {2021},
bdsk-url-1 = {http://arxiv.org/abs/2107.07015}}

@article{gerstenberg2022hypothetical,
abstract = {How do people make causal judgments? In this paper, I show that counterfactuals are necessary for explaining causal judgments about events, and that hypotheticals don't suffice. In two experiments, participants viewed video clips of dynamic interactions between billiard balls. In Experiment 1, participants either made hypothetical judgments about whether ball B would go through the gate if ball A weren't present in the scene, or counterfactual judgments about whether ball B would have gone through the gate if ball A hadn't been present. Because the clips featured a block in front of the gate that sometimes moved and sometimes stayed put, hypothetical and counterfactual judgments came apart. A computational model that evaluates hypotheticals and counterfactuals by running noisy physical simulations accurately captured participants' judgments. In Experiment 2, participants judged whether ball A caused ball B to go through the gate. The results showed a tight fit between counterfactual and causal judgments, whereas hypothetical judgments didn't predict causal judgments. I discuss the implications of this work for theories of causality, and for studying the development of counterfactual thinking in children.},
author = {Tobias Gerstenberg},