diff --git a/content/home/people.md b/content/home/people.md
index 9b0a0fb..a2e8434 100644
--- a/content/home/people.md
+++ b/content/home/people.md
@@ -19,24 +19,14 @@ weight = 3
id = "Tobias Gerstenberg"
position = "Principal Investigator"
email = "gerstenberg@stanford.edu"
- twitter = "tobigerstenberg"
+ twitter = "tobigerstenberg"
+# bluesky = "tobigerstenberg.bsky.social"
github = "tobiasgerstenberg"
scholar = "citations?user=d0TfP8EAAAAJ&hl=en&oi=ao"
cv = "tobias_gerstenberg.pdf"
website = "tobias_gerstenberg"
description = "I am interested in how people hold others responsible, how these judgments are grounded in causal representations of the world, and supported by counterfactual simulations. I also like to drink tea."
-[[member]]
- id = "Jan-Philipp Fränken"
- position = "Postdoctoral Researcher"
- image = "jan-philipp_franken.jpg"
-# because special characters have trouble rendering on OS X
- email = "janphilipp.franken@gmail.com"
- github = "janphilippfranken"
- scholar = "citations?user=s2omqQcAAAAJ&hl=en"
- website = "https://janphilippfranken.github.io/"
- description = "I'm interested in representation learning, causal inference, and theory of mind reasoning. I currently work on [MARPLE](https://hai.stanford.edu/2022-hoffman-yee-grant-recipients): Explaining what happened through multimodal simulation. I like to drink hot chocolate."
-
[[member]]
id = "Erik Brockbank"
position = "Postdoctoral Researcher"
@@ -96,12 +86,6 @@ weight = 3
twitter = "verona_teo"
description = "I'm interested in computational and behavioral models of cognition, including social, moral, and causal reasoning. I did my undergraduate studies in data science and philosophy. I like coffee."
-[[member]]
- id = "Addison Jadwin"
- position = "Research Assistant"
- email = "ajadwin@stanford.edu"
- description = "I'm a junior at Stanford majoring in symbolic systems. I'm interested in understanding cognition through computational models. Outside of this I enjoy playing viola and taking care of my fish and corals!"
-
[[member]]
id = "Ricky Ma"
position = "Research Assistant"
@@ -118,12 +102,6 @@ weight = 3
github = "shruti-sridhar"
description = "I am a sophomore at Stanford looking to major in Computer Science on the AI track. I am interested in using computational models to explore causality in social settings. Outside of that, I enjoy dancing and amateur vegan baking."
-[[member]]
- id = "Siying Zhang"
- position = "Research Assistant"
- email = "syzhang6@stanford.edu"
- description = "I'm a dutiful questioner and an adroit researcher. I have a background in education and second language acquisition. I'm interested in how language affects social category development as well as perceived characteristics of individual social group members. I am also interested in the psychological and sociological disciplines that interact with each other and how the information I've learned from both perspectives are related together. So far at Stanford, I'm working on a couple of projects on causal judgements and shape bias. Ultimately, I'm planning to become a human factors researcher or UX research scientist. I love to do high intensity workouts followed by vanilla sweet cream cold brew coffee, or maybe coffee first!"
-
[[member]]
id = "Sunny Yu"
position = "Research Assistant"
@@ -524,8 +502,31 @@ weight = 3
# website = "https://haoranzhao419.github.io/"
# description = "I am interested in understanding how effectively language models can perform reasoning and comprehend commonsense and factual knowledge. Subsequently, with a better understanding of LLM's cognitive abilities, I hope to build more cognitive-feasible and efficient language models at small scales. In my free time, I like running and sailing. I like lemonade."
+# [[member]]
+# id = "Jan-Philipp Fränken"
+# position = "Postdoctoral Researcher"
+# image = "jan-philipp_franken.jpg"
+# # because special characters have trouble rendering on OS X
+# email = "janphilipp.franken@gmail.com"
+# github = "janphilippfranken"
+# scholar = "citations?user=s2omqQcAAAAJ&hl=en"
+# website = "https://janphilippfranken.github.io/"
+# description = "I'm interested in representation learning, causal inference, and theory of mind reasoning. I currently work on [MARPLE](https://hai.stanford.edu/2022-hoffman-yee-grant-recipients): Explaining what happened through multimodal simulation. I like to drink hot chocolate."
+
+# [[member]]
+# id = "Siying Zhang"
+# position = "Research Assistant"
+# email = "syzhang6@stanford.edu"
+# description = "I'm a dutiful questioner and an adroit researcher. I have a background in education and second language acquisition. I'm interested in how language affects social category development as well as perceived characteristics of individual social group members. I am also interested in the psychological and sociological disciplines that interact with each other and how the information I've learned from both perspectives are related together. So far at Stanford, I'm working on a couple of projects on causal judgements and shape bias. Ultimately, I'm planning to become a human factors researcher or UX research scientist. I love to do high intensity workouts followed by vanilla sweet cream cold brew coffee, or maybe coffee first!"
+
+# [[member]]
+# id = "Addison Jadwin"
+# position = "Research Assistant"
+# email = "ajadwin@stanford.edu"
+# description = "I'm a junior at Stanford majoring in symbolic systems. I'm interested in understanding cognition through computational models. Outside of this I enjoy playing viola and taking care of my fish and corals!"
+
[[member]]
id = "Alumni"
-description = "
Lara Kirfel (postdoc): Now Postdoctoral Fellow at the Center for Humans and Machines, MPI Berlin.
Damini Kusum (research assistant): Now MSc student at Carnegie Mellon University.
Joseph Outa (research assistant): Now PhD student at Johns Hopkins University.
Zach Davis (postdoc): Now research scientist at Facebook Reality Labs.
Bryce Linford (research assistant): Next step 👣 PhD student at UCLA.
Antonia Langenhoff (research assistant): Next step 👣 PhD student at UC Berkeley.
"
+++
diff --git a/content/publication/beller2024causation.md b/content/publication/beller2024causation.md
index 9e7584f..b350dba 100644
--- a/content/publication/beller2024causation.md
+++ b/content/publication/beller2024causation.md
@@ -16,7 +16,7 @@ abstract = "The words we use to describe what happened shape what comes to a lis
image_preview = ""
selected = false
projects = []
-#url_pdf = "papers/beller2023language.pdf"
+url_pdf = "papers/beller2024causation.pdf"
url_preprint = "https://psyarxiv.com/xv8hf"
url_code = ""
url_dataset = ""
diff --git a/content/publication/du2024robotic.md b/content/publication/du2024robotic.md
index c738130..a9dd257 100644
--- a/content/publication/du2024robotic.md
+++ b/content/publication/du2024robotic.md
@@ -16,7 +16,7 @@ abstract = "When faced with a novel scenario, it can be hard to succeed on the f
image_preview = ""
selected = false
projects = []
-#url_pdf = "papers/du2024robotic.pdf"
+url_pdf = "papers/du2024robotic.pdf"
url_preprint = "https://arxiv.org/abs/2406.15917"
url_code = ""
url_dataset = ""
diff --git a/content/publication/johnson2024wise.md b/content/publication/johnson2024wise.md
index 8cc1875..4639c0b 100644
--- a/content/publication/johnson2024wise.md
+++ b/content/publication/johnson2024wise.md
@@ -16,7 +16,7 @@ abstract = "Recent advances in artificial intelligence (AI) have produced system
image_preview = ""
selected = false
projects = []
-#url_pdf = "papers/johnson2024wise.pdf"
+url_pdf = "papers/johnson2024wise.pdf"
url_preprint = "https://arxiv.org/abs/2411.02478"
url_code = ""
url_dataset = ""
diff --git a/content/publication/prinzing2024purpose.md b/content/publication/prinzing2024purpose.md
index ef5dec3..a5e8df8 100644
--- a/content/publication/prinzing2024purpose.md
+++ b/content/publication/prinzing2024purpose.md
@@ -16,10 +16,10 @@ abstract = "People attribute purposes in both mundane and profound ways—such a
image_preview = ""
selected = false
projects = []
-#url_pdf = "papers/prinzing2024purpose.pdf"
+url_pdf = "papers/prinzing2024purpose.pdf"
url_preprint = "https://osf.io/7enkr"
url_code = ""
-url_dataset = ""
+url_dataset = "https://osf.io/uj7vf/"
url_slides = ""
url_video = ""
url_poster = ""
diff --git a/content/publication/xiang2024handicapping.md b/content/publication/xiang2024handicapping.md
new file mode 100644
index 0000000..674a814
--- /dev/null
+++ b/content/publication/xiang2024handicapping.md
@@ -0,0 +1,33 @@
++++
+# 0 -> 'Forthcoming',
+# 1 -> 'Preprint',
+# 2 -> 'Journal',
+# 3 -> 'Conference Proceedings',
+# 4 -> 'Book chapter',
+# 5 -> 'Thesis'
+
+title = "A signaling theory of self-handicapping"
+date = "2024-11-24"
+authors = ["Y. Xiang","S. J Gershman","T. Gerstenberg"]
+publication_types = ["1"]
+publication_short = "_PsyArXiv_"
+publication = "Xiang, Y., Gershman*, S. J., Gerstenberg*, T. (2024). A signaling theory of self-handicapping. _PsyArXiv_."
+abstract = "People use various strategies to bolster the perception of their competence. One strategy is self-handicapping, by which people deliberately impede their performance in order to protect or enhance perceived competence. Despite much prior research, it is unclear why, when, and how self-handicapping occurs. We develop a formal theory that chooses the optimal degree of selfhandicapping based on its anticipated performance and signaling effects. We test the theory's predictions in two experiments (𝑁 = 400), showing that self-handicapping occurs more often when it is unlikely to affect the outcome and when it increases the perceived competence in the eyes of a naive observer. With sophisticated observers (who consider whether a person chooses to self-handicap), self-handicapping is less effective when followed by failure. We show that the theory also explains the findings of several past studies. By offering a systematic explanation of self-handicapping, the theory lays the groundwork for developing effective interventions."
+image_preview = ""
+selected = false
+projects = []
+url_pdf = "papers/xiang2024handicapping.pdf"
+url_preprint = "https://osf.io/preprints/psyarxiv/84tvm"
+url_code = ""
+url_dataset = ""
+url_slides = ""
+url_video = ""
+url_poster = ""
+url_source = ""
+url_custom = [{name = "Github", url = "https://github.com/yyyxiang/self-handicapping"}]
+math = true
+highlight = true
+[header]
+# image = "publications/xiang2024handicapping.png"
+caption = ""
++++
\ No newline at end of file
diff --git a/docs/bibtex/cic_papers.bib b/docs/bibtex/cic_papers.bib
index 4cbfcef..a06dc96 100644
--- a/docs/bibtex/cic_papers.bib
+++ b/docs/bibtex/cic_papers.bib
@@ -1,13 +1,23 @@
%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
-%% Created for Tobias Gerstenberg at 2024-11-06 11:16:38 -0600
+%% Created for Tobias Gerstenberg at 2024-11-24 10:45:20 -0800
%% Saved with string encoding Unicode (UTF-8)
+@article{xiang2024handicapping,
+ abstract = {People use various strategies to bolster the perception of their competence. One strategy is self-handicapping, by which people deliberately impede their performance in order to protect or enhance perceived competence. Despite much prior research, it is unclear why, when, and how self-handicapping occurs. We develop a formal theory that chooses the optimal degree of self-handicapping based on its anticipated performance and signaling effects. We test the theory's predictions in two experiments (𝑁 = 400), showing that self-handicapping occurs more often when it is unlikely to affect the outcome and when it increases the perceived competence in the eyes of a naive observer. With sophisticated observers (who consider whether a person chooses to self-handicap), self-handicapping is less effective when followed by failure. We show that the theory also explains the findings of several past studies. By offering a systematic explanation of self-handicapping, the theory lays the groundwork for developing effective interventions.},
+ author = {Xiang, Yang and Gershman, Samuel J and Gerstenberg, Tobias},
+ date-added = {2024-11-24 10:45:12 -0800},
+ date-modified = {2024-11-24 10:45:12 -0800},
+ journal = {PsyArXiv},
+ note = {https://osf.io/preprints/psyarxiv/84tvm},
+ title = {A signaling theory of self-handicapping},
+ year = {2024}}
+
@article{johnson2024wise,
abstract = {Recent advances in artificial intelligence (AI) have produced systems capable of increasingly sophisticated performance on cognitive tasks. However, AI systems still struggle in critical ways: unpredictable and novel environments (robustness), lack transparency in their reasoning (explainability), face challenges in communication and commitment (cooperation), and pose risks due to potential harmful actions (safety). We argue that these shortcomings stem from one overarching failure: AI systems lack wisdom. Drawing from cognitive and social sciences, we define wisdom as the ability to navigate intractable problems---those that are ambiguous, radically uncertain, novel, chaotic, or computationally explosive---through effective task-level and metacognitive strategies. While AI research has focused on task-level strategies, metacognition---the ability to reflect on and regulate one's thought processes---is underdeveloped in AI systems. In humans, metacognitive strategies such as recognizing the limits of one's knowledge, considering diverse perspectives, and adapting to context are essential for wise decision-making. We propose that integrating metacognitive capabilities into AI systems is crucial for enhancing their robustness, explainability, cooperation, and safety. By focusing on developing wise AI, we suggest an alternative to aligning AI with specific human values---a task fraught with conceptual and practical difficulties. Instead, wise AI systems can thoughtfully navigate complex situations, account for diverse human values, and avoid harmful actions. We discuss potential approaches to building wise AI, including benchmarking metacognitive abilities and training AI systems to employ wise reasoning. Prioritizing metacognition in AI research will lead to systems that act not only intelligently but also wisely in complex, real-world situations.},
author = {Johnson, Samuel G B and Karimi, Amir-Hossein and Bengio, Yoshua and Chater, Nick and Gerstenberg, Tobias and Larson, Kate and Levine, Sydney and Mitchell, Melanie and Sch{\"o}lkopf, Bernhard and Grossmann, Igor},
{{if .twitter}}
- {{end}} {{if .github}}
+ {{end}} {{if .bluesky}}
+
+
+ {{end}}{{if .github}}
{{end}} {{if .scholar}}
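
The hunk above appears to have lost its "diff --git" file header and "@@" marker, and the markup on its two added lines did not survive extraction. The {{if .bluesky}} guard pairs with the commented bluesky field added in content/home/people.md, so it presumably belongs to the Hugo partial that renders each member's social icons. A minimal sketch of what the stripped lines might have contained, assuming a Font Awesome brand icon and the standard bsky.app profile URL (the anchor attributes and icon classes are guesses, not the site's actual markup):

    {{if .bluesky}}
    <a href="https://bsky.app/profile/{{ .bluesky }}" target="_blank" rel="noopener">
      <!-- icon class is an assumption; Font Awesome 6.5+ ships fa-bluesky as a brand icon -->
      <i class="fa-brands fa-bluesky"></i>
    </a>
    {{end}}

The https://bsky.app/profile/<handle> pattern is how Bluesky exposes public profiles, mirroring the twitter.com/<handle> links that the neighboring {{if .twitter}} branch presumably generates.
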
diff --git a/static/bibtex/cic_papers.bib b/static/bibtex/cic_papers.bib
index 4cbfcef..a06dc96 100644
--- a/static/bibtex/cic_papers.bib
+++ b/static/bibtex/cic_papers.bib
@@ -1,13 +1,23 @@
%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
-%% Created for Tobias Gerstenberg at 2024-11-06 11:16:38 -0600
+%% Created for Tobias Gerstenberg at 2024-11-24 10:45:20 -0800
%% Saved with string encoding Unicode (UTF-8)
+@article{xiang2024handicapping,
+ abstract = {People use various strategies to bolster the perception of their competence. One strategy is self-handicapping, by which people deliberately impede their performance in order to protect or enhance perceived competence. Despite much prior research, it is unclear why, when, and how self-handicapping occurs. We develop a formal theory that chooses the optimal degree of self-handicapping based on its anticipated performance and signaling effects. We test the theory's predictions in two experiments (𝑁 = 400), showing that self-handicapping occurs more often when it is unlikely to affect the outcome and when it increases the perceived competence in the eyes of a naive observer. With sophisticated observers (who consider whether a person chooses to self-handicap), self-handicapping is less effective when followed by failure. We show that the theory also explains the findings of several past studies. By offering a systematic explanation of self-handicapping, the theory lays the groundwork for developing effective interventions.},
+ author = {Xiang, Yang and Gershman, Samuel J and Gerstenberg, Tobias},
+ date-added = {2024-11-24 10:45:12 -0800},
+ date-modified = {2024-11-24 10:45:12 -0800},
+ journal = {PsyArXiv},
+ note = {https://osf.io/preprints/psyarxiv/84tvm},
+ title = {A signaling theory of self-handicapping},
+ year = {2024}}
+
@article{johnson2024wise,
abstract = {Recent advances in artificial intelligence (AI) have produced systems capable of increasingly sophisticated performance on cognitive tasks. However, AI systems still struggle in critical ways: unpredictable and novel environments (robustness), lack transparency in their reasoning (explainability), face challenges in communication and commitment (cooperation), and pose risks due to potential harmful actions (safety). We argue that these shortcomings stem from one overarching failure: AI systems lack wisdom. Drawing from cognitive and social sciences, we define wisdom as the ability to navigate intractable problems---those that are ambiguous, radically uncertain, novel, chaotic, or computationally explosive---through effective task-level and metacognitive strategies. While AI research has focused on task-level strategies, metacognition---the ability to reflect on and regulate one's thought processes---is underdeveloped in AI systems. In humans, metacognitive strategies such as recognizing the limits of one's knowledge, considering diverse perspectives, and adapting to context are essential for wise decision-making. We propose that integrating metacognitive capabilities into AI systems is crucial for enhancing their robustness, explainability, cooperation, and safety. By focusing on developing wise AI, we suggest an alternative to aligning AI with specific human values---a task fraught with conceptual and practical difficulties. Instead, wise AI systems can thoughtfully navigate complex situations, account for diverse human values, and avoid harmful actions. We discuss potential approaches to building wise AI, including benchmarking metacognitive abilities and training AI systems to employ wise reasoning. Prioritizing metacognition in AI research will lead to systems that act not only intelligently but also wisely in complex, real-world situations.},
author = {Johnson, Samuel G B and Karimi, Amir-Hossein and Bengio, Yoshua and Chater, Nick and Gerstenberg, Tobias and Larson, Kate and Levine, Sydney and Mitchell, Melanie and Sch{\"o}lkopf, Bernhard and Grossmann, Igor},
diff --git a/static/papers/beller2024causation.pdf b/static/papers/beller2024causation.pdf
new file mode 100644
index 0000000..4a58f33
Binary files /dev/null and b/static/papers/beller2024causation.pdf differ
diff --git a/static/papers/du2024robotic.pdf b/static/papers/du2024robotic.pdf
new file mode 100644
index 0000000..eca4a5e
Binary files /dev/null and b/static/papers/du2024robotic.pdf differ
diff --git a/static/papers/johnson2024wise.pdf b/static/papers/johnson2024wise.pdf
new file mode 100644
index 0000000..8b6867b
Binary files /dev/null and b/static/papers/johnson2024wise.pdf differ
diff --git a/static/papers/prinzing2024purpose.pdf b/static/papers/prinzing2024purpose.pdf
new file mode 100644
index 0000000..c8739a7
Binary files /dev/null and b/static/papers/prinzing2024purpose.pdf differ
diff --git a/static/papers/xiang2024handicapping.pdf b/static/papers/xiang2024handicapping.pdf
new file mode 100644
index 0000000..4ccc601
Binary files /dev/null and b/static/papers/xiang2024handicapping.pdf differ