From ede83e57b71fa3a22fb5389886b68c270c88d4ba Mon Sep 17 00:00:00 2001 From: Younes Strittmatter Date: Sun, 1 Dec 2024 16:48:12 -0500 Subject: [PATCH] docs: finish up ai notebooks --- .../AI Alignment/Reinforcement Learning.ipynb | 300 ++++++++---------- .../AI Alignment/Trolley Problem.ipynb | 75 +++-- 2 files changed, 179 insertions(+), 196 deletions(-) diff --git a/docs/Use Case Tutorials/AI Alignment/Reinforcement Learning.ipynb b/docs/Use Case Tutorials/AI Alignment/Reinforcement Learning.ipynb index 3297a10..6c194b2 100644 --- a/docs/Use Case Tutorials/AI Alignment/Reinforcement Learning.ipynb +++ b/docs/Use Case Tutorials/AI Alignment/Reinforcement Learning.ipynb @@ -15,12 +15,7 @@ "id": "9b59ed742e18a0f5" }, { - "metadata": { - "ExecuteTime": { - "end_time": "2024-12-01T18:55:31.993583Z", - "start_time": "2024-12-01T18:55:31.966327Z" - } - }, + "metadata": {}, "cell_type": "code", "source": [ "from sweetbean import Block, Experiment\n", @@ -35,7 +30,7 @@ ], "id": "465d8af802c206ec", "outputs": [], - "execution_count": 1 + "execution_count": null }, { "metadata": {}, @@ -48,12 +43,7 @@ "id": "838416866ba97dbd" }, { - "metadata": { - "ExecuteTime": { - "end_time": "2024-12-01T18:55:32.918504Z", - "start_time": "2024-12-01T18:55:32.916393Z" - } - }, + "metadata": {}, "cell_type": "code", "source": [ "timeline = []\n", @@ -67,7 +57,7 @@ ], "id": "98d345f996176fad", "outputs": [], - "execution_count": 2 + "execution_count": null }, { "metadata": {}, @@ -80,12 +70,7 @@ "id": "3496e0ca5f0ee9ce" }, { - "metadata": { - "ExecuteTime": { - "end_time": "2024-12-01T18:55:34.101112Z", - "start_time": "2024-12-01T18:55:34.098339Z" - } - }, + "metadata": {}, "cell_type": "code", "source": [ "bandit_1 = TimelineVariable(\"bandit_1\")\n", @@ -114,7 +99,7 @@ ], "id": "6a1ada08dec8c348", "outputs": [], - "execution_count": 3 + "execution_count": null }, { "metadata": {}, @@ -123,17 +108,12 @@ "id": "65ae4706ed556de1" }, { - "metadata": { - "ExecuteTime": { - "end_time": "2024-12-01T18:45:14.067346Z", - "start_time": "2024-12-01T18:44:53.841870Z" - } - }, + "metadata": {}, "cell_type": "code", "source": "experiment.to_html(\"bandit.html\", path_local_download=\"bandit.json\")", "id": "80fbd261e9ae251a", "outputs": [], - "execution_count": 4 + "execution_count": null }, { "metadata": {}, @@ -145,12 +125,7 @@ "id": "94f8c0dff2ef6200" }, { - "metadata": { - "ExecuteTime": { - "end_time": "2024-12-01T18:55:38.963190Z", - "start_time": "2024-12-01T18:55:38.959118Z" - } - }, + "metadata": {}, "cell_type": "code", "source": [ "import json\n", @@ -163,7 +138,7 @@ ], "id": "55d66aac9b404c84", "outputs": [], - "execution_count": 4 + "execution_count": null }, { "metadata": {}, @@ -178,106 +153,15 @@ "id": "b1332cb3777464bf" }, { - "metadata": { - "ExecuteTime": { - "end_time": "2024-12-01T18:55:40.224472Z", - "start_time": "2024-12-01T18:55:40.219974Z" - } - }, + "metadata": {}, "cell_type": "code", "source": [ "n_responses = get_n_responses(data)\n", - "data_third_response = until_response(data, 3)\n", - "data_third_response" + "data_third_response = until_response(data, 3)" ], "id": "89f6f6a3996ac454", - "outputs": [ - { - "data": { - "text/plain": [ - "[{'rt': 1154,\n", - " 'stimulus': ['
',\n", - " '
'],\n", - " 'response': 0,\n", - " 'trial_duration': None,\n", - " 'duration': None,\n", - " 'html_array': ['
',\n", - " '
'],\n", - " 'values': [10, 0],\n", - " 'time_after_response': 2000,\n", - " 'type': 'jsPsychHtmlChoice',\n", - " 'bandits': [{'color': 'orange', 'value': 10}, {'color': 'blue', 'value': 0}],\n", - " 'value': 10,\n", - " 'score': 10},\n", - " {'rt': None,\n", - " 'stimulus': \"
Score: 10
\",\n", - " 'response': None,\n", - " 'trial_duration': 2000,\n", - " 'duration': 2000,\n", - " 'choices': [],\n", - " 'correct_key': '',\n", - " 'type': 'jsPsychHtmlKeyboardResponse',\n", - " 'text': 'Score: 10',\n", - " 'color': 'white',\n", - " 'correct': False},\n", - " {'rt': 378,\n", - " 'stimulus': ['
',\n", - " '
'],\n", - " 'response': 0,\n", - " 'trial_duration': None,\n", - " 'duration': None,\n", - " 'html_array': ['
',\n", - " '
'],\n", - " 'values': [9, 1],\n", - " 'time_after_response': 2000,\n", - " 'type': 'jsPsychHtmlChoice',\n", - " 'bandits': [{'color': 'orange', 'value': 9}, {'color': 'blue', 'value': 1}],\n", - " 'value': 9,\n", - " 'score': 19},\n", - " {'rt': None,\n", - " 'stimulus': \"
Score: 19
\",\n", - " 'response': None,\n", - " 'trial_duration': 2000,\n", - " 'duration': 2000,\n", - " 'choices': [],\n", - " 'correct_key': '',\n", - " 'type': 'jsPsychHtmlKeyboardResponse',\n", - " 'text': 'Score: 19',\n", - " 'color': 'white',\n", - " 'correct': False},\n", - " {'rt': 360,\n", - " 'stimulus': ['
',\n", - " '
'],\n", - " 'response': 0,\n", - " 'trial_duration': None,\n", - " 'duration': None,\n", - " 'html_array': ['
',\n", - " '
'],\n", - " 'values': [8, 2],\n", - " 'time_after_response': 2000,\n", - " 'type': 'jsPsychHtmlChoice',\n", - " 'bandits': [{'color': 'orange', 'value': 8}, {'color': 'blue', 'value': 2}],\n", - " 'value': 8,\n", - " 'score': 27},\n", - " {'rt': None,\n", - " 'stimulus': \"
Score: 27
\",\n", - " 'response': None,\n", - " 'trial_duration': 2000,\n", - " 'duration': 2000,\n", - " 'choices': [],\n", - " 'correct_key': '',\n", - " 'type': 'jsPsychHtmlKeyboardResponse',\n", - " 'text': 'Score: 27',\n", - " 'color': 'white',\n", - " 'correct': False}]" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "execution_count": 5 + "outputs": [], + "execution_count": null }, { "metadata": {}, @@ -290,54 +174,138 @@ "id": "e7d854441716de90" }, { - "metadata": { - "ExecuteTime": { - "end_time": "2024-12-01T18:55:56.760055Z", - "start_time": "2024-12-01T18:55:41.207411Z" - } - }, + "metadata": {}, "cell_type": "code", - "source": "data_new, _ = experiment.run_on_language(input, data=data_third_response)", + "source": "data_input, _ = experiment.run_on_language(input, data=data_third_response)", "id": "df2b43db78303c0", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "hi\n" - ] - } - ], - "execution_count": 6 + "outputs": [], + "execution_count": null }, { - "metadata": { - "ExecuteTime": { - "end_time": "2024-12-01T18:56:05.300751Z", - "start_time": "2024-12-01T18:56:05.298031Z" - } - }, + "metadata": {}, "cell_type": "code", - "source": "print(data_new)", + "source": "print(data_input)", "id": "9619a73c6f650c7e", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "([{'rt': 1154, 'stimulus': ['
', '
'], 'response': 0, 'trial_duration': None, 'duration': None, 'html_array': ['
', '
'], 'values': [10, 0], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 10}, {'color': 'blue', 'value': 0}], 'value': 10, 'score': 10}, {'rt': None, 'stimulus': \"
Score: 10
\", 'response': None, 'trial_duration': 2000, 'duration': 2000, 'html_array': ['
', '
'], 'values': [10, 0], 'time_after_response': 2000, 'type': 'jsPsychHtmlKeyboardResponse', 'bandits': [{'color': 'orange', 'value': 10}, {'color': 'blue', 'value': 0}], 'value': 10, 'score': 10, 'choices': [], 'correct_key': '', 'text': 'Score: 10', 'color': 'white', 'correct': False}, {'rt': 378, 'stimulus': ['
', '
'], 'response': 0, 'trial_duration': None, 'duration': None, 'html_array': ['
', '
'], 'values': [9, 1], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 9}, {'color': 'blue', 'value': 1}], 'value': 9, 'score': 19, 'choices': [], 'correct_key': '', 'text': 'Score: 10', 'color': 'white', 'correct': False}, {'rt': None, 'stimulus': \"
Score: 19
\", 'response': None, 'trial_duration': 2000, 'duration': 2000, 'html_array': ['
', '
'], 'values': [9, 1], 'time_after_response': 2000, 'type': 'jsPsychHtmlKeyboardResponse', 'bandits': [{'color': 'orange', 'value': 9}, {'color': 'blue', 'value': 1}], 'value': 9, 'score': 19, 'choices': [], 'correct_key': '', 'text': 'Score: 19', 'color': 'white', 'correct': False}, {'rt': 360, 'stimulus': ['
', '
'], 'response': 0, 'trial_duration': None, 'duration': None, 'html_array': ['
', '
'], 'values': [8, 2], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 8}, {'color': 'blue', 'value': 2}], 'value': 8, 'score': 27, 'choices': [], 'correct_key': '', 'text': 'Score: 19', 'color': 'white', 'correct': False}, {'rt': None, 'stimulus': \"
Score: 27
\", 'response': None, 'trial_duration': 2000, 'duration': 2000, 'html_array': ['
', '
'], 'values': [8, 2], 'time_after_response': 2000, 'type': 'jsPsychHtmlKeyboardResponse', 'bandits': [{'color': 'orange', 'value': 8}, {'color': 'blue', 'value': 2}], 'value': 8, 'score': 27, 'choices': [], 'correct_key': '', 'text': 'Score: 27', 'color': 'white', 'correct': False}, {'duration': None, 'html_array': ['
', '
'], 'values': [7, 3], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 7}, {'color': 'blue', 'value': 3}], 'response': -1, 'value': 0}, {'duration': 2000, 'stimulus': \"
Score: 40
\", 'choices': [], 'correct_key': '', 'type': 'jsPsychHtmlKeyboardResponse', 'text': 'Score: 40', 'color': 'white'}, {'duration': None, 'html_array': ['
', '
'], 'values': [6, 4], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 6}, {'color': 'blue', 'value': 4}], 'response': -1, 'value': 0}, {'duration': 2000, 'stimulus': \"
Score: 50
\", 'choices': [], 'correct_key': '', 'type': 'jsPsychHtmlKeyboardResponse', 'text': 'Score: 50', 'color': 'white'}, {'duration': None, 'html_array': ['
', '
'], 'values': [5, 5], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 5}, {'color': 'blue', 'value': 5}], 'response': -1, 'value': 0}, {'duration': 2000, 'stimulus': \"
Score: 60
\", 'choices': [], 'correct_key': '', 'type': 'jsPsychHtmlKeyboardResponse', 'text': 'Score: 60', 'color': 'white'}, {'duration': None, 'html_array': ['
', '
'], 'values': [4, 6], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 4}, {'color': 'blue', 'value': 6}], 'response': -1, 'value': 0}, {'duration': 2000, 'stimulus': \"
Score: 70
\", 'choices': [], 'correct_key': '', 'type': 'jsPsychHtmlKeyboardResponse', 'text': 'Score: 70', 'color': 'white'}, {'duration': None, 'html_array': ['
', '
'], 'values': [3, 7], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 3}, {'color': 'blue', 'value': 7}], 'response': -1, 'value': 0}, {'duration': 2000, 'stimulus': \"
Score: 80
\", 'choices': [], 'correct_key': '', 'type': 'jsPsychHtmlKeyboardResponse', 'text': 'Score: 80', 'color': 'white'}, {'duration': None, 'html_array': ['
', '
'], 'values': [2, 8], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 2}, {'color': 'blue', 'value': 8}], 'response': 0, 'value': 2}, {'duration': 2000, 'stimulus': \"
Score: 90
\", 'choices': [], 'correct_key': '', 'type': 'jsPsychHtmlKeyboardResponse', 'text': 'Score: 90', 'color': 'white'}, {'duration': None, 'html_array': ['
', '
'], 'values': [1, 9], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 1}, {'color': 'blue', 'value': 9}], 'response': 0, 'value': 1}, {'duration': 2000, 'stimulus': \"
Score: 100
\", 'choices': [], 'correct_key': '', 'type': 'jsPsychHtmlKeyboardResponse', 'text': 'Score: 100', 'color': 'white'}, {'duration': None, 'html_array': ['
', '
'], 'values': [0, 10], 'time_after_response': 2000, 'type': 'jsPsychHtmlChoice', 'bandits': [{'color': 'orange', 'value': 0}, {'color': 'blue', 'value': 10}], 'response': 0, 'value': 0}, {'duration': 2000, 'stimulus': \"
Score: 110
\", 'choices': [], 'correct_key': '', 'type': 'jsPsychHtmlKeyboardResponse', 'text': 'Score: 110', 'color': 'white'}], [' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<1>>. The value of the chosen bandit was 10.', 'You see \"Score: 10\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<1>>. The value of the chosen bandit was 9.', 'You see \"Score: 19\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<1>>. The value of the chosen bandit was 8.', 'You see \"Score: 27\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<0>>. The response was invalid.', 'You see \"Score: 40\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<0>>. The response was invalid.', 'You see \"Score: 50\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<0>>. The response was invalid.', 'You see \"Score: 60\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<0>>. The response was invalid.', 'You see \"Score: 70\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<0>>. The response was invalid.', 'You see \"Score: 80\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<01>>. The value of the chosen bandit was 2.', 'You see \"Score: 90\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<1>>. The value of the chosen bandit was 1.', 'You see \"Score: 100\" in \"white\" for 2000ms.', ' You see 2 bandits. Bandit 1 is orange. Bandit 2 is blue. Choose a bandit by naming the number of the bandit. You name <<1>>. The value of the chosen bandit was 0.', 'You see \"Score: 110\" in \"white\" for 2000ms.'])\n" - ] - } + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "Instead of running the experiment manually, we can also use a large language model. In this case, we use [centaur](https://marcelbinz.github.io/centaur/). This model has been trained on similar tasks as the two-armed bandit task. We can use the model to predict the next response and then run the experiment on the model. 
We can then compare the results with the actual data.\n",
     "\n",
     "First, we need to install unsloth:"
    ],
    "id": "b6bd38c2dac7d005"
   },
   {
    "metadata": {},
    "cell_type": "code",
    "source": "!pip install unsloth \"xformers==0.0.28.post2\"",
    "id": "adfc6886e8c95521",
    "outputs": [],
    "execution_count": null
   },
   {
    "metadata": {},
    "cell_type": "markdown",
    "source": "Then, we load the model:",
    "id": "9fbc588d2d05b244"
   },
   {
    "metadata": {},
    "cell_type": "code",
    "source": [
     "from unsloth import FastLanguageModel\n",
     "import transformers\n",
     "\n",
     "model, tokenizer = FastLanguageModel.from_pretrained(\n",
     "    model_name=\"marcelbinz/Llama-3.1-Centaur-8B-adapter\",\n",
     "    max_seq_length=32768,\n",
     "    dtype=None,\n",
     "    load_in_4bit=True,\n",
     ")\n",
     "FastLanguageModel.for_inference(model)\n",
     "\n",
     "pipe = transformers.pipeline(\n",
     "    \"text-generation\",\n",
     "    model=model,\n",
     "    tokenizer=tokenizer,\n",
     "    trust_remote_code=True,\n",
     "    pad_token_id=0,\n",
     "    do_sample=True,\n",
     "    temperature=1.0,\n",
     "    max_new_tokens=1,\n",
     ")"
    ],
    "id": "3ee1c13304ed4c58",
    "outputs": [],
    "execution_count": null
   },
   {
    "metadata": {},
    "cell_type": "markdown",
    "source": "Finally, we create a function to pass into the experiment:",
    "id": "b254acc66b742707"
   },
   {
    "metadata": {},
    "cell_type": "code",
    "source": [
     "def generate(prompt):\n",
     "    return pipe(prompt)[0]['generated_text'][len(prompt):]"
    ],
    "id": "8690a65d182324f1",
    "outputs": [],
    "execution_count": null
   },
   {
    "metadata": {},
    "cell_type": "markdown",
    "source": "We can use this to run the full experiment:",
    "id": "e0a02d78fb442cc4"
   },
   {
    "metadata": {},
    "cell_type": "code",
    "source": "data_centaur_full = experiment.run_on_language(generate)",
    "id": "788d66890f32c460",
    "outputs": [],
    "execution_count": null
   },
   {
    "metadata": {},
    "cell_type": "markdown",
    "source": "Or we can run the experiment starting from the third response:",
    "id": "844e2d2f7d3227f9"
   },
   {
    "metadata": {},
    "cell_type": "code",
    "source": [
     "data_centaur_partial = experiment.run_on_language(generate, data=data_third_response)\n",
     "\n",
     "# Print the data:\n",
     "print(data_centaur_full)\n",
     "print(data_centaur_partial)\n",
     "print(data)"
    ],
    "id": "fab14fd27f8fae83",
    "outputs": [],
    "execution_count": null
   },
   {
    "metadata": {},
    "cell_type": "markdown",
    "source": [
     "# Conclusion\n",
     "\n",
     "This notebook demonstrates how to run a simple bandit task via a website or a language model. The results can, for example, be compared to analyse the language model, or they can be used to finetune the model.\n",
     "\n",
     "SweetBean is also integrated into [AutoRA](https://autoresearch.github.io/autora/), a platform for running the same experiments automatically via Prolific. This allows for automatic data collection and analysis while using large language models for prototyping, for finding good experimental designs, or for automatic finetuning."
+ ], + "id": "259f633781be66ec" } ], "metadata": { diff --git a/docs/Use Case Tutorials/AI Alignment/Trolley Problem.ipynb b/docs/Use Case Tutorials/AI Alignment/Trolley Problem.ipynb index 25ee7b6..1857d97 100644 --- a/docs/Use Case Tutorials/AI Alignment/Trolley Problem.ipynb +++ b/docs/Use Case Tutorials/AI Alignment/Trolley Problem.ipynb @@ -229,7 +229,7 @@ { "metadata": {}, "cell_type": "markdown", - "source": "Here, we test the experiment ourselves. After the experiment is completed, the trollye_problem.json file will be downloaded to the local downloads folder. Copy it to the current working directory to process it further.", + "source": "Here, we test the experiment ourselves. After the experiment is completed, the trolley_problem.json file will be downloaded to the local downloads folder. Copy it to the current working directory to process it further.", "id": "e85006d5e524aa03" }, { @@ -374,7 +374,9 @@ "source": [ "### Run on LLM\n", "\n", - "Finally, we can run the same experiment on a large language model. Here, we use the centauer model " + "Finally, we can run the same experiment on a large language model. Here, we use a [llama model](https://huggingface.co/meta-llama/Llama-3.2-1B). \n", + "\n", + "Before running the following code, make sure you have a huggingface account, have access to the model and are logged in." ], "id": "1af35351511515ca" }, @@ -386,7 +388,19 @@ } }, "cell_type": "code", - "source": "!pip install unsloth", + "source": [ + "import torch\n", + "from transformers import pipeline\n", + "\n", + "model_id = \"meta-llama/Llama-3.2-1B\"\n", + "\n", + "pipe = pipeline(\n", + " \"text-generation\", \n", + " model=model_id, \n", + " torch_dtype=torch.bfloat16, \n", + " device_map=\"auto\"\n", + ")" + ], "id": "5265c0460f0f6ae9", "outputs": [ { @@ -468,6 +482,12 @@ ], "execution_count": 10 }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "We now create a function that we can pass in the experiment:", + "id": "a677cf8d418c9f49" + }, { "metadata": { "ExecuteTime": { @@ -477,31 +497,8 @@ }, "cell_type": "code", "source": [ - "from unsloth import FastLanguageModel\n", - "import transformers\n", - "\n", - "model, tokenizer = FastLanguageModel.from_pretrained(\n", - " model_name=\"marcelbinz/Llama-3.1-Centaur-8B-adapter\",\n", - " max_seq_length=32768,\n", - " dtype=None,\n", - " load_in_4bit=True,\n", - ")\n", - "FastLanguageModel.for_inference(model)\n", - "\n", - "pipe = transformers.pipeline(\n", - " \"text-generation\",\n", - " model=model,\n", - " tokenizer=tokenizer,\n", - " trust_remote_code=True,\n", - " pad_token_id=0,\n", - " do_sample=True,\n", - " temperature=1.0,\n", - " max_new_tokens=1,\n", - ")\n", - "\n", - "\n", - "def generate(input):\n", - " return pipe(input)[0][\"generated_text\"]" + "def generate(prompt):\n", + " return pipe(prompt)" ], "id": "4375700ed216bd3c", "outputs": [ @@ -554,13 +551,31 @@ ], "execution_count": 8 }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Again, we can compare the answers:", + "id": "558629156c9fc08f" + }, { "metadata": {}, "cell_type": "code", "outputs": [], "execution_count": null, - "source": "", - "id": "a074a86481bf678e" + "source": "print(get_qa(data_js, data_ai, 'js', 'ai'))", + "id": "bef92959420320fc" + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "## Conclusion\n", + "\n", + "Data from the language model can be used in multiple ways. 
For example, to analyse and compare answers from a large language model to human responses, or to finetune models.\n",
     "\n",
     "SweetBean is integrated into the [AutoRA ecosystem](https://autoresearch.github.io/autora/). AutoRA can be used to set up SweetBean experiments and automatically recruit participants via [Prolific](https://www.prolific.com/). This makes it seamless to collect data from a large number of participants."
    ],
    "id": "915f095b55f68d7e"
   }
 ],
 "metadata": {