diff --git a/docs/docs/integrations/chat/ollama.ipynb b/docs/docs/integrations/chat/ollama.ipynb
index e34977696cff1..99b6fba3a0ff1 100644
--- a/docs/docs/integrations/chat/ollama.ipynb
+++ b/docs/docs/integrations/chat/ollama.ipynb
@@ -246,6 +246,9 @@
"\n",
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
"\n",
+ "Browse the full set of available versions for a model on its `tags` page, e.g. the [llava tags page](https://ollama.ai/library/llava/tags).\n",
+ "\n",
+ "Download the desired LLM:\n",
"```\n",
"ollama pull bakllava\n",
"```\n",
@@ -255,40 +258,31 @@
},
{
"cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Requirement already satisfied: pillow in /Users/jacoblee/langchain/langchain/libs/langchain/.venv/lib/python3.10/site-packages (10.1.0)\n",
- "\u001b[33mWARNING: You are using pip version 22.0.4; however, version 23.3.1 is available.\n",
- "You should consider upgrading via the '/Users/jacoblee/langchain/langchain/libs/langchain/.venv/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
- "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
- ]
- }
- ],
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
"source": [
"%pip install pillow"
]
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 1,
"metadata": {},
"outputs": [
{
- "ename": "FileNotFoundError",
- "evalue": "[Errno 2] No such file or directory: '/Users/rlm/Desktop/Eval_Sets/multi_modal_presentations/DDOG/img_23.jpg'",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
- "\u001b[1;32m/Users/jacoblee/langchain/langchain/docs/docs/integrations/chat/ollama.ipynb Cell 12\u001b[0m line \u001b[0;36m3\n\u001b[1;32m 31\u001b[0m display(HTML(image_html))\n\u001b[1;32m 34\u001b[0m file_path \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39m/Users/rlm/Desktop/Eval_Sets/multi_modal_presentations/DDOG/img_23.jpg\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[0;32m---> 35\u001b[0m pil_image \u001b[39m=\u001b[39m Image\u001b[39m.\u001b[39;49mopen(file_path)\n\u001b[1;32m 37\u001b[0m image_b64 \u001b[39m=\u001b[39m convert_to_base64(pil_image)\n\u001b[1;32m 38\u001b[0m plt_img_base64(image_b64)\n",
- "File \u001b[0;32m~/langchain/langchain/libs/langchain/.venv/lib/python3.10/site-packages/PIL/Image.py:3243\u001b[0m, in \u001b[0;36mopen\u001b[0;34m(fp, mode, formats)\u001b[0m\n\u001b[1;32m 3240\u001b[0m filename \u001b[39m=\u001b[39m fp\n\u001b[1;32m 3242\u001b[0m \u001b[39mif\u001b[39;00m filename:\n\u001b[0;32m-> 3243\u001b[0m fp \u001b[39m=\u001b[39m builtins\u001b[39m.\u001b[39;49mopen(filename, \u001b[39m\"\u001b[39;49m\u001b[39mrb\u001b[39;49m\u001b[39m\"\u001b[39;49m)\n\u001b[1;32m 3244\u001b[0m exclusive_fp \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m\n\u001b[1;32m 3246\u001b[0m \u001b[39mtry\u001b[39;00m:\n",
- "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/Users/rlm/Desktop/Eval_Sets/multi_modal_presentations/DDOG/img_23.jpg'"
- ]
+ "data": {
+ "text/html": [
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
}
],
"source": [
@@ -334,19 +328,18 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 3,
"metadata": {},
"outputs": [
{
- "ename": "NameError",
- "evalue": "name 'image_b64' is not defined",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
- "\u001b[1;32m/Users/jacoblee/langchain/langchain/docs/docs/integrations/chat/ollama.ipynb Cell 13\u001b[0m line \u001b[0;36m1\n\u001b[1;32m 9\u001b[0m \u001b[39m# Call the chat model with both messages and images\u001b[39;00m\n\u001b[1;32m 10\u001b[0m content_parts \u001b[39m=\u001b[39m []\n\u001b[1;32m 11\u001b[0m image_part \u001b[39m=\u001b[39m {\n\u001b[1;32m 12\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mtype\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\u001b[39m\u001b[39mimage_url\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[0;32m---> 13\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mimage_url\u001b[39m\u001b[39m\"\u001b[39m: {\u001b[39m\"\u001b[39m\u001b[39murl\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mdata:image/jpeg;base64,\u001b[39m\u001b[39m{\u001b[39;00mimage_b64\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m},\n\u001b[1;32m 14\u001b[0m }\n\u001b[1;32m 15\u001b[0m text_part \u001b[39m=\u001b[39m {\u001b[39m\"\u001b[39m\u001b[39mtype\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\u001b[39m\u001b[39mtext\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mtext\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\u001b[39m\u001b[39mWhat is the Daollar-based gross retention rate?\u001b[39m\u001b[39m\"\u001b[39m}\n\u001b[1;32m 17\u001b[0m content_parts\u001b[39m.\u001b[39mappend(image_part)\n",
- "\u001b[0;31mNameError\u001b[0m: name 'image_b64' is not defined"
- ]
+ "data": {
+ "text/plain": [
+ "AIMessage(content='90%')"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
}
],
"source": [
@@ -361,7 +354,7 @@
"content_parts = []\n",
"image_part = {\n",
" \"type\": \"image_url\",\n",
- " \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_b64}\"},\n",
+ " \"image_url\": f\"data:image/jpeg;base64,{image_b64}\",\n",
"}\n",
"text_part = {\"type\": \"text\", \"text\": \"What is the Daollar-based gross retention rate?\"}\n",
"\n",
@@ -388,7 +381,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.5"
+ "version": "3.9.16"
}
},
"nbformat": 4,
diff --git a/docs/docs/integrations/llms/ollama.ipynb b/docs/docs/integrations/llms/ollama.ipynb
index b069bfe56317c..adbf4eccac8ea 100644
--- a/docs/docs/integrations/llms/ollama.ipynb
+++ b/docs/docs/integrations/llms/ollama.ipynb
@@ -115,7 +115,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -126,20 +126,20 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 2,
"metadata": {},
"outputs": [
{
- "ename": "FileNotFoundError",
- "evalue": "[Errno 2] No such file or directory: '/Users/rlm/Desktop/Eval_Sets/multi_modal_presentations/DDOG/img_23.jpg'",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
- "\u001b[1;32m/Users/jacoblee/langchain/langchain/docs/docs/integrations/llms/ollama.ipynb Cell 7\u001b[0m line \u001b[0;36m3\n\u001b[1;32m 31\u001b[0m display(HTML(image_html))\n\u001b[1;32m 34\u001b[0m file_path \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39m/Users/rlm/Desktop/Eval_Sets/multi_modal_presentations/DDOG/img_23.jpg\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[0;32m---> 35\u001b[0m pil_image \u001b[39m=\u001b[39m Image\u001b[39m.\u001b[39;49mopen(file_path)\n\u001b[1;32m 36\u001b[0m image_b64 \u001b[39m=\u001b[39m convert_to_base64(pil_image)\n\u001b[1;32m 37\u001b[0m plt_img_base64(image_b64)\n",
- "File \u001b[0;32m~/langchain/langchain/libs/langchain/.venv/lib/python3.10/site-packages/PIL/Image.py:3243\u001b[0m, in \u001b[0;36mopen\u001b[0;34m(fp, mode, formats)\u001b[0m\n\u001b[1;32m 3240\u001b[0m filename \u001b[39m=\u001b[39m fp\n\u001b[1;32m 3242\u001b[0m \u001b[39mif\u001b[39;00m filename:\n\u001b[0;32m-> 3243\u001b[0m fp \u001b[39m=\u001b[39m builtins\u001b[39m.\u001b[39;49mopen(filename, \u001b[39m\"\u001b[39;49m\u001b[39mrb\u001b[39;49m\u001b[39m\"\u001b[39;49m)\n\u001b[1;32m 3244\u001b[0m exclusive_fp \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m\n\u001b[1;32m 3246\u001b[0m \u001b[39mtry\u001b[39;00m:\n",
- "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/Users/rlm/Desktop/Eval_Sets/multi_modal_presentations/DDOG/img_23.jpg'"
- ]
+ "data": {
+ "text/html": [
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
}
],
"source": [
@@ -184,31 +184,23 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 5,
"metadata": {},
"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "90%"
- ]
- },
{
"data": {
"text/plain": [
"'90%'"
]
},
- "execution_count": 18,
+ "execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_image_context = bakllava.bind(images=[image_b64])\n",
- "\n",
- "llm_with_image_context(prompt=\"What is the dollar based gross retention rate:\")"
+ "llm_with_image_context.invoke(\"What is the dollar based gross retention rate:\")"
]
}
],
@@ -228,7 +220,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.5"
+ "version": "3.9.16"
}
},
"nbformat": 4,