diff --git a/docs/core_docs/docs/tutorials/rag.ipynb b/docs/core_docs/docs/tutorials/rag.ipynb
index ffb49c120726..5de1dd38681f 100644
--- a/docs/core_docs/docs/tutorials/rag.ipynb
+++ b/docs/core_docs/docs/tutorials/rag.ipynb
@@ -252,9 +252,7 @@
     "\n",
     "\n",
     "// Compile application and test\n",
-    "const graph = new StateGraph({\n",
-    "  stateSchema: StateAnnotation,\n",
-    "})\n",
+    "const graph = new StateGraph(StateAnnotation)\n",
     "  .addNode(\"retrieve\", retrieve)\n",
     "  .addNode(\"generate\", generate)\n",
     "  .addEdge(\"__start__\", \"retrieve\")\n",
@@ -592,19 +590,11 @@
     "};\n",
     "\n",
     "\n",
-    "// Below we stream the output from the LLM. This is to support token-by-token\n",
-    "// streaming for older versions of @langchain/core. For\n",
-    "// @langchain/core >= 0.2.3, we can use `llm.invoke` instead.\n",
-    "// Read more: https://langchain-ai.github.io/langgraphjs/how-tos/stream-tokens/\n",
     "const generate = async (state: typeof StateAnnotation.State) => {\n",
     "  const docsContent = state.context.map(doc => doc.pageContent).join(\"\\n\");\n",
     "  const messages = await promptTemplate.invoke({ question: state.question, context: docsContent });\n",
-    "  const stream = await llm.stream(messages);\n",
-    "  let gathered = undefined;\n",
-    "  for await (const chunk of stream) {\n",
-    "    gathered = gathered !== undefined ? concat(gathered, chunk) : chunk;\n",
-    "  }\n",
-    "  return { answer: gathered.content };\n",
+    "  const response = await llm.invoke(messages);\n",
+    "  return { answer: response.content };\n",
     "};"
    ]
   },
@@ -629,9 +619,7 @@
    "source": [
     "import { StateGraph } from \"@langchain/langgraph\";\n",
     "\n",
-    "const graph = new StateGraph({\n",
-    "  stateSchema: StateAnnotation,\n",
-    "})\n",
+    "const graph = new StateGraph(StateAnnotation)\n",
     "  .addNode(\"retrieve\", retrieve)\n",
     "  .addNode(\"generate\", generate)\n",
     "  .addEdge(\"__start__\", \"retrieve\")\n",
@@ -810,20 +798,20 @@
    "id": "f860142d-d50b-4526-a03f-a59a763117fe",
    "metadata": {},
    "source": [
-    "Stream [tokens](/docs/concepts/tokens/):"
+    "Stream [tokens](/docs/concepts/tokens/) (requires `@langchain/core` >= 0.3.24 and `@langchain/langgraph` >= 0.2.34 with the above implementation):"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 16,
-   "id": "28625cc3-0f77-4143-af51-ce0fd6682120",
+   "id": "acb80ba0-d5d6-4425-9683-aaeab7081e6c",
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "|Task| decomposition| is| the| process| of| breaking| down| complex| tasks| into| smaller|,| more| manageable| steps|.| This| can| be| achieved| through| techniques| like| Chain| of| Thought| (|Co|T|),| which| guides| models| to| think| step| by| step|,| or| by| using| specific| prompts| and| human| inputs|.| The| Tree| of| Thoughts| approach| further| enhances| this| by| exploring| multiple| reasoning| possibilities| at| each| step|,| creating| a| structured| tree| of| thoughts|.||"
+      "|Task| decomposition| is| the| process| of| breaking| down| complex| tasks| into| smaller|,| more| manageable| steps|.| This| can| be| achieved| through| various| methods|,| including| prompting| large| language| models| (|LL|Ms|)| to| outline| steps| or| using| task|-specific| instructions|.| Techniques| like| Chain| of| Thought| (|Co|T|)| and| Tree| of| Thoughts| further| enhance| this| process| by| struct|uring| reasoning| and| exploring| multiple| possibilities| at| each| step|.||"
      ]
     }
    ],
@@ -838,6 +826,20 @@
     "}"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "5aeb45ad-9bd5-4ee4-8356-9dca9ece76c5",
+   "metadata": {},
+   "source": [
"```{=mdx}\n", + ":::note\n", + "\n", + "Streaming tokens with the current implementation, using `.invoke` in the `generate` step, requires `@langchain/core` >= 0.3.24 and `@langchain/langgraph` >= 0.2.34. See details [here](https://langchain-ai.github.io/langgraphjs/how-tos/stream-tokens/).\n", + "\n", + ":::\n", + "```" + ] + }, { "cell_type": "markdown", "id": "406534d4-66a3-4c27-b277-2bd2f5930cf5", @@ -946,7 +948,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 18, "id": "5daf62fd-4086-49ad-8b3a-514c4fa214ea", "metadata": {}, "outputs": [], @@ -967,7 +969,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 19, "id": "7b4864dd-172a-441f-8224-0661b156ed29", "metadata": {}, "outputs": [], @@ -993,7 +995,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 20, "id": "afdbe4e7-b1e1-41ff-9c9e-0194ebd73049", "metadata": {}, "outputs": [], @@ -1038,11 +1040,7 @@ "\n", "\n", "\n", - "const graphQA = new StateGraph({\n", - " input: InputStateAnnotation,\n", - " output: StateAnnotationQA,\n", - " stateSchema: StateAnnotationQA,\n", - "})\n", + "const graphQA = new StateGraph(StateAnnotationQA)\n", " .addNode(\"analyzeQuery\", analyzeQuery)\n", " .addNode(\"retrieveQA\", retrieveQA)\n", " .addNode(\"generateQA\", generateQA)\n", @@ -1062,7 +1060,7 @@ "// Note: tslab only works inside a jupyter notebook. Don't worry about running this code yourself!\n", "import * as tslab from \"tslab\";\n", "\n", - "const image = await graph.getGraph().drawMermaidPng();\n", + "const image = await graphQA.getGraph().drawMermaidPng();\n", "const arrayBuffer = await image.arrayBuffer();\n", "\n", "await tslab.display.png(new Uint8Array(arrayBuffer));\n",