From 13fdd961c0f38bbd6ae772d17be8b3e45512ecd4 Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Thu, 15 Feb 2024 17:31:20 -0500 Subject: [PATCH] docs: fix typos and polish wording in custom chat model notebook --- docs/docs/modules/model_io/chat/custom_chat_model.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/docs/modules/model_io/chat/custom_chat_model.ipynb b/docs/docs/modules/model_io/chat/custom_chat_model.ipynb index 28d9767cba4ae..2201e382bd94c 100644 --- a/docs/docs/modules/model_io/chat/custom_chat_model.ipynb +++ b/docs/docs/modules/model_io/chat/custom_chat_model.ipynb @@ -13,10 +13,10 @@ "\n", - "As an bonus, you're LLM will automatically become a `LangChain Runnable` and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support (e.g., `ainvoke`, `abatch`), `astream_events` API etc.\n", + "As a bonus, your LLM will automatically become a `LangChain Runnable` and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support (e.g., `ainvoke`, `abatch`), `astream_events` API etc.\n", "\n", - "You have 2 options to provide the implementation.\n", "\n", - "1. `SimpleChatModel`: Primarily meant for prototyping. This won't allow you to implement a bunch of features, but might be good enough for your use case.\n", - "2. `BaseChatModel`: Best suited for a full implementation that supports all features (e.g., streaming, function calling)." + "You have 2 options to provide an implementation:\n", "\n", + "1. Using `SimpleChatModel`: Primarily meant for prototyping. This won't allow you to implement a bunch of features, but might be good enough for your use case.\n", + "2. Using `BaseChatModel`: Best suited for a full implementation that supports all features (e.g., streaming, function calling)." ] }, {