From 5f839beab94f5a0da9d4913889199e77a829a0ba Mon Sep 17 00:00:00 2001
From: Erick Friis
Date: Mon, 18 Dec 2023 13:49:46 -0800
Subject: [PATCH] community: replace deprecated davinci models (#14860)

This is technically a breaking change because it'll switch out the default
model from `text-davinci-003` to `gpt-3.5-turbo-instruct`, but OpenAI is
shutting off those endpoints on 1/4 anyway. Feels less disruptive to switch
out the default instead.
---
 cookbook/learned_prompt_optimization.ipynb             |  2 +-
 cookbook/tree_of_thought.ipynb                         |  2 +-
 docs/docs/guides/safety/moderation.mdx                 |  4 ++--
 docs/docs/integrations/callbacks/promptlayer.ipynb     |  2 +-
 .../integrations/callbacks/sagemaker_tracking.ipynb    |  2 +-
 docs/docs/integrations/llms/azure_openai.ipynb         |  4 ++--
 docs/docs/integrations/llms/edenai.ipynb               |  2 +-
 docs/docs/integrations/llms/javelin.ipynb              |  2 +-
 docs/docs/integrations/llms/llm_caching.ipynb          | 10 +++++-----
 docs/docs/integrations/providers/log10.mdx             |  2 +-
 docs/docs/integrations/providers/predictionguard.mdx   |  2 +-
 docs/docs/integrations/toolkits/openapi.ipynb          |  2 +-
 docs/docs/integrations/toolkits/openapi_nla.ipynb      |  4 ++--
 docs/docs/integrations/toolkits/powerbi.ipynb          |  2 +-
 .../modules/agents/agent_types/react_docstore.ipynb    |  2 +-
 .../modules/agents/how_to/intermediate_steps.ipynb     |  2 +-
 docs/docs/modules/model_io/llms/llm_caching.mdx        |  6 +++---
 docs/docs/modules/model_io/output_parsers/index.ipynb  |  2 +-
 .../model_io/output_parsers/pandas_dataframe.ipynb     |  2 +-
 .../modules/model_io/output_parsers/pydantic.ipynb     |  2 +-
 .../community/langchain_community/embeddings/edenai.py |  2 +-
 libs/community/langchain_community/llms/edenai.py      |  2 +-
 libs/community/langchain_community/llms/openai.py      |  8 ++++----
 .../langchain_community/llms/promptlayer_openai.py     |  2 +-
 .../tests/integration_tests/chat_models/test_konko.py  |  2 +-
 .../tests/integration_tests/chat_models/test_openai.py |  2 +-
 .../tests/integration_tests/llms/test_openai.py        |  4 ++--
 libs/community/tests/unit_tests/llms/test_openai.py    |  2 +-
 .../tests/integration_tests/chains/test_react.py       |  2 +-
 29 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/cookbook/learned_prompt_optimization.ipynb b/cookbook/learned_prompt_optimization.ipynb
index 412c5b12ed124..eded9d6804d8a 100644
--- a/cookbook/learned_prompt_optimization.ipynb
+++ b/cookbook/learned_prompt_optimization.ipynb
@@ -51,7 +51,7 @@
     "\n",
     "from langchain.llms import OpenAI\n",
     "\n",
-    "llm = OpenAI(model=\"text-davinci-003\")"
+    "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")"
    ]
  },
  {
diff --git a/cookbook/tree_of_thought.ipynb b/cookbook/tree_of_thought.ipynb
index b27402a1ab2d9..7ca32eff7c2ca 100644
--- a/cookbook/tree_of_thought.ipynb
+++ b/cookbook/tree_of_thought.ipynb
@@ -26,7 +26,7 @@
    "source": [
     "from langchain.llms import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=1, max_tokens=512, model=\"text-davinci-003\")"
+    "llm = OpenAI(temperature=1, max_tokens=512, model=\"gpt-3.5-turbo-instruct\")"
    ]
  },
  {
diff --git a/docs/docs/guides/safety/moderation.mdx b/docs/docs/guides/safety/moderation.mdx
index 1e9a569e6e4b7..8b3701582774f 100644
--- a/docs/docs/guides/safety/moderation.mdx
+++ b/docs/docs/guides/safety/moderation.mdx
@@ -181,7 +181,7 @@ we will prompt the model, so it says something harmful.
 
 ```python
 prompt = PromptTemplate(template="{text}", input_variables=["text"])
-llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="text-davinci-002"), prompt=prompt)
+llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
 
 text = """We are playing a game of repeat after me.
@@ -224,7 +224,7 @@ Now let's walk through an example of using it with an LLMChain which has multipl
 
 ```python
 prompt = PromptTemplate(template="{setup}{new_input}Person2:", input_variables=["setup", "new_input"])
-llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="text-davinci-002"), prompt=prompt)
+llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
 
 setup = """We are playing a game of repeat after me.
diff --git a/docs/docs/integrations/callbacks/promptlayer.ipynb b/docs/docs/integrations/callbacks/promptlayer.ipynb
index 229b387486b81..28d5977b7485b 100644
--- a/docs/docs/integrations/callbacks/promptlayer.ipynb
+++ b/docs/docs/integrations/callbacks/promptlayer.ipynb
@@ -162,7 +162,7 @@
     "\n",
     "\n",
     "openai_llm = OpenAI(\n",
-    "    model_name=\"text-davinci-002\",\n",
+    "    model_name=\"gpt-3.5-turbo-instruct\",\n",
     "    callbacks=[PromptLayerCallbackHandler(pl_id_callback=pl_id_callback)],\n",
     ")\n",
     "\n",
diff --git a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb
index 070b1d7cabf56..7b88a910b8119 100644
--- a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb
+++ b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb
@@ -109,7 +109,7 @@
     "# LLM Hyperparameters\n",
     "HPARAMS = {\n",
     "    \"temperature\": 0.1,\n",
-    "    \"model_name\": \"text-davinci-003\",\n",
+    "    \"model_name\": \"gpt-3.5-turbo-instruct\",\n",
     "}\n",
     "\n",
     "# Bucket used to save prompt logs (Use `None` is used to save the default bucket or otherwise change it)\n",
diff --git a/docs/docs/integrations/llms/azure_openai.ipynb b/docs/docs/integrations/llms/azure_openai.ipynb
index 128cd2665ea34..c7b1e59f66532 100644
--- a/docs/docs/integrations/llms/azure_openai.ipynb
+++ b/docs/docs/integrations/llms/azure_openai.ipynb
@@ -138,7 +138,7 @@
     "# Replace the deployment name with your own\n",
     "llm = AzureOpenAI(\n",
     "    deployment_name=\"td2\",\n",
-    "    model_name=\"text-davinci-002\",\n",
+    "    model_name=\"gpt-3.5-turbo-instruct\",\n",
     ")"
    ]
  },
@@ -182,7 +182,7 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "\u001B[1mAzureOpenAI\u001B[0m\n",
+    "\u001b[1mAzureOpenAI\u001b[0m\n",
     "Params: {'deployment_name': 'text-davinci-002', 'model_name': 'text-davinci-002', 'temperature': 0.7, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n"
    ]
  }
diff --git a/docs/docs/integrations/llms/edenai.ipynb b/docs/docs/integrations/llms/edenai.ipynb
index 41f235aad7bb9..a4267511b7b0b 100644
--- a/docs/docs/integrations/llms/edenai.ipynb
+++ b/docs/docs/integrations/llms/edenai.ipynb
@@ -103,7 +103,7 @@
     "llm = EdenAI(\n",
     "    feature=\"text\",\n",
     "    provider=\"openai\",\n",
-    "    model=\"text-davinci-003\",\n",
+    "    model=\"gpt-3.5-turbo-instruct\",\n",
     "    temperature=0.2,\n",
     "    max_tokens=250,\n",
     ")\n",
diff --git a/docs/docs/integrations/llms/javelin.ipynb b/docs/docs/integrations/llms/javelin.ipynb
index 6942ddf2ad9da..e067a066db53b 100644
--- a/docs/docs/integrations/llms/javelin.ipynb
+++ b/docs/docs/integrations/llms/javelin.ipynb
@@ -100,7 +100,7 @@
     "gateway = JavelinAIGateway(\n",
     "    gateway_uri=\"http://localhost:8000\", # replace with service URL or host/port of Javelin\n",
     "    route=route_completions,\n",
-    "    model_name=\"text-davinci-003\",\n",
+    "    model_name=\"gpt-3.5-turbo-instruct\",\n",
     ")\n",
     "\n",
     "prompt = PromptTemplate(\"Translate the following English text to French: {text}\")\n",
diff --git a/docs/docs/integrations/llms/llm_caching.ipynb b/docs/docs/integrations/llms/llm_caching.ipynb
index 53272bacc0583..0b69cf66afd04 100644
--- a/docs/docs/integrations/llms/llm_caching.ipynb
+++ b/docs/docs/integrations/llms/llm_caching.ipynb
@@ -21,7 +21,7 @@
     "from langchain.llms import OpenAI\n",
     "\n",
     "# To make the caching really obvious, lets use a slower model.\n",
-    "llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2)"
+    "llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
    ]
  },
  {
@@ -1159,7 +1159,7 @@
    "metadata": {},
    "outputs": [
     {
-     "name": "stdin",
+     "name": "stdout",
      "output_type": "stream",
      "text": [
       "ASTRA_DB_API_ENDPOINT = https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com\n",
@@ -1358,7 +1358,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2, cache=False)"
+    "llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2, cache=False)"
    ]
  },
  {
@@ -1442,8 +1442,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = OpenAI(model_name=\"text-davinci-002\")\n",
-    "no_cache_llm = OpenAI(model_name=\"text-davinci-002\", cache=False)"
+    "llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
+    "no_cache_llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", cache=False)"
    ]
  },
  {
diff --git a/docs/docs/integrations/providers/log10.mdx b/docs/docs/integrations/providers/log10.mdx
index d458435e55286..4f12b11ef86f1 100644
--- a/docs/docs/integrations/providers/log10.mdx
+++ b/docs/docs/integrations/providers/log10.mdx
@@ -63,7 +63,7 @@ llm = ChatAnthropic(model="claude-2", callbacks=[log10_callback], temperature=0.
 llm.predict_messages(messages)
 print(completion)
 
-llm = OpenAI(model_name="text-davinci-003", callbacks=[log10_callback], temperature=0.5)
+llm = OpenAI(model_name="gpt-3.5-turbo-instruct", callbacks=[log10_callback], temperature=0.5)
 completion = llm.predict("You are a ping pong machine.\nPing?\n")
 print(completion)
 ```
diff --git a/docs/docs/integrations/providers/predictionguard.mdx b/docs/docs/integrations/providers/predictionguard.mdx
index 0dfb744585c7f..09482cdb0562b 100644
--- a/docs/docs/integrations/providers/predictionguard.mdx
+++ b/docs/docs/integrations/providers/predictionguard.mdx
@@ -88,7 +88,7 @@ os.environ["OPENAI_API_KEY"] = ""
 # Your Prediction Guard API key. Get one at predictionguard.com
 os.environ["PREDICTIONGUARD_TOKEN"] = ""
 
-pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
+pgllm = PredictionGuard(model="OpenAI-gpt-3.5-turbo-instruct")
 
 template = """Question: {question}
diff --git a/docs/docs/integrations/toolkits/openapi.ipynb b/docs/docs/integrations/toolkits/openapi.ipynb
index 3460495be8c3d..696e940b7ef24 100644
--- a/docs/docs/integrations/toolkits/openapi.ipynb
+++ b/docs/docs/integrations/toolkits/openapi.ipynb
@@ -222,7 +222,7 @@
    "source": [
     "import tiktoken\n",
     "\n",
-    "enc = tiktoken.encoding_for_model(\"text-davinci-003\")\n",
+    "enc = tiktoken.encoding_for_model(\"gpt-4\")\n",
     "\n",
     "\n",
     "def count_tokens(s):\n",
diff --git a/docs/docs/integrations/toolkits/openapi_nla.ipynb b/docs/docs/integrations/toolkits/openapi_nla.ipynb
index 212299e14c953..221db17c8572a 100644
--- a/docs/docs/integrations/toolkits/openapi_nla.ipynb
+++ b/docs/docs/integrations/toolkits/openapi_nla.ipynb
@@ -40,9 +40,9 @@
   }
  },
  "outputs": [],
  "source": [
-    "# Select the LLM to use. Here, we use text-davinci-003\n",
+    "# Select the LLM to use. Here, we use gpt-3.5-turbo-instruct\n",
     "llm = OpenAI(\n",
-    "    temperature=0, max_tokens=700\n",
+    "    temperature=0, max_tokens=700, model_name=\"gpt-3.5-turbo-instruct\"\n",
     ") # You can swap between different core LLM's here."
    ]
  },
diff --git a/docs/docs/integrations/toolkits/powerbi.ipynb b/docs/docs/integrations/toolkits/powerbi.ipynb
index c74aa2d41162d..0ed596be97fd0 100644
--- a/docs/docs/integrations/toolkits/powerbi.ipynb
+++ b/docs/docs/integrations/toolkits/powerbi.ipynb
@@ -15,7 +15,7 @@
     "- It relies on authentication with the azure.identity package, which can be installed with `pip install azure-identity`. Alternatively you can create the powerbi dataset with a token as a string without supplying the credentials.\n",
     "- You can also supply a username to impersonate for use with datasets that have RLS enabled. \n",
     "- The toolkit uses a LLM to create the query from the question, the agent uses the LLM for the overall execution.\n",
-    "- Testing was done mostly with a `text-davinci-003` model, codex models did not seem to perform ver well."
+    "- Testing was done mostly with a `gpt-3.5-turbo-instruct` model, codex models did not seem to perform very well."
    ]
  },
  {
diff --git a/docs/docs/modules/agents/agent_types/react_docstore.ipynb b/docs/docs/modules/agents/agent_types/react_docstore.ipynb
index 1095e662f0193..4f7c511879853 100644
--- a/docs/docs/modules/agents/agent_types/react_docstore.ipynb
+++ b/docs/docs/modules/agents/agent_types/react_docstore.ipynb
@@ -36,7 +36,7 @@
     "    ),\n",
     "]\n",
     "\n",
-    "llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
+    "llm = OpenAI(temperature=0, model_name=\"gpt-3.5-turbo-instruct\")\n",
     "react = initialize_agent(tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)"
    ]
  },
diff --git a/docs/docs/modules/agents/how_to/intermediate_steps.ipynb b/docs/docs/modules/agents/how_to/intermediate_steps.ipynb
index 0b0010d64696d..6397a9411fdc1 100644
--- a/docs/docs/modules/agents/how_to/intermediate_steps.ipynb
+++ b/docs/docs/modules/agents/how_to/intermediate_steps.ipynb
@@ -36,7 +36,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
+    "llm = OpenAI(temperature=0, model_name=\"gpt-3.5-turbo-instruct\")\n",
     "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)"
    ]
  },
diff --git a/docs/docs/modules/model_io/llms/llm_caching.mdx b/docs/docs/modules/model_io/llms/llm_caching.mdx
index 93fcd36340aa6..891b9f45f7d12 100644
--- a/docs/docs/modules/model_io/llms/llm_caching.mdx
+++ b/docs/docs/modules/model_io/llms/llm_caching.mdx
@@ -9,7 +9,7 @@ from langchain.globals import set_llm_cache
 from langchain.llms import OpenAI
 
 # To make the caching really obvious, lets use a slower model.
-llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2)
+llm = OpenAI(model_name="gpt-3.5-turbo-instruct", n=2, best_of=2)
 ```
 
 ## In Memory Cache
@@ -110,8 +110,8 @@ As an example, we will load a summarizer map-reduce chain. We will cache results
 
 ```python
-llm = OpenAI(model_name="text-davinci-002")
-no_cache_llm = OpenAI(model_name="text-davinci-002", cache=False)
+llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
+no_cache_llm = OpenAI(model_name="gpt-3.5-turbo-instruct", cache=False)
 ```
 
diff --git a/docs/docs/modules/model_io/output_parsers/index.ipynb b/docs/docs/modules/model_io/output_parsers/index.ipynb
index b2913c22cbbe9..6909e66f67d68 100644
--- a/docs/docs/modules/model_io/output_parsers/index.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/index.ipynb
@@ -55,7 +55,7 @@
     "from langchain.prompts import PromptTemplate\n",
     "from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
     "\n",
-    "model = OpenAI(model_name=\"text-davinci-003\", temperature=0.0)\n",
+    "model = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", temperature=0.0)\n",
     "\n",
     "\n",
     "# Define your desired data structure.\n",
diff --git a/docs/docs/modules/model_io/output_parsers/pandas_dataframe.ipynb b/docs/docs/modules/model_io/output_parsers/pandas_dataframe.ipynb
index ea0e32ed90326..e4bf709399c68 100644
--- a/docs/docs/modules/model_io/output_parsers/pandas_dataframe.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/pandas_dataframe.ipynb
@@ -34,7 +34,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model_name = \"text-davinci-003\"\n",
+    "model_name = \"gpt-3.5-turbo-instruct\"\n",
     "temperature = 0.5\n",
     "model = OpenAI(model_name=model_name, temperature=temperature)"
    ]
diff --git a/docs/docs/modules/model_io/output_parsers/pydantic.ipynb b/docs/docs/modules/model_io/output_parsers/pydantic.ipynb
index 7b0290f1cd0cd..b79e97f551142 100644
--- a/docs/docs/modules/model_io/output_parsers/pydantic.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/pydantic.ipynb
@@ -35,7 +35,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model_name = \"text-davinci-003\"\n",
+    "model_name = \"gpt-3.5-turbo-instruct\"\n",
     "temperature = 0.0\n",
     "model = OpenAI(model_name=model_name, temperature=temperature)"
    ]
diff --git a/libs/community/langchain_community/embeddings/edenai.py b/libs/community/langchain_community/embeddings/edenai.py
index 5cb92e630071f..9d12376fc2d55 100644
--- a/libs/community/langchain_community/embeddings/edenai.py
+++ b/libs/community/langchain_community/embeddings/edenai.py
@@ -20,7 +20,7 @@ class EdenAiEmbeddings(BaseModel, Embeddings):
 
     model: Optional[str] = None
     """
-    model name for above provider (eg: 'text-davinci-003' for openai)
+    model name for above provider (eg: 'gpt-3.5-turbo-instruct' for openai)
     available models are shown on https://docs.edenai.co/ under 'available providers'
     """
 
diff --git a/libs/community/langchain_community/llms/edenai.py b/libs/community/langchain_community/llms/edenai.py
index 4a116235a623c..fd1842d72a54f 100644
--- a/libs/community/langchain_community/llms/edenai.py
+++ b/libs/community/langchain_community/llms/edenai.py
@@ -45,7 +45,7 @@ class EdenAI(LLM):
 
     model: Optional[str] = None
     """
-    model name for above provider (eg: 'text-davinci-003' for openai)
+    model name for above provider (eg: 'gpt-3.5-turbo-instruct' for openai)
     available models are shown on https://docs.edenai.co/ under 'available providers'
     """
 
diff --git a/libs/community/langchain_community/llms/openai.py b/libs/community/langchain_community/llms/openai.py
index 3e325bbb2c0eb..b9fb5aa4ef13e 100644
--- a/libs/community/langchain_community/llms/openai.py
+++ b/libs/community/langchain_community/llms/openai.py
@@ -173,7 +173,7 @@ def is_lc_serializable(cls) -> bool:
     client: Any = Field(default=None, exclude=True)  #: :meta private:
     async_client: Any = Field(default=None, exclude=True)  #: :meta private:
-    model_name: str = Field(default="text-davinci-003", alias="model")
+    model_name: str = Field(default="gpt-3.5-turbo-instruct", alias="model")
     """Model name to use."""
     temperature: float = 0.7
     """What sampling temperature to use."""
@@ -657,7 +657,7 @@ def modelname_to_contextsize(modelname: str) -> int:
         Example:
             .. code-block:: python
 
-                max_tokens = openai.modelname_to_contextsize("text-davinci-003")
+                max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct")
         """
         model_token_mapping = {
             "gpt-4": 8192,
@@ -737,7 +737,7 @@ class OpenAI(BaseOpenAI):
         .. code-block:: python
 
             from langchain_community.llms import OpenAI
-            openai = OpenAI(model_name="text-davinci-003")
+            openai = OpenAI(model_name="gpt-3.5-turbo-instruct")
     """
 
     @classmethod
@@ -763,7 +763,7 @@ class AzureOpenAI(BaseOpenAI):
         .. code-block:: python
 
             from langchain_community.llms import AzureOpenAI
-            openai = AzureOpenAI(model_name="text-davinci-003")
+            openai = AzureOpenAI(model_name="gpt-3.5-turbo-instruct")
     """
 
     azure_endpoint: Union[str, None] = None
diff --git a/libs/community/langchain_community/llms/promptlayer_openai.py b/libs/community/langchain_community/llms/promptlayer_openai.py
index f6944d1725d53..cb904476e8ba3 100644
--- a/libs/community/langchain_community/llms/promptlayer_openai.py
+++ b/libs/community/langchain_community/llms/promptlayer_openai.py
@@ -31,7 +31,7 @@ class PromptLayerOpenAI(OpenAI):
         .. code-block:: python
 
             from langchain_community.llms import PromptLayerOpenAI
-            openai = PromptLayerOpenAI(model_name="text-davinci-003")
+            openai = PromptLayerOpenAI(model_name="gpt-3.5-turbo-instruct")
     """
 
     pl_tags: Optional[List[str]]
diff --git a/libs/community/tests/integration_tests/chat_models/test_konko.py b/libs/community/tests/integration_tests/chat_models/test_konko.py
index 7cfb5be1c25f2..47554199348a8 100644
--- a/libs/community/tests/integration_tests/chat_models/test_konko.py
+++ b/libs/community/tests/integration_tests/chat_models/test_konko.py
@@ -163,7 +163,7 @@ def test_konko_additional_args_test() -> None:
         ChatKonko(model_kwargs={"temperature": 0.2})
 
     with pytest.raises(ValueError):
-        ChatKonko(model_kwargs={"model": "text-davinci-003"})
+        ChatKonko(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
 
 
 def test_konko_token_streaming_test() -> None:
diff --git a/libs/community/tests/integration_tests/chat_models/test_openai.py b/libs/community/tests/integration_tests/chat_models/test_openai.py
index 40eed8670a048..9274ad8e83166 100644
--- a/libs/community/tests/integration_tests/chat_models/test_openai.py
+++ b/libs/community/tests/integration_tests/chat_models/test_openai.py
@@ -261,7 +261,7 @@ def test_chat_openai_extra_kwargs() -> None:
 
     # Test that "model" cannot be specified in kwargs
     with pytest.raises(ValueError):
-        ChatOpenAI(model_kwargs={"model": "text-davinci-003"})
+        ChatOpenAI(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
 
 
 @pytest.mark.scheduled
diff --git a/libs/community/tests/integration_tests/llms/test_openai.py b/libs/community/tests/integration_tests/llms/test_openai.py
index d89857b51a1d6..9d5b8d3cbecaa 100644
--- a/libs/community/tests/integration_tests/llms/test_openai.py
+++ b/libs/community/tests/integration_tests/llms/test_openai.py
@@ -50,7 +50,7 @@ def test_openai_extra_kwargs() -> None:
 
     # Test that "model" cannot be specified in kwargs
     with pytest.raises(ValueError):
-        OpenAI(model_kwargs={"model": "text-davinci-003"})
+        OpenAI(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
 
 
 def test_openai_llm_output_contains_model_name() -> None:
@@ -286,7 +286,7 @@ def mock_completion() -> dict:
         "id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
         "object": "text_completion",
         "created": 1689989000,
-        "model": "text-davinci-003",
+        "model": "gpt-3.5-turbo-instruct",
         "choices": [
             {"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
         ],
diff --git a/libs/community/tests/unit_tests/llms/test_openai.py b/libs/community/tests/unit_tests/llms/test_openai.py
index a14cc9651cb7b..302cc4fa0b14e 100644
--- a/libs/community/tests/unit_tests/llms/test_openai.py
+++ b/libs/community/tests/unit_tests/llms/test_openai.py
@@ -48,7 +48,7 @@ def mock_completion() -> dict:
         "id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
         "object": "text_completion",
         "created": 1689989000,
-        "model": "text-davinci-003",
+        "model": "gpt-3.5-turbo-instruct",
         "choices": [
             {"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
         ],
diff --git a/libs/langchain/tests/integration_tests/chains/test_react.py b/libs/langchain/tests/integration_tests/chains/test_react.py
index 76a93609f7b3b..1415df67b05b2 100644
--- a/libs/langchain/tests/integration_tests/chains/test_react.py
+++ b/libs/langchain/tests/integration_tests/chains/test_react.py
@@ -7,7 +7,7 @@
 
 def test_react() -> None:
     """Test functionality on a prompt."""
-    llm = OpenAI(temperature=0, model_name="text-davinci-002")
+    llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct")
     react = ReActChain(llm=llm, docstore=Wikipedia())
     question = (
         "Author David Chanoff has collaborated with a U.S. Navy admiral "
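For anyone picking up this change, a minimal sketch of the resulting behavior, not part of the patch itself. It assumes `langchain_community` is installed; the API key below is a placeholder, since constructing the client makes no network request:

```python
from langchain_community.llms import OpenAI

# With this patch applied, omitting `model` falls back to the new default
# set on BaseOpenAI.model_name in the libs/community diff above.
llm = OpenAI(openai_api_key="sk-placeholder")  # dummy key; nothing is sent
assert llm.model_name == "gpt-3.5-turbo-instruct"

# Passing `model` explicitly (the alias for `model_name`) bypasses the
# default entirely, so pinned configurations are unaffected.
pinned = OpenAI(model="gpt-3.5-turbo-instruct", openai_api_key="sk-placeholder")
assert pinned.model_name == "gpt-3.5-turbo-instruct"
```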