From 8b207a153b508f8847cdc867c4af8c2f904920ef Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 08:52:08 -0800 Subject: [PATCH 01/12] works --- libs/langchain/langchain/adapters/openai.py | 16 ++++----- .../langchain/langchain/chat_models/openai.py | 33 ++++++++++--------- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/libs/langchain/langchain/adapters/openai.py b/libs/langchain/langchain/adapters/openai.py index f697851fc6544..b29c8570d46cc 100644 --- a/libs/langchain/langchain/adapters/openai.py +++ b/libs/langchain/langchain/adapters/openai.py @@ -47,24 +47,24 @@ def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: Returns: The LangChain message. """ - role = _dict["role"] + role = _dict.role if role == "user": - return HumanMessage(content=_dict["content"]) + return HumanMessage(content=_dict.content) elif role == "assistant": # Fix for azure # Also OpenAI returns None for tool invocations - content = _dict.get("content", "") or "" - if _dict.get("function_call"): - additional_kwargs = {"function_call": dict(_dict["function_call"])} + content = _dict.content or "" + if _dict.function_call: + additional_kwargs = {"function_call": dict(_dict.function_call)} else: additional_kwargs = {} return AIMessage(content=content, additional_kwargs=additional_kwargs) elif role == "system": - return SystemMessage(content=_dict["content"]) + return SystemMessage(content=_dict.content) elif role == "function": - return FunctionMessage(content=_dict["content"], name=_dict["name"]) + return FunctionMessage(content=_dict.content, name=_dict.name) else: - return ChatMessage(content=_dict["content"], role=role) + return ChatMessage(content=_dict.content, role=role) def convert_message_to_dict(message: BaseMessage) -> dict: diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index 7f96a1befbd03..a5e2ff6191362 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -74,11 +74,12 @@ def _create_retry_decorator( import openai errors = [ - openai.error.Timeout, - openai.error.APIError, - openai.error.APIConnectionError, - openai.error.RateLimitError, - openai.error.ServiceUnavailableError, + ValueError + # openai.error.Timeout, + # openai.error.APIError, + # openai.error.APIConnectionError, + # openai.error.RateLimitError, + # openai.error.ServiceUnavailableError, ] return create_base_retry_decorator( error_types=errors, max_retries=llm.max_retries, run_manager=run_manager @@ -264,7 +265,7 @@ def validate_environment(cls, values: Dict) -> Dict: "Please install it with `pip install openai`." 
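# Patch 01 swaps the module-level `openai.ChatCompletion` for an instantiated
# client and reads responses via attribute access, because the 1.x SDK returns
# typed pydantic objects instead of plain dicts. A minimal sketch of the new
# call shape, assuming openai>=1.0 and OPENAI_API_KEY set in the environment:
import openai

client = openai.OpenAI()  # replaces the old module-level openai.ChatCompletion
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
)
print(response.choices[0].message.content)  # 1.x: attributes, not dict keys
# pre-1.0 equivalent: response["choices"][0]["message"]["content"]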
) try: - values["client"] = openai.ChatCompletion + values["client"] = openai.OpenAI() except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " @@ -282,7 +283,7 @@ def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { "model": self.model_name, - "request_timeout": self.request_timeout, + # "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "n": self.n, @@ -298,7 +299,7 @@ def completion_with_retry( @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: - return self.client.create(**kwargs) + return self.client.chat.completions.create(**kwargs) return _completion_with_retry(**kwargs) @@ -308,7 +309,7 @@ def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: if output is None: # Happens in streaming continue - token_usage = output["token_usage"] + token_usage = output["token_usage"].__dict__ for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v @@ -380,14 +381,14 @@ def _create_message_dicts( def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [] - for res in response["choices"]: - message = convert_dict_to_message(res["message"]) + for res in response.choices: + message = convert_dict_to_message(res.message) gen = ChatGeneration( message=message, - generation_info=dict(finish_reason=res.get("finish_reason")), + generation_info=dict(finish_reason=res.finish_reason), ) generations.append(gen) - token_usage = response.get("usage", {}) + token_usage = response.usage or {} llm_output = {"token_usage": token_usage, "model_name": self.model_name} return ChatResult(generations=generations, llm_output=llm_output) @@ -452,9 +453,9 @@ def _identifying_params(self) -> Dict[str, Any]: def _client_params(self) -> Dict[str, Any]: """Get the parameters used for the openai client.""" openai_creds: Dict[str, Any] = { - "api_key": self.openai_api_key, - "api_base": self.openai_api_base, - "organization": self.openai_organization, + # "api_key": self.openai_api_key, + # "api_base": self.openai_api_base, + # "organization": self.openai_organization, "model": self.model_name, } if self.openai_proxy: From a912f19a51331f43103887f0c1e25efbbf969a58 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 08:55:39 -0800 Subject: [PATCH 02/12] dict --- libs/langchain/langchain/adapters/openai.py | 16 ++++++++-------- libs/langchain/langchain/chat_models/openai.py | 12 +++++++----- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/libs/langchain/langchain/adapters/openai.py b/libs/langchain/langchain/adapters/openai.py index b29c8570d46cc..f697851fc6544 100644 --- a/libs/langchain/langchain/adapters/openai.py +++ b/libs/langchain/langchain/adapters/openai.py @@ -47,24 +47,24 @@ def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: Returns: The LangChain message. 
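# Patch 02 reverts the adapters to dict lookups and instead normalizes the
# typed 1.x response once, at the boundary, so all downstream parsing stays
# dict-based. A hedged sketch of that normalization (assumes openai>=1.0;
# `.dict()` is the pydantic serializer the diff itself relies on):
import openai

client = openai.OpenAI()
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
)
payload = response if isinstance(response, dict) else response.dict()
for choice in payload["choices"]:  # dict-style parsing keeps working unchanged
    print(choice["message"]["content"], choice.get("finish_reason"))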
""" - role = _dict.role + role = _dict["role"] if role == "user": - return HumanMessage(content=_dict.content) + return HumanMessage(content=_dict["content"]) elif role == "assistant": # Fix for azure # Also OpenAI returns None for tool invocations - content = _dict.content or "" - if _dict.function_call: - additional_kwargs = {"function_call": dict(_dict.function_call)} + content = _dict.get("content", "") or "" + if _dict.get("function_call"): + additional_kwargs = {"function_call": dict(_dict["function_call"])} else: additional_kwargs = {} return AIMessage(content=content, additional_kwargs=additional_kwargs) elif role == "system": - return SystemMessage(content=_dict.content) + return SystemMessage(content=_dict["content"]) elif role == "function": - return FunctionMessage(content=_dict.content, name=_dict.name) + return FunctionMessage(content=_dict["content"], name=_dict["name"]) else: - return ChatMessage(content=_dict.content, role=role) + return ChatMessage(content=_dict["content"], role=role) def convert_message_to_dict(message: BaseMessage) -> dict: diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index a5e2ff6191362..178d0c9780178 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -309,7 +309,7 @@ def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: if output is None: # Happens in streaming continue - token_usage = output["token_usage"].__dict__ + token_usage = output["token_usage"] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v @@ -381,14 +381,16 @@ def _create_message_dicts( def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [] - for res in response.choices: - message = convert_dict_to_message(res.message) + if not isinstance(response, dict): + response = response.dict() + for res in response["choices"]: + message = convert_dict_to_message(res["message"]) gen = ChatGeneration( message=message, - generation_info=dict(finish_reason=res.finish_reason), + generation_info=dict(finish_reason=res.get("finish_reason")), ) generations.append(gen) - token_usage = response.usage or {} + token_usage = response.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model_name} return ChatResult(generations=generations, llm_output=llm_output) From 6397e568ce186e8ed2a60575b9f7f854b2132b81 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 10:27:18 -0800 Subject: [PATCH 03/12] wip --- .../langchain/langchain/chat_models/openai.py | 45 ++++++++++++------- 1 file changed, 30 insertions(+), 15 deletions(-) diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index 178d0c9780178..f62ae12bc1575 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -47,6 +47,7 @@ from langchain.utils import get_from_dict_or_env, get_pydantic_field_names if TYPE_CHECKING: + import httpx import tiktoken @@ -74,12 +75,11 @@ def _create_retry_decorator( import openai errors = [ - ValueError - # openai.error.Timeout, - # openai.error.APIError, - # openai.error.APIConnectionError, - # openai.error.RateLimitError, - # openai.error.ServiceUnavailableError, + openai.APITimeoutError, + openai.APIError, + openai.APIConnectionError, + openai.RateLimitError, + openai.APIStatusError, ] return create_base_retry_decorator( error_types=errors, 
max_retries=llm.max_retries, run_manager=run_manager @@ -97,7 +97,7 @@ async def acompletion_with_retry( @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api - return await llm.client.acreate(**kwargs) + return await llm.async_client.create(**kwargs) return await _completion_with_retry(**kwargs) @@ -167,20 +167,23 @@ def is_lc_serializable(cls) -> bool: return True client: Any = None #: :meta private: + async_client: Any = None #: :meta private: model_name: str = Field(default="gpt-3.5-turbo", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" - openai_api_key: Optional[str] = None + openai_api_key: Optional[str] = Field(default=None, alias="api_key") """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" - openai_api_base: Optional[str] = None - openai_organization: Optional[str] = None + openai_api_base: Optional[str] = Field(default=None, alias="base_url") + openai_organization: Optional[str] = Field(default=None, alias="organization") # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None - request_timeout: Optional[Union[float, Tuple[float, float]]] = None + request_timeout: Union[float, Tuple[float, float], httpx.Timeout, None] = Field( + default=None, alias="timeout" + ) """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" @@ -265,7 +268,20 @@ def validate_environment(cls, values: Dict) -> Dict: "Please install it with `pip install openai`." 
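# Patch 03 also splits a dedicated async client out of `values["client"]`,
# since 1.x replaces `ChatCompletion.acreate` with an `AsyncOpenAI` client
# whose `create` is awaited directly. A runnable sketch, assuming openai>=1.0
# and OPENAI_API_KEY in the environment:
import asyncio

import openai


async def main() -> None:
    client = openai.AsyncOpenAI()
    response = await client.chat.completions.create(  # no more `.acreate`
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
    )
    print(response.choices[0].message.content)


asyncio.run(main())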
) try: - values["client"] = openai.OpenAI() + values["client"] = openai.OpenAI( + api_key=values["openai_api_key"], + timeout=values["request_timeout"], + max_retries=values["max_retries"], + organization=values["openai_organization"], + base_url=values["openai_api_base"], + ).chat.completions + values["async_client"] = openai.AsyncOpenAI( + api_key=values["openai_api_key"], + timeout=values["request_timeout"], + max_retries=values["max_retries"], + organization=values["openai_organization"], + base_url=values["openai_api_base"], + ).chat.completions except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " @@ -283,7 +299,6 @@ def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { "model": self.model_name, - # "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "n": self.n, @@ -299,7 +314,7 @@ def completion_with_retry( @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: - return self.client.chat.completions.create(**kwargs) + return self.client.create(**kwargs) return _completion_with_retry(**kwargs) @@ -379,7 +394,7 @@ def _create_message_dicts( message_dicts = [convert_message_to_dict(m) for m in messages] return message_dicts, params - def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: + def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult: generations = [] if not isinstance(response, dict): response = response.dict() From bf52a2fb14eceecbc440df83504c87c9f5a4a5f2 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 10:56:36 -0800 Subject: [PATCH 04/12] wip --- .../langchain/langchain/chat_models/openai.py | 40 +++++++++++++------ 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index f62ae12bc1575..df719928d15a4 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -3,6 +3,7 @@ import logging import sys +from importlib.metadata import version from typing import ( TYPE_CHECKING, Any, @@ -19,6 +20,8 @@ Union, ) +from packaging.version import Version, parse + from langchain.adapters.openai import convert_dict_to_message, convert_message_to_dict from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, @@ -44,7 +47,10 @@ ) from langchain.schema.output import ChatGenerationChunk from langchain.schema.runnable import Runnable -from langchain.utils import get_from_dict_or_env, get_pydantic_field_names +from langchain.utils import ( + get_from_dict_or_env, + get_pydantic_field_names, +) if TYPE_CHECKING: import httpx @@ -75,11 +81,11 @@ def _create_retry_decorator( import openai errors = [ - openai.APITimeoutError, - openai.APIError, - openai.APIConnectionError, - openai.RateLimitError, - openai.APIStatusError, + openai.error.Timeout, + openai.error.APIError, + openai.error.APIConnectionError, + openai.error.RateLimitError, + openai.error.ServiceUnavailableError, ] return create_base_retry_decorator( error_types=errors, max_retries=llm.max_retries, run_manager=run_manager @@ -92,6 +98,9 @@ async def acompletion_with_retry( **kwargs: Any, ) -> Any: """Use tenacity to retry the async completion call.""" + if _is_openai_v1(): + return await llm.async_client.acreate(**kwargs) + retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator @@ -126,6 +135,11 @@ def 
_convert_delta_to_message_chunk( return default_class(content=content) +def _is_openai_v1() -> bool: + _version = parse(version("openai")) + return _version >= Version("1.0.0") + + class ChatOpenAI(BaseChatModel): """`OpenAI` Chat large language models API. @@ -267,7 +281,8 @@ def validate_environment(cls, values: Dict) -> Dict: "Could not import openai python package. " "Please install it with `pip install openai`." ) - try: + + if _is_openai_v1(): values["client"] = openai.OpenAI( api_key=values["openai_api_key"], timeout=values["request_timeout"], @@ -282,12 +297,8 @@ def validate_environment(cls, values: Dict) -> Dict: organization=values["openai_organization"], base_url=values["openai_api_base"], ).chat.completions - except AttributeError: - raise ValueError( - "`openai` has no `ChatCompletion` attribute, this is likely " - "due to an old version of the openai package. Try upgrading it " - "with `pip install --upgrade openai`." - ) + else: + values["client"] = openai.ChatCompletion if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: @@ -310,6 +321,9 @@ def completion_with_retry( self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any ) -> Any: """Use tenacity to retry the completion call.""" + if _is_openai_v1(): + return self.client.create(**kwargs) + retry_decorator = _create_retry_decorator(self, run_manager=run_manager) @retry_decorator From 623d96de1b0f54219d4c01947584acef495cbeff Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 11:07:56 -0800 Subject: [PATCH 05/12] async --- libs/langchain/langchain/chat_models/openai.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index df719928d15a4..fdb5778fb8496 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -99,14 +99,14 @@ async def acompletion_with_retry( ) -> Any: """Use tenacity to retry the async completion call.""" if _is_openai_v1(): - return await llm.async_client.acreate(**kwargs) + return await llm.async_client.create(**kwargs) retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api - return await llm.async_client.create(**kwargs) + return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) @@ -284,11 +284,11 @@ def validate_environment(cls, values: Dict) -> Dict: if _is_openai_v1(): values["client"] = openai.OpenAI( - api_key=values["openai_api_key"], - timeout=values["request_timeout"], - max_retries=values["max_retries"], - organization=values["openai_organization"], - base_url=values["openai_api_base"], + # api_key=values["openai_api_key"], + # timeout=values["request_timeout"], + # max_retries=values["max_retries"], + # organization=values["openai_organization"], + # base_url=values["openai_api_base"], ).chat.completions values["async_client"] = openai.AsyncOpenAI( api_key=values["openai_api_key"], From eb957ba98d751f790a37102c768d50b67640eac0 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 11:20:12 -0800 Subject: [PATCH 06/12] works --- .../langchain/langchain/chat_models/openai.py | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/libs/langchain/langchain/chat_models/openai.py 
b/libs/langchain/langchain/chat_models/openai.py index fdb5778fb8496..e1600b6cb4780 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -284,18 +284,18 @@ def validate_environment(cls, values: Dict) -> Dict: if _is_openai_v1(): values["client"] = openai.OpenAI( - # api_key=values["openai_api_key"], - # timeout=values["request_timeout"], - # max_retries=values["max_retries"], - # organization=values["openai_organization"], - # base_url=values["openai_api_base"], + api_key=values["openai_api_key"], + timeout=values["request_timeout"], + max_retries=values["max_retries"], + organization=values["openai_organization"], + base_url=values["openai_api_base"] or None, ).chat.completions values["async_client"] = openai.AsyncOpenAI( api_key=values["openai_api_key"], timeout=values["request_timeout"], max_retries=values["max_retries"], organization=values["openai_organization"], - base_url=values["openai_api_base"], + base_url=values["openai_api_base"] or None, ).chat.completions else: values["client"] = openai.ChatCompletion @@ -487,8 +487,18 @@ def _client_params(self) -> Dict[str, Any]: # "api_key": self.openai_api_key, # "api_base": self.openai_api_base, # "organization": self.openai_organization, + } + openai_creds: Dict[str, Any] = { "model": self.model_name, } + if not _is_openai_v1(): + openai_creds.update( + { + "api_key": self.openai_api_key, + "api_base": self.openai_api_base, + "organization": self.openai_organization, + } + ) if self.openai_proxy: import openai From 187fedb456d057e625104f533f8a7786aecf3067 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 11:20:59 -0800 Subject: [PATCH 07/12] clean --- libs/langchain/langchain/chat_models/openai.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index e1600b6cb4780..c3f3ce5c37664 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -483,11 +483,6 @@ def _identifying_params(self) -> Dict[str, Any]: @property def _client_params(self) -> Dict[str, Any]: """Get the parameters used for the openai client.""" - openai_creds: Dict[str, Any] = { - # "api_key": self.openai_api_key, - # "api_base": self.openai_api_base, - # "organization": self.openai_organization, - } openai_creds: Dict[str, Any] = { "model": self.model_name, } From d02241bd58c11d0b68752aed098c929ae8cbec91 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 11:44:51 -0800 Subject: [PATCH 08/12] stream --- libs/langchain/langchain/chat_models/openai.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index df58195a1b505..62097616b6d94 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -118,6 +118,8 @@ def _convert_delta_to_message_chunk( content = _dict.get("content") or "" if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} + if additional_kwargs["function_call"]["name"] is None: + additional_kwargs["function_call"]["name"] = "" else: additional_kwargs = {} @@ -202,7 +204,7 @@ def is_lc_serializable(cls) -> bool: default=None, alias="timeout" ) """Timeout for requests to OpenAI completion API. 
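# Patches 04-07 gate both client construction and `_client_params` on the
# installed SDK major, so a single codebase serves 0.x and 1.x installs. A
# compact sketch of the check, mirroring `_is_openai_v1` from the diff
# (requires the `packaging` distribution, which the diff also imports):
from importlib.metadata import version

from packaging.version import Version, parse


def is_openai_v1() -> bool:
    return parse(version("openai")) >= Version("1.0.0")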
Default is 600 seconds.""" - max_retries: int = 6 + max_retries: int = 2 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" @@ -363,6 +365,8 @@ def _stream( for chunk in self.completion_with_retry( messages=message_dicts, run_manager=run_manager, **params ): + if not isinstance(chunk, dict): + chunk = chunk.dict() if len(chunk["choices"]) == 0: continue choice = chunk["choices"][0] @@ -440,6 +444,8 @@ async def _astream( async for chunk in await acompletion_with_retry( self, messages=message_dicts, run_manager=run_manager, **params ): + if not isinstance(chunk, dict): + chunk = chunk.dict() if len(chunk["choices"]) == 0: continue choice = chunk["choices"][0] From ea819b2dcee552fc6e0845a0d8100471e2b8e270 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 12:33:24 -0800 Subject: [PATCH 09/12] azure --- .../langchain/chat_models/azure_openai.py | 71 ++++++++++++------- libs/langchain/poetry.lock | 48 +++++++++---- libs/langchain/pyproject.toml | 4 +- 3 files changed, 82 insertions(+), 41 deletions(-) diff --git a/libs/langchain/langchain/chat_models/azure_openai.py b/libs/langchain/langchain/chat_models/azure_openai.py index 9e232224deb58..fe52489e10db4 100644 --- a/libs/langchain/langchain/chat_models/azure_openai.py +++ b/libs/langchain/langchain/chat_models/azure_openai.py @@ -2,10 +2,10 @@ from __future__ import annotations import logging -from typing import Any, Dict, Mapping +from typing import Any, Dict, Union -from langchain.chat_models.openai import ChatOpenAI -from langchain.pydantic_v1 import root_validator +from langchain.chat_models.openai import ChatOpenAI, _is_openai_v1 +from langchain.pydantic_v1 import BaseModel, Field, root_validator from langchain.schema import ChatResult from langchain.utils import get_from_dict_or_env @@ -51,13 +51,13 @@ class AzureChatOpenAI(ChatOpenAI): in, even if not explicitly saved on this class. """ - deployment_name: str = "" + deployment_name: str = Field(default="", alias="azure_deployment") model_version: str = "" openai_api_type: str = "" - openai_api_base: str = "" - openai_api_version: str = "" - openai_api_key: str = "" - openai_organization: str = "" + openai_api_base: str = Field(default="", alias="azure_endpoint") + openai_api_version: str = Field(default="", alias="api_version") + openai_api_key: str = Field(default="", alias="api_key") + openai_organization: str = Field(default="", alias="organization") openai_proxy: str = "" @root_validator() @@ -101,14 +101,27 @@ def validate_environment(cls, values: Dict) -> Dict: "Could not import openai python package. " "Please install it with `pip install openai`." 
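# Patch 08 normalizes streamed chunks the same way responses were normalized:
# 1.x yields ChatCompletionChunk objects, and some deployments emit chunks
# with an empty `choices` list. A hedged sketch (openai>=1.0 assumed):
import openai

client = openai.OpenAI()
stream = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "count to three"}],
    stream=True,
)
for chunk in stream:
    data = chunk if isinstance(chunk, dict) else chunk.dict()
    if not data["choices"]:  # skip keepalive/filter chunks with no choices
        continue
    print(data["choices"][0]["delta"].get("content") or "", end="")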
) - try: + if _is_openai_v1(): + values["client"] = openai.AzureOpenAI( + azure_endpoint=values["openai_api_base"], + api_key=values["openai_api_key"], + timeout=values["request_timeout"], + max_retries=values["max_retries"], + organization=values["openai_organization"], + api_version=values["openai_api_version"], + azure_deployment=values["deployment_name"], + ).chat.completions + values["async_client"] = openai.AsyncAzureOpenAI( + azure_endpoint=values["openai_api_base"], + api_key=values["openai_api_key"], + timeout=values["request_timeout"], + max_retries=values["max_retries"], + organization=values["openai_organization"], + api_version=values["openai_api_version"], + azure_deployment=values["deployment_name"], + ).chat.completions + else: values["client"] = openai.ChatCompletion - except AttributeError: - raise ValueError( - "`openai` has no `ChatCompletion` attribute, this is likely " - "due to an old version of the openai package. Try upgrading it " - "with `pip install --upgrade openai`." - ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: @@ -118,10 +131,13 @@ def validate_environment(cls, values: Dict) -> Dict: @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" - return { - **super()._default_params, - "engine": self.deployment_name, - } + if _is_openai_v1(): + return super()._default_params + else: + return { + **super()._default_params, + "engine": self.deployment_name, + } @property def _identifying_params(self) -> Dict[str, Any]: @@ -131,11 +147,14 @@ def _identifying_params(self) -> Dict[str, Any]: @property def _client_params(self) -> Dict[str, Any]: """Get the config params used for the openai client.""" - return { - **super()._client_params, - "api_type": self.openai_api_type, - "api_version": self.openai_api_version, - } + if _is_openai_v1(): + return super()._client_params + else: + return { + **super()._client_params, + "api_type": self.openai_api_type, + "api_version": self.openai_api_version, + } @property def _llm_type(self) -> str: @@ -148,7 +167,9 @@ def lc_attributes(self) -> Dict[str, Any]: "openai_api_version": self.openai_api_version, } - def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: + def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult: + if not isinstance(response, dict): + response = response.dict() for res in response["choices"]: if res.get("finish_reason", None) == "content_filter": raise ValueError( diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index 2fa7da10fcc3e..c3fa5296a4a2e 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -2892,7 +2892,7 @@ files = [ {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b72b802496cccbd9b31acea72b6f87e7771ccfd7f7927437d592e5c92ed703c"}, {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:527cd90ba3d8d7ae7dceb06fda619895768a46a1b4e423bdb24c1969823b8362"}, {file = "greenlet-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:37f60b3a42d8b5499be910d1267b24355c495064f271cfe74bf28b17b099133c"}, - {file = "greenlet-3.0.0-cp311-universal2-macosx_10_9_universal2.whl", hash = "sha256:c3692ecf3fe754c8c0f2c95ff19626584459eab110eaab66413b1e7425cd84e9"}, + {file = "greenlet-3.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1482fba7fbed96ea7842b5a7fc11d61727e8be75a077e603e8ab49d24e234383"}, {file = "greenlet-3.0.0-cp312-cp312-macosx_13_0_arm64.whl", 
hash = "sha256:be557119bf467d37a8099d91fbf11b2de5eb1fd5fc5b91598407574848dc910f"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73b2f1922a39d5d59cc0e597987300df3396b148a9bd10b76a058a2f2772fc04"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1e22c22f7826096ad503e9bb681b05b8c1f5a8138469b255eb91f26a76634f2"}, @@ -2902,7 +2902,6 @@ files = [ {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:952256c2bc5b4ee8df8dfc54fc4de330970bf5d79253c863fb5e6761f00dda35"}, {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:269d06fa0f9624455ce08ae0179430eea61085e3cf6457f05982b37fd2cefe17"}, {file = "greenlet-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9adbd8ecf097e34ada8efde9b6fec4dd2a903b1e98037adf72d12993a1c80b51"}, - {file = "greenlet-3.0.0-cp312-universal2-macosx_10_9_universal2.whl", hash = "sha256:553d6fb2324e7f4f0899e5ad2c427a4579ed4873f42124beba763f16032959af"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b5ce7f40f0e2f8b88c28e6691ca6806814157ff05e794cdd161be928550f4c"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf94aa539e97a8411b5ea52fc6ccd8371be9550c4041011a091eb8b3ca1d810"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80dcd3c938cbcac986c5c92779db8e8ce51a89a849c135172c88ecbdc8c056b7"}, @@ -2935,6 +2934,7 @@ files = [ {file = "greenlet-3.0.0-cp39-cp39-win32.whl", hash = "sha256:0d3f83ffb18dc57243e0151331e3c383b05e5b6c5029ac29f754745c800f8ed9"}, {file = "greenlet-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:831d6f35037cf18ca5e80a737a27d822d87cd922521d18ed3dbc8a6967be50ce"}, {file = "greenlet-3.0.0-cp39-universal2-macosx_11_0_x86_64.whl", hash = "sha256:a048293392d4e058298710a54dfaefcefdf49d287cd33fb1f7d63d55426e4355"}, + {file = "greenlet-3.0.0.tar.gz", hash = "sha256:19834e3f91f485442adc1ee440171ec5d9a4840a1f7bd5ed97833544719ce10b"}, ] [package.extras] @@ -4557,6 +4557,16 @@ files = [ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, @@ -5713,25 +5723,25 @@ sympy = "*" [[package]] name = "openai" -version = "0.27.10" -description = "Python client library for the OpenAI API" +version = "1.0.1" +description = "Client library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-0.27.10-py3-none-any.whl", hash = "sha256:beabd1757e3286fa166dde3b70ebb5ad8081af046876b47c14c41e203ed22a14"}, - {file = "openai-0.27.10.tar.gz", hash = "sha256:60e09edf7100080283688748c6803b7b3b52d5a55d21890f3815292a0552d83b"}, + {file = "openai-1.0.1-py3-none-any.whl", hash = "sha256:3bf0152da66821a3f539c93d1d3069f7ebc16d730384e836a58de77895829525"}, + {file = "openai-1.0.1.tar.gz", hash = "sha256:fe25079fc1264bf1356e9db80d1617de1c4547bfab4221ccfb9b87d1e7733b48"}, ] [package.dependencies] -aiohttp = "*" -requests = ">=2.20" -tqdm = "*" +anyio = ">=3.5.0,<4" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +tqdm = ">4" +typing-extensions = ">=4.5,<5" [package.extras] -datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] -dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] -embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] -wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "openapi-pydantic" @@ -7648,6 +7658,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -7655,8 +7666,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -7673,6 +7691,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -7680,6 +7699,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -11007,4 +11027,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "1c796fb6c531ed9803594973b7296fba64a22605f602f9646677bc07c5a39a85" +content-hash = "2c53d5b972f328cd4ae3522ae863fe29537f4d60ba58f84d05f199df96fd0814" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 9b1857668b0dc..971e437d7b3ca 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -46,7 +46,7 @@ dataclasses-json = ">= 0.5.7, < 0.7" tensorflow-text = {version = "^2.11.0", optional = true, python = "^3.10, <3.12"} tenacity = "^8.1.0" cohere = {version = "^4", optional = true} -openai = {version = "^0", optional = true} +openai = {version = ">=0, <2", optional = true} nlpcloud = {version = "^1", optional = true} nomic = {version = "^1.0.43", optional = true} huggingface_hub = {version = "^0", optional = true} @@ -186,7 +186,7 @@ optional = true # https://github.com/langchain-ai/langchain/blob/master/.github/CONTRIBUTING.md#working-with-optional-dependencies pytest-vcr = "^1.0.2" wrapt = "^1.15.0" -openai = "^0.27.4" +openai = "^1.0.0" python-dotenv = "^1.0.0" cassio = "^0.1.0" tiktoken = "^0.3.2" From e22e0b7cb0821621650012284c06012136aa41e3 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 12:39:14 -0800 Subject: [PATCH 10/12] lint --- libs/langchain/langchain/chat_models/konko.py | 6 +++--- .../tests/unit_tests/chat_models/test_azureopenai.py | 4 +--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/libs/langchain/langchain/chat_models/konko.py b/libs/langchain/langchain/chat_models/konko.py index aeb14c187ac3f..6c5c5ef2db50d 100644 --- a/libs/langchain/langchain/chat_models/konko.py +++ b/libs/langchain/langchain/chat_models/konko.py @@ -21,8 +21,8 @@ from langchain.callbacks.manager import ( CallbackManagerForLLMRun, ) -from langchain.chat_models.base import _generate_from_stream -from langchain.chat_models.openai import ChatOpenAI, _convert_delta_to_message_chunk +from langchain.chat_models.base import BaseChatModel, _generate_from_stream +from langchain.chat_models.openai import _convert_delta_to_message_chunk from langchain.pydantic_v1 import Field, root_validator from langchain.schema import ChatGeneration, ChatResult from langchain.schema.messages import AIMessageChunk, BaseMessage @@ -35,7 +35,7 @@ logger = logging.getLogger(__name__) -class ChatKonko(ChatOpenAI): +class 
ChatKonko(BaseChatModel): """`ChatKonko` Chat large language models API. To use, you should have the ``konko`` python package installed, and the diff --git a/libs/langchain/tests/unit_tests/chat_models/test_azureopenai.py b/libs/langchain/tests/unit_tests/chat_models/test_azureopenai.py index 921ec0ad68bf6..fd1ec775b00c9 100644 --- a/libs/langchain/tests/unit_tests/chat_models/test_azureopenai.py +++ b/libs/langchain/tests/unit_tests/chat_models/test_azureopenai.py @@ -1,6 +1,5 @@ import json import os -from typing import Any, Mapping, cast from unittest import mock import pytest @@ -48,9 +47,8 @@ def test_model_name_set_on_chat_result_when_present_in_response( """ # convert sample_response_text to instance of Mapping[str, Any] sample_response = json.loads(sample_response_text) - mock_response = cast(Mapping[str, Any], sample_response) mock_chat = AzureChatOpenAI() - chat_result = mock_chat._create_chat_result(mock_response) + chat_result = mock_chat._create_chat_result(sample_response) assert ( chat_result.llm_output is not None and chat_result.llm_output["model_name"] == model_name From de3a8b01d884e28551d98597e9f0124509de059a Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 12:47:54 -0800 Subject: [PATCH 11/12] undo bump --- libs/langchain/poetry.lock | 48 ++++++++++------------------------- libs/langchain/pyproject.toml | 4 +-- 2 files changed, 16 insertions(+), 36 deletions(-) diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index c3fa5296a4a2e..2fa7da10fcc3e 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -2892,7 +2892,7 @@ files = [ {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b72b802496cccbd9b31acea72b6f87e7771ccfd7f7927437d592e5c92ed703c"}, {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:527cd90ba3d8d7ae7dceb06fda619895768a46a1b4e423bdb24c1969823b8362"}, {file = "greenlet-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:37f60b3a42d8b5499be910d1267b24355c495064f271cfe74bf28b17b099133c"}, - {file = "greenlet-3.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1482fba7fbed96ea7842b5a7fc11d61727e8be75a077e603e8ab49d24e234383"}, + {file = "greenlet-3.0.0-cp311-universal2-macosx_10_9_universal2.whl", hash = "sha256:c3692ecf3fe754c8c0f2c95ff19626584459eab110eaab66413b1e7425cd84e9"}, {file = "greenlet-3.0.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:be557119bf467d37a8099d91fbf11b2de5eb1fd5fc5b91598407574848dc910f"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73b2f1922a39d5d59cc0e597987300df3396b148a9bd10b76a058a2f2772fc04"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1e22c22f7826096ad503e9bb681b05b8c1f5a8138469b255eb91f26a76634f2"}, @@ -2902,6 +2902,7 @@ files = [ {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:952256c2bc5b4ee8df8dfc54fc4de330970bf5d79253c863fb5e6761f00dda35"}, {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:269d06fa0f9624455ce08ae0179430eea61085e3cf6457f05982b37fd2cefe17"}, {file = "greenlet-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9adbd8ecf097e34ada8efde9b6fec4dd2a903b1e98037adf72d12993a1c80b51"}, + {file = "greenlet-3.0.0-cp312-universal2-macosx_10_9_universal2.whl", hash = "sha256:553d6fb2324e7f4f0899e5ad2c427a4579ed4873f42124beba763f16032959af"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c6b5ce7f40f0e2f8b88c28e6691ca6806814157ff05e794cdd161be928550f4c"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf94aa539e97a8411b5ea52fc6ccd8371be9550c4041011a091eb8b3ca1d810"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80dcd3c938cbcac986c5c92779db8e8ce51a89a849c135172c88ecbdc8c056b7"}, @@ -2934,7 +2935,6 @@ files = [ {file = "greenlet-3.0.0-cp39-cp39-win32.whl", hash = "sha256:0d3f83ffb18dc57243e0151331e3c383b05e5b6c5029ac29f754745c800f8ed9"}, {file = "greenlet-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:831d6f35037cf18ca5e80a737a27d822d87cd922521d18ed3dbc8a6967be50ce"}, {file = "greenlet-3.0.0-cp39-universal2-macosx_11_0_x86_64.whl", hash = "sha256:a048293392d4e058298710a54dfaefcefdf49d287cd33fb1f7d63d55426e4355"}, - {file = "greenlet-3.0.0.tar.gz", hash = "sha256:19834e3f91f485442adc1ee440171ec5d9a4840a1f7bd5ed97833544719ce10b"}, ] [package.extras] @@ -4557,16 +4557,6 @@ files = [ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, @@ -5723,25 +5713,25 @@ sympy = "*" [[package]] name = 
"openai" -version = "1.0.1" -description = "Client library for the openai API" +version = "0.27.10" +description = "Python client library for the OpenAI API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.0.1-py3-none-any.whl", hash = "sha256:3bf0152da66821a3f539c93d1d3069f7ebc16d730384e836a58de77895829525"}, - {file = "openai-1.0.1.tar.gz", hash = "sha256:fe25079fc1264bf1356e9db80d1617de1c4547bfab4221ccfb9b87d1e7733b48"}, + {file = "openai-0.27.10-py3-none-any.whl", hash = "sha256:beabd1757e3286fa166dde3b70ebb5ad8081af046876b47c14c41e203ed22a14"}, + {file = "openai-0.27.10.tar.gz", hash = "sha256:60e09edf7100080283688748c6803b7b3b52d5a55d21890f3815292a0552d83b"}, ] [package.dependencies] -anyio = ">=3.5.0,<4" -distro = ">=1.7.0,<2" -httpx = ">=0.23.0,<1" -pydantic = ">=1.9.0,<3" -tqdm = ">4" -typing-extensions = ">=4.5,<5" +aiohttp = "*" +requests = ">=2.20" +tqdm = "*" [package.extras] -datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] +datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] +dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] +embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] +wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] [[package]] name = "openapi-pydantic" @@ -7658,7 +7648,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -7666,15 +7655,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = 
"PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -7691,7 +7673,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -7699,7 +7680,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -11027,4 +11007,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" 
-content-hash = "2c53d5b972f328cd4ae3522ae863fe29537f4d60ba58f84d05f199df96fd0814" +content-hash = "1c796fb6c531ed9803594973b7296fba64a22605f602f9646677bc07c5a39a85" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 971e437d7b3ca..9b1857668b0dc 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -46,7 +46,7 @@ dataclasses-json = ">= 0.5.7, < 0.7" tensorflow-text = {version = "^2.11.0", optional = true, python = "^3.10, <3.12"} tenacity = "^8.1.0" cohere = {version = "^4", optional = true} -openai = {version = ">=0, <2", optional = true} +openai = {version = "^0", optional = true} nlpcloud = {version = "^1", optional = true} nomic = {version = "^1.0.43", optional = true} huggingface_hub = {version = "^0", optional = true} @@ -186,7 +186,7 @@ optional = true # https://github.com/langchain-ai/langchain/blob/master/.github/CONTRIBUTING.md#working-with-optional-dependencies pytest-vcr = "^1.0.2" wrapt = "^1.15.0" -openai = "^1.0.0" +openai = "^0.27.4" python-dotenv = "^1.0.0" cassio = "^0.1.0" tiktoken = "^0.3.2" From 25db791682cd00e8a2d77a6527629a6d3187c011 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Mon, 6 Nov 2023 13:04:43 -0800 Subject: [PATCH 12/12] test --- libs/langchain/langchain/chat_models/openai.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index 62097616b6d94..5fc9e6c41e00d 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ b/libs/langchain/langchain/chat_models/openai.py @@ -118,7 +118,10 @@ def _convert_delta_to_message_chunk( content = _dict.get("content") or "" if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} - if additional_kwargs["function_call"]["name"] is None: + if ( + "name" in additional_kwargs["function_call"] + and additional_kwargs["function_call"]["name"] is None + ): additional_kwargs["function_call"]["name"] = "" else: additional_kwargs = {}
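# Patch 12 tightens the streaming delta merge: a streamed `function_call`
# delta may omit "name" entirely or carry an explicit None, and only the
# latter should be rewritten to "" so chunk-by-chunk string concatenation of
# the name works. A hedged sketch of the guard (helper name is illustrative):
from typing import Any, Dict


def patch_function_call(function_call: Dict[str, Any]) -> Dict[str, Any]:
    fc = dict(function_call)
    if "name" in fc and fc["name"] is None:
        fc["name"] = ""  # present-but-None becomes an empty, concatenable string
    return fc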