From f071581aea42f4ccc8b9dc224d05f6422da994f7 Mon Sep 17 00:00:00 2001
From: Roman Solomatin <36135455+Samoed@users.noreply.github.com>
Date: Sat, 13 Jul 2024 04:53:33 +0500
Subject: [PATCH] openai[patch]: update openai params (#23691)

**Description:** Explicitly add parameters from openai API

- [X] **Lint and test**: Run `make format`, `make lint` and `make test`
  from the root of the package(s) you've modified. See contribution
  guidelines for more: https://python.langchain.com/docs/contributing/

---------

Co-authored-by: Erick Friis
---
 .../langchain_openai/chat_models/base.py      | 23 +++++++++++++++++++
 .../openai/langchain_openai/llms/base.py      |  7 ++++++
 2 files changed, 30 insertions(+)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index fc94175efa784..6f6f19f71fc4b 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -319,10 +319,26 @@ class BaseChatOpenAI(BaseChatModel):
         None."""
     max_retries: int = 2
     """Maximum number of retries to make when generating."""
+    presence_penalty: Optional[float] = None
+    """Penalizes repeated tokens."""
+    frequency_penalty: Optional[float] = None
+    """Penalizes repeated tokens according to frequency."""
+    seed: Optional[int] = None
+    """Seed for generation."""
+    logprobs: Optional[bool] = False
+    """Whether to return logprobs."""
+    top_logprobs: Optional[int] = None
+    """Number of most likely tokens to return at each token position, each with
+    an associated log probability. `logprobs` must be set to true
+    if this parameter is used."""
+    logit_bias: Optional[Dict[int, int]] = None
+    """Modify the likelihood of specified tokens appearing in the completion."""
     streaming: bool = False
     """Whether to stream the results or not."""
     n: int = 1
     """Number of chat completions to generate for each prompt."""
+    top_p: Optional[float] = None
+    """Total probability mass of tokens to consider at each step."""
     max_tokens: Optional[int] = None
     """Maximum number of tokens to generate."""
     tiktoken_model_name: Optional[str] = None
@@ -444,6 +460,13 @@ def _default_params(self) -> Dict[str, Any]:
             "stream": self.streaming,
             "n": self.n,
             "temperature": self.temperature,
+            "presence_penalty": self.presence_penalty,
+            "frequency_penalty": self.frequency_penalty,
+            "seed": self.seed,
+            "top_p": self.top_p,
+            "logprobs": self.logprobs,
+            "top_logprobs": self.top_logprobs,
+            "logit_bias": self.logit_bias,
             **self.model_kwargs,
         }
         if self.max_tokens is not None:
diff --git a/libs/partners/openai/langchain_openai/llms/base.py b/libs/partners/openai/langchain_openai/llms/base.py
index ca3ff4eafae7e..1f66c1c2f82fb 100644
--- a/libs/partners/openai/langchain_openai/llms/base.py
+++ b/libs/partners/openai/langchain_openai/llms/base.py
@@ -110,6 +110,11 @@ class BaseOpenAI(BaseLLM):
     """Adjust the probability of specific tokens being generated."""
     max_retries: int = 2
     """Maximum number of retries to make when generating."""
+    seed: Optional[int] = None
+    """Seed for generation."""
+    logprobs: Optional[int] = None
+    """Include the log probabilities on the logprobs most likely output tokens,
+    as well as the chosen tokens."""
     streaming: bool = False
     """Whether to stream the results or not."""
     allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
@@ -220,6 +225,8 @@ def _default_params(self) -> Dict[str, Any]:
             "presence_penalty": self.presence_penalty,
             "n": self.n,
             "logit_bias": self.logit_bias,
+            "seed": self.seed,
+            "logprobs": self.logprobs,
         }
         if self.max_tokens is not None: