openai[patch]: update openai params (#23691)
**Description:** Explicitly add parameters from the OpenAI API (such as `presence_penalty`, `seed`, `logprobs`, and `top_logprobs`) as typed fields instead of relying on `model_kwargs`.



- [X] **Lint and test**: Run `make format`, `make lint` and `make test`
from the root of the package(s) you've modified. See contribution
guidelines for more: https://python.langchain.com/docs/contributing/

---------

Co-authored-by: Erick Friis <[email protected]>
Samoed and efriis authored Jul 12, 2024
1 parent f0a7581 commit f071581
Showing 2 changed files with 30 additions and 0 deletions.
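With these additions, OpenAI sampling controls that previously had to be routed through `model_kwargs` become explicit, typed fields. A minimal usage sketch of the new constructor arguments (the model name and values are illustrative, and `OPENAI_API_KEY` is assumed to be set in the environment):

```python
from langchain_openai import ChatOpenAI

# Parameters such as seed, presence_penalty, and top_p can now be passed
# directly to the constructor rather than through model_kwargs.
llm = ChatOpenAI(
    model="gpt-3.5-turbo",   # illustrative model name
    temperature=0.7,
    seed=42,
    presence_penalty=0.5,
    frequency_penalty=0.2,
    top_p=0.9,
)
response = llm.invoke("Write one sentence about explicit parameters.")
print(response.content)
```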
23 changes: 23 additions & 0 deletions libs/partners/openai/langchain_openai/chat_models/base.py
@@ -319,10 +319,26 @@ class BaseChatOpenAI(BaseChatModel):
None."""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
presence_penalty: Optional[float] = None
"""Penalizes repeated tokens."""
frequency_penalty: Optional[float] = None
"""Penalizes repeated tokens according to frequency."""
seed: Optional[int] = None
"""Seed for generation"""
logprobs: Optional[bool] = False
"""Whether to return logprobs."""
top_logprobs: Optional[int] = None
"""Number of most likely tokens to return at each token position, each with
an associated log probability. `logprobs` must be set to true
if this parameter is used."""
logit_bias: Optional[Dict[int, int]] = None
"""Modify the likelihood of specified tokens appearing in the completion."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
tiktoken_model_name: Optional[str] = None
@@ -444,6 +460,13 @@ def _default_params(self) -> Dict[str, Any]:
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"seed": self.seed,
"top_p": self.top_p,
"logprobs": self.logprobs,
"top_logprobs": self.top_logprobs,
"logit_bias": self.logit_bias,
**self.model_kwargs,
}
if self.max_tokens is not None:
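Since `_default_params` now forwards `logprobs` and `top_logprobs` with the chat completions request, token-level log probabilities can be requested directly. A sketch of how this is expected to surface (values are illustrative, `OPENAI_API_KEY` assumed set):

```python
from langchain_openai import ChatOpenAI

# logprobs/top_logprobs are included in _default_params and sent with the request.
llm = ChatOpenAI(model="gpt-3.5-turbo", logprobs=True, top_logprobs=3)
msg = llm.invoke("Say hello")

# With logprobs enabled, the OpenAI logprobs payload is surfaced in the
# message's response metadata.
print(msg.response_metadata["logprobs"])
```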
7 changes: 7 additions & 0 deletions libs/partners/openai/langchain_openai/llms/base.py
@@ -110,6 +110,11 @@ class BaseOpenAI(BaseLLM):
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
seed: Optional[int] = None
"""Seed for generation"""
logprobs: Optional[int] = None
"""Include the log probabilities on the logprobs most likely output tokens,
as well as the chosen tokens."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
@@ -220,6 +225,8 @@ def _default_params(self) -> Dict[str, Any]:
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
"seed": self.seed,
"logprobs": self.logprobs,
}

if self.max_tokens is not None:
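A corresponding sketch for the completions-style `OpenAI` LLM, where `logprobs` is an integer count of top tokens rather than a boolean (model name and values are illustrative, `OPENAI_API_KEY` assumed set):

```python
from langchain_openai import OpenAI

# seed and logprobs are now explicit fields on the completions LLM as well.
llm = OpenAI(model="gpt-3.5-turbo-instruct", seed=7, logprobs=2)
print(llm.invoke("The capital of France is"))
```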
