Skip to content

Commit

Permalink
openai[patch]: type reasoning_effort (#28825)
Browse files Browse the repository at this point in the history
  • Loading branch information
baskaryan authored Dec 19, 2024
1 parent 6a37899 commit 1378ddf
Show file tree
Hide file tree
Showing 5 changed files with 29 additions and 18 deletions.
11 changes: 11 additions & 0 deletions libs/partners/openai/langchain_openai/chat_models/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -454,6 +454,16 @@ class BaseChatOpenAI(BaseChatModel):
"""Total probability mass of tokens to consider at each step."""
max_tokens: Optional[int] = Field(default=None)
"""Maximum number of tokens to generate."""
reasoning_effort: Optional[str] = None
"""Constrains effort on reasoning for reasoning models.
o1 models only.
Currently supported values are low, medium, and high. Reducing reasoning effort
can result in faster responses and fewer tokens used on reasoning in a response.
.. versionadded:: 0.2.14
"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
Expand Down Expand Up @@ -599,6 +609,7 @@ def _default_params(self) -> Dict[str, Any]:
"stop": self.stop or None, # also exclude empty list for this
"max_tokens": self.max_tokens,
"extra_body": self.extra_body,
"reasoning_effort": self.reasoning_effort,
}

params = {
Expand Down
8 changes: 4 additions & 4 deletions libs/partners/openai/poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion libs/partners/openai/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ ignore_missing_imports = true
[tool.poetry.dependencies]
python = ">=3.9,<4.0"
langchain-core = "^0.3.27"
openai = "^1.55.3"
openai = "^1.58.1"
tiktoken = ">=0.7,<1"

[tool.ruff.lint]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1089,19 +1089,13 @@ async def test_astream_response_format() -> None:
pass


def test_o1_max_tokens() -> None:
response = ChatOpenAI(model="o1-mini", max_tokens=10).invoke("how are you") # type: ignore[call-arg]
assert isinstance(response, AIMessage)

response = ChatOpenAI(model="gpt-4o", max_completion_tokens=10).invoke(
"how are you"
)
assert isinstance(response, AIMessage)


def test_developer_message() -> None:
llm = ChatOpenAI(model="o1", max_tokens=10) # type: ignore[call-arg]
response = llm.invoke(
@pytest.mark.parametrize("use_max_completion_tokens", [True, False])
def test_o1(use_max_completion_tokens: bool) -> None:
if use_max_completion_tokens:
kwargs: dict = {"max_completion_tokens": 10}
else:
kwargs = {"max_tokens": 10}
response = ChatOpenAI(model="o1", reasoning_effort="low", **kwargs).invoke(
[
{"role": "developer", "content": "respond in all caps"},
{"role": "user", "content": "HOW ARE YOU"},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -882,3 +882,9 @@ def test__get_request_payload() -> None:
}
payload = llm._get_request_payload(messages)
assert payload == expected


def test_init_o1() -> None:
    """Constructing an o1 model with ``reasoning_effort`` must emit no warnings.

    Guards against the init-time validators treating ``reasoning_effort`` as an
    unknown or unsupported parameter for ``o1`` models.
    """
    import warnings  # local import: the module's import block is outside this hunk

    # ``pytest.warns(None)`` is deprecated since pytest 7.0 and removed in
    # pytest 8 (hence the old ``type: ignore[call-overload]``).  Use the stdlib
    # warnings machinery to assert that no warning is raised during init.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        ChatOpenAI(model="o1", reasoning_effort="medium")
    assert len(record) == 0

0 comments on commit 1378ddf

Please sign in to comment.