From 6c3fec9637a9a5f38efc3fae45e94cefe89a2c9f Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Mon, 22 Apr 2024 10:42:26 +0800 Subject: [PATCH 01/15] feat: Implement `bind_tools` for ChatTongyi --- .../langchain_community/chat_models/tongyi.py | 69 ++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index 943cace9733e9..504a67c0da137 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -10,8 +10,11 @@ Dict, Iterator, List, + Literal, Mapping, Optional, + Sequence, + Type, Union, cast, ) @@ -20,6 +23,7 @@ AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) +from langchain_core.language_models import LanguageModelInput from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import ( AIMessage, @@ -42,8 +46,11 @@ ChatGenerationChunk, ChatResult, ) -from langchain_core.pydantic_v1 import Field, SecretStr, root_validator +from langchain_core.pydantic_v1 import Field, SecretStr, root_validator, BaseModel +from langchain_core.tools import BaseTool from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env +from langchain_core.utils.function_calling import convert_to_openai_tool +from langchain_core.runnables import Runnable from requests.exceptions import HTTPError from tenacity import ( before_sleep_log, @@ -470,3 +477,63 @@ def _chunk_to_generation(chunk: ChatGenerationChunk) -> ChatGeneration: message=convert_message_chunk_to_message(chunk.message), generation_info=chunk.generation_info, ) + + def bind_tools( + self, + tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], + *, + tool_choice: Optional[Union[dict, str, Literal["auto", "none"], bool]] = None, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, BaseMessage]: + """Bind tool-like objects to this chat model. + + Assumes model is compatible with OpenAI tool-calling API. + + Args: + tools: A list of tool definitions to bind to this chat model. + Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic + models, callables, and BaseTools will be automatically converted to + their schema dictionary representation. + tool_choice: Which tool to require the model to call. + Must be the name of the single provided function or + "auto" to automatically determine which function to call + (if any), or a dict of the form: + {"type": "function", "function": {"name": <>}}. + **kwargs: Any additional parameters to pass to the + :class:`~langchain.runnable.Runnable` constructor. + """ + + formatted_tools = [convert_to_openai_tool(tool) for tool in tools] + if tool_choice is not None and tool_choice: + if len(formatted_tools) != 1: + raise ValueError( + "When specifying `tool_choice`, you must provide exactly one " + f"tool. Received {len(formatted_tools)} tools." 
+ ) + if isinstance(tool_choice, str): + if tool_choice not in ("auto", "none"): + tool_choice = { + "type": "function", + "function": {"name": tool_choice}, + } + elif isinstance(tool_choice, bool): + tool_choice = { + "type": "function", + "function": {"name": formatted_tools[0]["function"]["name"]}, + } + elif isinstance(tool_choice, dict): + if ( + formatted_tools[0]["function"]["name"] + != tool_choice["function"]["name"] + ): + raise ValueError( + f"Tool choice {tool_choice} was specified, but the only " + f"provided tool was {formatted_tools[0]['function']['name']}." + ) + else: + raise ValueError( + f"Unrecognized tool_choice type. Expected str, bool or dict. " + f"Received: {tool_choice}" + ) + kwargs["tool_choice"] = tool_choice + return super().bind(tools=formatted_tools, **kwargs) From f22c07177dcaf83d13deaf991f2f61404e93845c Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Mon, 22 Apr 2024 14:01:31 +0800 Subject: [PATCH 02/15] feat: add tests and fix issues --- .../langchain_community/chat_models/tongyi.py | 124 +++++++++++------- .../chat_models/test_tongyi.py | 80 ++++++++++- 2 files changed, 154 insertions(+), 50 deletions(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index 504a67c0da137..679d27c5755f5 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -2,6 +2,7 @@ import asyncio import functools +import json import logging from typing import ( Any, @@ -36,6 +37,8 @@ HumanMessageChunk, SystemMessage, SystemMessageChunk, + ToolMessage, + ToolMessageChunk, ) from langchain_core.output_parsers.openai_tools import ( make_invalid_tool_call, @@ -95,8 +98,14 @@ def convert_dict_to_message( ) else: additional_kwargs = {} + return ( - AIMessageChunk(content=content) + AIMessageChunk( + content=content, + additional_kwargs=additional_kwargs, + tool_calls=tool_calls, + invalid_tool_calls=invalid_tool_calls, + ) if is_chunk else AIMessage( content=content, @@ -111,6 +120,23 @@ def convert_dict_to_message( if is_chunk else SystemMessage(content=content) ) + elif role == "tool": + additional_kwargs = {} + if "name" in _dict: + additional_kwargs["name"] = _dict["name"] + return ( + ToolMessageChunk( + content=_dict.get("content", ""), + tool_call_id=_dict.get("tool_call_id"), + additional_kwargs=additional_kwargs, + ) + if is_chunk + else ToolMessage( + content=_dict.get("content", ""), + tool_call_id=_dict.get("tool_call_id"), + additional_kwargs=additional_kwargs, + ) + ) else: return ( ChatMessageChunk(role=role, content=content) @@ -124,11 +150,25 @@ def convert_message_chunk_to_message(message_chunk: BaseMessageChunk) -> BaseMes if isinstance(message_chunk, HumanMessageChunk): return HumanMessage(content=message_chunk.content) elif isinstance(message_chunk, AIMessageChunk): - return AIMessage(content=message_chunk.content) + # assert message_chunk is None + return ( + AIMessage( + content=message_chunk.content, + tool_calls=message_chunk.additional_kwargs["tool_calls"], + ) + if "tool_calls" in message_chunk.additional_kwargs + else AIMessage(content=message_chunk.content) + ) elif isinstance(message_chunk, SystemMessageChunk): return SystemMessage(content=message_chunk.content) elif isinstance(message_chunk, ChatMessageChunk): return ChatMessage(role=message_chunk.role, content=message_chunk.content) + elif isinstance(message_chunk, ToolMessageChunk): + return ToolMessage( 
+ content=message_chunk.content, + tool_call_id=message_chunk.tool_call_id, + name=message_chunk.name, + ) else: raise TypeError(f"Got unknown type {message_chunk}") @@ -143,8 +183,17 @@ def convert_message_to_dict(message: BaseMessage) -> dict: message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} + if "tool_calls" in message.additional_kwargs: + message_dict["tool_calls"] = message.additional_kwargs["tool_calls"] elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} + elif isinstance(message, ToolMessage): + message_dict = { + "role": "tool", + "tool_call_id": message.tool_call_id, + "content": message.content, + "name": message.name, + } else: raise TypeError(f"Got unknown type {message}") return message_dict @@ -380,9 +429,30 @@ def _stream( params: Dict[str, Any] = self._invocation_params( messages=messages, stop=stop, stream=True, **kwargs ) + prev_msg_content = "" + for stream_resp, is_last_chunk in generate_with_last_element_mark( self.stream_completion_with_retry(**params) ): + choice = stream_resp["output"]["choices"][0] + message = choice["message"] + if ( + choice["finish_reason"] == "null" + and message["content"] == "" + and "tool_calls" not in message + ): + continue + + # If it's a tool call response, wait until it's finished + if "tool_calls" in message and choice["finish_reason"] == "null": + continue + + # If we are streaming without `incremental_output = True`, + # we need to chop off the previous message content + if not params.get("incremental_output", False): + message["content"] = message["content"].replace(prev_msg_content, "") + prev_msg_content += message["content"] + chunk = ChatGenerationChunk( **self._chat_generation_from_qwen_resp( stream_resp, is_chunk=True, is_last_chunk=is_last_chunk @@ -420,14 +490,13 @@ def _invocation_params( params = {**self._default_params, **kwargs} if stop is not None: params["stop"] = stop - if params.get("stream"): + # According to the Tongyi official docs, + # `incremental_output` with `tools` is not supported yet + if params.get("stream") and not params.get("tools"): params["incremental_output"] = True message_dicts = [convert_message_to_dict(m) for m in messages] - # According to the docs, the last message should be a `user` message - if message_dicts[-1]["role"] != "user": - raise ValueError("Last message should be user message.") # And the `system` message should be the first message if present system_message_indices = [ i for i, m in enumerate(message_dicts) if m["role"] == "system" @@ -477,63 +546,22 @@ def _chunk_to_generation(chunk: ChatGenerationChunk) -> ChatGeneration: message=convert_message_chunk_to_message(chunk.message), generation_info=chunk.generation_info, ) - + def bind_tools( self, tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], - *, - tool_choice: Optional[Union[dict, str, Literal["auto", "none"], bool]] = None, **kwargs: Any, ) -> Runnable[LanguageModelInput, BaseMessage]: """Bind tool-like objects to this chat model. - Assumes model is compatible with OpenAI tool-calling API. - Args: tools: A list of tool definitions to bind to this chat model. Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic models, callables, and BaseTools will be automatically converted to their schema dictionary representation. - tool_choice: Which tool to require the model to call. 
- Must be the name of the single provided function or - "auto" to automatically determine which function to call - (if any), or a dict of the form: - {"type": "function", "function": {"name": <>}}. **kwargs: Any additional parameters to pass to the :class:`~langchain.runnable.Runnable` constructor. """ formatted_tools = [convert_to_openai_tool(tool) for tool in tools] - if tool_choice is not None and tool_choice: - if len(formatted_tools) != 1: - raise ValueError( - "When specifying `tool_choice`, you must provide exactly one " - f"tool. Received {len(formatted_tools)} tools." - ) - if isinstance(tool_choice, str): - if tool_choice not in ("auto", "none"): - tool_choice = { - "type": "function", - "function": {"name": tool_choice}, - } - elif isinstance(tool_choice, bool): - tool_choice = { - "type": "function", - "function": {"name": formatted_tools[0]["function"]["name"]}, - } - elif isinstance(tool_choice, dict): - if ( - formatted_tools[0]["function"]["name"] - != tool_choice["function"]["name"] - ): - raise ValueError( - f"Tool choice {tool_choice} was specified, but the only " - f"provided tool was {formatted_tools[0]['function']['name']}." - ) - else: - raise ValueError( - f"Unrecognized tool_choice type. Expected str, bool or dict. " - f"Received: {tool_choice}" - ) - kwargs["tool_choice"] = tool_choice return super().bind(tools=formatted_tools, **kwargs) diff --git a/libs/community/tests/integration_tests/chat_models/test_tongyi.py b/libs/community/tests/integration_tests/chat_models/test_tongyi.py index 73591bb4e3d66..924bd36e6845a 100644 --- a/libs/community/tests/integration_tests/chat_models/test_tongyi.py +++ b/libs/community/tests/integration_tests/chat_models/test_tongyi.py @@ -1,11 +1,14 @@ """Test Alibaba Tongyi Chat Model.""" -from typing import Any, cast + +from typing import Any, List, cast from langchain_core.callbacks import CallbackManager from langchain_core.messages import AIMessage, BaseMessage, HumanMessage +from langchain_core.messages.ai import AIMessageChunk +from langchain_core.messages.tool import ToolCall, ToolMessage from langchain_core.outputs import ChatGeneration, LLMResult from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate -from langchain_core.pydantic_v1 import SecretStr +from langchain_core.pydantic_v1 import SecretStr, BaseModel from pytest import CaptureFixture from langchain_community.chat_models.tongyi import ChatTongyi @@ -138,3 +141,76 @@ def test_multiple_messages() -> None: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content + + +class GenerateUsername(BaseModel): + "Get a username based on someone's name and hair color." 
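+    # Note: this pydantic model doubles as the tool schema. `bind_tools`
+    # converts it to an OpenAI-style function definition via
+    # `convert_to_openai_tool`, so the docstring above becomes the tool's
+    # description.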
+ + name: str + hair_color: str + + +def test_tool_use() -> None: + llm = ChatTongyi(model="qwen-turbo", temperature=0) + llm_with_tool = llm.bind_tools(tools=[GenerateUsername]) + msgs: List = [HumanMessage("Sally has green hair, what would her username be?")] + ai_msg = llm_with_tool.invoke(msgs) + # assert ai_msg is None + # ai_msg.content = " " + + assert isinstance(ai_msg, AIMessage) + assert isinstance(ai_msg.tool_calls, list) + assert len(ai_msg.tool_calls) == 1 + tool_call = ai_msg.tool_calls[0] + assert "args" in tool_call + + tool_msg = ToolMessage( + "sally_green_hair", + tool_call_id=ai_msg.tool_calls[0]["id"], + name=ai_msg.tool_calls[0]["name"], + ) + msgs.extend([ai_msg, tool_msg]) + llm_with_tool.invoke(msgs) + + # Test streaming + ai_messages = llm_with_tool.stream(msgs) + first = True + for message in ai_messages: + if first: + gathered = message + first = False + else: + gathered = gathered + message # type: ignore + assert isinstance(gathered, AIMessageChunk) + + streaming_tool_msg = ToolMessage( + "sally_green_hair", + name=tool_call["name"], + tool_call_id=tool_call["id"] if tool_call["id"] else " ", + ) + msgs.extend([gathered, streaming_tool_msg]) + llm_with_tool.invoke(msgs) + + +def test_manual_tool_call_msg() -> None: + """Test passing in manually construct tool call message.""" + llm = ChatTongyi(model="qwen-turbo", temperature=0) + llm_with_tool = llm.bind_tools(tools=[GenerateUsername]) + msgs: List = [ + HumanMessage("Sally has green hair, what would her username be?"), + AIMessage( + content=" ", + tool_calls=[ + ToolCall( + name="GenerateUsername", + args={"name": "Sally", "hair_color": "green"}, + id="foo", + ) + ], + ), + ToolMessage("sally_green_hair", tool_call_id="foo"), + ] + output: AIMessage = cast(AIMessage, llm_with_tool.invoke(msgs)) + assert output.content + # Should not have called the tool again. 
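+    # (The tool result for call id "foo" is already in the history, so the
+    # model should respond with plain text instead of another tool call.)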
+ assert not output.tool_calls and not output.invalid_tool_calls From ed51e393ff6f18ee7f6e41d41ff15a60f3b17a13 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Mon, 22 Apr 2024 14:22:34 +0800 Subject: [PATCH 03/15] docs: add an example of `bind_tools` for ChatTongyi --- docs/docs/integrations/chat/tongyi.ipynb | 89 ++++++++++++++++++------ 1 file changed, 66 insertions(+), 23 deletions(-) diff --git a/docs/docs/integrations/chat/tongyi.ipynb b/docs/docs/integrations/chat/tongyi.ipynb index a80f876ac32e3..f2e0681817029 100644 --- a/docs/docs/integrations/chat/tongyi.ipynb +++ b/docs/docs/integrations/chat/tongyi.ipynb @@ -26,14 +26,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": { "collapsed": false, "jupyter": { "outputs_hidden": false } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], "source": [ "# Install the package\n", "%pip install --upgrade --quiet dashscope" @@ -41,22 +49,14 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 6, "metadata": { "collapsed": false, "jupyter": { "outputs_hidden": false } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " ········\n" - ] - } - ], + "outputs": [], "source": [ "# Get a new token: https://help.aliyun.com/document_detail/611472.html?spm=a2c4g.2399481.0.0\n", "from getpass import getpass\n", @@ -66,7 +66,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 7, "metadata": { "collapsed": false, "jupyter": { @@ -82,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 8, "metadata": { "collapsed": false, "jupyter": { @@ -94,8 +94,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "chat resp: content='Hello! How' additional_kwargs={} example=False\n", - "chat resp: content=' can I assist you today?' additional_kwargs={} example=False\n" + "chat resp: content='Hello' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", + "chat resp: content='!' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", + "chat resp: content=' How' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", + "chat resp: content=' can I assist you today' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", + "chat resp: content='?' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", + "chat resp: content='' response_metadata={'finish_reason': 'stop', 'request_id': '2ffa6db7-09d7-96b7-8bb2-4c59dadf467b', 'token_usage': {'input_tokens': 20, 'output_tokens': 9, 'total_tokens': 29}} id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n" ] } ], @@ -113,16 +117,24 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 9, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/cheese/PARA/Projects/langchain-contribution/langchain/libs/core/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `__call__` was deprecated in LangChain 0.1.7 and will be removed in 0.2.0. 
Use invoke instead.\n", + " warn_deprecated(\n" + ] + }, { "data": { "text/plain": [ - "AIMessageChunk(content=\"J'aime programmer.\", additional_kwargs={}, example=False)" + "AIMessage(content=\"J'aime programmer.\", response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'stop', 'request_id': 'e5533cb5-a2ab-9204-b318-e6b056e24cbe', 'token_usage': {'input_tokens': 36, 'output_tokens': 5, 'total_tokens': 41}}, id='run-44282599-b1a6-4e2b-a192-f707b286b5d8-0')" ] }, - "execution_count": 5, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -141,12 +153,43 @@ "chatLLM(messages)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tool calling" + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, - "outputs": [], - "source": [] + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "content='' additional_kwargs={'tool_calls': [{'function': {'name': 'multiply', 'arguments': '{\"first_int\": 5, \"second_int\": 42}'}, 'id': '', 'type': 'function'}]} response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': '2830e26a-1a8f-94ac-834b-e4dfcd0ded56', 'token_usage': {'input_tokens': 200, 'output_tokens': 25, 'total_tokens': 225}} id='run-8bf406e7-e44b-481c-983c-ecba51fd1206-0' tool_calls=[{'name': 'multiply', 'args': {'first_int': 5, 'second_int': 42}, 'id': ''}]\n" + ] + } + ], + "source": [ + "from langchain_community.chat_models.tongyi import ChatTongyi\n", + "from langchain_core.tools import tool\n", + "\n", + "@tool\n", + "def multiply(first_int: int, second_int: int) -> int:\n", + " \"\"\"Multiply two integers together.\"\"\"\n", + " return first_int * second_int\n", + "\n", + "llm = ChatTongyi(model=\"qwen-turbo\")\n", + "\n", + "llm_with_tools = llm.bind_tools([multiply])\n", + "\n", + "msg = llm_with_tools.invoke(\"What's 5 times forty two\")\n", + "\n", + "print(msg)" + ] } ], "metadata": { @@ -165,7 +208,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.12.2" } }, "nbformat": 4, From cc76a34f4bb5ca530d32df98e375b3c4fd5881e3 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Mon, 22 Apr 2024 14:25:10 +0800 Subject: [PATCH 04/15] chore: run `make format` --- libs/community/langchain_community/chat_models/tongyi.py | 4 ++-- .../tests/integration_tests/chat_models/test_tongyi.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index 679d27c5755f5..6a31e0e842e1e 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -49,11 +49,11 @@ ChatGenerationChunk, ChatResult, ) -from langchain_core.pydantic_v1 import Field, SecretStr, root_validator, BaseModel +from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator +from langchain_core.runnables import Runnable from langchain_core.tools import BaseTool from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env from langchain_core.utils.function_calling import convert_to_openai_tool -from langchain_core.runnables import Runnable from requests.exceptions import HTTPError from tenacity import ( before_sleep_log, diff --git a/libs/community/tests/integration_tests/chat_models/test_tongyi.py 
b/libs/community/tests/integration_tests/chat_models/test_tongyi.py index 924bd36e6845a..3c2c79ce1f829 100644 --- a/libs/community/tests/integration_tests/chat_models/test_tongyi.py +++ b/libs/community/tests/integration_tests/chat_models/test_tongyi.py @@ -8,7 +8,7 @@ from langchain_core.messages.tool import ToolCall, ToolMessage from langchain_core.outputs import ChatGeneration, LLMResult from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate -from langchain_core.pydantic_v1 import SecretStr, BaseModel +from langchain_core.pydantic_v1 import BaseModel, SecretStr from pytest import CaptureFixture from langchain_community.chat_models.tongyi import ChatTongyi @@ -163,7 +163,7 @@ def test_tool_use() -> None: assert len(ai_msg.tool_calls) == 1 tool_call = ai_msg.tool_calls[0] assert "args" in tool_call - + tool_msg = ToolMessage( "sally_green_hair", tool_call_id=ai_msg.tool_calls[0]["id"], From 311ea1c1912f0c31cc1c69a0ed49e4c5ae5495e0 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Mon, 22 Apr 2024 14:27:31 +0800 Subject: [PATCH 05/15] fix: linting errors --- libs/community/langchain_community/chat_models/tongyi.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index 6a31e0e842e1e..a51efe378f75a 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -2,7 +2,6 @@ import asyncio import functools -import json import logging from typing import ( Any, @@ -11,7 +10,6 @@ Dict, Iterator, List, - Literal, Mapping, Optional, Sequence, From a806c2004ed45544768bf13f0a6fe2c5420aeb77 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Tue, 23 Apr 2024 12:35:35 +0800 Subject: [PATCH 06/15] feat(ChatTongyi): replace `tool_calls` with `tool_call_chunks` when streaming --- .../langchain_community/chat_models/tongyi.py | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index a51efe378f75a..f45eaf6762df1 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -76,6 +76,7 @@ def convert_dict_to_message( """Convert a dict to a message.""" role = _dict["role"] content = _dict["content"] + if role == "user": return ( HumanMessageChunk(content=content) @@ -87,13 +88,26 @@ def convert_dict_to_message( invalid_tool_calls = [] if "tool_calls" in _dict: additional_kwargs = {"tool_calls": _dict["tool_calls"]} - for raw_tool_call in _dict["tool_calls"]: - try: - tool_calls.append(parse_tool_call(raw_tool_call, return_id=True)) - except Exception as e: - invalid_tool_calls.append( - make_invalid_tool_call(raw_tool_call, str(e)) - ) + + for index, value in enumerate(_dict["tool_calls"]): + if is_chunk: + try: + tool_calls.append( + { + "name": value["function"].get("name"), + "args": value["function"].get("arguments"), + "id": value.get("id"), + # Tongyi does not respond with index, use index in the list instead + "index": index, + } + ) + except KeyError: + pass + else: + try: + tool_calls.append(parse_tool_call(value, return_id=True)) + except Exception as e: + invalid_tool_calls.append(make_invalid_tool_call(value, str(e))) else: additional_kwargs = {} @@ -101,8 +115,8 @@ def 
convert_dict_to_message( AIMessageChunk( content=content, additional_kwargs=additional_kwargs, - tool_calls=tool_calls, - invalid_tool_calls=invalid_tool_calls, + tool_call_chunks=tool_calls, + id=_dict.get("id"), ) if is_chunk else AIMessage( @@ -441,10 +455,6 @@ def _stream( ): continue - # If it's a tool call response, wait until it's finished - if "tool_calls" in message and choice["finish_reason"] == "null": - continue - # If we are streaming without `incremental_output = True`, # we need to chop off the previous message content if not params.get("incremental_output", False): From 57550d7f29c62759cb79677ae70469c0a0902df9 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Sun, 28 Apr 2024 22:14:54 +0800 Subject: [PATCH 07/15] feat: subtract_client_response --- .../langchain_community/chat_models/tongyi.py | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index f45eaf6762df1..ab34f33a98acf 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -329,6 +329,37 @@ def _stream_completion_with_retry(**_kwargs: Any) -> Any: return _stream_completion_with_retry(**kwargs) + def subtract_client_response(self, resp, prev_resp: Any) -> Any: + """Subtract prev response from curr response. + + Useful when streaming without `incremental_output = True` + """ + + resp_copy = copy.deepcopy(resp) + choice = resp_copy["output"]["choices"][0] + message = choice.message + prev_choice = prev_resp["output"]["choices"][0] + prev_message = prev_choice.message + + message["content"] = message["content"].replace(prev_message["content"], "") + + if message.get("tool_calls"): + for index, tool_call in enumerate(message.tool_calls): + function = tool_call["function"] + function["id"] = str(index) + + if prev_message.get("tool_calls"): + prev_function = prev_message.tool_calls[index]["function"] + + function["name"] = function["name"].replace( + prev_function["name"], "" + ) + function["arguments"] = function["arguments"].replace( + prev_function["arguments"], "" + ) + + return resp_copy + async def astream_completion_with_retry(self, **kwargs: Any) -> Any: """Because the dashscope SDK doesn't provide an async API, we wrap `stream_generate_with_retry` with an async generator.""" From 31f7d5c1525e215194958eca046f6f7de513abf5 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Sun, 28 Apr 2024 23:03:23 +0800 Subject: [PATCH 08/15] fix: change deep clone method --- .../langchain_community/chat_models/tongyi.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index ab34f33a98acf..42113d055ef9b 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -329,27 +329,28 @@ def _stream_completion_with_retry(**_kwargs: Any) -> Any: return _stream_completion_with_retry(**kwargs) - def subtract_client_response(self, resp, prev_resp: Any) -> Any: + def subtract_client_response(self, resp, prev_resp) -> Any: """Subtract prev response from curr response. 
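+        Example (illustrative): if the previous cumulative content was
+        "Hello" and the current cumulative content is "Hello world",
+        the returned delta has content " world"; tool call names and
+        arguments are diffed the same way, by string replacement.
+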
Useful when streaming without `incremental_output = True` """ - resp_copy = copy.deepcopy(resp) + resp_copy = json.loads(json.dumps(resp)) choice = resp_copy["output"]["choices"][0] - message = choice.message - prev_choice = prev_resp["output"]["choices"][0] - prev_message = prev_choice.message + message = choice["message"] + + prev_resp_copy = json.loads(json.dumps(prev_resp)) + prev_choice = prev_resp_copy["output"]["choices"][0] + prev_message = prev_choice["message"] message["content"] = message["content"].replace(prev_message["content"], "") if message.get("tool_calls"): - for index, tool_call in enumerate(message.tool_calls): + for index, tool_call in enumerate(message["tool_calls"]): function = tool_call["function"] - function["id"] = str(index) if prev_message.get("tool_calls"): - prev_function = prev_message.tool_calls[index]["function"] + prev_function = prev_message["tool_calls"][index]["function"] function["name"] = function["name"].replace( prev_function["name"], "" From c1ecd425e8c3e547277dd595d0379a3229a04e92 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Sun, 28 Apr 2024 23:11:30 +0800 Subject: [PATCH 09/15] refactor: with `subtract_client_response` --- .../langchain_community/chat_models/tongyi.py | 24 ++++++++++++------- .../langchain_community/llms/tongyi.py | 12 +++++----- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index 42113d055ef9b..9e7a5a23d2652 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -2,6 +2,7 @@ import asyncio import functools +import json import logging from typing import ( Any, @@ -324,8 +325,22 @@ def stream_completion_with_retry(self, **kwargs: Any) -> Any: @retry_decorator def _stream_completion_with_retry(**_kwargs: Any) -> Any: responses = self.client.call(**_kwargs) + prev_resp = None + for resp in responses: - yield check_response(resp) + # If we are streaming without `incremental_output = True`, + # we need to calculate the delta response manually + if _kwargs.get("stream") and not _kwargs.get( + "incremental_output", False + ): + if prev_resp is None: + delta_resp = resp + else: + delta_resp = self.subtract_client_response(resp, prev_resp) + prev_resp = resp + yield check_response(delta_resp) + else: + yield check_response(resp) return _stream_completion_with_retry(**kwargs) @@ -473,7 +488,6 @@ def _stream( params: Dict[str, Any] = self._invocation_params( messages=messages, stop=stop, stream=True, **kwargs ) - prev_msg_content = "" for stream_resp, is_last_chunk in generate_with_last_element_mark( self.stream_completion_with_retry(**params) @@ -487,12 +501,6 @@ def _stream( ): continue - # If we are streaming without `incremental_output = True`, - # we need to chop off the previous message content - if not params.get("incremental_output", False): - message["content"] = message["content"].replace(prev_msg_content, "") - prev_msg_content += message["content"] - chunk = ChatGenerationChunk( **self._chat_generation_from_qwen_resp( stream_resp, is_chunk=True, is_last_chunk=is_last_chunk diff --git a/libs/community/langchain_community/llms/tongyi.py b/libs/community/langchain_community/llms/tongyi.py index 6254609ece7f3..64c73a9ccd9a4 100644 --- a/libs/community/langchain_community/llms/tongyi.py +++ b/libs/community/langchain_community/llms/tongyi.py @@ -55,17 +55,17 @@ def 
_create_retry_decorator(llm: Tongyi) -> Callable[[Any], Any]: def check_response(resp: Any) -> Any: """Check the response from the completion call.""" - if resp.status_code == 200: + if resp["status_code"] == 200: return resp - elif resp.status_code in [400, 401]: + elif resp["status_code"] in [400, 401]: raise ValueError( - f"status_code: {resp.status_code} \n " - f"code: {resp.code} \n message: {resp.message}" + f"status_code: {resp["status_code"]} \n " + f"code: {resp["code"]} \n message: {resp["message"]}" ) else: raise HTTPError( - f"HTTP error occurred: status_code: {resp.status_code} \n " - f"code: {resp.code} \n message: {resp.message}", + f"HTTP error occurred: status_code: {resp["status_code"]} \n " + f"code: {resp["code"]} \n message: {resp["message"]}", response=resp, ) From b1343ba6369d8530db1989415be09e9293484715 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Sun, 28 Apr 2024 23:13:33 +0800 Subject: [PATCH 10/15] refactor: refactor `convert_message_chunk_to_message` --- .../langchain_community/chat_models/tongyi.py | 54 ++++++++----------- 1 file changed, 23 insertions(+), 31 deletions(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index 9e7a5a23d2652..80f9e53db2dd5 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -159,31 +159,23 @@ def convert_dict_to_message( def convert_message_chunk_to_message(message_chunk: BaseMessageChunk) -> BaseMessage: - """Convert a message chunk to a message.""" - if isinstance(message_chunk, HumanMessageChunk): - return HumanMessage(content=message_chunk.content) - elif isinstance(message_chunk, AIMessageChunk): - # assert message_chunk is None - return ( - AIMessage( - content=message_chunk.content, - tool_calls=message_chunk.additional_kwargs["tool_calls"], - ) - if "tool_calls" in message_chunk.additional_kwargs - else AIMessage(content=message_chunk.content) - ) - elif isinstance(message_chunk, SystemMessageChunk): - return SystemMessage(content=message_chunk.content) - elif isinstance(message_chunk, ChatMessageChunk): - return ChatMessage(role=message_chunk.role, content=message_chunk.content) - elif isinstance(message_chunk, ToolMessageChunk): - return ToolMessage( - content=message_chunk.content, - tool_call_id=message_chunk.tool_call_id, - name=message_chunk.name, - ) - else: - raise TypeError(f"Got unknown type {message_chunk}") + """Convert a message chunk to a message. + + Args: + chunk: Message chunk to convert. + + Returns: + Message. 
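+
+    Example (illustrative):
+        convert_message_chunk_to_message(AIMessageChunk(content="hi"))
+        # -> AIMessage(content="hi")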
+ """ + if not isinstance(message_chunk, BaseMessageChunk): + return message_chunk + # chunk classes always have the equivalent non-chunk class as their first parent + ignore_keys = ["type"] + if isinstance(message_chunk, AIMessageChunk): + ignore_keys.append("tool_call_chunks") + return message_chunk.__class__.__mro__[1]( + **{k: v for k, v in message_chunk.__dict__.items() if k not in ignore_keys} + ) def convert_message_to_dict(message: BaseMessage) -> dict: @@ -416,16 +408,16 @@ def _generate( ) -> ChatResult: generations = [] if self.streaming: - generation: Optional[ChatGenerationChunk] = None + generation_chunk: Optional[ChatGenerationChunk] = None for chunk in self._stream( messages, stop=stop, run_manager=run_manager, **kwargs ): - if generation is None: - generation = chunk + if generation_chunk is None: + generation_chunk = chunk else: - generation += chunk - assert generation is not None - generations.append(self._chunk_to_generation(generation)) + generation_chunk += chunk + assert generation_chunk is not None + generations.append(self._chunk_to_generation(generation_chunk)) else: params: Dict[str, Any] = self._invocation_params( messages=messages, stop=stop, **kwargs From 01f8156dedd389540058c613d95784ad4cd2db18 Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Sun, 28 Apr 2024 23:27:40 +0800 Subject: [PATCH 11/15] docs: update notebook --- docs/docs/integrations/chat/tongyi.ipynb | 79 +++++++++++++++++++----- 1 file changed, 62 insertions(+), 17 deletions(-) diff --git a/docs/docs/integrations/chat/tongyi.ipynb b/docs/docs/integrations/chat/tongyi.ipynb index 3ac3426742593..cf9c36fc6c92c 100644 --- a/docs/docs/integrations/chat/tongyi.ipynb +++ b/docs/docs/integrations/chat/tongyi.ipynb @@ -26,7 +26,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 1, "metadata": { "collapsed": false, "jupyter": { @@ -49,7 +49,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 2, "metadata": { "collapsed": false, "jupyter": { @@ -66,7 +66,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 3, "metadata": { "collapsed": false, "jupyter": { @@ -82,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 4, "metadata": { "collapsed": false, "jupyter": { @@ -94,12 +94,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "chat resp: content='Hello' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", - "chat resp: content='!' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", - "chat resp: content=' How' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", - "chat resp: content=' can I assist you today' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", - "chat resp: content='?' id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n", - "chat resp: content='' response_metadata={'finish_reason': 'stop', 'request_id': '2ffa6db7-09d7-96b7-8bb2-4c59dadf467b', 'token_usage': {'input_tokens': 20, 'output_tokens': 9, 'total_tokens': 29}} id='run-1df2c54b-94c4-4f84-8716-ed2f52cd42a9'\n" + "chat resp: content='Hello' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n", + "chat resp: content='!' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n", + "chat resp: content=' How' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n", + "chat resp: content=' can I assist you today' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n", + "chat resp: content='?' 
id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n", + "chat resp: content='' response_metadata={'finish_reason': 'stop', 'request_id': '921db2c5-4d53-9a89-8e87-e4ad6a671237', 'token_usage': {'input_tokens': 20, 'output_tokens': 9, 'total_tokens': 29}} id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n" ] } ], @@ -117,24 +117,24 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "/Users/cheese/PARA/Projects/langchain-contribution/langchain/libs/core/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `__call__` was deprecated in LangChain 0.1.7 and will be removed in 0.2.0. Use invoke instead.\n", + "/Users/cheese/PARA/Projects/langchain-contribution/langchain/libs/core/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The method `BaseChatModel.__call__` was deprecated in langchain-core 0.1.7 and will be removed in 0.2.0. Use invoke instead.\n", " warn_deprecated(\n" ] }, { "data": { "text/plain": [ - "AIMessage(content=\"J'aime programmer.\", response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'stop', 'request_id': 'e5533cb5-a2ab-9204-b318-e6b056e24cbe', 'token_usage': {'input_tokens': 36, 'output_tokens': 5, 'total_tokens': 41}}, id='run-44282599-b1a6-4e2b-a192-f707b286b5d8-0')" + "AIMessage(content=\"J'adore programmer.\", response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'stop', 'request_id': 'ae725086-0ffa-9728-8c72-b204c7bc7eeb', 'token_usage': {'input_tokens': 36, 'output_tokens': 6, 'total_tokens': 42}}, id='run-060cc103-ef5f-4c8a-af40-792ac7f40c26-0')" ] }, - "execution_count": 9, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -161,18 +161,63 @@ "ChatTongyi supports tool calling API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool." 
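+    ,
+    "\n",
+    "\n",
+    "Tools may be passed as dictionaries, pydantic models, callables, or `BaseTool` instances; `bind_tools` converts each of them to the OpenAI-style tool schema before sending it to the model."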
] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Use with `bind_tools`" + ] + }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "content='' additional_kwargs={'tool_calls': [{'function': {'name': 'multiply', 'arguments': '{\"first_int\": 5, \"second_int\": 42}'}, 'id': '', 'type': 'function'}]} response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': '4acf0e36-44af-987a-a0c0-8b5c5eaa1a8b', 'token_usage': {'input_tokens': 200, 'output_tokens': 25, 'total_tokens': 225}} id='run-0ecd0f09-1d20-4e55-a4f3-f14d1f710ae7-0' tool_calls=[{'name': 'multiply', 'args': {'first_int': 5, 'second_int': 42}, 'id': ''}]\n" + ] + } + ], + "source": [ + "from langchain_core.tools import tool\n", + "from langchain_community.chat_models.tongyi import ChatTongyi\n", + "\n", + "@tool\n", + "def multiply(first_int: int, second_int: int) -> int:\n", + " \"\"\"Multiply two integers together.\"\"\"\n", + " return first_int * second_int\n", + "\n", + "llm = ChatTongyi(model=\"qwen-turbo\")\n", + "\n", + "llm_with_tools = llm.bind_tools([multiply])\n", + "\n", + "msg = llm_with_tools.invoke(\"What's 5 times forty two\")\n", + "\n", + "print(msg)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Construct args manually" + ] + }, + { + "cell_type": "code", + "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'name': 'get_current_weather', 'arguments': '{\"location\": \"San Francisco\"}'}, 'id': '', 'type': 'function'}]}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': 'dae79197-8780-9b7e-8c15-6a83e2a53534', 'token_usage': {'input_tokens': 229, 'output_tokens': 19, 'total_tokens': 248}}, id='run-9e06f837-582b-473b-bb1f-5e99a68ecc10-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco'}, 'id': ''}])" + "AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'name': 'get_current_weather', 'arguments': '{\"location\": \"San Francisco\"}'}, 'id': '', 'type': 'function'}]}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': '87ef33d2-5c6b-9457-91e2-39faad7120eb', 'token_usage': {'input_tokens': 229, 'output_tokens': 19, 'total_tokens': 248}}, id='run-7939ba7f-e3f7-46f8-980b-30499b52723c-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco'}, 'id': ''}])" ] }, - "execution_count": 5, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } From 497e314bc91fd1df6132e35068c96abd2cd4ba6a Mon Sep 17 00:00:00 2001 From: Cheese <11363971+cheese-git@users.noreply.github.com> Date: Sun, 28 Apr 2024 23:38:57 +0800 Subject: [PATCH 12/15] fix: linting errors --- libs/community/langchain_community/chat_models/tongyi.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index 80f9e53db2dd5..785bc79320657 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -98,7 +98,8 @@ def convert_dict_to_message( "name": value["function"].get("name"), "args": value["function"].get("arguments"), "id": value.get("id"), - # Tongyi does not respond with 
index, use index in the list instead + # Tongyi does not respond with index, + # use index in the list instead "index": index, } ) @@ -106,7 +107,9 @@ def convert_dict_to_message( pass else: try: - tool_calls.append(parse_tool_call(value, return_id=True)) + parsed_tool = parse_tool_call(value, return_id=True) + if parsed_tool: + tool_calls.append(parsed_tool) except Exception as e: invalid_tool_calls.append(make_invalid_tool_call(value, str(e))) else: @@ -336,7 +339,7 @@ def _stream_completion_with_retry(**_kwargs: Any) -> Any: return _stream_completion_with_retry(**kwargs) - def subtract_client_response(self, resp, prev_resp) -> Any: + def subtract_client_response(self, resp: Any, prev_resp: Any) -> Any: """Subtract prev response from curr response. Useful when streaming without `incremental_output = True` From 0d15e43501959916e18f43851849493f9f74c77a Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Tue, 7 May 2024 12:51:45 -0400 Subject: [PATCH 13/15] format --- libs/community/langchain_community/llms/tongyi.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libs/community/langchain_community/llms/tongyi.py b/libs/community/langchain_community/llms/tongyi.py index 64c73a9ccd9a4..a8c685a17172b 100644 --- a/libs/community/langchain_community/llms/tongyi.py +++ b/libs/community/langchain_community/llms/tongyi.py @@ -59,13 +59,13 @@ def check_response(resp: Any) -> Any: return resp elif resp["status_code"] in [400, 401]: raise ValueError( - f"status_code: {resp["status_code"]} \n " - f"code: {resp["code"]} \n message: {resp["message"]}" + f"status_code: {resp['status_code']} \n " + f"code: {resp['code']} \n message: {resp['message']}" ) else: raise HTTPError( - f"HTTP error occurred: status_code: {resp["status_code"]} \n " - f"code: {resp["code"]} \n message: {resp["message"]}", + f"HTTP error occurred: status_code: {resp['status_code']} \n " + f"code: {resp["code"]} \n message: {resp['message']}", response=resp, ) From ff9f7718523cea2b88b0d6b3e94b7f17ebfec0cc Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Tue, 7 May 2024 12:54:06 -0400 Subject: [PATCH 14/15] format --- libs/community/langchain_community/llms/tongyi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/community/langchain_community/llms/tongyi.py b/libs/community/langchain_community/llms/tongyi.py index a8c685a17172b..8e13b6e03f192 100644 --- a/libs/community/langchain_community/llms/tongyi.py +++ b/libs/community/langchain_community/llms/tongyi.py @@ -65,7 +65,7 @@ def check_response(resp: Any) -> Any: else: raise HTTPError( f"HTTP error occurred: status_code: {resp['status_code']} \n " - f"code: {resp["code"]} \n message: {resp['message']}", + f"code: {resp['code']} \n message: {resp['message']}", response=resp, ) From 336f3b852237119c9591e762535bc37095f1d949 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Tue, 7 May 2024 13:13:26 -0400 Subject: [PATCH 15/15] format docs --- docs/docs/integrations/chat/tongyi.ipynb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/docs/integrations/chat/tongyi.ipynb b/docs/docs/integrations/chat/tongyi.ipynb index cf9c36fc6c92c..6b517937c1799 100644 --- a/docs/docs/integrations/chat/tongyi.ipynb +++ b/docs/docs/integrations/chat/tongyi.ipynb @@ -182,14 +182,16 @@ } ], "source": [ - "from langchain_core.tools import tool\n", "from langchain_community.chat_models.tongyi import ChatTongyi\n", + "from langchain_core.tools import tool\n", + "\n", "\n", "@tool\n", "def multiply(first_int: int, second_int: int) -> 
int:\n", " \"\"\"Multiply two integers together.\"\"\"\n", " return first_int * second_int\n", "\n", + "\n", "llm = ChatTongyi(model=\"qwen-turbo\")\n", "\n", "llm_with_tools = llm.bind_tools([multiply])\n",