diff --git a/libs/community/langchain_community/chat_models/naver.py b/libs/community/langchain_community/chat_models/naver.py
index c8cf9c864af883..79368d440edf70 100644
--- a/libs/community/langchain_community/chat_models/naver.py
+++ b/libs/community/langchain_community/chat_models/naver.py
@@ -15,13 +15,6 @@
 )
 
 import httpx
-from httpx_sse import (
-    EventSource,
-    ServerSentEvent,
-    SSEError,
-    aconnect_sse,
-    connect_sse,
-)
 from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
@@ -50,7 +43,7 @@
 
 
 def _convert_chunk_to_message_chunk(
-    sse: ServerSentEvent, default_class: Type[BaseMessageChunk]
+    sse: Any, default_class: Type[BaseMessageChunk]
 ) -> BaseMessageChunk:
     sse_data = sse.json()
     message = sse_data.get("message")
@@ -120,7 +113,7 @@ def _convert_naver_chat_message_to_message(
 
 
 async def _aiter_sse(
-    event_source_mgr: AsyncContextManager[EventSource],
+    event_source_mgr: AsyncContextManager[Any],
 ) -> AsyncIterator[Dict]:
     """Iterate over the server-sent events."""
     async with event_source_mgr as event_source:
@@ -364,6 +357,11 @@ def _create_message_dicts(
         return message_dicts, params
 
     def _completion_with_retry(self, **kwargs: Any) -> Any:
+        from httpx_sse import (
+            ServerSentEvent,
+            SSEError,
+            connect_sse,
+        )
         if "stream" not in kwargs:
             kwargs["stream"] = False
@@ -399,6 +397,7 @@ async def _acompletion_with_retry(
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Any:
         """Use tenacity to retry the async completion call."""
+        from httpx_sse import aconnect_sse
 
         retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
diff --git a/libs/community/tests/unit_tests/chat_models/test_naver.py b/libs/community/tests/unit_tests/chat_models/test_naver.py
index 623ae7bfbe54f1..4f9e367c65bbbe 100644
--- a/libs/community/tests/unit_tests/chat_models/test_naver.py
+++ b/libs/community/tests/unit_tests/chat_models/test_naver.py
@@ -5,7 +5,6 @@
 from unittest.mock import patch
 
 import pytest
-from httpx_sse import ServerSentEvent
 from langchain_core.callbacks import BaseCallbackHandler
 from langchain_core.messages import (
     AIMessage,
@@ -131,7 +130,8 @@ async def mock_acompletion_with_retry(*args: Any, **kwargs: Any) -> Any:
     assert completed
 
 
-def _make_completion_response_from_token(token: str) -> ServerSentEvent:
+def _make_completion_response_from_token(token: str):
+    from httpx_sse import ServerSentEvent
     return ServerSentEvent(
         event="token",
         data=json.dumps(
@@ -175,6 +175,7 @@ def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
     "langchain_community.chat_models.ChatClovaX._completion_with_retry",
     new=mock_chat_stream,
 )
+@pytest.mark.requires("httpx_sse")
 def test_stream_with_callback() -> None:
     callback = MyCustomHandler()
     chat = ChatClovaX(callbacks=[callback])
@@ -186,6 +187,7 @@ def test_stream_with_callback() -> None:
     "langchain_community.chat_models.ChatClovaX._acompletion_with_retry",
     new=mock_chat_astream,
 )
+@pytest.mark.requires("httpx_sse")
 async def test_astream_with_callback() -> None:
     callback = MyCustomHandler()
     chat = ChatClovaX(callbacks=[callback])
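
For reference, the change above follows the usual optional-dependency pattern: httpx-sse is imported only when a completion or stream is actually requested, so importing the module itself no longer requires the package. Below is a minimal sketch of that pattern, not part of this diff; the helper `_import_httpx_sse` and the function `stream_tokens` are hypothetical names used only for illustration.

    # Sketch only: deferred import of an optional dependency (httpx-sse).
    # Nothing here is taken from naver.py beyond the general pattern.
    from typing import Any, Iterator

    import httpx


    def _import_httpx_sse() -> Any:
        # Import lazily so modules that merely reference streaming still import
        # cleanly when httpx-sse is not installed.
        try:
            import httpx_sse
        except ImportError as exc:
            raise ImportError(
                "Could not import httpx_sse python package. "
                "Please install it with `pip install httpx-sse`."
            ) from exc
        return httpx_sse


    def stream_tokens(client: httpx.Client, url: str, payload: dict) -> Iterator[dict]:
        # The hard dependency is only hit on the streaming code path.
        httpx_sse = _import_httpx_sse()
        with httpx_sse.connect_sse(client, "POST", url, json=payload) as event_source:
            for sse in event_source.iter_sse():
                yield sse.json()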