From 0d4cbbcc8563abf1a316af4028a8544af2567f8c Mon Sep 17 00:00:00 2001
From: William FH <13333726+hinthornw@users.noreply.github.com>
Date: Mon, 18 Dec 2023 18:46:24 -0800
Subject: [PATCH] [Partner] Update google integration test (#14883)

Gemini has decided that pickle rick is unsafe:
https://github.com/langchain-ai/langchain/actions/runs/7256642294/job/19769249444#step:8:189

![image](https://github.com/langchain-ai/langchain/assets/13333726/cfbf4312-53b6-4290-84ee-6ce0742e739e)
---
 .../tests/integration_tests/test_chat_models.py  | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/libs/partners/google-genai/tests/integration_tests/test_chat_models.py b/libs/partners/google-genai/tests/integration_tests/test_chat_models.py
index e3eef827a9fe3..c1afcdd54bd98 100644
--- a/libs/partners/google-genai/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/google-genai/tests/integration_tests/test_chat_models.py
@@ -16,7 +16,7 @@ def test_chat_google_genai_stream() -> None:
     """Test streaming tokens from OpenAI."""
     llm = ChatGoogleGenerativeAI(model=_MODEL)
 
-    for token in llm.stream("I'm Pickle Rick"):
+    for token in llm.stream("This is a test. Say 'foo'"):
         assert isinstance(token.content, str)
 
 
@@ -24,7 +24,7 @@ async def test_chat_google_genai_astream() -> None:
     """Test streaming tokens from OpenAI."""
     llm = ChatGoogleGenerativeAI(model=_MODEL)
 
-    async for token in llm.astream("I'm Pickle Rick"):
+    async for token in llm.astream("This is a test. Say 'foo'"):
         assert isinstance(token.content, str)
 
 
@@ -32,7 +32,9 @@ async def test_chat_google_genai_abatch() -> None:
     """Test streaming tokens from ChatGoogleGenerativeAI."""
     llm = ChatGoogleGenerativeAI(model=_MODEL)
 
-    result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
+    result = await llm.abatch(
+        ["This is a test. Say 'foo'", "This is a test, say 'bar'"]
+    )
     for token in result:
         assert isinstance(token.content, str)
 
@@ -42,7 +44,7 @@ async def test_chat_google_genai_abatch_tags() -> None:
     llm = ChatGoogleGenerativeAI(model=_MODEL)
 
     result = await llm.abatch(
-        ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
+        ["This is a test", "This is another test"], config={"tags": ["foo"]}
     )
     for token in result:
         assert isinstance(token.content, str)
@@ -52,7 +54,7 @@ def test_chat_google_genai_batch() -> None:
     """Test batch tokens from ChatGoogleGenerativeAI."""
     llm = ChatGoogleGenerativeAI(model=_MODEL)
 
-    result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
+    result = llm.batch(["This is a test. Say 'foo'", "This is a test, say 'bar'"])
     for token in result:
         assert isinstance(token.content, str)
 
@@ -61,7 +63,7 @@ async def test_chat_google_genai_ainvoke() -> None:
     """Test invoke tokens from ChatGoogleGenerativeAI."""
     llm = ChatGoogleGenerativeAI(model=_MODEL)
 
-    result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
+    result = await llm.ainvoke("This is a test. Say 'foo'", config={"tags": ["foo"]})
     assert isinstance(result.content, str)
 
 
@@ -70,7 +72,7 @@ def test_chat_google_genai_invoke() -> None:
    llm = ChatGoogleGenerativeAI(model=_MODEL)
 
     result = llm.invoke(
-        "I'm Pickle Rick",
+        "This is a test. Say 'foo'",
         config=dict(tags=["foo"]),
         generation_config=dict(top_k=2, top_p=1, temperature=0.7),
     )