Skip to content

Commit

Permalink
fix: resolve lint errors in chat_models.py tests
Browse files Browse the repository at this point in the history
  • Loading branch information
nobu007 committed Sep 1, 2024
1 parent 4f0087c commit 3a128e0
Show file tree
Hide file tree
Showing 3 changed files with 14 additions and 12 deletions.
2 changes: 1 addition & 1 deletion libs/genai/tests/unit_tests/test_chat_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -565,7 +565,7 @@ def test_parse_response_candidate(raw_candidate: Dict, expected: AIMessage) -> N


def test_serialize() -> None:
llm = ChatGoogleGenerativeAI(model="gemini-pro-1.5", google_api_key="test-key")
llm = ChatGoogleGenerativeAI(model="gemini-pro-1.5", api_key=SecretStr("test-key"))
serialized = dumps(llm)
llm_loaded = loads(
serialized,
Expand Down
15 changes: 6 additions & 9 deletions libs/genai/tests/unit_tests/test_genai_aqa.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,9 @@
import google.ai.generativelanguage as genai
import pytest

from langchain_google_genai import (
AqaInput,
GenAIAqa,
)
from langchain_google_genai import AqaInput, GenAIAqa
from langchain_google_genai import _genai_extension as genaix
from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory

# Make sure the tests do not hit actual production servers.
genaix.set_config(
Expand Down Expand Up @@ -54,8 +52,8 @@ def test_invoke(mock_generate_answer: MagicMock) -> None:
answer_style=genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE,
safety_settings=[
genai.SafetySetting(
category=genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
)
],
)
Expand All @@ -80,11 +78,10 @@ def test_invoke(mock_generate_answer: MagicMock) -> None:
assert len(request.safety_settings) == 1
assert (
request.safety_settings[0].category
== genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
== HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
)
assert (
request.safety_settings[0].threshold
== genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
request.safety_settings[0].threshold == HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
)

assert request.temperature == 0.5
Expand Down
9 changes: 7 additions & 2 deletions libs/genai/tests/unit_tests/test_llms.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from langchain_core.pydantic_v1 import SecretStr

from langchain_google_genai.llms import GoogleGenerativeAI, GoogleModelFamily


Expand All @@ -10,7 +12,7 @@ def test_model_family() -> None:

def test_tracing_params() -> None:
# Test standard tracing params
llm = GoogleGenerativeAI(model="gemini-pro", google_api_key="foo")
llm = GoogleGenerativeAI(model="gemini-pro", google_api_key=SecretStr("foo"))
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "google_genai",
Expand All @@ -20,7 +22,10 @@ def test_tracing_params() -> None:
}

llm = GoogleGenerativeAI(
model="gemini-pro", temperature=0.1, max_output_tokens=10, google_api_key="foo"
model="gemini-pro",
temperature=0.1,
max_output_tokens=10,
google_api_key=SecretStr("foo"),
)
ls_params = llm._get_ls_params()
assert ls_params == {
Expand Down

0 comments on commit 3a128e0

Please sign in to comment.