
Commit

Temp fix (#51)
* Fixed temp=0
lkuligin authored Mar 6, 2024
1 parent 882eac5 commit 6702bed
Showing 2 changed files with 50 additions and 1 deletion.
2 changes: 1 addition & 1 deletion libs/vertexai/langchain_google_vertexai/llms.py
@@ -217,7 +217,7 @@ def _default_params(self) -> Dict[str, Any]:
             default_value = default_params.get(param_name)
             if param_value or default_value:
                 updated_params[param_name] = (
-                    param_value if param_value else default_value
+                    param_value if param_value is not None else default_value
                 )
         return updated_params
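The one-line change above is the whole fix: the old conditional used truthiness, so a user-supplied temperature of 0 (falsy) was silently replaced by the default. Below is a minimal standalone sketch, not the library's actual code, illustrating the difference; merge_param is a hypothetical helper introduced only for this example.

# Sketch (hypothetical helper, assuming the same guard as _default_params):
def merge_param(param_value, default_value):
    # Only keep params that are set by the caller or have a model default.
    if param_value or default_value:
        # Old: `param_value if param_value else default_value`
        #   -> 0 is falsy, so temperature=0 silently became the default.
        # New: an explicit 0 is kept; only None falls back to the default.
        return param_value if param_value is not None else default_value
    return None

assert merge_param(0, 0.7) == 0       # temperature=0 from the user is preserved
assert merge_param(None, 0.7) == 0.7  # an unset param still falls back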

49 changes: 49 additions & 0 deletions libs/vertexai/tests/unit_tests/test_llm.py
@@ -0,0 +1,49 @@
from typing import Any, Dict
from unittest import TestCase
from unittest.mock import MagicMock, patch

from langchain_google_vertexai.llms import VertexAI


def test_vertexai_args_passed() -> None:
    response_text = "Goodbye"
    user_prompt = "Hello"
    prompt_params: Dict[str, Any] = {
        "max_output_tokens": 1,
        "temperature": 0,
        "top_k": 10,
        "top_p": 0.5,
    }

    # Mock the library to ensure the args are passed correctly
    with patch("langchain_google_vertexai.llms.GenerativeModel") as model:
        with patch("langchain_google_vertexai.llms.get_generation_info") as gen_info:
            gen_info.return_value = {}
            mock_response = MagicMock()
            candidate = MagicMock()
            candidate.text = response_text
            mock_response.candidates = [candidate]
            model_instance = MagicMock()
            model_instance.generate_content.return_value = mock_response
            model.return_value = model_instance

            llm = VertexAI(model_name="gemini-pro", **prompt_params)
            response = llm.invoke("Hello")
            assert response == response_text
            model_instance.generate_content.assert_called_once()

            assert model_instance.generate_content.call_args.args[0] == [user_prompt]
            TestCase().assertCountEqual(
                model_instance.generate_content.call_args.kwargs,
                {
                    "stream": False,
                    "safety_settings": None,
                    "generation_config": {
                        "max_output_tokens": 1,
                        "temperature": 0,
                        "top_k": 10,
                        "top_p": 0.5,
                        "stop_sequences": None,
                    },
                },
            )
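As a usage note, and assuming the repository's standard pytest layout, the new regression test can be run on its own from the repo root with:

pytest libs/vertexai/tests/unit_tests/test_llm.py::test_vertexai_args_passed

It asserts that temperature=0 survives into the generation_config passed to generate_content, which is exactly the case the llms.py change guards against.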
