Skip to content

Commit

Permalink
Tests cleanup (#455)
Browse files Browse the repository at this point in the history
  • Loading branch information
lkuligin authored Aug 20, 2024
1 parent 8b7a871 commit 3557076
Show file tree
Hide file tree
Showing 4 changed files with 34 additions and 171 deletions.
4 changes: 2 additions & 2 deletions libs/vertexai/tests/integration_tests/test_callbacks.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
@pytest.mark.release
@pytest.mark.parametrize(
"model_name",
[_DEFAULT_MODEL_NAME, "text-bison@001", "code-bison@001"],
[_DEFAULT_MODEL_NAME, "code-bison@001"],
)
def test_llm_invoke(model_name: str) -> None:
vb = VertexAICallbackHandler()
Expand Down Expand Up @@ -51,7 +51,7 @@ def test_chat_call(model_name: str) -> None:
@pytest.mark.release
@pytest.mark.parametrize(
"model_name",
[_DEFAULT_MODEL_NAME, "text-bison@001", "code-bison@001"],
[_DEFAULT_MODEL_NAME, "code-bison@001"],
)
def test_invoke_config(model_name: str) -> None:
vb = VertexAICallbackHandler()
Expand Down
118 changes: 22 additions & 96 deletions libs/vertexai/tests/integration_tests/test_chat_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,9 @@
)
from tests.integration_tests.conftest import _DEFAULT_MODEL_NAME

model_names_to_test = [None, "codechat-bison", "chat-bison", _DEFAULT_MODEL_NAME]
model_names_to_test = ["codechat-bison", _DEFAULT_MODEL_NAME]

rate_limiter = InMemoryRateLimiter(requests_per_second=0.5)
rate_limiter = InMemoryRateLimiter(requests_per_second=1.0)


def _check_usage_metadata(message: AIMessage) -> None:
Expand All @@ -57,20 +57,14 @@ def _check_usage_metadata(message: AIMessage) -> None:
@pytest.mark.release
@pytest.mark.parametrize("model_name", model_names_to_test)
def test_initialization(model_name: Optional[str]) -> None:
    """Test chat model initialization.

    Parametrized over ``model_names_to_test``; each entry must yield a model
    whose ``_llm_type`` reports "vertexai".
    """
    # NOTE(review): the diff residue here contained a dead if/else that was
    # immediately overwritten; only the unconditional construction survives.
    model = ChatVertexAI(model_name=model_name, rate_limiter=rate_limiter)
    assert model._llm_type == "vertexai"


@pytest.mark.release
@pytest.mark.parametrize("model_name", model_names_to_test)
def test_vertexai_single_call(model_name: Optional[str]) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name, rate_limiter=rate_limiter)
else:
model = ChatVertexAI(rate_limiter=rate_limiter)
model = ChatVertexAI(model_name=model_name, rate_limiter=rate_limiter)
message = HumanMessage(content="Hello")
response = model([message])
assert isinstance(response, AIMessage)
Expand All @@ -92,10 +86,9 @@ def test_candidates() -> None:


@pytest.mark.release
@pytest.mark.parametrize("model_name", ["chat-bison@001", _DEFAULT_MODEL_NAME])
async def test_vertexai_agenerate(model_name: str) -> None:
async def test_vertexai_agenerate() -> None:
model = ChatVertexAI(
temperature=0, model_name=model_name, rate_limiter=rate_limiter
temperature=0, model_name=_DEFAULT_MODEL_NAME, rate_limiter=rate_limiter
)
message = HumanMessage(content="Hello")
response = await model.agenerate([[message]])
Expand All @@ -117,10 +110,9 @@ async def test_vertexai_agenerate(model_name: str) -> None:


@pytest.mark.release
@pytest.mark.parametrize("model_name", ["chat-bison@001", _DEFAULT_MODEL_NAME])
def test_vertexai_stream(model_name: str) -> None:
def test_vertexai_stream() -> None:
model = ChatVertexAI(
temperature=0, model_name=model_name, rate_limiter=rate_limiter
temperature=0, model_name=_DEFAULT_MODEL_NAME, rate_limiter=rate_limiter
)
message = HumanMessage(content="Hello")

Expand Down Expand Up @@ -159,23 +151,6 @@ async def test_vertexai_astream() -> None:
_check_usage_metadata(full)


@pytest.mark.release
def test_vertexai_single_call_with_context() -> None:
    """A chat call preceded by a SystemMessage context returns a text AIMessage."""
    chat = ChatVertexAI(rate_limiter=rate_limiter)
    system_text = (
        "My name is Peter. You are my personal assistant. My favorite movies "
        "are Lord of the Rings and Hobbit."
    )
    user_text = (
        "Hello, could you recommend a good movie for me to watch this evening, please?"
    )
    conversation = [
        SystemMessage(content=system_text),
        HumanMessage(content=user_text),
    ]
    result = chat(conversation)
    assert isinstance(result, AIMessage)
    assert isinstance(result.content, str)


@pytest.mark.release
def test_multimodal() -> None:
llm = ChatVertexAI(model_name="gemini-pro-vision", rate_limiter=rate_limiter)
Expand Down Expand Up @@ -224,9 +199,7 @@ def test_multimodal() -> None:
@pytest.mark.release
@pytest.mark.parametrize("file_uri,mime_type", multimodal_inputs)
def test_multimodal_media_file_uri(file_uri, mime_type) -> None:
llm = ChatVertexAI(
model_name="gemini-1.5-pro-preview-0514", rate_limiter=rate_limiter
)
llm = ChatVertexAI(model_name="gemini-1.5-pro-001", rate_limiter=rate_limiter)
media_message = {
"type": "media",
"file_uri": file_uri,
Expand All @@ -244,9 +217,7 @@ def test_multimodal_media_file_uri(file_uri, mime_type) -> None:
@pytest.mark.release
@pytest.mark.parametrize("file_uri,mime_type", multimodal_inputs)
def test_multimodal_media_inline_base64(file_uri, mime_type) -> None:
llm = ChatVertexAI(
model_name="gemini-1.5-pro-preview-0514", rate_limiter=rate_limiter
)
llm = ChatVertexAI(model_name="gemini-1.5-pro-001", rate_limiter=rate_limiter)
storage_client = storage.Client()
blob = storage.Blob.from_string(file_uri, client=storage_client)
media_base64 = base64.b64encode(blob.download_as_bytes()).decode()
Expand Down Expand Up @@ -372,9 +343,7 @@ def test_parse_history_gemini_multimodal_FC():
@pytest.mark.release
@pytest.mark.parametrize("file_uri,mime_type", [video_param])
def test_multimodal_video_metadata(file_uri, mime_type) -> None:
llm = ChatVertexAI(
model_name="gemini-1.5-pro-preview-0514", rate_limiter=rate_limiter
)
llm = ChatVertexAI(model_name="gemini-1.5-pro-001", rate_limiter=rate_limiter)
media_message = {
"type": "media",
"file_uri": file_uri,
Expand Down Expand Up @@ -424,28 +393,10 @@ def test_multimodal_history() -> None:
assert isinstance(response.content, str)


@pytest.mark.release
def test_vertexai_single_call_with_examples() -> None:
    """A chat call with few-shot `examples` returns a text AIMessage."""
    chat = ChatVertexAI(rate_limiter=rate_limiter)
    system_text = "My name is Peter. You are my personal assistant."
    # One worked example (human question -> AI answer) supplied as few-shot context.
    example_question, example_answer = "4+4", "8"
    example_input = HumanMessage(content=example_question)
    example_output = AIMessage(content=example_answer)
    prompt = [
        SystemMessage(content=system_text),
        HumanMessage(content="2+2"),
    ]
    result = chat(prompt, examples=[example_input, example_output])
    assert isinstance(result, AIMessage)
    assert isinstance(result.content, str)


@pytest.mark.release
@pytest.mark.parametrize("model_name", model_names_to_test)
def test_vertexai_single_call_with_history(model_name: Optional[str]) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name, rate_limiter=rate_limiter)
else:
model = ChatVertexAI(rate_limiter=rate_limiter)
model = ChatVertexAI(model_name=model_name, rate_limiter=rate_limiter)
text_question1, text_answer1 = "How much is 2+2?", "4"
text_question2 = "How much is 3+3?"
message1 = HumanMessage(content=text_question1)
Expand All @@ -459,10 +410,7 @@ def test_vertexai_single_call_with_history(model_name: Optional[str]) -> None:
@pytest.mark.release
@pytest.mark.parametrize("model_name", ["gemini-1.0-pro-002"])
def test_vertexai_system_message(model_name: Optional[str]) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name, rate_limiter=rate_limiter)
else:
model = ChatVertexAI(rate_limiter=rate_limiter)
model = ChatVertexAI(model_name=model_name, rate_limiter=rate_limiter)
system_instruction = """CymbalBank is a bank located in London"""
text_question1 = "Where is Cymbal located? Provide only the name of the city."
sys_message = SystemMessage(content=system_instruction)
Expand All @@ -475,14 +423,8 @@ def test_vertexai_system_message(model_name: Optional[str]) -> None:


@pytest.mark.release
@pytest.mark.parametrize("model_name", model_names_to_test)
def test_vertexai_single_call_with_no_system_messages(
model_name: Optional[str],
) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name, rate_limiter=rate_limiter)
else:
model = ChatVertexAI(rate_limiter=rate_limiter)
def test_vertexai_single_call_with_no_system_messages() -> None:
model = ChatVertexAI(model_name=_DEFAULT_MODEL_NAME, rate_limiter=rate_limiter)
text_question1, text_answer1 = "How much is 2+2?", "4"
text_question2 = "How much is 3+3?"
message1 = HumanMessage(content=text_question1)
Expand All @@ -493,26 +435,12 @@ def test_vertexai_single_call_with_no_system_messages(
assert isinstance(response.content, str)


@pytest.mark.release
def test_vertexai_single_call_fails_no_message() -> None:
    """Calling the chat model with an empty message list raises ValueError."""
    chat = ChatVertexAI(rate_limiter=rate_limiter)
    with pytest.raises(ValueError) as exc_info:
        chat([])
    expected = "You should provide at least one message to start the chat!"
    assert str(exc_info.value) == expected


@pytest.mark.release
@pytest.mark.parametrize("model_name", model_names_to_test)
def test_get_num_tokens_from_messages(model_name: str) -> None:
if model_name:
model = ChatVertexAI(
model_name=model_name, temperature=0.0, rate_limiter=rate_limiter
)
else:
model = ChatVertexAI(temperature=0.0, rate_limiter=rate_limiter)
model = ChatVertexAI(
model_name=model_name, temperature=0.0, rate_limiter=rate_limiter
)
message = HumanMessage(content="Hello")
token = model.get_num_tokens_from_messages(messages=[message])
assert isinstance(token, int)
Expand Down Expand Up @@ -611,7 +539,7 @@ class MyModel(BaseModel):
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH
}
model = ChatVertexAI(
model_name="gemini-1.5-pro-preview-0409",
model_name="gemini-1.5-pro-001",
safety_settings=safety,
rate_limiter=rate_limiter,
).bind(
Expand Down Expand Up @@ -649,9 +577,7 @@ class MyModel(BaseModel):
safety = {
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH
}
model = ChatVertexAI(
model_name="gemini-1.5-pro-preview-0409", safety_settings=safety
).bind(
model = ChatVertexAI(model_name="gemini-1.5-pro-001", safety_settings=safety).bind(
functions=[MyModel],
tool_config={
"function_calling_config": {
Expand All @@ -678,7 +604,7 @@ class MyModel(BaseModel):
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH
}
llm = ChatVertexAI(
model_name="gemini-1.5-pro-preview-0409",
model_name="gemini-1.5-pro-001",
safety_settings=safety,
rate_limiter=rate_limiter,
)
Expand Down Expand Up @@ -735,7 +661,7 @@ def search(
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH
}
llm = ChatVertexAI(
model_name="gemini-1.5-pro-preview-0409",
model_name="gemini-1.5-pro-001",
safety_settings=safety,
temperature=0,
rate_limiter=rate_limiter,
Expand Down
47 changes: 10 additions & 37 deletions libs/vertexai/tests/integration_tests/test_llms.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,17 +10,10 @@
from langchain_google_vertexai.llms import VertexAI
from tests.integration_tests.conftest import _DEFAULT_MODEL_NAME

model_names_to_test = [_DEFAULT_MODEL_NAME]
model_names_to_test_with_default = [None] + model_names_to_test


@pytest.mark.release
@pytest.mark.parametrize(
"model_name",
model_names_to_test_with_default,
)
def test_vertex_initialization(model_name: str) -> None:
llm = VertexAI(model_name=model_name) if model_name else VertexAI()
def test_vertex_initialization() -> None:
llm = VertexAI(model_name=_DEFAULT_MODEL_NAME)
assert llm._llm_type == "vertexai"
try:
assert llm.model_name == llm.client._model_id
Expand All @@ -29,31 +22,15 @@ def test_vertex_initialization(model_name: str) -> None:


@pytest.mark.release
def test_vertex_invoke() -> None:
    """`VertexAI.invoke` on the default model returns a string completion."""
    # NOTE(review): the diff residue contained a superseded parametrized
    # variant plus a conditional constructor; the post-cleanup form pins the
    # default model explicitly with temperature=0 for determinism.
    llm = VertexAI(model_name=_DEFAULT_MODEL_NAME, temperature=0)
    output = llm.invoke("Say foo:")
    assert isinstance(output, str)


@pytest.mark.release
@pytest.mark.parametrize(
"model_name",
model_names_to_test_with_default,
)
def test_vertex_generate(model_name: str) -> None:
llm = (
VertexAI(model_name=model_name, temperature=0)
if model_name
else VertexAI(temperature=0.0)
)
def test_vertex_generate() -> None:
llm = VertexAI(model_name=_DEFAULT_MODEL_NAME, temperature=0)
output = llm.generate(["Say foo:"])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
Expand Down Expand Up @@ -87,7 +64,7 @@ def test_vertex_generate_code() -> None:

@pytest.mark.release
async def test_vertex_agenerate() -> None:
llm = VertexAI(temperature=0)
llm = VertexAI(model_name=_DEFAULT_MODEL_NAME, temperature=0)
output = await llm.agenerate(["Please say foo:"])
assert isinstance(output, LLMResult)
usage_metadata = output.generations[0][0].generation_info["usage_metadata"] # type: ignore
Expand All @@ -104,7 +81,7 @@ def test_stream() -> None:

@pytest.mark.release
async def test_vertex_consistency() -> None:
llm = VertexAI(temperature=0)
llm = VertexAI(model_name=_DEFAULT_MODEL_NAME, temperature=0)
output = llm.generate(["Please say foo:"])
streaming_output = llm.generate(["Please say foo:"], stream=True)
async_output = await llm.agenerate(["Please say foo:"])
Expand All @@ -120,11 +97,7 @@ async def test_astream() -> None:


@pytest.mark.release
def test_vertex_call_count_tokens() -> None:
    """`get_num_tokens` on the default model counts 4 tokens for "How are you?"."""
    # NOTE(review): the diff residue contained a superseded parametrized
    # variant; the post-cleanup form uses the shared default model directly.
    llm = VertexAI(model_name=_DEFAULT_MODEL_NAME)
    output = llm.get_num_tokens("How are you?")
    # Exact count is model-dependent — assumes the default Gemini tokenizer
    # yields 4 tokens for this phrase; update if the default model changes.
    assert output == 4
36 changes: 0 additions & 36 deletions libs/vertexai/tests/integration_tests/test_tools.py

This file was deleted.

0 comments on commit 3557076

Please sign in to comment.