tests: add pytest flags
svidiella committed Feb 27, 2024
1 parent 960ac13 commit 98fc363
Showing 9 changed files with 97 additions and 3 deletions.
2 changes: 1 addition & 1 deletion libs/vertexai/Makefile
@@ -9,7 +9,7 @@ TEST_FILE ?= tests/unit_tests/
 integration_test integration_tests: TEST_FILE = tests/integration_tests/

 test tests integration_test integration_tests:
-	poetry run pytest $(TEST_FILE)
+	poetry run pytest --release $(TEST_FILE)


 ######################
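With this one-line change, make test and make integration_tests always pass --release to pytest, so tests marked release (enabled by the new tests/conftest.py below) run by default; a usage sketch follows that file.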
52 changes: 52 additions & 0 deletions libs/vertexai/tests/conftest.py
@@ -0,0 +1,52 @@
+"""
+Test configuration to be executed before the test run.
+"""
+
+from typing import List
+
+import pytest
+
+_RELEASE_FLAG = "release"
+_GPU_FLAG = "gpu"
+_LONG_FLAG = "long"
+_EXTENDED_FLAG = "extended"
+
+_PYTEST_FLAGS = [
+    _RELEASE_FLAG,
+    _GPU_FLAG,
+    _LONG_FLAG,
+    _EXTENDED_FLAG,
+]
+
+
+def pytest_addoption(parser: pytest.Parser) -> None:
+    """
+    Add the flags accepted by our pytest CLI.
+    :param parser: The pytest parser object.
+    """
+    for flag in _PYTEST_FLAGS:
+        parser.addoption(
+            f"--{flag}", action="store_true", default=False, help=f"run {flag} tests"
+        )
+
+
+def pytest_configure(config: pytest.Config) -> None:
+    """
+    Add our custom configuration.
+    :param config: The pytest config object.
+    """
+    for flag in _PYTEST_FLAGS:
+        config.addinivalue_line(
+            "markers", f"{flag}: mark test to run as {flag} only test"
+        )
+
+
+def pytest_collection_modifyitems(
+    config: pytest.Config, items: List[pytest.Item]
+) -> None:
+    """
+    Skip tests with a marker from our list whose flag was not explicitly passed.
+    :param config: The pytest config object.
+    :param items: The list of tests to be executed.
+    """
+    for item in items:
+        keywords = list(set(item.keywords).intersection(_PYTEST_FLAGS))
+        if keywords and not any(
+            config.getoption(f"--{keyword}") for keyword in keywords
+        ):
+            item.add_marker(
+                pytest.mark.skip(reason=f"need --{keywords[0]} option to run")
+            )
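For context, a minimal sketch of how these flags and markers interact (a hypothetical test module, not part of this commit), assuming the conftest.py above is on the test path:

import pytest


@pytest.mark.release
def test_release_only() -> None:
    # Skipped with reason "need --release option to run" unless pytest is
    # invoked with --release (which the Makefile above now passes by default).
    assert 1 + 1 == 2


@pytest.mark.gpu
def test_gpu_only() -> None:
    # Skipped unless pytest is invoked with --gpu.
    assert True

Running poetry run pytest --release would execute test_release_only and skip test_gpu_only; tests without any of these markers always run.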
5 changes: 5 additions & 0 deletions libs/vertexai/tests/integration_tests/test_callbacks.py
@@ -6,6 +6,7 @@
 from langchain_google_vertexai.llms import VertexAI


+@pytest.mark.release
 @pytest.mark.parametrize(
     "model_name",
     ["gemini-pro", "text-bison@001", "code-bison@001"],
@@ -25,6 +26,7 @@ def test_llm_invoke(model_name: str) -> None:
     assert vb.completion_tokens > completion_tokens


+@pytest.mark.release
 @pytest.mark.parametrize(
     "model_name",
     ["gemini-pro", "chat-bison@001", "codechat-bison@001"],
@@ -45,6 +47,7 @@ def test_chat_call(model_name: str) -> None:
     assert vb.completion_tokens > completion_tokens


+@pytest.mark.release
 @pytest.mark.parametrize(
     "model_name",
     ["gemini-pro", "text-bison@001", "code-bison@001"],
@@ -64,6 +67,7 @@ def test_invoke_config(model_name: str) -> None:
     assert vb.completion_tokens > completion_tokens


+@pytest.mark.release
 def test_llm_stream() -> None:
     vb = VertexAICallbackHandler()
     llm = VertexAI(model_name="gemini-pro", temperature=0.0, callbacks=[vb])
@@ -74,6 +78,7 @@ def test_llm_stream() -> None:
     assert vb.completion_tokens > 0


+@pytest.mark.release
 def test_chat_stream() -> None:
     vb = VertexAICallbackHandler()
     llm = ChatVertexAI(model_name="gemini-pro", temperature=0.0, callbacks=[vb])
16 changes: 16 additions & 0 deletions libs/vertexai/tests/integration_tests/test_chat_models.py
@@ -18,6 +18,7 @@
 model_names_to_test = [None, "codechat-bison", "chat-bison", "gemini-pro"]


+@pytest.mark.release
 @pytest.mark.parametrize("model_name", model_names_to_test)
 def test_initialization(model_name: Optional[str]) -> None:
     """Test chat model initialization."""
@@ -32,6 +33,7 @@ def test_initialization(model_name: Optional[str]) -> None:
     assert model.model_name == model.client._model_name.split("/")[-1]


+@pytest.mark.release
 @pytest.mark.parametrize("model_name", model_names_to_test)
 def test_vertexai_single_call(model_name: Optional[str]) -> None:
     if model_name:
@@ -44,6 +46,7 @@ def test_vertexai_single_call(model_name: Optional[str]) -> None:
     assert isinstance(response.content, str)


+@pytest.mark.release
 # mark xfail because Vertex API randomly doesn't respect
 # the n/candidate_count parameter
 @pytest.mark.xfail
@@ -56,6 +59,7 @@ def test_candidates() -> None:
     assert len(response.generations[0]) == 2


+@pytest.mark.release
 @pytest.mark.parametrize("model_name", ["chat-bison@001", "gemini-pro"])
 async def test_vertexai_agenerate(model_name: str) -> None:
     model = ChatVertexAI(temperature=0, model_name=model_name)
@@ -76,6 +80,7 @@ async def test_vertexai_agenerate(model_name: str) -> None:
     assert int(usage_metadata["candidates_token_count"]) > 0


+@pytest.mark.release
 @pytest.mark.parametrize("model_name", ["chat-bison@001", "gemini-pro"])
 def test_vertexai_stream(model_name: str) -> None:
     model = ChatVertexAI(temperature=0, model_name=model_name)
@@ -86,6 +91,7 @@ def test_vertexai_stream(model_name: str) -> None:
         assert isinstance(chunk, AIMessageChunk)


+@pytest.mark.release
 async def test_vertexai_astream() -> None:
     model = ChatVertexAI(temperature=0, model_name="gemini-pro")
     message = HumanMessage(content="Hello")
@@ -94,6 +100,7 @@ async def test_vertexai_astream() -> None:
         assert isinstance(chunk, AIMessageChunk)


+@pytest.mark.release
 def test_vertexai_single_call_with_context() -> None:
     model = ChatVertexAI()
     raw_context = (
@@ -110,6 +117,7 @@ def test_vertexai_single_call_with_context() -> None:
     assert isinstance(response.content, str)


+@pytest.mark.release
 def test_multimodal() -> None:
     llm = ChatVertexAI(model_name="gemini-pro-vision")
     gcs_url = (
@@ -129,6 +137,7 @@ def test_multimodal() -> None:
     assert isinstance(output.content, str)


+@pytest.mark.release
 @pytest.mark.xfail(reason="problem on vertex side")
 def test_multimodal_history() -> None:
     llm = ChatVertexAI(model_name="gemini-pro-vision")
@@ -158,6 +167,7 @@ def test_multimodal_history() -> None:
     assert isinstance(response.content, str)


+@pytest.mark.release
 def test_vertexai_single_call_with_examples() -> None:
     model = ChatVertexAI()
     raw_context = "My name is Peter. You are my personal assistant."
@@ -172,6 +182,7 @@ def test_vertexai_single_call_with_examples() -> None:
     assert isinstance(response.content, str)


+@pytest.mark.release
 @pytest.mark.parametrize("model_name", model_names_to_test)
 def test_vertexai_single_call_with_history(model_name: Optional[str]) -> None:
     if model_name:
@@ -188,6 +199,7 @@ def test_vertexai_single_call_with_history(model_name: Optional[str]) -> None:
     assert isinstance(response.content, str)


+@pytest.mark.release
 def test_vertexai_single_call_fails_no_message() -> None:
     chat = ChatVertexAI()
     with pytest.raises(ValueError) as exc_info:
@@ -198,6 +210,7 @@ def test_vertexai_single_call_fails_no_message() -> None:
     )


+@pytest.mark.release
 @pytest.mark.parametrize("model_name", ["gemini-pro"])
 def test_chat_vertexai_gemini_system_message_error(model_name: str) -> None:
     model = ChatVertexAI(model_name=model_name)
@@ -211,6 +224,7 @@ def test_chat_vertexai_gemini_system_message_error(model_name: str) -> None:
         model([system_message, message1, message2, message3])


+@pytest.mark.release
 @pytest.mark.parametrize("model_name", model_names_to_test)
 def test_chat_vertexai_system_message(model_name: Optional[str]) -> None:
     if model_name:
@@ -231,6 +245,7 @@ def test_chat_vertexai_system_message(model_name: Optional[str]) -> None:
     assert isinstance(response.content, str)


+@pytest.mark.release
 @pytest.mark.parametrize("model_name", model_names_to_test)
 def test_get_num_tokens_from_messages(model_name: str) -> None:
     if model_name:
@@ -243,6 +258,7 @@ def test_get_num_tokens_from_messages(model_name: str) -> None:
     assert token == 3


+@pytest.mark.release
 def test_chat_vertexai_gemini_function_calling() -> None:
     class MyModel(BaseModel):
         name: str
6 changes: 6 additions & 0 deletions libs/vertexai/tests/integration_tests/test_embeddings.py
@@ -8,11 +8,13 @@
 from langchain_google_vertexai.embeddings import VertexAIEmbeddings


+@pytest.mark.release
 def test_initialization() -> None:
     """Test embedding model initialization."""
     VertexAIEmbeddings()


+@pytest.mark.release
 def test_langchain_google_vertexai_embedding_documents() -> None:
     documents = ["foo bar"]
     model = VertexAIEmbeddings()
@@ -23,13 +25,15 @@ def test_langchain_google_vertexai_embedding_documents() -> None:
     assert model.model_name == "textembedding-gecko@001"


+@pytest.mark.release
 def test_langchain_google_vertexai_embedding_query() -> None:
     document = "foo bar"
     model = VertexAIEmbeddings()
     output = model.embed_query(document)
     assert len(output) == 768


+@pytest.mark.release
 def test_langchain_google_vertexai_large_batches() -> None:
     documents = ["foo bar" for _ in range(0, 251)]
     model_uscentral1 = VertexAIEmbeddings(location="us-central1")
@@ -40,6 +44,7 @@ def test_langchain_google_vertexai_large_batches() -> None:
     assert model_asianortheast1.instance["batch_size"] < 50


+@pytest.mark.release
 def test_langchain_google_vertexai_paginated_texts() -> None:
     documents = [
         "foo bar",
@@ -58,6 +63,7 @@ def test_langchain_google_vertexai_paginated_texts() -> None:
     assert model.model_name == model.client._model_id


+@pytest.mark.release
 def test_warning(caplog: pytest.LogCaptureFixture) -> None:
     _ = VertexAIEmbeddings()
     assert len(caplog.records) == 1
10 changes: 10 additions & 0 deletions libs/vertexai/tests/integration_tests/test_llms.py
@@ -13,6 +13,7 @@
 model_names_to_test_with_default = [None] + model_names_to_test


+@pytest.mark.release
 @pytest.mark.parametrize(
     "model_name",
     model_names_to_test_with_default,
@@ -26,6 +27,7 @@ def test_vertex_initialization(model_name: str) -> None:
     assert llm.model_name == llm.client._model_name.split("/")[-1]


+@pytest.mark.release
 @pytest.mark.parametrize(
     "model_name",
     model_names_to_test_with_default,
@@ -40,6 +42,7 @@ def test_vertex_invoke(model_name: str) -> None:
     assert isinstance(output, str)


+@pytest.mark.release
 @pytest.mark.parametrize(
     "model_name",
     model_names_to_test_with_default,
@@ -58,6 +61,7 @@ def test_vertex_generate(model_name: str) -> None:
     assert int(usage_metadata["candidates_token_count"]) > 0


+@pytest.mark.release
 @pytest.mark.xfail(reason="VertexAI doesn't always respect number of candidates")
 def test_vertex_generate_multiple_candidates() -> None:
     llm = VertexAI(temperature=0.3, n=2, model_name="text-bison@001")
@@ -67,6 +71,7 @@ def test_vertex_generate_multiple_candidates() -> None:
     assert len(output.generations[0]) == 2


+@pytest.mark.release
 @pytest.mark.xfail(reason="VertexAI doesn't always respect number of candidates")
 def test_vertex_generate_code() -> None:
     llm = VertexAI(temperature=0.3, n=2, model_name="code-bison@001")
@@ -79,6 +84,7 @@ def test_vertex_generate_code() -> None:
     assert int(usage_metadata["candidates_token_count"]) > 1


+@pytest.mark.release
 async def test_vertex_agenerate() -> None:
     llm = VertexAI(temperature=0)
     output = await llm.agenerate(["Please say foo:"])
@@ -88,6 +94,7 @@ async def test_vertex_agenerate() -> None:
     assert int(usage_metadata["candidates_token_count"]) > 0


+@pytest.mark.release
 @pytest.mark.parametrize(
     "model_name",
     model_names_to_test_with_default,
@@ -102,6 +109,7 @@ def test_stream(model_name: str) -> None:
         assert isinstance(token, str)


+@pytest.mark.release
 async def test_vertex_consistency() -> None:
     llm = VertexAI(temperature=0)
     output = llm.generate(["Please say foo:"])
@@ -111,12 +119,14 @@ async def test_vertex_consistency() -> None:
     assert output.generations[0][0].text == async_output.generations[0][0].text


+@pytest.mark.release
 async def test_astream() -> None:
     llm = VertexAI(temperature=0, model_name="gemini-pro")
     async for token in llm.astream("I'm Pickle Rick"):
         assert isinstance(token, str)


+@pytest.mark.release
 @pytest.mark.parametrize(
     "model_name",
     model_names_to_test,
4 changes: 2 additions & 2 deletions libs/vertexai/tests/integration_tests/test_llms_safety.py
@@ -41,7 +41,7 @@
 """


-@pytest.mark.skip("CI testing not set up")
+@pytest.mark.extended
 def test_gemini_safety_settings_generate() -> None:
     llm = VertexAI(model_name="gemini-pro", safety_settings=SAFETY_SETTINGS)
     output = llm.generate(["What do you think about child abuse:"])
@@ -70,7 +70,7 @@ def test_gemini_safety_settings_generate() -> None:
         assert not generation_info.get("is_blocked")


-@pytest.mark.skip("CI testing not set up")
+@pytest.mark.extended
 async def test_gemini_safety_settings_agenerate() -> None:
     llm = VertexAI(model_name="gemini-pro", safety_settings=SAFETY_SETTINGS)
     output = await llm.agenerate(["What do you think about child abuse:"])
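Under the conftest above, these extended-marked safety tests stay skipped during a plain make test or make integration_tests run, which passes only --release; presumably they can still be exercised directly, e.g. poetry run pytest --extended tests/integration_tests/test_llms_safety.py.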
1 change: 1 addition & 0 deletions libs/vertexai/tests/integration_tests/test_tools.py
@@ -112,6 +112,7 @@ def test_stream() -> None:
     assert "function_call" in response[0].additional_kwargs


+@pytest.mark.release
 def test_multiple_tools() -> None:
     from langchain.agents import AgentExecutor
     from langchain.agents.format_scratchpad import format_to_openai_function_messages