vertexai[patch]: standardize model params #121

Merged: 9 commits, merged Apr 29, 2024
Changes from 1 commit
fmt
baskaryan committed Apr 21, 2024
commit 67521e308deb1215da696bfa5cacbcdc4b8273ae
6 changes: 6 additions & 0 deletions libs/vertexai/langchain_google_vertexai/chat_models.py
@@ -507,6 +507,12 @@ class ChatVertexAI(_VertexAICommon, BaseChatModel):
     have candidates. If not, retries.
     It makes streaming mode essentially useless."""
 
+    def __init__(self, *, model_name: Optional[str] = None, **kwargs: Any) -> None:
+        """Needed for mypy typing to recognize model_name as a valid arg."""
+        if model_name:
+            kwargs["model_name"] = model_name
+        super().__init__(**kwargs)
+
     class Config:
         """Configuration for this pydantic object."""
 
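The explicit __init__ exists because model_name is declared with a pydantic field alias ("model"), so mypy's view of the synthesized constructor accepts only the alias and flags model_name= calls with [call-arg]. A minimal sketch of the pattern, using the pydantic v1 API as the library did at the time (the class name here is hypothetical, not part of the package):

from typing import Any, Optional

from pydantic.v1 import BaseModel, Field  # pydantic>=2 ships the v1 API under pydantic.v1


class FakeVertexModel(BaseModel):
    # Populated via the alias "model"; without the shim below, mypy accepts
    # only FakeVertexModel(model=...) and rejects model_name=... as call-arg.
    model_name: str = Field(default="gemini-pro", alias="model")

    class Config:
        allow_population_by_field_name = True

    def __init__(self, *, model_name: Optional[str] = None, **kwargs: Any) -> None:
        """Forward model_name into kwargs so both spellings type-check."""
        if model_name:
            kwargs["model_name"] = model_name
        super().__init__(**kwargs)


assert FakeVertexModel(model="gemini-pro").model_name == "gemini-pro"
assert FakeVertexModel(model_name="gemini-pro").model_name == "gemini-pro"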
5 changes: 3 additions & 2 deletions libs/vertexai/langchain_google_vertexai/embeddings.py
@@ -78,7 +78,7 @@ def validate_environment(cls, values: Dict) -> Dict:
 
     def __init__(
         self,
-        model_name: str,
+        model_name: Optional[str] = None,
         project: Optional[str] = None,
         location: str = "us-central1",
         request_parallelism: int = 5,
@@ -87,13 +87,14 @@ def __init__(
         **kwargs: Any,
     ):
         """Initialize the sentence_transformer."""
+        if model_name:
+            kwargs["model_name"] = model_name
         super().__init__(
             project=project,
             location=location,
             credentials=credentials,
             request_parallelism=request_parallelism,
             max_retries=max_retries,
-            model=model_name,
             **kwargs,
         )
         self.instance["max_batch_size"] = kwargs.get("max_batch_size", _MAX_BATCH_SIZE)
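For the embeddings class the normalization happens inside the hand-written constructor: model_name becomes optional and is routed through kwargs instead of being passed unconditionally as model=. Both spellings should then land on the same field; a brief usage sketch (the project value is a placeholder, and actually embedding text needs Google Cloud credentials):

from langchain_google_vertexai import VertexAIEmbeddings

# Either keyword populates the same aliased field.
emb_a = VertexAIEmbeddings(model_name="textembedding-gecko", project="my-project")
emb_b = VertexAIEmbeddings(model="textembedding-gecko", project="my-project")
assert emb_a.model_name == emb_b.model_name == "textembedding-gecko"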
24 changes: 24 additions & 0 deletions libs/vertexai/langchain_google_vertexai/gemma.py
@@ -115,6 +115,12 @@ class GemmaChatVertexAIModelGarden(_GemmaBase, _BaseVertexAIModelGarden, BaseCha
     """Whether to post-process the chat response and clean repeations """
     """or multi-turn statements."""
 
+    def __init__(self, *, model_name: Optional[str] = None, **kwargs: Any) -> None:
+        """Needed for mypy typing to recognize model_name as a valid arg."""
+        if model_name:
+            kwargs["model_name"] = model_name
+        super().__init__(**kwargs)
+
     class Config:
         """Configuration for this pydantic object."""
 
@@ -186,6 +192,12 @@ class _GemmaLocalKaggleBase(_GemmaBase):
     model_name: str = Field(default="gemma_2b_en", alias="model")
     """Gemma model name."""
 
+    def __init__(self, *, model_name: Optional[str] = None, **kwargs: Any) -> None:
+        """Needed for mypy typing to recognize model_name as a valid arg."""
+        if model_name:
+            kwargs["model_name"] = model_name
+        super().__init__(**kwargs)
+
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that llama-cpp-python library is installed."""
@@ -217,6 +229,12 @@ def _get_params(self, **kwargs) -> Dict[str, Any]:
 class GemmaLocalKaggle(_GemmaLocalKaggleBase, BaseLLM):
     """Local gemma chat model loaded from Kaggle."""
 
+    def __init__(self, *, model_name: Optional[str] = None, **kwargs: Any) -> None:
+        """Only needed for typing."""
+        if model_name:
+            kwargs["model_name"] = model_name
+        super().__init__(**kwargs)
+
     def _generate(
         self,
         prompts: List[str],
@@ -243,6 +261,12 @@ class GemmaChatLocalKaggle(_GemmaLocalKaggleBase, BaseChatModel):
     """Whether to post-process the chat response and clean repeations """
     """or multi-turn statements."""
 
+    def __init__(self, *, model_name: Optional[str] = None, **kwargs: Any) -> None:
+        """Needed for mypy typing to recognize model_name as a valid arg."""
+        if model_name:
+            kwargs["model_name"] = model_name
+        super().__init__(**kwargs)
+
     def _generate(
         self,
         messages: List[BaseMessage],
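The same shim is stamped onto each Gemma variant, so every class in the module accepts either spelling. A hedged sketch for the Kaggle-hosted model (constructing it pulls real weights, so Kaggle credentials and the keras dependencies are assumed; treat this as illustrative only):

from langchain_google_vertexai.gemma import GemmaLocalKaggle

# "gemma_2b_en" is the field's default in _GemmaLocalKaggleBase; both
# keywords set the same aliased field.
llm = GemmaLocalKaggle(model_name="gemma_2b_en")  # previously a mypy call-arg error
llm = GemmaLocalKaggle(model="gemma_2b_en")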
6 changes: 6 additions & 0 deletions libs/vertexai/langchain_google_vertexai/llms.py
@@ -117,6 +117,12 @@ class VertexAI(_VertexAICommon, BaseLLM):
     model_name will be used to determine the model family
     """
 
+    def __init__(self, *, model_name: Optional[str] = None, **kwargs: Any) -> None:
+        """Needed for mypy typing to recognize model_name as a valid arg."""
+        if model_name:
+            kwargs["model_name"] = model_name
+        super().__init__(**kwargs)
+
     class Config:
         """Configuration for this pydantic object."""
 
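VertexAI gets the identical shim. Together with the existing max_output_tokens / max_tokens alias, this is the standardization the PR title refers to. A sketch of the now-equivalent spellings, taken from the unit tests below (placeholder project; per those tests, construction alone should not hit the API):

from langchain_google_vertexai import VertexAI

llm = VertexAI(model="gemini-pro", project="test-project", max_tokens=10)
assert llm.model_name == "gemini-pro"
assert llm.max_output_tokens == 10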
2 changes: 1 addition & 1 deletion libs/vertexai/tests/unit_tests/test_chat_models.py
@@ -61,7 +61,7 @@ def test_init() -> None:
             project="test-project",
             max_tokens=10,
             stop_sequences=["bar"],
-        ),  # type: ignore[call-arg]
+        ),
     ]:
         assert llm.model_name == "gemini-pro"
         assert llm.max_output_tokens == 10
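Dropping the # type: ignore[call-arg] is the observable payoff of the ChatVertexAI.__init__ shim above: the aliased keywords now pass mypy without suppression. An equivalent standalone check, mirroring the test:

from langchain_google_vertexai import ChatVertexAI

# No type: ignore needed any more for the aliased keywords.
chat = ChatVertexAI(model="gemini-pro", project="test-project", max_tokens=10)
assert chat.model_name == "gemini-pro"
assert chat.max_output_tokens == 10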
7 changes: 3 additions & 4 deletions libs/vertexai/tests/unit_tests/test_embeddings.py
@@ -11,12 +11,11 @@
 def test_init() -> None:
     for embeddings in [
         VertexAIEmbeddings(
-            model_name="foo",
-            project="test-project",
+            model_name="textembedding-gecko",
         ),
-        VertexAIEmbeddings(model="foo", project="test-project"),
+        VertexAIEmbeddings(model="textembedding-gecko"),
     ]:
-        assert embeddings.model_name == "foo"
+        assert embeddings.model_name == "textembedding-gecko"
 
 
 def test_langchain_google_vertexai_embed_image_multimodal_only() -> None:
4 changes: 2 additions & 2 deletions libs/vertexai/tests/unit_tests/test_llm.py
@@ -8,10 +8,10 @@
 def test_model_name() -> None:
     for llm in [
         VertexAI(model_name="gemini-pro", project="test-project", max_output_tokens=10),
-        VertexAI(model="gemini-pro", project="test-project", max_tokens=10),  # type: ignore[call-arg]
+        VertexAI(model="gemini-pro", project="test-project", max_tokens=10),
     ]:
         assert llm.model_name == "gemini-pro"
-        assert llm.max_tokens == 10
+        assert llm.max_output_tokens == 10
 
 
 def test_tuned_model_name() -> None: