Merge pull request #281 from SylphAI-Inc/model_provider
add model providers display and code
Sylph-AI authored Nov 26, 2024
2 parents ba2a99c + 3bd952a commit 53f537c
Showing 13 changed files with 415 additions and 40 deletions.
8 changes: 8 additions & 0 deletions README.md
@@ -142,6 +142,14 @@ You have full control over the prompt template, the model you use, and the output
<img src="https://raw.githubusercontent.com/SylphAI-Inc/LightRAG/main/docs/source/_static/images/AdalFlow_task_pipeline.png" alt="AdalFlow Task Pipeline">
</p>

Many providers and models are accessible via the same interface:

<p align="center">
<img src="https://raw.githubusercontent.com/SylphAI-Inc/LightRAG/main/docs/source/_static/images/multi-providers.png" alt="AdalFlow Model Providers">
</p>

[All available model providers](https://adalflow.sylph.ai/apis/components/components.model_client.html)
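For example, switching providers is a change of `model_client` and `model_kwargs` only (a minimal sketch, assuming the relevant API keys are set in the environment; the model names are illustrative):

```python
import adalflow as adal

# The same Generator interface works across providers; only the
# client and model_kwargs change.
gen_openai = adal.Generator(
    model_client=adal.OpenAIClient(),
    model_kwargs={"model": "gpt-3.5-turbo"},
)
gen_anthropic = adal.Generator(
    model_client=adal.AnthropicAPIClient(),
    model_kwargs={"model": "claude-3-opus-20240229"},
)

# Both are called the same way.
print(gen_openai(prompt_kwargs={"input_str": "What is an LLM?"}))
```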


<!-- LLMs are like water; they can be shaped into anything, from GenAI applications such as chatbots, translation, summarization, code generation, and autonomous agents to classical NLP tasks like text classification and named entity recognition. They interact with the world beyond the model’s internal knowledge via retrievers, memory, and tools (function calls). Each use case is unique in its data, business logic, and user experience.
5 changes: 5 additions & 0 deletions adalflow/CHANGELOG.md
@@ -1,3 +1,8 @@

## [0.2.6] - 2024-11-25
### Improved
- Add a default `max_tokens=512` to the `AnthropicAPIClient` to avoid the error raised when the user does not provide `max_tokens` in `model_kwargs`.

## [0.2.5] - 2024-10-28

### Fixed
2 changes: 1 addition & 1 deletion adalflow/adalflow/__init__.py
@@ -1,4 +1,4 @@
__version__ = "0.2.5"
__version__ = "0.2.6"

from adalflow.core.component import Component, fun_to_component
from adalflow.core.container import Sequential
adalflow/adalflow/components/model_client/anthropic_client.py
@@ -44,7 +44,10 @@ class AnthropicAPIClient(ModelClient):
Visit https://docs.anthropic.com/en/docs/intro-to-claude for more API details.
Ensure "max_tokens" is set.
Note:
As the Anthropic API requires users to set max_tokens, we default it to 512.
You can override this value by passing max_tokens in model_kwargs.
Reference: 8/1/2024
- https://docs.anthropic.com/en/docs/about-claude/models
@@ -64,6 +67,7 @@ def __init__(
self.chat_completion_parser = (
chat_completion_parser or get_first_message_content
)
self.default_max_tokens = 512

def init_sync_client(self):
api_key = self._api_key or os.getenv("ANTHROPIC_API_KEY")
@@ -116,6 +120,8 @@ def convert_inputs_to_api_kwargs(
api_kwargs["messages"] = [
{"role": "user", "content": input},
]
if "max_tokens" not in api_kwargs:
api_kwargs["max_tokens"] = self.default_max_tokens
# if input and input != "":
# api_kwargs["system"] = input
else:
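To make the new default concrete, here is a minimal sketch of the behavior added above (model name illustrative; a dummy `ANTHROPIC_API_KEY` is enough because `convert_inputs_to_api_kwargs` makes no network call):

```python
import os

from adalflow.components.model_client import AnthropicAPIClient
from adalflow.core.types import ModelType

os.environ.setdefault("ANTHROPIC_API_KEY", "sk-dummy")  # client init reads the key

client = AnthropicAPIClient()

# No max_tokens supplied: the client injects its default of 512.
kwargs = client.convert_inputs_to_api_kwargs(
    input="Hello, Claude!",
    model_kwargs={"model": "claude-3-opus-20240229"},
    model_type=ModelType.LLM,
)
assert kwargs["max_tokens"] == 512

# A caller-supplied max_tokens in model_kwargs takes precedence.
kwargs = client.convert_inputs_to_api_kwargs(
    input="Hello, Claude!",
    model_kwargs={"model": "claude-3-opus-20240229", "max_tokens": 1024},
    model_type=ModelType.LLM,
)
assert kwargs["max_tokens"] == 1024
```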
35 changes: 33 additions & 2 deletions adalflow/adalflow/components/model_client/bedrock_client.py
@@ -34,13 +34,27 @@ def get_first_message_content(completion: Dict) -> str:

class BedrockAPIClient(ModelClient):
__doc__ = r"""A component wrapper for the Bedrock API client.
Note:
This client needs more work to be fully functional. To use it you must
(1) set up the AWS credentials, (2) have access to the target modelId, and
(3) map the modelId to a standard model name.
To set up the AWS credentials, follow the instructions here:
https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html
Additionally, this Medium article is a good reference:
https://medium.com/@harangpeter/setting-up-aws-bedrock-for-api-based-text-inference-dc25ab2b216b
Visit https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html for more API details.
"""

def __init__(
self,
aws_profile_name=None,
aws_region_name=None,
aws_profile_name="default",
aws_region_name="us-west-2", # Use a supported default region
aws_access_key_id=None,
aws_secret_access_key=None,
aws_session_token=None,
Expand Down Expand Up @@ -91,6 +105,8 @@ def init_sync_client(self):
aws_session_token=aws_session_token,
)
bedrock_runtime = session.client(service_name="bedrock-runtime", config=config)

self._client = session.client(service_name="bedrock")
return bedrock_runtime

def init_async_client(self):
@@ -117,6 +133,21 @@ def track_completion_usage(self, completion: Dict) -> CompletionUsage:
total_tokens=usage["totalTokens"],
)

def list_models(self):
    # Uses the Bedrock control-plane client created in init_sync_client
    # (not the runtime client, which only serves inference calls).
    try:
        response = self._client.list_foundation_models()
        # boto3 returns the model list under "modelSummaries".
        models = response.get("modelSummaries", [])
        for model in models:
            print(f"Model ID: {model.get('modelId')}")
            print(f"  Name: {model.get('modelName')}")
            print(f"  Provider: {model.get('providerName')}")
            print("")
    except Exception as e:
        print(f"Error listing models: {e}")

def convert_inputs_to_api_kwargs(
self,
input: Optional[Any] = None,
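A usage sketch for the new `list_models` helper (assuming AWS credentials and Bedrock model access are configured as described in the docstring above):

```python
from adalflow.components.model_client import BedrockAPIClient

# Falls back to the new defaults: profile "default", region "us-west-2".
client = BedrockAPIClient()

# Prints one entry (ID, name, provider) per foundation model
# visible to the account.
client.list_models()
```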
37 changes: 20 additions & 17 deletions adalflow/adalflow/components/model_client/google_client.py
@@ -34,24 +34,27 @@ class GoogleGenAIClient(ModelClient):
Info: 8/1/2024
Tested: gemini-1.0-pro, gemini-1.5-pro-latest
class UsageMetadata(proto.Message):
prompt_token_count: int = proto.Field(
proto.INT32,
number=1,
)
cached_content_token_count: int = proto.Field(
proto.INT32,
number=4,
)
candidates_token_count: int = proto.Field(
proto.INT32,
number=2,
)
total_token_count: int = proto.Field(
proto.INT32,
number=3,
)
.. code-block:: python

    class UsageMetadata(proto.Message):
        prompt_token_count: int = proto.Field(
            proto.INT32,
            number=1,
        )
        cached_content_token_count: int = proto.Field(
            proto.INT32,
            number=4,
        )
        candidates_token_count: int = proto.Field(
            proto.INT32,
            number=2,
        )
        total_token_count: int = proto.Field(
            proto.INT32,
            number=3,
        )
"""

def __init__(self, api_key: Optional[str] = None):
2 changes: 1 addition & 1 deletion adalflow/adalflow/core/types.py
@@ -39,7 +39,7 @@
GroqAPIClient,
OpenAIClient,
GoogleGenAIClient,
OllamaClient
OllamaClient,
)


18 changes: 14 additions & 4 deletions adalflow/pyproject.toml
@@ -1,7 +1,7 @@
[tool.poetry]
name = "adalflow"

version = "0.2.5"
version = "0.2.6"
description = "The Library to Build and Auto-optimize LLM Applications"
authors = ["Li Yin <[email protected]>"]
readme = "README.md"
@@ -118,10 +118,20 @@ url = "https://pypi.nvidia.com"
# priority = "supplemental"
# url = "https://pypi.nvidia.com"

[tool.ruff]
exclude = ["images"]


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

# for formatting and linting
[tool.black]
line-length = 88
target-version = ["py311"]

[tool.ruff]
exclude = ["images"]
lint.extend-ignore = [
"E402", # Ignore module-level import issues
"E731",
"UP007", # Wants | over Union, which breaks 3.8
]
Binary file added docs/source/_static/images/multi-providers.png
