
[bug] anthropic model strings in PREFERRED_MODELS not mapping to litellm providers #325


Open · wants to merge 12 commits into main
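
For context, here is a minimal sketch of the lookup this PR is fixing, assuming litellm is installed (the exact model lists vary by litellm version): the bare `anthropic/claude-3-5-sonnet` and `anthropic/claude-3-opus` strings do not resolve against litellm's `models_by_provider` mapping, which is why the diff below replaces them with the `-latest` and dated variants.

```python
# Sketch: check PREFERRED_MODELS-style strings against litellm's provider mapping.
# Assumes litellm is installed; the exact model lists depend on the installed version.
from litellm import models_by_provider

known = set()
for provider, models in models_by_provider.items():
    for model in models:
        # groq/deepseek names already carry the provider prefix in litellm
        if model.startswith(f"{provider}/"):
            known.add(model)
        else:
            known.add(f"{provider}/{model}")

for candidate in ("anthropic/claude-3-5-sonnet", "anthropic/claude-3-5-sonnet-20240620"):
    print(candidate, "->", "recognized" if candidate in known else "not recognized")
```

The test added in this PR automates the same check for every entry in `PREFERRED_MODELS` and suggests close matches when a name goes stale.
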
11 changes: 6 additions & 5 deletions CONTRIBUTING.md
@@ -6,7 +6,7 @@ Our vision is to build the defacto CLI for quickly spinning up an AI Agent proje
### Exclusive Contributor Sticker
AgentStack contributors all receive a free sticker pack including an exclusive holographic sticker only available to contributors to the project :)

Once your PR is merge, fill out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfvBEnsT8nsQleonJHoWQtHuhbsgUJ0a9IjOqeZbMGkga2NtA/viewform?usp=sf_link) and I'll send your sticker pack out ASAP! <3
Once your PR is merged, fill out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfvBEnsT8nsQleonJHoWQtHuhbsgUJ0a9IjOqeZbMGkga2NtA/viewform?usp=sf_link) and I'll send your sticker pack out ASAP! <3

## How to Help

@@ -16,14 +16,15 @@ The best place to engage in conversation about your contribution is in the Issue

## Setup

1. Clone the repo
`git clone https://github.com/AgentOps-AI/AgentStack.git`
1. Fork the repo from the github website or with [gh repo fork AgentOps-AI/AgentStack](https://cli.github.com/manual/gh_repo_fork)
2. Clone the forked repo and get in there!
`ssh example`
`git clone [email protected]:<your-github-username>/AgentStack.git`
`cd AgentStack`
2. Install agentstack as an editable project and set it up for development and testing
3. Install agentstack as an editable project and set it up for development and testing
`pip install -e .[dev,test]`
This will install the CLI locally and in editable mode so you can use `agentstack <command>` to test your latest changes


## Adding Tools
If you're reading this section, you probably have a product that AI agents can use as a tool. We're glad you're here!

8 changes: 3 additions & 5 deletions agentstack/cli/cli.py
@@ -1,5 +1,4 @@
from typing import Optional
import os, sys
from art import text2art
import inquirer
from agentstack import conf, log
@@ -16,10 +15,10 @@
'deepseek/deepseek-coder',
'deepseek/deepseek-reasoner',
'openai/gpt-4o',
'anthropic/claude-3-5-sonnet',
'openai/o1-preview',
'openai/gpt-4-turbo',
'anthropic/claude-3-opus',
'anthropic/claude-3-opus-latest',
'anthropic/claude-3-5-sonnet-20240620',
]
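# NOTE: entries must follow litellm's "<provider>/<model>" naming so they resolve
# to a provider; see https://docs.litellm.ai/docs/providers for the supported names.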


@@ -38,7 +37,7 @@ def welcome_message():
def undo() -> None:
"""Undo the last committed changes."""
conf.assert_project()

changed_files = repo.get_uncommitted_files()
if changed_files:
log.warning("There are uncommitted changes that may be overwritten.")
@@ -113,4 +112,3 @@ def parse_insertion_point(position: Optional[str] = None) -> Optional[InsertionP
raise ValueError(f"Position must be one of {','.join(valid_positions)}.")

return next(x for x in InsertionPoint if x.value == position)

78 changes: 78 additions & 0 deletions tests/test_preferred_models.py
@@ -0,0 +1,78 @@
from importlib.util import find_spec
import pytest
import subprocess
import sys
from difflib import get_close_matches
from agentstack.cli.cli import PREFERRED_MODELS


@pytest.fixture(scope="session", autouse=True)
def install_litellm():
"""Install litellm if not already installed."""
print("\nChecking for litellm installation...")
spec = find_spec("litellm")
if spec is None:
print("litellm not found, installing...")
subprocess.check_call([sys.executable, "-m", "pip", "install", "litellm"])
print("litellm installation complete")


def clean_model_name(provider: str, model: str) -> str:
"""
Normalize a model name to the "<provider>/<model>" form without duplicating the provider.
In litellm, groq and deepseek model names already include the provider prefix.
"""
if model.startswith(f"{provider}/"):
return f"{provider}/{model[len(provider) + 1 :]}"
return f"{provider}/{model}"
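# Illustrative examples for clean_model_name (actual litellm names vary by version):
#   clean_model_name("groq", "groq/llama3-8b-8192") -> "groq/llama3-8b-8192"
#   clean_model_name("openai", "gpt-4o") -> "openai/gpt-4o"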


def find_similar_models(model: str, all_models: set, num_suggestions: int = 3) -> list[str]:
"""
Find similar model names using string matching.
If the test fails, this surfaces the closest valid names to replace a broken entry with.
"""
try:
provider, model_name = model.split('/')
except ValueError:
return get_close_matches(model, all_models, n=num_suggestions, cutoff=0.3)

provider_models = [m for m in all_models if m.startswith(f"{provider}/")]
if provider_models:
matches = get_close_matches(model, provider_models, n=num_suggestions, cutoff=0.3)
if matches:
return matches
return get_close_matches(model, all_models, n=num_suggestions, cutoff=0.3)
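# Illustrative: for a stale entry like "anthropic/claude-3-5-sonnet", this typically
# surfaces dated variants such as "anthropic/claude-3-5-sonnet-20240620".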


def test_preferred_models_validity():
"""Test that all PREFERRED_MODELS are valid LiteLLM models."""
from litellm import models_by_provider

all_litellm_models = set()
for provider, models in models_by_provider.items():
for model in models:
full_model_name = clean_model_name(provider, model)
all_litellm_models.add(full_model_name)

invalid_models_with_suggestions = {}
for model in PREFERRED_MODELS:
if model not in all_litellm_models:
suggestions = find_similar_models(model, all_litellm_models)
invalid_models_with_suggestions[model] = suggestions

if invalid_models_with_suggestions:
error_message = (
"The following models are not in LiteLLM's supported models:\n"
"\nFor a complete list of supported models, visit: https://docs.litellm.ai/docs/providers\n"
)
for model, suggestions in invalid_models_with_suggestions.items():
error_message += f"\n- {model}"
if suggestions:
error_message += "\n Similar available models:"
for suggestion in suggestions:
error_message += f"\n * {suggestion}"
else:
error_message += "\n No similar models found."

assert False, error_message
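
To run just this check locally, install the project as described in CONTRIBUTING.md (`pip install -e .[dev,test]`) and invoke `pytest tests/test_preferred_models.py`; the session-scoped fixture installs litellm first if it is not already present.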