Commit 43e1e8e

Fix pypi extra dependency install bug.

eli64s committed Oct 7, 2024
1 parent 0a3889a
Showing 8 changed files with 40 additions and 25 deletions.
4 changes: 1 addition & 3 deletions Dockerfile
@@ -5,9 +5,7 @@ WORKDIR /app
 ENV GIT_PYTHON_REFRESH=quiet
 
 RUN apt-get update \
-    && apt-get install -y --no-install-recommends git \
-    && apt-get clean \
-    && apt-get purge -y --auto-remove git \
+    && apt-get install -y git \
     && rm -rf /var/lib/apt/lists/*
 
 RUN pip install --no-cache-dir --upgrade readmeai
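
The deleted lines purged git in the same RUN step that installed it, leaving the final image without the git binary readmeai needs at runtime: the GIT_PYTHON_REFRESH variable set above belongs to GitPython, which shells out to git. A hedged Python sketch of the failure mode the old image would hit (the clone URL and target path are illustrative):

import git  # GitPython; with GIT_PYTHON_REFRESH=quiet, the import succeeds even without git

try:
    git.Repo.clone_from("https://github.com/eli64s/readme-ai", "/tmp/readme-ai")
except git.GitCommandNotFound:
    # Raised when GitPython cannot locate the git executable in the image.
    print("git binary not found; keep `apt-get install -y git` in the Dockerfile")
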
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "readmeai"
-version = "0.5.95"
+version = "0.5.96"
 description = "Automated README file generator, powered by AI."
 authors = ["Eli <[email protected]>"]
 license = "MIT"
8 changes: 2 additions & 6 deletions readmeai/config/settings.py
@@ -18,11 +18,7 @@
 )
 from pydantic_extra_types.color import Color
 
-from readmeai.config.constants import (
-    BadgeStyleOptions,
-    ImageOptions,
-    LLMService,
-)
+from readmeai.config.constants import BadgeStyleOptions, ImageOptions
 from readmeai.errors import GitValidationError
 from readmeai.logger import get_logger
 from readmeai.readers.git.providers import GitURL, parse_git_url
@@ -162,7 +158,7 @@ class ModelSettings(BaseModel):
     LLM API model settings and parameters.
     """
 
-    api: str = Field(default=LLMService.OFFLINE)
+    api: str
     base_url: str
     context_window: PositiveInt
     encoder: str
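
With the Field default removed, pydantic treats api as a required field instead of silently falling back to the offline service. A minimal sketch of the new behavior, trimmed to the fields visible in this diff (the sample values are illustrative):

from pydantic import BaseModel, PositiveInt, ValidationError

class ModelSettings(BaseModel):
    api: str  # no default anymore: the caller must supply it
    base_url: str
    context_window: PositiveInt
    encoder: str

try:
    ModelSettings(base_url="http://localhost", context_window=4096, encoder="cl100k_base")
except ValidationError as exc:
    print(exc)  # reports that the "api" field is required
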
21 changes: 10 additions & 11 deletions readmeai/models/anthropic.py
@@ -19,9 +19,15 @@
     import anthropic
 
     ANTHROPIC_AVAILABLE = True
+    ANTHROPIC_EXCEPTIONS = (
+        anthropic.APIError,
+        anthropic.APIConnectionError,
+        anthropic.RateLimitError,
+    )
 except ImportError:
     anthropic = None
     ANTHROPIC_AVAILABLE = False
+    ANTHROPIC_EXCEPTIONS = tuple()
 
 
 class AnthropicHandler(BaseModelHandler):
@@ -74,15 +80,7 @@ async def _build_payload(self, prompt: str, tokens: int) -> dict[str, Any]:
     @retry(
         stop=stop_after_attempt(3),
         wait=wait_exponential(multiplier=1, min=4, max=10),
-        retry=retry_if_exception_type(
-            (
-                anthropic.APIError,
-                anthropic.APIConnectionError,
-                anthropic.RateLimitError,
-            )
-            if ANTHROPIC_AVAILABLE
-            else tuple()
-        ),
+        retry=retry_if_exception_type(ANTHROPIC_EXCEPTIONS),
     )
     async def _make_request(
         self,
@@ -107,6 +105,7 @@ async def _make_request(
             parameters = await self._build_payload(prompt, tokens)
 
             async with self.rate_limit_semaphore:
+                self._logger.info(f"Making request to Anthropic for '{index}'")
                 response = await self.client.messages.create(**parameters)
                 data = (
                     response.content[0].text
@@ -118,9 +117,9 @@ async def _make_request(
                 )
                 return index, data
 
-        except () as e:
+        except ANTHROPIC_EXCEPTIONS as e:
             self._logger.error(
-                f"Error processing request for '{index}': {e!r}"
+                f"API Error processing request for '{index}': {e!r}"
             )
             raise  # Re-raise for retry decorator
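Two things change here. The exception tuple moves to module scope so one guard serves both the retry decorator and the except clause, and the old `except () as e:` disappears; an empty tuple in an except clause matches nothing, so API errors were previously never caught or logged there. A minimal standalone sketch of the shared-guard pattern, assuming only the anthropic and tenacity packages (call_api is illustrative):

from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

try:
    import anthropic

    API_EXCEPTIONS = (
        anthropic.APIError,
        anthropic.APIConnectionError,
        anthropic.RateLimitError,
    )
except ImportError:
    # An empty tuple is safe in both positions: `except API_EXCEPTIONS`
    # matches nothing, and retry_if_exception_type(()) never retries.
    API_EXCEPTIONS = tuple()


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type(API_EXCEPTIONS),
)
def call_api() -> str:
    try:
        return "response"  # a real request would go here
    except API_EXCEPTIONS:
        raise  # re-raise so the retry decorator can act on it
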
6 changes: 4 additions & 2 deletions readmeai/models/gemini.py
@@ -66,7 +66,7 @@ def _model_settings(self):
         # Safely get top_p from config, use default if not available
         self.top_p = getattr(self.config.llm, "top_p", self.top_p)
 
-    async def _build_payload(self, prompt: str, tokens: int) -> dict[str, Any]:
+    async def _build_payload(self, prompt: str, tokens: int) -> Any:
         """Build payload for POST request to the Gemini API."""
         if not GENAI_AVAILABLE:
             raise RuntimeError(
@@ -87,7 +87,9 @@ async def _build_payload(self, prompt: str, tokens: int) -> dict[str, Any]:
                 aiohttp.ClientError,
                 aiohttp.ClientResponseError,
                 aiohttp.ClientConnectorError,
-            ),
+            )
+            if GENAI_AVAILABLE
+            else tuple()
         ),
     )
     async def _make_request(
3 changes: 2 additions & 1 deletion readmeai/models/openai.py
@@ -1,5 +1,6 @@
"""OpenAI API model handler implementation, with Ollama support."""

import os
from typing import Any

import aiohttp
@@ -39,7 +40,7 @@ def _model_settings(self):
 
         if self.config.llm.api == LLMService.OPENAI.name:
             self.url = f"{self.host_name}{self.path}"
-            self.client = openai.OpenAI()
+            self.client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 
         elif self.config.llm.api == LLMService.OLLAMA.name:
             self.url = f"{self.localhost}{self.path}"
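openai.OpenAI() already falls back to the OPENAI_API_KEY environment variable, so passing the key explicitly makes the requirement visible rather than changing behavior. A short sketch, with a hypothetical fail-fast guard that is not part of this commit:

import os

import openai

api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    # Hypothetical guard: fail early with a clear message instead of a
    # later authentication error from the API.
    raise RuntimeError("Set the OPENAI_API_KEY environment variable.")

client = openai.OpenAI(api_key=api_key)
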
19 changes: 19 additions & 0 deletions tests/models/test_anthropic.py
@@ -8,23 +8,36 @@
 from readmeai.ingestion.models import RepositoryContext
 from readmeai.models.anthropic import AnthropicHandler
 
+try:
+    import anthropic
+
+    ANTHROPIC_AVAILABLE = True
+except ImportError:
+    ANTHROPIC_AVAILABLE = False
+
 
 @pytest.fixture
 def anthropic_handler(repository_context_fixture: RepositoryContext):
+    if not ANTHROPIC_AVAILABLE:
+        pytest.skip("Anthropic library is not available")
     config_loader = ConfigLoader()
     context = repository_context_fixture
     return AnthropicHandler(config_loader, context)
 
 
 @pytest.mark.asyncio
 async def test_model_settings(anthropic_handler: AnthropicHandler):
+    if not ANTHROPIC_AVAILABLE:
+        pytest.skip("Anthropic library is not available")
     anthropic_handler._model_settings()
     assert isinstance(anthropic_handler.client, anthropic.AsyncAnthropic)
     assert anthropic_handler.model == "claude-3-opus-20240229"
 
 
 @pytest.mark.asyncio
 async def test_build_payload(anthropic_handler: AnthropicHandler):
+    if not ANTHROPIC_AVAILABLE:
+        pytest.skip("Anthropic library is not available")
     prompt = "Test prompt"
     tokens = 100
     payload = await anthropic_handler._build_payload(prompt, tokens)
@@ -39,6 +52,8 @@ async def test_build_payload(anthropic_handler: AnthropicHandler):
 async def test_make_request_success(
     mock_create, mock_token_handler, anthropic_handler: AnthropicHandler
 ):
+    if not ANTHROPIC_AVAILABLE:
+        pytest.skip("Anthropic library is not available")
     mock_token_handler.return_value = "Processed prompt"
     mock_token_handler.side_effect = lambda *args: args[2]
     mock_create.return_value = MagicMock(
@@ -62,6 +77,8 @@ async def test_make_request_success(
 async def test_make_request_api_error(
     mock_create, mock_token_handler, anthropic_handler: AnthropicHandler
 ):
+    if not ANTHROPIC_AVAILABLE:
+        pytest.skip("Anthropic library is not available")
     mock_token_handler.return_value = "Processed prompt"
     mock_create.side_effect = anthropic.APIError(
         message="API error",
@@ -83,6 +100,8 @@ async def test_make_request_api_error(
 async def test_make_request_unexpected_error(
     mock_create, mock_token_handler, anthropic_handler: AnthropicHandler
 ):
+    if not ANTHROPIC_AVAILABLE:
+        pytest.skip("Anthropic library is not available")
     mock_token_handler.return_value = "Processed prompt"
     mock_create.side_effect = Exception("Unexpected error")
     anthropic_handler.client = MagicMock()
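Every test repeats the same two-line availability check. pytest's built-in importorskip would collapse the pattern into a single module-level call; a sketch of that alternative, not what this commit does:

import pytest

# Skips the whole module at collection time if the optional dependency
# is missing, replacing the per-test ANTHROPIC_AVAILABLE checks.
anthropic = pytest.importorskip("anthropic")
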
2 changes: 1 addition & 1 deletion tests/models/test_gemini.py
@@ -35,7 +35,7 @@ async def test_model_settings(gemini_handler: GeminiHandler):
 async def test_build_payload(gemini_handler: GeminiHandler):
     payload = await gemini_handler._build_payload("test prompt", 100)
     assert isinstance(payload, genai.types.GenerationConfig)
-    assert payload.max_output_tokens == 699
+    assert payload.max_output_tokens == 100
     assert payload.temperature == 0.1
     assert payload.top_p == 0.9

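The corrected assertion pins max_output_tokens to the 100 tokens passed into _build_payload, and the returned object is a genai.types.GenerationConfig rather than a dict, which is also why the annotation in gemini.py widened to Any above. A minimal sketch of the object the test inspects, assuming the google-generativeai package:

import google.generativeai as genai

# Field values mirror the assertions in test_build_payload.
config = genai.types.GenerationConfig(
    max_output_tokens=100,
    temperature=0.1,
    top_p=0.9,
)
assert config.max_output_tokens == 100
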
