diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 37fcefa..466df71 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.0.0"
+  ".": "0.1.0"
 }
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 209b0d6..2388545 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,7 +4,7 @@ LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkl
 
 ## Submitting bug reports and feature requests
 
-The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-AI/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days.
+The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-ai/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days.
 
 ## Submitting pull requests
@@ -55,8 +55,6 @@ make lint
 
 The library's module structure is as follows:
 
-
-
 ### Type hints
 
 Python does not require the use of type hints, but they can be extremely helpful for spotting mistakes and for improving the IDE experience, so we should always use them in the library. Every method in the public API is expected to have type hints for all non-`self` parameters, and for its return value if any.
diff --git a/README.md b/README.md
index ba7c9d9..cbe23e0 100644
--- a/README.md
+++ b/README.md
@@ -12,18 +12,7 @@ This version of the library has a minimum Python version of 3.8.
 
 ## Getting started
 
-Install the package
-
-    $ pip install launchdarkly-server-sdk-ai
-
-The provided `TracingHook` can be setup as shown below:
-
-
-
-```python
-import ldclient
-
-```
+Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/ai/python) for instructions on getting started with using the SDK.
 
 ## Learn more
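Since the README now defers setup to the reference guide, a minimal setup sketch for context, assuming the package name from pyproject.toml and a placeholder SDK key:

```python
# Minimal setup sketch; assumes `pip install launchdarkly-server-sdk-ai`.
import ldclient
from ldclient import Config
from ldai.client import LDAIClient

ldclient.set_config(Config('sdk-key'))  # 'sdk-key' is a placeholder
ai_client = LDAIClient(ldclient.get())  # wraps the base LDClient
```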
diff --git a/ldai/client.py b/ldai/client.py
index a1463f8..0dcc6e8 100644
--- a/ldai/client.py
+++ b/ldai/client.py
@@ -1,10 +1,25 @@
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Literal, Optional
 from ldclient import Context
 from ldclient.client import LDClient
 import chevron
 from ldai.tracker import LDAIConfigTracker
-from ldai.types import AIConfig
+from dataclasses import dataclass
+
+@dataclass
+class LDMessage():
+    role: Literal['system', 'user', 'assistant']
+    content: str
+
+@dataclass
+class AIConfigData():
+    model: Optional[dict]
+    prompt: Optional[List[LDMessage]]
+
+class AIConfig():
+    def __init__(self, config: AIConfigData, tracker: LDAIConfigTracker, enabled: bool):
+        self.config = config
+        self.tracker = tracker
+        self.enabled = enabled
 
 class LDAIClient:
     """The LaunchDarkly AI SDK client object."""
@@ -12,42 +27,41 @@ class LDAIClient:
     def __init__(self, client: LDClient):
         self.client = client
 
-    def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
-        """Get the value of a model configuration asynchronously.
-
-        Args:
-            key: The key of the model configuration.
-            context: The context to evaluate the model configuration in.
-            default_value: The default value of the model configuration.
-            variables: Additional variables for the model configuration.
+    def model_config(self, key: str, context: Context, default_value: AIConfig, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
+        """
+        Get the value of a model configuration.
 
-        Returns:
-            The value of the model configuration.
+        :param key: The key of the model configuration.
+        :param context: The context to evaluate the model configuration in.
+        :param default_value: The default value of the model configuration.
+        :param variables: Additional variables for the model configuration.
+        :return: The value of the model configuration.
         """
         variation = self.client.variation(key, context, default_value)
 
-        all_variables = {'ldctx': context}
+        all_variables = {}
         if variables:
             all_variables.update(variables)
+        all_variables['ldctx'] = context
+
+        if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']):
+            variation['prompt'] = [
+                LDMessage(
+                    role=entry['role'],
+                    content=self.__interpolate_template(entry['content'], all_variables)
+                )
+                for entry in variation['prompt']
+            ]
 
-        variation['prompt'] = [
-            {
-                **entry,
-                'content': self.interpolate_template(entry['content'], all_variables)
-            }
-            for entry in variation['prompt']
-        ]
+        enabled = variation.get('_ldMeta', {}).get('enabled', False)
+        return AIConfig(config=AIConfigData(model=variation['model'], prompt=variation['prompt']), tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled))
 
-        return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context))
-
-    def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
-        """Interpolate the template with the given variables.
-
-        Args:
-            template: The template string.
-            variables: The variables to interpolate into the template.
+    def __interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
+        """
+        Interpolate the template with the given variables.
+
-
-        Returns:
-            The interpolated string.
+        :param template: The template string.
+        :param variables: The variables to interpolate into the template.
+        :return: The interpolated string.
         """
         return chevron.render(template, variables)
\ No newline at end of file
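A hedged usage sketch of the reworked `model_config` API; the flag key and variable values are illustrative, and `ai_client` is the `LDAIClient` from the setup sketch above:

```python
# Illustrative only: evaluates an AI config flag and reads the interpolated prompt.
from ldclient import Context
from ldai.client import AIConfig, AIConfigData, LDMessage

context = Context.builder('user-key').name('Sandy').build()
default_value = AIConfig(
    config=AIConfigData(model=None, prompt=[LDMessage(role='system', content='Hello, {{name}}!')]),
    tracker=None,  # placeholder; model_config attaches a real tracker to the returned config
    enabled=False,
)

config = ai_client.model_config('my-ai-config', context, default_value, {'name': 'Sandy'})
if config.enabled and config.config.prompt:
    print(config.config.prompt[0].content)  # e.g. 'Hello, Sandy!'
```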
""" return chevron.render(template, variables) \ No newline at end of file diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py new file mode 100644 index 0000000..826f78e --- /dev/null +++ b/ldai/testing/test_model_config.py @@ -0,0 +1,105 @@ +import pytest +from ldclient import LDClient, Context, Config +from ldclient.integrations.test_data import TestData +from ldai.client import AIConfig, AIConfigData, LDAIClient, LDMessage +from ldai.tracker import LDAIConfigTracker +from ldclient.testing.builders import * + + +@pytest.fixture +def td() -> TestData: + td = TestData.data_source() + td.update(td.flag('model-config').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }, "green").variation_for_all(0)) + + td.update(td.flag('multiple-prompt').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}, {'role': 'user', 'content': 'The day is, {{day}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }, "green").variation_for_all(0)) + + td.update(td.flag('ctx-interpolation').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{ldctx.name}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }).variation_for_all(0)) + + td.update(td.flag('off-config').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': False, 'versionKey': 'abcd'} + }).variation_for_all(0)) + + return td + +@pytest.fixture +def client(td: TestData) -> LDClient: + config = Config('sdk-key', update_processor_class=td, send_events=False) + return LDClient(config=config) + +@pytest.fixture +def tracker(client: LDClient) -> LDAIConfigTracker: + return LDAIConfigTracker(client, 'abcd', 'model-config', Context.create('user-key')) + +@pytest.fixture +def ldai_client(client: LDClient) -> LDAIClient: + return LDAIClient(client) + +def test_model_config_interpolation(ldai_client: LDAIClient, tracker): + context = Context.create('user-key') + default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', content='Hello, {{name}}!')]), tracker=tracker, enabled=True) + variables = {'name': 'World'} + + config = ldai_client.model_config('model-config', context, default_value, variables) + + assert config.config.prompt is not None + assert len(config.config.prompt) > 0 + assert config.config.prompt[0].content == 'Hello, World!' + assert config.enabled is True + +def test_model_config_no_variables(ldai_client: LDAIClient, tracker): + context = Context.create('user-key') + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True) + + config = ldai_client.model_config('model-config', context, default_value, {}) + + assert config.config.prompt is not None + assert len(config.config.prompt) > 0 + assert config.config.prompt[0].content == 'Hello, !' 
+
+def test_context_interpolation(ldai_client: LDAIClient, tracker):
+    context = Context.builder('user-key').name("Sandy").build()
+    default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True)
+    variables = {'name': 'World'}
+
+    config = ldai_client.model_config('ctx-interpolation', context, default_value, variables)
+
+    assert config.config.prompt is not None
+    assert len(config.config.prompt) > 0
+    assert config.config.prompt[0].content == 'Hello, Sandy!'
+    assert config.enabled is True
+
+def test_model_config_multiple(ldai_client: LDAIClient, tracker):
+    context = Context.create('user-key')
+    default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True)
+    variables = {'name': 'World', 'day': 'Monday'}
+
+    config = ldai_client.model_config('multiple-prompt', context, default_value, variables)
+
+    assert config.config.prompt is not None
+    assert len(config.config.prompt) > 0
+    assert config.config.prompt[0].content == 'Hello, World!'
+    assert config.config.prompt[1].content == 'The day is, Monday!'
+    assert config.enabled is True
+
+def test_model_config_disabled(ldai_client: LDAIClient, tracker):
+    context = Context.create('user-key')
+    default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=False)
+
+    config = ldai_client.model_config('off-config', context, default_value, {})
+
+    assert config.enabled is False
diff --git a/ldai/tracker.py b/ldai/tracker.py
index 9be9d9c..1c3bbc7 100644
--- a/ldai/tracker.py
+++ b/ldai/tracker.py
@@ -1,55 +1,231 @@
+from enum import Enum
 import time
 from typing import Dict, Union
 from ldclient import Context, LDClient
-from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage
+from dataclasses import dataclass
+
+@dataclass
+class TokenMetrics():
+    """
+    Metrics for token usage in AI operations.
+
+    :param total: Total number of tokens used.
+    :param input: Number of input tokens.
+    :param output: Number of output tokens.
+    """
+    total: int
+    input: int
+    output: int
+
+class FeedbackKind(Enum):
+    """
+    Types of feedback that can be provided for AI operations.
+    """
+    Positive = "positive"
+    Negative = "negative"
+
+@dataclass
+class TokenUsage():
+    """
+    Tracks token usage for AI operations.
+
+    :param total_tokens: Total number of tokens used.
+    :param prompt_tokens: Number of tokens in the prompt.
+    :param completion_tokens: Number of tokens in the completion.
+    """
+    total_tokens: int
+    prompt_tokens: int
+    completion_tokens: int
+
+    def to_metrics(self) -> TokenMetrics:
+        """
+        Convert token usage to metrics format.
+
+        :return: TokenMetrics object containing usage data.
+        """
+        return TokenMetrics(
+            total=self.total_tokens,
+            input=self.prompt_tokens,
+            output=self.completion_tokens,
+        )
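Note that `TokenUsage.to_metrics` now returns a `TokenMetrics` dataclass (the earlier dict-style `self['total_tokens']` access would raise on a dataclass, and `track_tokens` below reads attributes). A small round-trip sketch with arbitrary values:

```python
# Round-trip from raw token counts to TokenMetrics; values are illustrative.
from ldai.tracker import TokenUsage

usage = TokenUsage(total_tokens=30, prompt_tokens=20, completion_tokens=10)
metrics = usage.to_metrics()
assert (metrics.total, metrics.input, metrics.output) == (30, 20, 10)
```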
+ """ + self.total_tokens = data.total_tokens + self.prompt_tokens = data.prompt_tokens + self.completion_tokens = data.completion_tokens + + def to_metrics(self) -> TokenMetrics: + """ + Convert OpenAI token usage to metrics format. + + :return: TokenMetrics object containing usage data. + """ + return TokenMetrics( + total=self.total_tokens, + input=self.prompt_tokens, + output=self.completion_tokens, + ) + +@dataclass +class BedrockTokenUsage: + """ + Tracks AWS Bedrock-specific token usage. + """ + def __init__(self, data: dict): + """ + Initialize Bedrock token usage tracking. + + :param data: Dictionary containing Bedrock usage data. + """ + self.totalTokens = data.get('totalTokens', 0) + self.inputTokens = data.get('inputTokens', 0) + self.outputTokens = data.get('outputTokens', 0) + + def to_metrics(self) -> TokenMetrics: + """ + Convert Bedrock token usage to metrics format. + + :return: TokenMetrics object containing usage data. + """ + return TokenMetrics( + total=self.totalTokens, + input=self.inputTokens, + output=self.outputTokens, + ) + class LDAIConfigTracker: - def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context): + """ + Tracks configuration and usage metrics for LaunchDarkly AI operations. + """ + def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context): + """ + Initialize an AI configuration tracker. + + :param ld_client: LaunchDarkly client instance. + :param version_key: Version key for tracking. + :param config_key: Configuration key for tracking. + :param context: Context for evaluation. + """ self.ld_client = ld_client - self.variation_id = variation_id + self.version_key = version_key self.config_key = config_key self.context = context - def get_track_data(self): + def __get_track_data(self): + """ + Get tracking data for events. + + :return: Dictionary containing version and config keys. + """ return { - 'variationId': self.variation_id, + 'versionKey': self.version_key, 'configKey': self.config_key, } def track_duration(self, duration: int) -> None: - self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration) + """ + Manually track the duration of an AI operation. + + :param duration: Duration in milliseconds. + """ + self.ld_client.track('$ld:ai:duration:total', self.context, self.__get_track_data(), duration) - def track_duration_of(self, func, *args, **kwargs): + def track_duration_of(self, func): + """ + Automatically track the duration of an AI operation. + + :param func: Function to track. + :return: Result of the tracked function. + """ start_time = time.time() - result = func(*args, **kwargs) + result = func() end_time = time.time() duration = int((end_time - start_time) * 1000) # duration in milliseconds self.track_duration(duration) return result - def track_error(self, error: int) -> None: - self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error) - def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: + """ + Track user feedback for an AI operation. + + :param feedback: Dictionary containing feedback kind. 
+ """ if feedback['kind'] == FeedbackKind.Positive: - self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1) + self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.__get_track_data(), 1) elif feedback['kind'] == FeedbackKind.Negative: - self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1) + self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.__get_track_data(), 1) + + def track_success(self) -> None: + """ + Track a successful AI generation. + """ + self.ld_client.track('$ld:ai:generation', self.context, self.__get_track_data(), 1) - def track_generation(self, generation: int) -> None: - self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation) + def track_openai(self, func): + """ + Track OpenAI-specific operations. - def track_openai(self, func, *args, **kwargs): - result = self.track_duration_of(func, *args, **kwargs) + :param func: Function to track. + :return: Result of the tracked function. + """ + result = self.track_duration_of(func) if result.usage: self.track_tokens(OpenAITokenUsage(result.usage)) return result - def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None: + def track_bedrock_converse(self, res: dict) -> dict: + """ + Track AWS Bedrock conversation operations. + + :param res: Response dictionary from Bedrock. + :return: The original response dictionary. + """ + status_code = res.get('$metadata', {}).get('httpStatusCode', 0) + if status_code == 200: + self.track_success() + elif status_code >= 400: + # Potentially add error tracking in the future. + pass + if res.get('metrics', {}).get('latencyMs'): + self.track_duration(res['metrics']['latencyMs']) + if res.get('usage'): + self.track_tokens(BedrockTokenUsage(res['usage'])) + return res + + def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None: + """ + Track token usage metrics. + + :param tokens: Token usage data from either custom, OpenAI, or Bedrock sources. 
+ """ token_metrics = tokens.to_metrics() - if token_metrics['total'] > 0: - self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total']) - if token_metrics['input'] > 0: - self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input']) - if token_metrics['output'] > 0: - self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output']) \ No newline at end of file + if token_metrics.total > 0: + self.ld_client.track('$ld:ai:tokens:total', self.context, self.__get_track_data(), token_metrics.total) + if token_metrics.input > 0: + self.ld_client.track('$ld:ai:tokens:input', self.context, self.__get_track_data(), token_metrics.input) + if token_metrics.output > 0: + self.ld_client.track('$ld:ai:tokens:output', self.context, self.__get_track_data(), token_metrics.output) diff --git a/ldai/types.py b/ldai/types.py deleted file mode 100644 index efa8300..0000000 --- a/ldai/types.py +++ /dev/null @@ -1,85 +0,0 @@ -from enum import Enum -from typing import Callable -from dataclasses import dataclass - -@dataclass -class TokenMetrics(): - total: int - input: int - output: int # type: ignore - -@dataclass - -class AIConfigData(): - config: dict - prompt: any - _ldMeta: dict - -class AITracker(): - track_duration: Callable[..., None] - track_tokens: Callable[..., None] - track_error: Callable[..., None] - track_generation: Callable[..., None] - track_feedback: Callable[..., None] - -class AIConfig(): - def __init__(self, config: AIConfigData, tracker: AITracker): - self.config = config - self.tracker = tracker - -class FeedbackKind(Enum): - Positive = "positive" - Negative = "negative" - -@dataclass - -class TokenUsage(): - total_tokens: int - prompt_tokens: int - completion_tokens: int - - def to_metrics(self): - return { - 'total': self['total_tokens'], - 'input': self['prompt_tokens'], - 'output': self['completion_tokens'], - } - -class OpenAITokenUsage: - def __init__(self, data: any): - self.total_tokens = data.total_tokens - self.prompt_tokens = data.prompt_tokens - self.completion_tokens = data.completion_tokens - - def to_metrics(self) -> TokenMetrics: - return { - 'total': self.total_tokens, - 'input': self.prompt_tokens, - 'output': self.completion_tokens, - } - -class UnderscoreTokenUsage: - def __init__(self, data: dict): - self.total_tokens = data.get('total_tokens', 0) - self.prompt_tokens = data.get('prompt_tokens', 0) - self.completion_tokens = data.get('completion_tokens', 0) - - def to_metrics(self) -> TokenMetrics: - return { - 'total': self.total_tokens, - 'input': self.prompt_tokens, - 'output': self.completion_tokens, - } - -class BedrockTokenUsage: - def __init__(self, data: dict): - self.totalTokens = data.get('totalTokens', 0) - self.inputTokens = data.get('inputTokens', 0) - self.outputTokens = data.get('outputTokens', 0) - - def to_metrics(self) -> TokenMetrics: - return { - 'total': self.totalTokens, - 'input': self.inputTokens, - 'output': self.outputTokens, - } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 3b527a4..5829046 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,11 @@ [tool.poetry] name = "launchdarkly-server-sdk-ai" -version = "0.0.1" +version = "0.1.0" description = "LaunchDarkly SDK for AI" authors = ["LaunchDarkly "] license = "Apache-2.0" readme = "README.md" -homepage = "https://docs.launchdarkly.com/sdk/server-side/python-ai" +homepage = "https://docs.launchdarkly.com/sdk/ai/python" 
repository = "https://github.com/launchdarkly/python-server-sdk-ai" documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/" classifiers = [