From 780fa159b7dac46d0ebe2045929ba6d1d0fc3f3b Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Wed, 6 Nov 2024 13:22:32 -0500 Subject: [PATCH 01/21] align Python with most recent TypeScript changes --- ldai/client.py | 3 ++- ldai/tracker.py | 25 +++++++++++++++++-------- ldai/types.py | 7 ++++--- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/ldai/client.py b/ldai/client.py index a1463f8..3863d9d 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -38,7 +38,8 @@ def model_config(self, key: str, context: Context, default_value: str, variables for entry in variation['prompt'] ] - return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context)) + enabled = ['_ldMeta'].get('enabled') + return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['versionKey'], key, context, bool(enabled))) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: """Interpolate the template with the given variables. 
diff --git a/ldai/tracker.py b/ldai/tracker.py index 9be9d9c..d0f165d 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -4,15 +4,15 @@ from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage class LDAIConfigTracker: - def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context): + def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context): self.ld_client = ld_client - self.variation_id = variation_id + self.version_key = version_key self.config_key = config_key self.context = context def get_track_data(self): return { - 'variationId': self.variation_id, + 'versionKey': self.version_key, 'configKey': self.config_key, } @@ -27,17 +27,14 @@ def track_duration_of(self, func, *args, **kwargs): self.track_duration(duration) return result - def track_error(self, error: int) -> None: - self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error) - def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: if feedback['kind'] == FeedbackKind.Positive: self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1) elif feedback['kind'] == FeedbackKind.Negative: self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1) - def track_generation(self, generation: int) -> None: - self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation) + def track_success(self) -> None: + self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1) def track_openai(self, func, *args, **kwargs): result = self.track_duration_of(func, *args, **kwargs) @@ -45,6 +42,18 @@ def track_openai(self, func, *args, **kwargs): self.track_tokens(OpenAITokenUsage(result.usage)) return result + def track_bedrock_converse(self, res: dict) -> dict: + if res.get('$metadata', {}).get('httpStatusCode') == 200: + self.track_success() + elif 
res.get('$metadata', {}).get('httpStatusCode') and res['$metadata']['httpStatusCode'] >= 400: + # Potentially add error tracking in the future. + pass + if res.get('metrics', {}).get('latencyMs'): + self.track_duration(res['metrics']['latencyMs']) + if res.get('usage'): + self.track_tokens(BedrockTokenUsage(res['usage'])) + return res + def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None: token_metrics = tokens.to_metrics() if token_metrics['total'] > 0: diff --git a/ldai/types.py b/ldai/types.py index efa8300..2b2d3d3 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -23,16 +23,16 @@ class AITracker(): track_feedback: Callable[..., None] class AIConfig(): - def __init__(self, config: AIConfigData, tracker: AITracker): + def __init__(self, config: AIConfigData, tracker: AITracker, enabled: bool): self.config = config self.tracker = tracker + self.enabled = enabled +@dataclass class FeedbackKind(Enum): Positive = "positive" Negative = "negative" -@dataclass - class TokenUsage(): total_tokens: int prompt_tokens: int @@ -45,6 +45,7 @@ def to_metrics(self): 'output': self['completion_tokens'], } +@dataclass class OpenAITokenUsage: def __init__(self, data: any): self.total_tokens = data.total_tokens From a1f949050c31c2dc6b282d974c7af7fb50e936c4 Mon Sep 17 00:00:00 2001 From: Dan O'Brien Date: Wed, 6 Nov 2024 15:36:26 -0500 Subject: [PATCH 02/21] Update ldai/client.py Co-authored-by: Matthew M. 
Keeler --- ldai/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldai/client.py b/ldai/client.py index 3863d9d..ef8e99b 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -38,7 +38,7 @@ def model_config(self, key: str, context: Context, default_value: str, variables for entry in variation['prompt'] ] - enabled = ['_ldMeta'].get('enabled') + enabled = variation['_ldMeta'].get('enabled') return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['versionKey'], key, context, bool(enabled))) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: From 7be4d270f690d537ec66c6aa55da27b5aaec62e2 Mon Sep 17 00:00:00 2001 From: Dan O'Brien Date: Wed, 6 Nov 2024 15:36:46 -0500 Subject: [PATCH 03/21] Update ldai/tracker.py Co-authored-by: Matthew M. Keeler --- ldai/tracker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ldai/tracker.py b/ldai/tracker.py index d0f165d..1888626 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -43,9 +43,10 @@ def track_openai(self, func, *args, **kwargs): return result def track_bedrock_converse(self, res: dict) -> dict: - if res.get('$metadata', {}).get('httpStatusCode') == 200: + status_code = res.get('$metadata', {}).get('httpStatusCode', 0) + if status_code == 200: self.track_success() - elif res.get('$metadata', {}).get('httpStatusCode') and res['$metadata']['httpStatusCode'] >= 400: + elif status_code >= 400: # Potentially add error tracking in the future. 
pass if res.get('metrics', {}).get('latencyMs'): From d53c63c8ff88a4c67a4acb2801eecefd04a370ab Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Wed, 6 Nov 2024 16:14:55 -0500 Subject: [PATCH 04/21] review feedback --- .release-please-manifest.json | 2 +- CONTRIBUTING.md | 4 +-- ldai/client.py | 52 +++++++++++++++++------------------ ldai/tracker.py | 8 +++--- ldai/types.py | 1 - pyproject.toml | 2 +- 6 files changed, 32 insertions(+), 37 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 37fcefa..466df71 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.0.0" + ".": "0.1.0" } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 209b0d6..2388545 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkl ## Submitting bug reports and feature requests -The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-AI/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days. +The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-ai/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days. ## Submitting pull requests @@ -55,8 +55,6 @@ make lint The library's module structure is as follows: - - ### Type hints Python does not require the use of type hints, but they can be extremely helpful for spotting mistakes and for improving the IDE experience, so we should always use them in the library. 
Every method in the public API is expected to have type hints for all non-`self` parameters, and for its return value if any. diff --git a/ldai/client.py b/ldai/client.py index ef8e99b..e90ce9c 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -12,43 +12,41 @@ class LDAIClient: def __init__(self, client: LDClient): self.client = client - def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig: - """Get the value of a model configuration asynchronously. - - Args: - key: The key of the model configuration. - context: The context to evaluate the model configuration in. - default_value: The default value of the model configuration. - variables: Additional variables for the model configuration. + def model_config(self, key: str, context: Context, default_value: AIConfig, variables: Optional[Dict[str, Any]] = None) -> AIConfig: + """ + Get the value of a model configuration asynchronously. - Returns: - The value of the model configuration. + :param key: The key of the model configuration. + :param context: The context to evaluate the model configuration in. + :param default_value: The default value of the model configuration. + :param variables: Additional variables for the model configuration. + :return: The value of the model configuration. 
""" variation = self.client.variation(key, context, default_value) - all_variables = {'ldctx': context} + all_variables = {} if variables: all_variables.update(variables) + all_variables['ldctx'] = context - variation['prompt'] = [ - { - **entry, - 'content': self.interpolate_template(entry['content'], all_variables) - } - for entry in variation['prompt'] - ] + if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): + variation['prompt'] = [ + { + 'role': entry['role'], + 'content': self.interpolate_template(entry['content'], all_variables) + } + for entry in variation['prompt'] + ] - enabled = variation['_ldMeta'].get('enabled') - return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['versionKey'], key, context, bool(enabled))) + enabled = variation.get('_ldMeta',{}).get('enabled', False) + return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context, bool(enabled))) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: - """Interpolate the template with the given variables. - - Args: - template: The template string. - variables: The variables to interpolate into the template. + """ + Interpolate the template with the given variables. - Returns: - The interpolated string. + :template: The template string. + :variables: The variables to interpolate into the template. + :return: The interpolated string. 
""" return chevron.render(template, variables) \ No newline at end of file diff --git a/ldai/tracker.py b/ldai/tracker.py index 1888626..e2c8bbc 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -19,9 +19,9 @@ def get_track_data(self): def track_duration(self, duration: int) -> None: self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration) - def track_duration_of(self, func, *args, **kwargs): + def track_duration_of(self, func): start_time = time.time() - result = func(*args, **kwargs) + result = func() end_time = time.time() duration = int((end_time - start_time) * 1000) # duration in milliseconds self.track_duration(duration) @@ -36,8 +36,8 @@ def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: def track_success(self) -> None: self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1) - def track_openai(self, func, *args, **kwargs): - result = self.track_duration_of(func, *args, **kwargs) + def track_openai(self, func): + result = self.track_duration_of(func) if result.usage: self.track_tokens(OpenAITokenUsage(result.usage)) return result diff --git a/ldai/types.py b/ldai/types.py index 2b2d3d3..d4b9f39 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -9,7 +9,6 @@ class TokenMetrics(): output: int # type: ignore @dataclass - class AIConfigData(): config: dict prompt: any diff --git a/pyproject.toml b/pyproject.toml index 87d77f0..38d5243 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "launchdarkly-server-sdk-ai" -version = "0.0.1" +version = "0.1.0" description = "LaunchDarkly SDK for AI" authors = ["LaunchDarkly "] license = "Apache-2.0" From 24e46808f6c3c9a17f105abb0ccd9c83ce6519de Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 07:33:26 -0500 Subject: [PATCH 05/21] add testing --- ldai/testing/test_model_config.py | 101 ++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 
ldai/testing/test_model_config.py diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py new file mode 100644 index 0000000..939c983 --- /dev/null +++ b/ldai/testing/test_model_config.py @@ -0,0 +1,101 @@ +import pytest +from ldclient import LDClient, Context, Config +from ldclient.integrations.test_data import TestData +from ldai.types import AIConfig +from ldai.client import LDAIClient +from ldclient.testing.builders import * + +@pytest.fixture +def td() -> TestData: + td = TestData.data_source() + td.update(td.flag('model-config').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }, "green").variation_for_all(0)) + + td.update(td.flag('multiple-prompt').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}, {'role': 'user', 'content': 'The day is, {{day}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }, "green").variation_for_all(0)) + + td.update(td.flag('ctx-interpolation').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{ldctx.name}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }).variation_for_all(0)) + + td.update(td.flag('off-config').variations({ + 'model': { 'modelId': 'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': False, 'versionKey': 'abcd'} + }).variation_for_all(0)) + + return td + +@pytest.fixture +def client(td: TestData) -> LDClient: + config = Config('sdk-key', update_processor_class=td, send_events=False) + return LDClient(config=config) + +@pytest.fixture +def ldai_client(client: LDClient) -> LDAIClient: + return LDAIClient(client) + +def test_model_config_interpolation(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config={ + 'model': { 'modelId': 
'fakeModel'}, + 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], + '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} + }, tracker=None, enabled=True) + variables = {'name': 'World'} + + config = ldai_client.model_config('model-config', context, default_value, variables) + + assert config.config['prompt'][0]['content'] == 'Hello, World!' + assert config.enabled is True + assert config.tracker.version_key == 'abcd' + +def test_model_config_no_variables(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config={}, tracker=None, enabled=True) + + config = ldai_client.model_config('model-config', context, default_value, {}) + + assert config.config['prompt'][0]['content'] == 'Hello, !' + assert config.enabled is True + assert config.tracker.version_key == 'abcd' + +def test_context_interpolation(ldai_client: LDAIClient): + context = Context.builder('user-key').name("Sandy").build() + default_value = AIConfig(config={}, tracker=None, enabled=True) + variables = {'name': 'World'} + + config = ldai_client.model_config('ctx-interpolation', context, default_value, variables) + + assert config.config['prompt'][0]['content'] == 'Hello, Sandy!' + assert config.enabled is True + assert config.tracker.version_key == 'abcd' + +def test_model_config_disabled(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config={}, tracker=None, enabled=True) + + config = ldai_client.model_config('off-config', context, default_value, {}) + + assert config.enabled is False + assert config.tracker.version_key == 'abcd' + +def test_model_config_multiple(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config={}, tracker=None, enabled=True) + variables = {'name': 'World', 'day': 'Monday'} + + config = ldai_client.model_config('multiple-prompt', context, default_value, variables) + + assert config.config['prompt'][0]['content'] == 'Hello, World!' 
+ assert config.config['prompt'][1]['content'] == 'The day is, Monday!' + assert config.enabled is True + assert config.tracker.version_key == 'abcd' \ No newline at end of file From 5d58b33a1b9466f4e6dfa4997fae80a3aa32bd18 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 07:33:56 -0500 Subject: [PATCH 06/21] README cleanup client changes --- README.md | 13 +------------ ldai/client.py | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index ba7c9d9..cbe23e0 100644 --- a/README.md +++ b/README.md @@ -12,18 +12,7 @@ This version of the library has a minimum Python version of 3.8. ## Getting started -Install the package - - $ pip install launchdarkly-server-sdk-ai - -The provided `TracingHook` can be setup as shown below: - - - -```python -import ldclient - -``` +Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/ai/python) for instructions on getting started with using the SDK. ## Learn more diff --git a/ldai/client.py b/ldai/client.py index e90ce9c..1dbdfb6 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -28,18 +28,18 @@ def model_config(self, key: str, context: Context, default_value: AIConfig, vari if variables: all_variables.update(variables) all_variables['ldctx'] = context - - if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): - variation['prompt'] = [ - { - 'role': entry['role'], - 'content': self.interpolate_template(entry['content'], all_variables) - } - for entry in variation['prompt'] - ] + print(variation) + #if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): + variation['prompt'] = [ + { + 'role': entry['role'], + 'content': self.interpolate_template(entry['content'], all_variables) + } + for entry in variation['prompt'] + ] enabled = variation.get('_ldMeta',{}).get('enabled', False) - return AIConfig(config=variation, 
tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context, bool(enabled))) + return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled)) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: """ From de02914791f342ebf50a9918d65e7e0196c95280 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 09:08:42 -0500 Subject: [PATCH 07/21] add back missing dataclass remove UnderscoreToken class to match TypeScript --- ldai/tracker.py | 4 ++-- ldai/types.py | 14 +------------- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/ldai/tracker.py b/ldai/tracker.py index e2c8bbc..f7bfc51 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -1,7 +1,7 @@ import time from typing import Dict, Union from ldclient import Context, LDClient -from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage +from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage class LDAIConfigTracker: def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context): @@ -55,7 +55,7 @@ def track_bedrock_converse(self, res: dict) -> dict: self.track_tokens(BedrockTokenUsage(res['usage'])) return res - def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None: + def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None: token_metrics = tokens.to_metrics() if token_metrics['total'] > 0: self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total']) diff --git a/ldai/types.py b/ldai/types.py index d4b9f39..8c01094 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -32,6 +32,7 @@ class FeedbackKind(Enum): Positive = "positive" Negative = "negative" +@dataclass class TokenUsage(): total_tokens: int prompt_tokens: int 
@@ -58,19 +59,6 @@ def to_metrics(self) -> TokenMetrics: 'output': self.completion_tokens, } -class UnderscoreTokenUsage: - def __init__(self, data: dict): - self.total_tokens = data.get('total_tokens', 0) - self.prompt_tokens = data.get('prompt_tokens', 0) - self.completion_tokens = data.get('completion_tokens', 0) - - def to_metrics(self) -> TokenMetrics: - return { - 'total': self.total_tokens, - 'input': self.prompt_tokens, - 'output': self.completion_tokens, - } - class BedrockTokenUsage: def __init__(self, data: dict): self.totalTokens = data.get('totalTokens', 0) From 60f7353544877eee5fb298a2ca4e2ef832165b5a Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 09:09:15 -0500 Subject: [PATCH 08/21] fix up types --- ldai/types.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ldai/types.py b/ldai/types.py index 8c01094..f51ae56 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -59,6 +59,7 @@ def to_metrics(self) -> TokenMetrics: 'output': self.completion_tokens, } +@dataclass class BedrockTokenUsage: def __init__(self, data: dict): self.totalTokens = data.get('totalTokens', 0) From 005500805f47a0d36d60ae3689969f4cf044fc73 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 09:10:10 -0500 Subject: [PATCH 09/21] fix project url --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 38d5243..e90bcc0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ description = "LaunchDarkly SDK for AI" authors = ["LaunchDarkly "] license = "Apache-2.0" readme = "README.md" -homepage = "https://docs.launchdarkly.com/sdk/server-side/python-ai" +homepage = "https://docs.launchdarkly.com/sdk/ai/python" repository = "https://github.com/launchdarkly/python-server-sdk-ai" documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/" classifiers = [ From d97db5b27db638e1f9ad4fa1a70b93aaba1089bb Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 
2024 09:33:11 -0500 Subject: [PATCH 10/21] Add LDMessage type --- ldai/types.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ldai/types.py b/ldai/types.py index f51ae56..ae85f52 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Callable +from typing import Callable, List, Literal from dataclasses import dataclass @dataclass @@ -8,10 +8,15 @@ class TokenMetrics(): input: int output: int # type: ignore +@dataclass +class LDMessage(): + role: Literal['system', 'user', 'assistant'] + content: str + @dataclass class AIConfigData(): - config: dict - prompt: any + model: dict + prompt: List[LDMessage] _ldMeta: dict class AITracker(): From 7c02cf07588a1575e61a1055aefe7eb38ee3dee2 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 11:38:43 -0500 Subject: [PATCH 11/21] fixing up types --- ldai/client.py | 16 +++++------ ldai/testing/test_model_config.py | 48 ++++++++++++++++--------------- ldai/tracker.py | 6 ++-- ldai/types.py | 45 +++++++++++++++-------------- 4 files changed, 59 insertions(+), 56 deletions(-) diff --git a/ldai/client.py b/ldai/client.py index 1dbdfb6..b26270c 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -29,14 +29,14 @@ def model_config(self, key: str, context: Context, default_value: AIConfig, vari all_variables.update(variables) all_variables['ldctx'] = context print(variation) - #if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): - variation['prompt'] = [ - { - 'role': entry['role'], - 'content': self.interpolate_template(entry['content'], all_variables) - } - for entry in variation['prompt'] - ] + if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): + variation['prompt'] = [ + { + 'role': entry['role'], + 'content': self.interpolate_template(entry['content'], all_variables) + } + for entry in variation['prompt'] + ] enabled = 
variation.get('_ldMeta',{}).get('enabled', False) return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled)) diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py index 939c983..f2cb92b 100644 --- a/ldai/testing/test_model_config.py +++ b/ldai/testing/test_model_config.py @@ -1,10 +1,13 @@ import pytest from ldclient import LDClient, Context, Config from ldclient.integrations.test_data import TestData -from ldai.types import AIConfig +from ldai.types import AIConfig, AIConfigData, LDMessage from ldai.client import LDAIClient +from ldai.tracker import LDAIConfigTracker from ldclient.testing.builders import * + + @pytest.fixture def td() -> TestData: td = TestData.data_source() @@ -45,57 +48,56 @@ def ldai_client(client: LDClient) -> LDAIClient: def test_model_config_interpolation(ldai_client: LDAIClient): context = Context.create('user-key') - default_value = AIConfig(config={ - 'model': { 'modelId': 'fakeModel'}, - 'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}], - '_ldMeta': {'enabled': True, 'versionKey': 'abcd'} - }, tracker=None, enabled=True) + default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', content='Hello, {{name}}!')], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) variables = {'name': 'World'} config = ldai_client.model_config('model-config', context, default_value, variables) - - assert config.config['prompt'][0]['content'] == 'Hello, World!' + + assert config.config.prompt is not None + assert len(config.config.prompt) > 0 + assert config.config.prompt[0].content == 'Hello, World!' 
assert config.enabled is True - assert config.tracker.version_key == 'abcd' def test_model_config_no_variables(ldai_client: LDAIClient): context = Context.create('user-key') - default_value = AIConfig(config={}, tracker=None, enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) config = ldai_client.model_config('model-config', context, default_value, {}) - assert config.config['prompt'][0]['content'] == 'Hello, !' + assert config.config.prompt is not None + assert len(config.config.prompt) > 0 + assert config.config.prompt[0].content == 'Hello, !' assert config.enabled is True - assert config.tracker.version_key == 'abcd' def test_context_interpolation(ldai_client: LDAIClient): context = Context.builder('user-key').name("Sandy").build() - default_value = AIConfig(config={}, tracker=None, enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) variables = {'name': 'World'} config = ldai_client.model_config('ctx-interpolation', context, default_value, variables) - assert config.config['prompt'][0]['content'] == 'Hello, Sandy!' + assert config.config.prompt is not None + assert len(config.config.prompt) > 0 + assert config.config.prompt[0].content == 'Hello, Sandy!' 
assert config.enabled is True - assert config.tracker.version_key == 'abcd' - + def test_model_config_disabled(ldai_client: LDAIClient): context = Context.create('user-key') - default_value = AIConfig(config={}, tracker=None, enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) config = ldai_client.model_config('off-config', context, default_value, {}) assert config.enabled is False - assert config.tracker.version_key == 'abcd' def test_model_config_multiple(ldai_client: LDAIClient): context = Context.create('user-key') - default_value = AIConfig(config={}, tracker=None, enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) variables = {'name': 'World', 'day': 'Monday'} config = ldai_client.model_config('multiple-prompt', context, default_value, variables) - assert config.config['prompt'][0]['content'] == 'Hello, World!' - assert config.config['prompt'][1]['content'] == 'The day is, Monday!' - assert config.enabled is True - assert config.tracker.version_key == 'abcd' \ No newline at end of file + assert config.config.prompt is not None + assert len(config.config.prompt) > 0 + assert config.config.prompt[0].content == 'Hello, World!' + assert config.config.prompt[1].content == 'The day is, Monday!' 
+ assert config.enabled is True \ No newline at end of file diff --git a/ldai/tracker.py b/ldai/tracker.py index f7bfc51..15e5786 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -58,8 +58,8 @@ def track_bedrock_converse(self, res: dict) -> dict: def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None: token_metrics = tokens.to_metrics() if token_metrics['total'] > 0: - self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total']) + self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics.total) if token_metrics['input'] > 0: - self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input']) + self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics.input) if token_metrics['output'] > 0: - self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output']) \ No newline at end of file + self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics.output) \ No newline at end of file diff --git a/ldai/types.py b/ldai/types.py index ae85f52..46aa8e4 100644 --- a/ldai/types.py +++ b/ldai/types.py @@ -1,7 +1,9 @@ from enum import Enum -from typing import Callable, List, Literal +from typing import Any, Callable, List, Literal, Optional from dataclasses import dataclass +from ldai.tracker import LDAIConfigTracker + @dataclass class TokenMetrics(): total: int @@ -15,19 +17,12 @@ class LDMessage(): @dataclass class AIConfigData(): - model: dict - prompt: List[LDMessage] + model: Optional[dict] + prompt: Optional[List[LDMessage]] _ldMeta: dict -class AITracker(): - track_duration: Callable[..., None] - track_tokens: Callable[..., None] - track_error: Callable[..., None] - track_generation: Callable[..., None] - track_feedback: Callable[..., None] - class AIConfig(): - def __init__(self, config: AIConfigData, tracker: 
AITracker, enabled: bool): + def __init__(self, config: AIConfigData, tracker: LDAIConfigTracker, enabled: bool): self.config = config self.tracker = tracker self.enabled = enabled @@ -50,19 +45,25 @@ def to_metrics(self): 'output': self['completion_tokens'], } +@dataclass +class LDOpenAIUsage(): + total_tokens: int + prompt_tokens: int + completion_tokens: int + @dataclass class OpenAITokenUsage: - def __init__(self, data: any): + def __init__(self, data: LDOpenAIUsage): self.total_tokens = data.total_tokens self.prompt_tokens = data.prompt_tokens self.completion_tokens = data.completion_tokens def to_metrics(self) -> TokenMetrics: - return { - 'total': self.total_tokens, - 'input': self.prompt_tokens, - 'output': self.completion_tokens, - } + return TokenMetrics( + total=self.total_tokens, + input=self.prompt_tokens, + output=self.completion_tokens, + ) @dataclass class BedrockTokenUsage: @@ -72,8 +73,8 @@ def __init__(self, data: dict): self.outputTokens = data.get('outputTokens', 0) def to_metrics(self) -> TokenMetrics: - return { - 'total': self.totalTokens, - 'input': self.inputTokens, - 'output': self.outputTokens, - } \ No newline at end of file + return TokenMetrics( + total=self.totalTokens, + input=self.inputTokens, + output=self.outputTokens, + ) \ No newline at end of file From 5cae6162d6b5c0940b87c4b4ca8d733cf9b18708 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 11:41:14 -0500 Subject: [PATCH 12/21] whitespace cleanup --- ldai/testing/test_model_config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py index f2cb92b..8593087 100644 --- a/ldai/testing/test_model_config.py +++ b/ldai/testing/test_model_config.py @@ -7,7 +7,6 @@ from ldclient.testing.builders import * - @pytest.fixture def td() -> TestData: td = TestData.data_source() @@ -52,7 +51,7 @@ def test_model_config_interpolation(ldai_client: LDAIClient): variables = {'name': 'World'} 
config = ldai_client.model_config('model-config', context, default_value, variables) - + assert config.config.prompt is not None assert len(config.config.prompt) > 0 assert config.config.prompt[0].content == 'Hello, World!' From d1257fc664140a4c82ec7f95343f238404ef4fa7 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 13:24:49 -0500 Subject: [PATCH 13/21] fix tests --- ldai/client.py | 29 ++++++++--- ldai/testing/test_model_config.py | 33 +++++++------ ldai/tracker.py | 61 ++++++++++++++++++++++- ldai/types.py | 80 ------------------------------- 4 files changed, 100 insertions(+), 103 deletions(-) delete mode 100644 ldai/types.py diff --git a/ldai/client.py b/ldai/client.py index b26270c..64ea7ea 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -1,10 +1,25 @@ -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Literal, Optional from ldclient import Context from ldclient.client import LDClient import chevron from ldai.tracker import LDAIConfigTracker -from ldai.types import AIConfig +from dataclasses import dataclass + +@dataclass +class LDMessage(): + role: Literal['system', 'user', 'assistant'] + content: str + +@dataclass +class AIConfigData(): + model: Optional[dict] + prompt: Optional[List[LDMessage]] +class AIConfig(): + def __init__(self, config: AIConfigData, tracker: LDAIConfigTracker, enabled: bool): + self.config = config + self.tracker = tracker + self.enabled = enabled class LDAIClient: """The LaunchDarkly AI SDK client object.""" @@ -31,15 +46,15 @@ def model_config(self, key: str, context: Context, default_value: AIConfig, vari print(variation) if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): variation['prompt'] = [ - { - 'role': entry['role'], - 'content': self.interpolate_template(entry['content'], all_variables) - } + LDMessage( + role=entry['role'], + content=self.interpolate_template(entry['content'], all_variables) + ) for entry in 
variation['prompt'] ] enabled = variation.get('_ldMeta',{}).get('enabled', False) - return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled)) + return AIConfig(config=AIConfigData(model=variation['model'], prompt=variation['prompt']), tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled)) def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: """ diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py index 8593087..ec53ec7 100644 --- a/ldai/testing/test_model_config.py +++ b/ldai/testing/test_model_config.py @@ -1,8 +1,7 @@ import pytest from ldclient import LDClient, Context, Config from ldclient.integrations.test_data import TestData -from ldai.types import AIConfig, AIConfigData, LDMessage -from ldai.client import LDAIClient +from ldai.client import AIConfig, AIConfigData, LDAIClient, LDMessage from ldai.tracker import LDAIConfigTracker from ldclient.testing.builders import * @@ -41,13 +40,17 @@ def client(td: TestData) -> LDClient: config = Config('sdk-key', update_processor_class=td, send_events=False) return LDClient(config=config) +@pytest.fixture +def tracker(td: TestData) -> LDAIConfigTracker: + return LDAIConfigTracker(client(td), 'abcd', 'model-config', Context.create('user-key')) + @pytest.fixture def ldai_client(client: LDClient) -> LDAIClient: return LDAIClient(client) def test_model_config_interpolation(ldai_client: LDAIClient): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', content='Hello, {{name}}!')], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) + default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', 
content='Hello, {{name}}!')]), tracker=tracker(td()), enabled=True) variables = {'name': 'World'} config = ldai_client.model_config('model-config', context, default_value, variables) @@ -59,7 +62,7 @@ def test_model_config_interpolation(ldai_client: LDAIClient): def test_model_config_no_variables(ldai_client: LDAIClient): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True) config = ldai_client.model_config('model-config', context, default_value, {}) @@ -70,7 +73,7 @@ def test_model_config_no_variables(ldai_client: LDAIClient): def test_context_interpolation(ldai_client: LDAIClient): context = Context.builder('user-key').name("Sandy").build() - default_value = AIConfig(config=AIConfigData(model={}, prompt=[], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True) variables = {'name': 'World'} config = ldai_client.model_config('ctx-interpolation', context, default_value, variables) @@ -80,17 +83,9 @@ def test_context_interpolation(ldai_client: LDAIClient): assert config.config.prompt[0].content == 'Hello, Sandy!' 
assert config.enabled is True -def test_model_config_disabled(ldai_client: LDAIClient): - context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) - - config = ldai_client.model_config('off-config', context, default_value, {}) - - assert config.enabled is False - def test_model_config_multiple(ldai_client: LDAIClient): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[], _ldMeta={'enabled': True, 'versionKey': 'abcd'}), tracker=LDAIConfigTracker(), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True) variables = {'name': 'World', 'day': 'Monday'} config = ldai_client.model_config('multiple-prompt', context, default_value, variables) @@ -99,4 +94,12 @@ def test_model_config_multiple(ldai_client: LDAIClient): assert len(config.config.prompt) > 0 assert config.config.prompt[0].content == 'Hello, World!' assert config.config.prompt[1].content == 'The day is, Monday!' 
- assert config.enabled is True \ No newline at end of file + assert config.enabled is True + +def test_model_config_disabled(ldai_client: LDAIClient): + context = Context.create('user-key') + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=False) + + config = ldai_client.model_config('off-config', context, default_value, {}) + + assert config.enabled is False \ No newline at end of file diff --git a/ldai/tracker.py b/ldai/tracker.py index 15e5786..9b1db64 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -1,8 +1,67 @@ +from enum import Enum import time from typing import Dict, Union from ldclient import Context, LDClient -from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage +from dataclasses import dataclass +@dataclass +class TokenMetrics(): + total: int + input: int + output: int # type: ignore + +@dataclass +class FeedbackKind(Enum): + Positive = "positive" + Negative = "negative" + +@dataclass +class TokenUsage(): + total_tokens: int + prompt_tokens: int + completion_tokens: int + + def to_metrics(self): + return { + 'total': self['total_tokens'], + 'input': self['prompt_tokens'], + 'output': self['completion_tokens'], + } + +@dataclass +class LDOpenAIUsage(): + total_tokens: int + prompt_tokens: int + completion_tokens: int + +@dataclass +class OpenAITokenUsage: + def __init__(self, data: LDOpenAIUsage): + self.total_tokens = data.total_tokens + self.prompt_tokens = data.prompt_tokens + self.completion_tokens = data.completion_tokens + + def to_metrics(self) -> TokenMetrics: + return TokenMetrics( + total=self.total_tokens, + input=self.prompt_tokens, + output=self.completion_tokens, + ) + +@dataclass +class BedrockTokenUsage: + def __init__(self, data: dict): + self.totalTokens = data.get('totalTokens', 0) + self.inputTokens = data.get('inputTokens', 0) + self.outputTokens = data.get('outputTokens', 0) + + def to_metrics(self) -> TokenMetrics: + return TokenMetrics( + 
total=self.totalTokens, + input=self.inputTokens, + output=self.outputTokens, + ) + class LDAIConfigTracker: def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context): self.ld_client = ld_client diff --git a/ldai/types.py b/ldai/types.py deleted file mode 100644 index 46aa8e4..0000000 --- a/ldai/types.py +++ /dev/null @@ -1,80 +0,0 @@ -from enum import Enum -from typing import Any, Callable, List, Literal, Optional -from dataclasses import dataclass - -from ldai.tracker import LDAIConfigTracker - -@dataclass -class TokenMetrics(): - total: int - input: int - output: int # type: ignore - -@dataclass -class LDMessage(): - role: Literal['system', 'user', 'assistant'] - content: str - -@dataclass -class AIConfigData(): - model: Optional[dict] - prompt: Optional[List[LDMessage]] - _ldMeta: dict - -class AIConfig(): - def __init__(self, config: AIConfigData, tracker: LDAIConfigTracker, enabled: bool): - self.config = config - self.tracker = tracker - self.enabled = enabled - -@dataclass -class FeedbackKind(Enum): - Positive = "positive" - Negative = "negative" - -@dataclass -class TokenUsage(): - total_tokens: int - prompt_tokens: int - completion_tokens: int - - def to_metrics(self): - return { - 'total': self['total_tokens'], - 'input': self['prompt_tokens'], - 'output': self['completion_tokens'], - } - -@dataclass -class LDOpenAIUsage(): - total_tokens: int - prompt_tokens: int - completion_tokens: int - -@dataclass -class OpenAITokenUsage: - def __init__(self, data: LDOpenAIUsage): - self.total_tokens = data.total_tokens - self.prompt_tokens = data.prompt_tokens - self.completion_tokens = data.completion_tokens - - def to_metrics(self) -> TokenMetrics: - return TokenMetrics( - total=self.total_tokens, - input=self.prompt_tokens, - output=self.completion_tokens, - ) - -@dataclass -class BedrockTokenUsage: - def __init__(self, data: dict): - self.totalTokens = data.get('totalTokens', 0) - self.inputTokens = data.get('inputTokens', 0) 
- self.outputTokens = data.get('outputTokens', 0) - - def to_metrics(self) -> TokenMetrics: - return TokenMetrics( - total=self.totalTokens, - input=self.inputTokens, - output=self.outputTokens, - ) \ No newline at end of file From 742c38527059866fd1c7190c2f9260e02c5a2ad6 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 14:04:10 -0500 Subject: [PATCH 14/21] remove erroneous print fix access of class --- ldai/client.py | 2 +- ldai/tracker.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ldai/client.py b/ldai/client.py index 64ea7ea..fdb3500 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -43,7 +43,7 @@ def model_config(self, key: str, context: Context, default_value: AIConfig, vari if variables: all_variables.update(variables) all_variables['ldctx'] = context - print(variation) + if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']): variation['prompt'] = [ LDMessage( role=entry['role'], diff --git a/ldai/tracker.py b/ldai/tracker.py index 9b1db64..62ee41e 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -116,9 +116,9 @@ def track_bedrock_converse(self, res: dict) -> dict: def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None: token_metrics = tokens.to_metrics() - if token_metrics['total'] > 0: + if token_metrics.total > 0: self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics.total) - if token_metrics['input'] > 0: + if token_metrics.input > 0: self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics.input) - if token_metrics['output'] > 0: + if token_metrics.output > 0: self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics.output) \ No newline at end of file From 59928eafa269ef040e2a323ceb639d796f41e6ac Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 14:13:05 -0500 Subject: [PATCH 15/21] fix: Fixtures are not meant to be
called directly, --- ldai/testing/test_model_config.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py index ec53ec7..65a80a3 100644 --- a/ldai/testing/test_model_config.py +++ b/ldai/testing/test_model_config.py @@ -48,9 +48,9 @@ def tracker(td: TestData) -> LDAIConfigTracker: def ldai_client(client: LDClient) -> LDAIClient: return LDAIClient(client) -def test_model_config_interpolation(ldai_client: LDAIClient): +def test_model_config_interpolation(ldai_client: LDAIClient, td): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', content='Hello, {{name}}!')]), tracker=tracker(td()), enabled=True) + default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', content='Hello, {{name}}!')]), tracker=tracker(td), enabled=True) variables = {'name': 'World'} config = ldai_client.model_config('model-config', context, default_value, variables) @@ -60,9 +60,9 @@ def test_model_config_interpolation(ldai_client: LDAIClient): assert config.config.prompt[0].content == 'Hello, World!' assert config.enabled is True -def test_model_config_no_variables(ldai_client: LDAIClient): +def test_model_config_no_variables(ldai_client: LDAIClient, td): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td), enabled=True) config = ldai_client.model_config('model-config', context, default_value, {}) @@ -71,9 +71,9 @@ def test_model_config_no_variables(ldai_client: LDAIClient): assert config.config.prompt[0].content == 'Hello, !' 
assert config.enabled is True -def test_context_interpolation(ldai_client: LDAIClient): +def test_context_interpolation(ldai_client: LDAIClient, td): context = Context.builder('user-key').name("Sandy").build() - default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td), enabled=True) variables = {'name': 'World'} config = ldai_client.model_config('ctx-interpolation', context, default_value, variables) @@ -83,9 +83,9 @@ def test_context_interpolation(ldai_client: LDAIClient): assert config.config.prompt[0].content == 'Hello, Sandy!' assert config.enabled is True -def test_model_config_multiple(ldai_client: LDAIClient): +def test_model_config_multiple(ldai_client: LDAIClient, td): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td), enabled=True) variables = {'name': 'World', 'day': 'Monday'} config = ldai_client.model_config('multiple-prompt', context, default_value, variables) @@ -96,9 +96,9 @@ def test_model_config_multiple(ldai_client: LDAIClient): assert config.config.prompt[1].content == 'The day is, Monday!' 
assert config.enabled is True -def test_model_config_disabled(ldai_client: LDAIClient): +def test_model_config_disabled(ldai_client: LDAIClient, td): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=False) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td), enabled=False) config = ldai_client.model_config('off-config', context, default_value, {}) From 6d5d8fb58cb1eb7babbf5da776bae2bc77657827 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 14:16:02 -0500 Subject: [PATCH 16/21] fix: More fixture changes --- ldai/testing/test_model_config.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py index 65a80a3..977baf3 100644 --- a/ldai/testing/test_model_config.py +++ b/ldai/testing/test_model_config.py @@ -41,16 +41,16 @@ def client(td: TestData) -> LDClient: return LDClient(config=config) @pytest.fixture -def tracker(td: TestData) -> LDAIConfigTracker: - return LDAIConfigTracker(client(td), 'abcd', 'model-config', Context.create('user-key')) +def tracker() -> LDAIConfigTracker: + return LDAIConfigTracker(client, 'abcd', 'model-config', Context.create('user-key')) @pytest.fixture def ldai_client(client: LDClient) -> LDAIClient: return LDAIClient(client) -def test_model_config_interpolation(ldai_client: LDAIClient, td): +def test_model_config_interpolation(ldai_client: LDAIClient, tracker): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', content='Hello, {{name}}!')]), tracker=tracker(td), enabled=True) + default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', content='Hello, {{name}}!')]), tracker=tracker, enabled=True) variables = {'name': 'World'} config = 
ldai_client.model_config('model-config', context, default_value, variables) @@ -60,9 +60,9 @@ def test_model_config_interpolation(ldai_client: LDAIClient, td): assert config.config.prompt[0].content == 'Hello, World!' assert config.enabled is True -def test_model_config_no_variables(ldai_client: LDAIClient, td): +def test_model_config_no_variables(ldai_client: LDAIClient, tracker): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True) config = ldai_client.model_config('model-config', context, default_value, {}) @@ -71,9 +71,9 @@ def test_model_config_no_variables(ldai_client: LDAIClient, td): assert config.config.prompt[0].content == 'Hello, !' assert config.enabled is True -def test_context_interpolation(ldai_client: LDAIClient, td): +def test_context_interpolation(ldai_client: LDAIClient, tracker): context = Context.builder('user-key').name("Sandy").build() - default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True) variables = {'name': 'World'} config = ldai_client.model_config('ctx-interpolation', context, default_value, variables) @@ -83,9 +83,9 @@ def test_context_interpolation(ldai_client: LDAIClient, td): assert config.config.prompt[0].content == 'Hello, Sandy!' 
assert config.enabled is True -def test_model_config_multiple(ldai_client: LDAIClient, td): +def test_model_config_multiple(ldai_client: LDAIClient, tracker): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td), enabled=True) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=True) variables = {'name': 'World', 'day': 'Monday'} config = ldai_client.model_config('multiple-prompt', context, default_value, variables) @@ -96,9 +96,9 @@ def test_model_config_multiple(ldai_client: LDAIClient, td): assert config.config.prompt[1].content == 'The day is, Monday!' assert config.enabled is True -def test_model_config_disabled(ldai_client: LDAIClient, td): +def test_model_config_disabled(ldai_client: LDAIClient, tracker): context = Context.create('user-key') - default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td), enabled=False) + default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker, enabled=False) config = ldai_client.model_config('off-config', context, default_value, {}) From bf61cc36eab6d9f2911e685b45728da831124300 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 14:18:43 -0500 Subject: [PATCH 17/21] fix: change client call --- ldai/testing/test_model_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py index 977baf3..0bbbb91 100644 --- a/ldai/testing/test_model_config.py +++ b/ldai/testing/test_model_config.py @@ -41,7 +41,7 @@ def client(td: TestData) -> LDClient: return LDClient(config=config) @pytest.fixture -def tracker() -> LDAIConfigTracker: +def tracker(client: LDClient) -> LDAIConfigTracker: return LDAIConfigTracker(client, 'abcd', 'model-config', Context.create('user-key')) @pytest.fixture From c69a0c82848a7efe0bdfd2ea8b515dea5a4265fc Mon Sep 17 00:00:00 2001 From: Daniel OBrien 
Date: Thu, 7 Nov 2024 15:14:59 -0500 Subject: [PATCH 18/21] doc updates --- ldai/tracker.py | 111 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 109 insertions(+), 2 deletions(-) diff --git a/ldai/tracker.py b/ldai/tracker.py index 62ee41e..fdb0462 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -6,22 +6,44 @@ @dataclass class TokenMetrics(): + """ + Metrics for token usage in AI operations. + + :param total: Total number of tokens used. + :param input: Number of input tokens. + :param output: Number of output tokens. + """ total: int input: int output: int # type: ignore @dataclass class FeedbackKind(Enum): + """ + Types of feedback that can be provided for AI operations. + """ Positive = "positive" Negative = "negative" @dataclass class TokenUsage(): + """ + Tracks token usage for AI operations. + + :param total_tokens: Total number of tokens used. + :param prompt_tokens: Number of tokens in the prompt. + :param completion_tokens: Number of tokens in the completion. + """ total_tokens: int prompt_tokens: int completion_tokens: int def to_metrics(self): + """ + Convert token usage to metrics format. + + :return: Dictionary containing token metrics. + """ return { 'total': self['total_tokens'], 'input': self['prompt_tokens'], @@ -30,18 +52,38 @@ def to_metrics(self): @dataclass class LDOpenAIUsage(): + """ + LaunchDarkly-specific OpenAI usage tracking. + + :param total_tokens: Total number of tokens used. + :param prompt_tokens: Number of tokens in the prompt. + :param completion_tokens: Number of tokens in the completion. + """ total_tokens: int prompt_tokens: int completion_tokens: int @dataclass class OpenAITokenUsage: + """ + Tracks OpenAI-specific token usage. + """ def __init__(self, data: LDOpenAIUsage): + """ + Initialize OpenAI token usage tracking. + + :param data: OpenAI usage data. 
+ """ self.total_tokens = data.total_tokens self.prompt_tokens = data.prompt_tokens self.completion_tokens = data.completion_tokens def to_metrics(self) -> TokenMetrics: + """ + Convert OpenAI token usage to metrics format. + + :return: TokenMetrics object containing usage data. + """ return TokenMetrics( total=self.total_tokens, input=self.prompt_tokens, @@ -50,12 +92,25 @@ def to_metrics(self) -> TokenMetrics: @dataclass class BedrockTokenUsage: + """ + Tracks AWS Bedrock-specific token usage. + """ def __init__(self, data: dict): + """ + Initialize Bedrock token usage tracking. + + :param data: Dictionary containing Bedrock usage data. + """ self.totalTokens = data.get('totalTokens', 0) self.inputTokens = data.get('inputTokens', 0) self.outputTokens = data.get('outputTokens', 0) def to_metrics(self) -> TokenMetrics: + """ + Convert Bedrock token usage to metrics format. + + :return: TokenMetrics object containing usage data. + """ return TokenMetrics( total=self.totalTokens, input=self.inputTokens, @@ -63,22 +118,49 @@ def to_metrics(self) -> TokenMetrics: ) class LDAIConfigTracker: + """ + Tracks configuration and usage metrics for LaunchDarkly AI operations. + """ def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context): + """ + Initialize an AI configuration tracker. + + :param ld_client: LaunchDarkly client instance. + :param version_key: Version key for tracking. + :param config_key: Configuration key for tracking. + :param context: Context for evaluation. + """ self.ld_client = ld_client self.version_key = version_key self.config_key = config_key self.context = context def get_track_data(self): + """ + Get tracking data for events. + + :return: Dictionary containing version and config keys. + """ return { 'versionKey': self.version_key, 'configKey': self.config_key, } def track_duration(self, duration: int) -> None: + """ + Track the duration of an AI operation. + + :param duration: Duration in milliseconds. 
+ """ self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration) def track_duration_of(self, func): + """ + Track the duration of a function execution. + + :param func: Function to track. + :return: Result of the tracked function. + """ start_time = time.time() result = func() end_time = time.time() @@ -87,21 +169,41 @@ def track_duration_of(self, func): return result def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: + """ + Track user feedback for an AI operation. + + :param feedback: Dictionary containing feedback kind. + """ if feedback['kind'] == FeedbackKind.Positive: self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1) elif feedback['kind'] == FeedbackKind.Negative: self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1) def track_success(self) -> None: + """ + Track a successful AI generation. + """ self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1) def track_openai(self, func): + """ + Track OpenAI-specific operations. + + :param func: Function to track. + :return: Result of the tracked function. + """ result = self.track_duration_of(func) if result.usage: self.track_tokens(OpenAITokenUsage(result.usage)) return result - def track_bedrock_converse(self, res: dict) -> dict: + def track_bedrock_converse(self, res: dict) -> dict: + """ + Track AWS Bedrock conversation operations. + + :param res: Response dictionary from Bedrock. + :return: The original response dictionary. + """ status_code = res.get('$metadata', {}).get('httpStatusCode', 0) if status_code == 200: self.track_success() @@ -115,10 +217,15 @@ def track_bedrock_converse(self, res: dict) -> dict: return res def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None: + """ + Track token usage metrics. + + :param tokens: Token usage data from either custom, OpenAI, or Bedrock sources. 
+ """ token_metrics = tokens.to_metrics() if token_metrics.total > 0: self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics.total) if token_metrics.input > 0: self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics.input) if token_metrics.output > 0: - self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics.output) \ No newline at end of file + self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics.output) From 8c608527ae9d29f84ad9ae196a5bc4736758531a Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 15:16:42 -0500 Subject: [PATCH 19/21] clarify docs --- ldai/tracker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ldai/tracker.py b/ldai/tracker.py index fdb0462..1d9bf6d 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -148,7 +148,7 @@ def get_track_data(self): def track_duration(self, duration: int) -> None: """ - Track the duration of an AI operation. + Manually track the duration of an AI operation. :param duration: Duration in milliseconds. """ @@ -156,7 +156,7 @@ def track_duration(self, duration: int) -> None: def track_duration_of(self, func): """ - Track the duration of a function execution. + Automatically track the duration of an AI operation. :param func: Function to track. :return: Result of the tracked function. 
From 9b86f7f0383d5ee4cf5792135df0922d53c42cd2 Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Thu, 7 Nov 2024 15:20:47 -0500 Subject: [PATCH 20/21] make methods private --- ldai/client.py | 4 ++-- ldai/tracker.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ldai/client.py b/ldai/client.py index fdb3500..0dcc6e8 100644 --- a/ldai/client.py +++ b/ldai/client.py @@ -48,7 +48,7 @@ def model_config(self, key: str, context: Context, default_value: AIConfig, vari variation['prompt'] = [ LDMessage( role=entry['role'], - content=self.interpolate_template(entry['content'], all_variables) + content=self.__interpolate_template(entry['content'], all_variables) ) for entry in variation['prompt'] ] @@ -56,7 +56,7 @@ def model_config(self, key: str, context: Context, default_value: AIConfig, vari enabled = variation.get('_ldMeta',{}).get('enabled', False) return AIConfig(config=AIConfigData(model=variation['model'], prompt=variation['prompt']), tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled)) - def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: + def __interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: """ Interpolate the template with the given variables. diff --git a/ldai/tracker.py b/ldai/tracker.py index 1d9bf6d..1c3bbc7 100644 --- a/ldai/tracker.py +++ b/ldai/tracker.py @@ -135,7 +135,7 @@ def __init__(self, ld_client: LDClient, version_key: str, config_key: str, conte self.config_key = config_key self.context = context - def get_track_data(self): + def __get_track_data(self): """ Get tracking data for events. @@ -152,7 +152,7 @@ def track_duration(self, duration: int) -> None: :param duration: Duration in milliseconds. 
""" - self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration) + self.ld_client.track('$ld:ai:duration:total', self.context, self.__get_track_data(), duration) def track_duration_of(self, func): """ @@ -175,15 +175,15 @@ def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: :param feedback: Dictionary containing feedback kind. """ if feedback['kind'] == FeedbackKind.Positive: - self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1) + self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.__get_track_data(), 1) elif feedback['kind'] == FeedbackKind.Negative: - self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1) + self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.__get_track_data(), 1) def track_success(self) -> None: """ Track a successful AI generation. """ - self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1) + self.ld_client.track('$ld:ai:generation', self.context, self.__get_track_data(), 1) def track_openai(self, func): """ @@ -224,8 +224,8 @@ def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None: """ token_metrics = tokens.to_metrics() if token_metrics.total > 0: - self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics.total) + self.ld_client.track('$ld:ai:tokens:total', self.context, self.__get_track_data(), token_metrics.total) if token_metrics.input > 0: - self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics.input) + self.ld_client.track('$ld:ai:tokens:input', self.context, self.__get_track_data(), token_metrics.input) if token_metrics.output > 0: - self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics.output) + self.ld_client.track('$ld:ai:tokens:output', self.context, self.__get_track_data(), 
token_metrics.output) From e37d4baf95bdc9c9cb0c8e463b13d9c8030f26cb Mon Sep 17 00:00:00 2001 From: Daniel OBrien Date: Fri, 8 Nov 2024 09:09:02 -0500 Subject: [PATCH 21/21] newline EOF --- ldai/testing/test_model_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldai/testing/test_model_config.py b/ldai/testing/test_model_config.py index 0bbbb91..826f78e 100644 --- a/ldai/testing/test_model_config.py +++ b/ldai/testing/test_model_config.py @@ -102,4 +102,4 @@ def test_model_config_disabled(ldai_client: LDAIClient, tracker): config = ldai_client.model_config('off-config', context, default_value, {}) - assert config.enabled is False \ No newline at end of file + assert config.enabled is False