feat: SDK Changes in preparation for release #7

Merged 22 commits on Nov 8, 2024. (Diff shown reflects changes from 14 commits.)
2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.0.0"
+  ".": "0.1.0"
 }
4 changes: 1 addition & 3 deletions CONTRIBUTING.md
@@ -4,7 +4,7 @@ LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkl

## Submitting bug reports and feature requests

-The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-AI/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days.
+The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk-ai/issues) in the SDK repository. Bug reports and feature requests specific to this library should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days.

## Submitting pull requests

@@ -55,8 +55,6 @@ make lint

The library's module structure is as follows:

-<!-- TODO: Add structure description -->
-
### Type hints

Python does not require the use of type hints, but they can be extremely helpful for spotting mistakes and for improving the IDE experience, so we should always use them in the library. Every method in the public API is expected to have type hints for all non-`self` parameters, and for its return value if any.
13 changes: 1 addition & 12 deletions README.md
@@ -12,18 +12,7 @@ This version of the library has a minimum Python version of 3.8.

## Getting started

-Install the package
-
-    $ pip install launchdarkly-server-sdk-ai
-
-The provided `TracingHook` can be setup as shown below:
-
-<!-- TODO: Install instructions -->
-
-```python
-import ldclient
-
-```
+Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/ai/python) for instructions on getting started with using the SDK.

## Learn more

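With the inline quickstart removed in favor of the reference guide, a brief hedged sketch of getting started with the APIs this PR introduces may still help orient reviewers. The package name comes from the removed install line; the SDK key, flag key, and the `tracker=None` default are placeholders and assumptions, not documented usage:

```python
# Hypothetical quickstart, not part of this PR; 'sdk-key' and
# 'model-config' are placeholders.
from ldclient import Config, Context, LDClient
from ldai.client import AIConfig, AIConfigData, LDAIClient

# pip install launchdarkly-server-sdk-ai
ld_client = LDClient(Config('sdk-key'))
ai_client = LDAIClient(ld_client)

context = Context.create('user-key')
# Assumption: a tracker is not useful on a hard-coded default, so None is
# passed here despite the LDAIConfigTracker type hint.
default_value = AIConfig(config=AIConfigData(model=None, prompt=None), tracker=None, enabled=False)

config = ai_client.model_config('model-config', context, default_value, {'name': 'World'})
if config.enabled and config.config.prompt:
    print(config.config.prompt[0].content)
```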
72 changes: 43 additions & 29 deletions ldai/client.py
@@ -1,53 +1,67 @@
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Literal, Optional
 from ldclient import Context
 from ldclient.client import LDClient
 import chevron
 
 from ldai.tracker import LDAIConfigTracker
-from ldai.types import AIConfig
+from dataclasses import dataclass
+
+@dataclass
+class LDMessage():
+    role: Literal['system', 'user', 'assistant']
+    content: str
+
+@dataclass
+class AIConfigData():
+    model: Optional[dict]
+    prompt: Optional[List[LDMessage]]
+
+class AIConfig():
+    def __init__(self, config: AIConfigData, tracker: LDAIConfigTracker, enabled: bool):
+        self.config = config
+        self.tracker = tracker
+        self.enabled = enabled
 
 class LDAIClient:
     """The LaunchDarkly AI SDK client object."""
 
     def __init__(self, client: LDClient):
         self.client = client
 
-    def model_config(self, key: str, context: Context, default_value: str, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
-        """Get the value of a model configuration asynchronously.
-
-        Args:
-            key: The key of the model configuration.
-            context: The context to evaluate the model configuration in.
-            default_value: The default value of the model configuration.
-            variables: Additional variables for the model configuration.
-
-        Returns:
-            The value of the model configuration.
+    def model_config(self, key: str, context: Context, default_value: AIConfig, variables: Optional[Dict[str, Any]] = None) -> AIConfig:
+        """
+        Get the value of a model configuration asynchronously.
+
+        :param key: The key of the model configuration.
+        :param context: The context to evaluate the model configuration in.
+        :param default_value: The default value of the model configuration.
+        :param variables: Additional variables for the model configuration.
+        :return: The value of the model configuration.
         """
         variation = self.client.variation(key, context, default_value)
 
-        all_variables = {'ldctx': context}
+        all_variables = {}
         if variables:
             all_variables.update(variables)
+        all_variables['ldctx'] = context
+        print(variation)
+        if isinstance(variation['prompt'], list) and all(isinstance(entry, dict) for entry in variation['prompt']):
+            variation['prompt'] = [
+                LDMessage(
+                    role=entry['role'],
+                    content=self.interpolate_template(entry['content'], all_variables)
+                )
+                for entry in variation['prompt']
+            ]
 
-        variation['prompt'] = [
-            {
-                **entry,
-                'content': self.interpolate_template(entry['content'], all_variables)
-            }
-            for entry in variation['prompt']
-        ]
-
-        return AIConfig(config=variation, tracker=LDAIConfigTracker(self.client, variation['_ldMeta']['variationId'], key, context))
+        enabled = variation.get('_ldMeta', {}).get('enabled', False)
+        return AIConfig(config=AIConfigData(model=variation['model'], prompt=variation['prompt']), tracker=LDAIConfigTracker(self.client, variation.get('_ldMeta', {}).get('versionKey', ''), key, context), enabled=bool(enabled))
 
     def interpolate_template(self, template: str, variables: Dict[str, Any]) -> str:
-        """Interpolate the template with the given variables.
-
-        Args:
-            template: The template string.
-            variables: The variables to interpolate into the template.
-
-        Returns:
-            The interpolated string.
+        """
+        Interpolate the template with the given variables.
+
+        :template: The template string.
+        :variables: The variables to interpolate into the template.
+        :return: The interpolated string.
        """
         return chevron.render(template, variables)
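The interpolation step above is plain Mustache templating via chevron, with the evaluation context merged in last under the `ldctx` key so caller-supplied variables cannot shadow it. A standalone sketch of that behavior, mirroring what the `ctx-interpolation` test below asserts:

```python
import chevron
from ldclient import Context

# 'ldctx' is added after caller-supplied variables, mirroring model_config.
context = Context.builder('user-key').name('Sandy').build()
variables = {'name': 'World', 'ldctx': context}

print(chevron.render('Hello, {{name}}!', variables))        # Hello, World!
# Dotted lookup reaches the Context's 'name' attribute, which is what the
# 'ctx-interpolation' test below relies on.
print(chevron.render('Hello, {{ldctx.name}}!', variables))  # Hello, Sandy!
```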
105 changes: 105 additions & 0 deletions ldai/testing/test_model_config.py
@@ -0,0 +1,105 @@
import pytest
from ldclient import LDClient, Context, Config
from ldclient.integrations.test_data import TestData
from ldai.client import AIConfig, AIConfigData, LDAIClient, LDMessage
from ldai.tracker import LDAIConfigTracker
from ldclient.testing.builders import *


@pytest.fixture
def td() -> TestData:
td = TestData.data_source()
td.update(td.flag('model-config').variations({
'model': { 'modelId': 'fakeModel'},
'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}],
'_ldMeta': {'enabled': True, 'versionKey': 'abcd'}
}, "green").variation_for_all(0))

td.update(td.flag('multiple-prompt').variations({
'model': { 'modelId': 'fakeModel'},
'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}, {'role': 'user', 'content': 'The day is, {{day}}!'}],
'_ldMeta': {'enabled': True, 'versionKey': 'abcd'}
}, "green").variation_for_all(0))

td.update(td.flag('ctx-interpolation').variations({
'model': { 'modelId': 'fakeModel'},
'prompt': [{'role': 'system', 'content': 'Hello, {{ldctx.name}}!'}],
'_ldMeta': {'enabled': True, 'versionKey': 'abcd'}
}).variation_for_all(0))

td.update(td.flag('off-config').variations({
'model': { 'modelId': 'fakeModel'},
'prompt': [{'role': 'system', 'content': 'Hello, {{name}}!'}],
'_ldMeta': {'enabled': False, 'versionKey': 'abcd'}
}).variation_for_all(0))

return td

@pytest.fixture
def client(td: TestData) -> LDClient:
config = Config('sdk-key', update_processor_class=td, send_events=False)
return LDClient(config=config)

@pytest.fixture
def tracker(td: TestData) -> LDAIConfigTracker:
return LDAIConfigTracker(client(td), 'abcd', 'model-config', Context.create('user-key'))

@pytest.fixture
def ldai_client(client: LDClient) -> LDAIClient:
return LDAIClient(client)

def test_model_config_interpolation(ldai_client: LDAIClient):
context = Context.create('user-key')
default_value = AIConfig(config=AIConfigData(model={ 'modelId': 'fakeModel'}, prompt=[LDMessage(role='system', content='Hello, {{name}}!')]), tracker=tracker(td()), enabled=True)
variables = {'name': 'World'}

config = ldai_client.model_config('model-config', context, default_value, variables)

assert config.config.prompt is not None
assert len(config.config.prompt) > 0
assert config.config.prompt[0].content == 'Hello, World!'
assert config.enabled is True

def test_model_config_no_variables(ldai_client: LDAIClient):
context = Context.create('user-key')
default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True)

config = ldai_client.model_config('model-config', context, default_value, {})

assert config.config.prompt is not None
assert len(config.config.prompt) > 0
assert config.config.prompt[0].content == 'Hello, !'
assert config.enabled is True

def test_context_interpolation(ldai_client: LDAIClient):
context = Context.builder('user-key').name("Sandy").build()
default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True)
variables = {'name': 'World'}

config = ldai_client.model_config('ctx-interpolation', context, default_value, variables)

assert config.config.prompt is not None
assert len(config.config.prompt) > 0
assert config.config.prompt[0].content == 'Hello, Sandy!'
assert config.enabled is True

def test_model_config_multiple(ldai_client: LDAIClient):
context = Context.create('user-key')
default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=True)
variables = {'name': 'World', 'day': 'Monday'}

config = ldai_client.model_config('multiple-prompt', context, default_value, variables)

assert config.config.prompt is not None
assert len(config.config.prompt) > 0
assert config.config.prompt[0].content == 'Hello, World!'
assert config.config.prompt[1].content == 'The day is, Monday!'
assert config.enabled is True

def test_model_config_disabled(ldai_client: LDAIClient):
context = Context.create('user-key')
default_value = AIConfig(config=AIConfigData(model={}, prompt=[]), tracker=tracker(td()), enabled=False)

config = ldai_client.model_config('off-config', context, default_value, {})

assert config.enabled is False
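The suite exercises interpolation and the `enabled` bit but not the tracker's event output. A hedged sketch of such a test, assuming `FeedbackKind` can be imported from `ldai.tracker` as defined in this PR and substituting a mock for the real `LDClient`:

```python
from unittest.mock import MagicMock
from ldai.tracker import FeedbackKind

def test_track_feedback_positive():
    # Sketch only: capture the custom event emitted by track_feedback.
    mock_client = MagicMock(spec=LDClient)
    context = Context.create('user-key')
    tracker = LDAIConfigTracker(mock_client, 'abcd', 'model-config', context)
    tracker.track_feedback({'kind': FeedbackKind.Positive})
    mock_client.track.assert_called_once_with(
        '$ld:ai:feedback:user:positive', context,
        {'versionKey': 'abcd', 'configKey': 'model-config'}, 1)
```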
103 changes: 86 additions & 17 deletions ldai/tracker.py
@@ -1,55 +1,124 @@
+from enum import Enum
 import time
 from typing import Dict, Union
 from ldclient import Context, LDClient
-from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage
+from dataclasses import dataclass
+
+@dataclass
+class TokenMetrics():
+    total: int
+    input: int
+    output: int  # type: ignore
+
+@dataclass
+class FeedbackKind(Enum):
+    Positive = "positive"
+    Negative = "negative"
+
+@dataclass
+class TokenUsage():
+    total_tokens: int
+    prompt_tokens: int
+    completion_tokens: int
+
+    def to_metrics(self):
+        return {
+            'total': self['total_tokens'],
+            'input': self['prompt_tokens'],
+            'output': self['completion_tokens'],
+        }
+
+@dataclass
+class LDOpenAIUsage():
+    total_tokens: int
+    prompt_tokens: int
+    completion_tokens: int
+
+@dataclass
+class OpenAITokenUsage:
+    def __init__(self, data: LDOpenAIUsage):
+        self.total_tokens = data.total_tokens
+        self.prompt_tokens = data.prompt_tokens
+        self.completion_tokens = data.completion_tokens
+
+    def to_metrics(self) -> TokenMetrics:
+        return TokenMetrics(
+            total=self.total_tokens,
+            input=self.prompt_tokens,
+            output=self.completion_tokens,
+        )
+
+@dataclass
+class BedrockTokenUsage:
+    def __init__(self, data: dict):
+        self.totalTokens = data.get('totalTokens', 0)
+        self.inputTokens = data.get('inputTokens', 0)
+        self.outputTokens = data.get('outputTokens', 0)
+
+    def to_metrics(self) -> TokenMetrics:
+        return TokenMetrics(
+            total=self.totalTokens,
+            input=self.inputTokens,
+            output=self.outputTokens,
+        )
 
 class LDAIConfigTracker:
-    def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context):
+    def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context):
         self.ld_client = ld_client
-        self.variation_id = variation_id
+        self.version_key = version_key
         self.config_key = config_key
         self.context = context
 
     def get_track_data(self):
         return {
-            'variationId': self.variation_id,
+            'versionKey': self.version_key,
             'configKey': self.config_key,
         }
 
     def track_duration(self, duration: int) -> None:
         self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration)
 
-    def track_duration_of(self, func, *args, **kwargs):
+    def track_duration_of(self, func):
         start_time = time.time()
-        result = func(*args, **kwargs)
+        result = func()
         end_time = time.time()
         duration = int((end_time - start_time) * 1000)  # duration in milliseconds
         self.track_duration(duration)
         return result
 
     def track_error(self, error: int) -> None:
         self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error)
 
     def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
         if feedback['kind'] == FeedbackKind.Positive:
             self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1)
         elif feedback['kind'] == FeedbackKind.Negative:
             self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)
 
-    def track_generation(self, generation: int) -> None:
-        self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
+    def track_success(self) -> None:
+        self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1)
 
-    def track_openai(self, func, *args, **kwargs):
-        result = self.track_duration_of(func, *args, **kwargs)
+    def track_openai(self, func):
+        result = self.track_duration_of(func)
         if result.usage:
             self.track_tokens(OpenAITokenUsage(result.usage))
         return result
 
-    def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
+    def track_bedrock_converse(self, res: dict) -> dict:
+        status_code = res.get('$metadata', {}).get('httpStatusCode', 0)
+        if status_code == 200:
+            self.track_success()
+        elif status_code >= 400:
+            # Potentially add error tracking in the future.
+            pass
+        if res.get('metrics', {}).get('latencyMs'):
+            self.track_duration(res['metrics']['latencyMs'])
+        if res.get('usage'):
+            self.track_tokens(BedrockTokenUsage(res['usage']))
+        return res
+
+    def track_tokens(self, tokens: Union[TokenUsage, BedrockTokenUsage]) -> None:
         token_metrics = tokens.to_metrics()
         if token_metrics['total'] > 0:
-            self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total'])
+            self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics.total)
         if token_metrics['input'] > 0:
-            self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input'])
+            self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics.input)
         if token_metrics['output'] > 0:
-            self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output'])
+            self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics.output)
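The new tracker methods are meant to wrap provider calls. A hedged usage sketch follows; `config` is an `AIConfig` returned by `model_config`, while `openai_client`, `bedrock_runtime`, the model IDs, and `bedrock_messages` are assumptions rather than part of this SDK:

```python
# Hypothetical usage sketch, not part of this PR.
# track_openai expects a zero-argument callable (see track_duration_of),
# hence the lambda; token usage is read from result.usage.
completion = config.tracker.track_openai(
    lambda: openai_client.chat.completions.create(
        model='gpt-4o-mini',  # placeholder model ID
        messages=[{'role': m.role, 'content': m.content} for m in config.config.prompt],
    )
)

# For Bedrock's Converse API, pass the raw response dict straight through:
# an HTTP 200 records a success event, metrics.latencyMs records duration,
# and usage records token counts.
response = bedrock_runtime.converse(modelId='placeholder-model-id', messages=bedrock_messages)
config.tracker.track_bedrock_converse(response)
```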