Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: IBM watsonx integration #74

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions examples/anthropic/async-streaming.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import os
import asyncio
from anthropic import AsyncAnthropic
import lunary

client = AsyncAnthropic(
    api_key=os.environ.get("ANTHROPIC_API_KEY"),  # This is the default and can be omitted
)
lunary.monitor(client)


async def main() -> None:
    """Stream a single Claude response and consume all events."""
    # Fixed: the original omitted `stream=True`, so `create` returned a
    # complete Message (not iterable), and used a synchronous `for` on the
    # result. With stream=True the awaited call yields an AsyncStream that
    # must be consumed with `async for`.
    stream = await client.messages.create(
        max_tokens=1024,
        messages=[
            {
                "role": "user",
                "content": "Hello, Claude",
            }
        ],
        model="claude-3-opus-20240229",
        stream=True,
    )
    async for event in stream:
        pass


asyncio.run(main())
26 changes: 26 additions & 0 deletions examples/anthropic/async.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import os
import asyncio
from anthropic import AsyncAnthropic
import lunary

# The SDK reads ANTHROPIC_API_KEY by default; passing it explicitly for clarity.
client = AsyncAnthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
lunary.monitor(client)


async def main() -> None:
    """Send one message to Claude asynchronously and print the reply content."""
    message = await client.messages.create(
        model="claude-3-opus-20240229",
        max_tokens=1024,
        messages=[{"role": "user", "content": "Hello, Claude"}],
    )
    print(message.content)


asyncio.run(main())
22 changes: 22 additions & 0 deletions examples/anthropic/basic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import os
from anthropic import Anthropic
import lunary

client = Anthropic(
    api_key=os.environ.get("ANTHROPIC_API_KEY"),
)
lunary.monitor(client)


message = client.messages.create(
    max_tokens=1024,
    messages=[
        {
            "role": "user",
            "content": "Hello, Claude",
        }
    ],
    model="claude-3-opus-20240229",
)

# Fixed: the original file was truncated mid-expression (`print(message.ro`),
# which is a SyntaxError. Print the response content blocks instead.
print(message.content)
26 changes: 26 additions & 0 deletions examples/anthropic/streaming.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import os
from anthropic import Anthropic
import lunary

client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
lunary.monitor(client)


# With stream=True the SDK returns an iterable of server-sent events rather
# than a complete Message; iterate to drive the request to completion.
stream = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=1024,
    stream=True,
    messages=[{"role": "user", "content": "Hello, Claude"}],
)

for event in stream:
    pass


86 changes: 86 additions & 0 deletions examples/anthropic/tool-call.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
from anthropic import Anthropic
import lunary

client = Anthropic()
lunary.monitor(client)

# Turn 1: send the user question together with the `get_weather` tool
# definition so Claude can decide to request a tool call.
response = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    tools=[
        {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "input_schema": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    }
                },
                "required": ["location"],
            },
        }
    ],
    messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}],
)

# Turn 2: replay the conversation including the assistant's `tool_use` block
# and a matching user `tool_result` (ids must match) so Claude can produce
# the final natural-language answer.
response = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    tools=[
        {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "input_schema": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The unit of temperature, either 'celsius' or 'fahrenheit'"
                    }
                },
                "required": ["location"]
            }
        }
    ],
    messages=[
        {
            "role": "user",
            "content": "What's the weather like in San Francisco?"
        },
        {
            "role": "assistant",
            "content": [
                {
                    "type": "text",
                    "text": "<thinking>I need to use get_weather, and the user wants SF, which is likely San Francisco, CA.</thinking>"
                },
                {
                    "type": "tool_use",
                    "id": "toolu_01A09q90qw90lq917835lq9",
                    "name": "get_weather",
                    "input": {"location": "San Francisco, CA", "unit": "celsius"}
                }
            ]
        },
        {
            "role": "user",
            "content": [
                {
                    "type": "tool_result",
                    "tool_use_id": "toolu_01A09q90qw90lq917835lq9",  # from the API response
                    "content": "65 degrees"  # from running your tool
                }
            ]
        }
    ]
)

print(response)
24 changes: 24 additions & 0 deletions examples/ibm/basic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import os
import json

from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai.foundation_models import ModelInference
import lunary

# SECURITY FIX: the original committed a live watsonx API key and project id
# to the repository. Credentials must come from the environment; the leaked
# key should be revoked.
model = ModelInference(
    model_id="meta-llama/llama-3-1-8b-instruct",
    credentials=Credentials(
        api_key=os.environ["WATSONX_API_KEY"],
        url=os.environ.get("WATSONX_URL", "https://us-south.ml.cloud.ibm.com"),
    ),
    project_id=os.environ["WATSONX_PROJECT_ID"],
)

lunary.monitor_ibm(model)

print(model.model_id)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who won the world series in 2020?"},
]
response = model.chat(messages=messages)

# print(json.dumps(response, indent=4))
# print(response['choices'][0]['message']['content'])
28 changes: 28 additions & 0 deletions examples/tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
from openai import OpenAI
import lunary

client = OpenAI()
lunary.monitor(client)

# One function tool: get_weather(location: str).
weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
        },
    },
}
tools = [weather_tool]

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather like in Paris today?"}],
    tools=tools,
)

print(completion.choices[0].message.tool_calls)
10 changes: 10 additions & 0 deletions lunary/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
from .exceptions import *
from .parsers import default_input_parser, default_output_parser, filter_params, method_input_parser, PydanticHandler
from .openai_utils import OpenAIUtils
from .ibm_utils import IBMUtils
from .event_queue import EventQueue
from .thread import Thread
from .utils import clean_nones, create_uuid_from_string
Expand Down Expand Up @@ -564,6 +565,15 @@ def async_stream_wrapper(*args, **kwargs):
return wrapper


def monitor_ibm(object):
    """Instrument an IBM watsonx `ModelInference` client for Lunary tracking.

    Replaces `object.chat` with a wrapped version that reports inputs and
    outputs as an "llm" run, using the model id as the run name.
    """
    wrapped_chat = wrap(
        object.chat,
        "llm",
        input_parser=IBMUtils.parse_input,
        output_parser=IBMUtils.parse_output,
        name=object.model_id,
    )
    object.chat = wrapped_chat

def monitor(object):
try:
openai_version = parse_version(version("openai"))
Expand Down
83 changes: 83 additions & 0 deletions lunary/anthropic_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
import json, logging
import anthropic

logger = logging.getLogger(__name__)

# Keyword arguments forwarded to the tracker as request metadata ("extra").
# TODO: make sure it's the correct list.
# NOTE(review): this list appears to be copied from the OpenAI integration;
# several entries (e.g. "frequency_penalty", "logit_bias", "n", "audio") are
# not Anthropic Messages API parameters — confirm the correct set.
KWARGS_TO_CAPTURE = [
    "frequency_penalty",
    "functions",
    "logit_bias",
    "max_tokens",
    "max_completion_tokens",
    "n",
    "presence_penalty",
    "response_format",
    "seed",
    "stop",
    "stream",
    "audio",
    "modalities",
    "temperature",
    "tool_choice",
    "tools",
    "tool_calls",
    "top_p",
    "top_k",
    "top_logprobs",
    "logprobs",
    "prediction",
    "service_tier",
    "parallel_tool_calls"
]

class AnthropicUtils:
    """Parsers that normalize Anthropic request/response data for Lunary."""

    @staticmethod
    def parse_message(message):
        """Normalize a message (plain dict or Anthropic SDK object) to a dict.

        Fixes vs. original: input messages are plain dicts, so bare
        `getattr(message, "role")` raised AttributeError; `getattr` without a
        default also raised for SDK objects lacking `tool_calls`; and
        `model_dump_json(index=2)` used a nonexistent kwarg (should be
        `indent`). The original also returned the raw `tool_calls` attribute
        instead of the serialized list it had just built.
        """
        if isinstance(message, dict):
            get = message.get
        else:
            def get(key, default=None):
                return getattr(message, key, default)

        tool_calls = get("tool_calls")
        if tool_calls is not None:
            tool_calls = [
                json.loads(tc.model_dump_json(indent=2, exclude_unset=True))
                for tc in tool_calls
            ]

        return {
            "role": get("role"),
            "content": get("content"),
            "refusal": get("refusal"),
            "tool_calls": tool_calls,
        }

    @staticmethod
    def parse_input(*args, **kwargs):
        """Extract {name, input, extra} from `messages.create(...)` kwargs.

        Returns None (after logging) if parsing fails, matching the original
        best-effort behavior.
        """
        try:
            messages = [AnthropicUtils.parse_message(m) for m in kwargs["messages"]]
            name = kwargs.get("model")
            # Single pass (the original computed `extra` twice).
            extra = {k: v for k, v in kwargs.items() if k in KWARGS_TO_CAPTURE}
            return {"name": name, "input": messages, "extra": extra}
        except Exception:
            # logger.error("msg: ", e) passed the exception as a lazy %-arg
            # with no placeholder; logger.exception records the traceback.
            logger.exception("Error parsing input")

    @staticmethod
    def parse_output(message, stream=False):
        """Extract {output, tokenUsage} from an Anthropic Message.

        Fixes vs. original: the parsed dict was built but never returned.
        `stream` is accepted for interface compatibility; streaming responses
        are presumably accumulated upstream — TODO confirm.
        """
        try:
            return {
                "output": AnthropicUtils.parse_message(message),
                "tokenUsage": {
                    "prompt": getattr(message.usage, "input_tokens", None),
                    "completion": getattr(message.usage, "output_tokens", None),
                },
            }
        except Exception:
            logger.exception("Error parsing output")
Loading
Loading