
feat: add support for o3 models & update litellm #130

Merged
merged 10 commits on Feb 4, 2025
94 changes: 93 additions & 1 deletion dynamiq/nodes/llms/openai.py
@@ -1,5 +1,12 @@
from typing import Any

from dynamiq.connections import HttpApiKey
from dynamiq.connections import OpenAI as OpenAIConnection
from dynamiq.nodes.llms.base import BaseLLM
from dynamiq.nodes.llms.base import BaseLLM, BaseLLMInputSchema
from dynamiq.nodes.node import ensure_config
from dynamiq.nodes.types import InferenceMode
from dynamiq.prompts import Prompt
from dynamiq.runnables import RunnableConfig


class OpenAI(BaseLLM):
@@ -21,3 +28,88 @@ def __init__(self, **kwargs):
if kwargs.get("client") is None and kwargs.get("connection") is None:
kwargs["connection"] = OpenAIConnection()
super().__init__(**kwargs)

def is_o_series_model(self) -> bool:
"""Determine if the model belongs to the O-series (e.g. o1 or o3).

Returns:
bool: True if the model is an O-series model, otherwise False.
"""
model_lower = self.model.lower()
return "o1" in model_lower or "o3" in model_lower

def execute(
self,
input_data: BaseLLMInputSchema,
config: RunnableConfig | None = None,
prompt: Prompt | None = None,
schema: dict | None = None,
inference_mode: InferenceMode | None = None,
**kwargs,
):
"""Execute the LLM node.

This method processes the input data, formats the prompt, and generates a response using
the configured LLM.

Args:
input_data (BaseLLMInputSchema): The input data for the LLM.
config (RunnableConfig, optional): The configuration for the execution. Defaults to None.
prompt (Prompt, optional): The prompt to use for this execution. Defaults to None.
schema (dict, optional): Schema for structured output or function calling.
Overrides the instance schema_ if provided.
inference_mode (InferenceMode, optional): Mode of inference.
Overrides instance inference_mode if provided.
**kwargs: Additional keyword arguments.

Returns:
dict: A dictionary containing the generated content and tool calls.
"""
config = ensure_config(config)
prompt = prompt or self.prompt or Prompt(messages=[], tools=None)
messages = prompt.format_messages(**dict(input_data))
base_tools = prompt.format_tools(**dict(input_data))
self.run_on_node_execute_run(callbacks=config.callbacks, prompt_messages=messages, **kwargs)

params = self.connection.conn_params
if self.client and not isinstance(self.connection, HttpApiKey):
params.update({"client": self.client})

current_inference_mode = inference_mode or self.inference_mode
current_schema = schema or self.schema_
response_format, tools = self._get_response_format_and_tools(
inference_mode=current_inference_mode, schema=current_schema
)
tools = tools or base_tools

common_params: dict[str, Any] = {
"model": self.model,
"messages": messages,
"stream": self.streaming.enabled,
"tools": tools,
"tool_choice": self.tool_choice,
"stop": self.stop,
"top_p": self.top_p,
"seed": self.seed,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"response_format": response_format,
"drop_params": True,
**params,
}

if self.is_o_series_model():
common_params["max_completion_tokens"] = self.max_tokens
else:
common_params["temperature"] = self.temperature
common_params["max_tokens"] = self.max_tokens

response = self._completion(**common_params)

handle_completion = (
self._handle_streaming_completion_response if self.streaming.enabled else self._handle_completion_response
)

return handle_completion(
response=response, messages=messages, config=config, input_data=dict(input_data), **kwargs
)
84 changes: 84 additions & 0 deletions examples/orchestrators/adaptive_article_o3.py
@@ -0,0 +1,84 @@
from dynamiq.connections import E2B as E2BConnection
from dynamiq.connections import Exa, ZenRows
from dynamiq.nodes.agents.orchestrators.adaptive import AdaptiveOrchestrator
from dynamiq.nodes.agents.orchestrators.adaptive_manager import AdaptiveAgentManager
from dynamiq.nodes.agents.react import ReActAgent
from dynamiq.nodes.agents.simple import SimpleAgent
from dynamiq.nodes.tools.e2b_sandbox import E2BInterpreterTool
from dynamiq.nodes.tools.exa_search import ExaTool
from dynamiq.nodes.tools.zenrows import ZenRowsTool
from dynamiq.nodes.types import InferenceMode
from examples.llm_setup import setup_llm

INPUT_TASK = (
"Let's find data on optimizing "
"SEO campaigns in 2025, analyze it, "
"and provide predictions with calculations "
"on how to improve and implement these strategies."
)


if __name__ == "__main__":
python_tool = E2BInterpreterTool(
name="Code Executor",
connection=E2BConnection(),
)

zenrows_tool = ZenRowsTool(
connection=ZenRows(),
name="Web Scraper",
)

exa_tool = ExaTool(
connection=Exa(),
name="Search Engine",
)

llm = setup_llm(model_provider="gpt", model_name="o3-mini", max_tokens=100000)

agent_coding = ReActAgent(
name="Coding Agent",
llm=llm,
tools=[python_tool],
max_loops=13,
inference_mode=InferenceMode.XML,
)

agent_web = ReActAgent(
name="Web Agent",
llm=llm,
tools=[zenrows_tool, exa_tool],
max_loops=13,
inference_mode=InferenceMode.XML,
)

agent_reflection = SimpleAgent(
name="Reflection Agent (Reviewer, Critic)",
llm=llm,
role=(
"Analyze and review the accuracy of any results, "
"including tasks, code, or data. "
"Offer feedback and suggestions for improvement."
),
)

agent_manager = AdaptiveAgentManager(
llm=llm,
)

orchestrator = AdaptiveOrchestrator(
name="Adaptive Orchestrator",
agents=[agent_coding, agent_web, agent_reflection],
manager=agent_manager,
)

result = orchestrator.run(
input_data={
"input": INPUT_TASK,
},
config=None,
)

output_content = result.output.get("content")
print("RESULT")
print(output_content)
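A note on the drop_params=True entry in common_params: dynamiq's completion path is built on litellm (per this PR's title), and litellm's drop_params option silently drops parameters the target model does not support instead of raising. Assuming that flag is forwarded through _completion, a hedged sketch of the equivalent direct litellm call for an O-series model (requires a recent litellm and an OpenAI API key in the environment):

import litellm

response = litellm.completion(
    model="o3-mini",
    messages=[{"role": "user", "content": "Summarize this PR in one line."}],
    max_completion_tokens=64,  # O-series budget, as in the diff above
    drop_params=True,  # unsupported params (e.g. temperature) are dropped, not errors
)
print(response.choices[0].message.content)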