Merge branch 'main' into proto_serialization_test

rysweet authored Nov 18, 2024
2 parents 300da0f + f1daff1 commit dbf4ce2
Showing 75 changed files with 4,059 additions and 1,987 deletions.
@@ -126,14 +126,16 @@ async def main() -> None:

     The following example demonstrates how to create an assistant agent with
-    a model client and a tool, and generate a stream of messages for a task.
+    a model client and a tool, generate a stream of messages for a task, and
+    print the messages to the console.

     .. code-block:: python

         import asyncio
         from autogen_ext.models import OpenAIChatCompletionClient
         from autogen_agentchat.agents import AssistantAgent
         from autogen_agentchat.messages import TextMessage
+        from autogen_agentchat.task import Console
         from autogen_core.base import CancellationToken

@@ -145,13 +147,12 @@ async def main() -> None:
             model_client = OpenAIChatCompletionClient(model="gpt-4o")
             agent = AssistantAgent(name="assistant", model_client=model_client, tools=[get_current_time])

-            stream = agent.on_messages_stream(
-                [TextMessage(content="What is the current time?", source="user")], CancellationToken()
-            )
-            async for message in stream:
-                print(message)
+            await Console(
+                agent.on_messages_stream(
+                    [TextMessage(content="What is the current time?", source="user")], CancellationToken()
+                )
+            )

         asyncio.run(main())

@@ -165,7 +166,7 @@ def __init__(
         tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
         handoffs: List[Handoff | str] | None = None,
         description: str = "An agent that provides assistance with ability to use tools.",
-        system_message: str = "You are a helpful AI assistant. Solve tasks using your tools. Reply with 'TERMINATE' when the task has been completed.",
+        system_message: str = "You are a helpful AI assistant. Solve tasks using your tools. Reply with TERMINATE when the task has been completed.",
     ):
         super().__init__(name=name, description=description)
         self._model_client = model_client
@@ -4,7 +4,7 @@
 from autogen_core.base import CancellationToken

 from ..base import ChatAgent, Response, TaskResult
-from ..messages import AgentMessage, ChatMessage, MultiModalMessage, TextMessage
+from ..messages import AgentMessage, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage


 class BaseChatAgent(ChatAgent, ABC):

@@ -54,21 +54,25 @@ async def on_messages_stream(
     async def run(
         self,
         *,
-        task: str | TextMessage | MultiModalMessage | None = None,
+        task: str | ChatMessage | None = None,
         cancellation_token: CancellationToken | None = None,
     ) -> TaskResult:
         """Run the agent with the given task and return the result."""
         if cancellation_token is None:
             cancellation_token = CancellationToken()
         input_messages: List[ChatMessage] = []
         output_messages: List[AgentMessage] = []
-        if isinstance(task, str):
+        if task is None:
+            pass
+        elif isinstance(task, str):
             text_msg = TextMessage(content=task, source="user")
             input_messages.append(text_msg)
             output_messages.append(text_msg)
-        elif isinstance(task, TextMessage | MultiModalMessage):
+        elif isinstance(task, TextMessage | MultiModalMessage | StopMessage | HandoffMessage):
             input_messages.append(task)
             output_messages.append(task)
+        else:
+            raise ValueError(f"Invalid task type: {type(task)}")
         response = await self.on_messages(input_messages, cancellation_token)
         if response.inner_messages is not None:
             output_messages += response.inner_messages

@@ -78,7 +82,7 @@ async def run_stream(
     async def run_stream(
         self,
         *,
-        task: str | TextMessage | MultiModalMessage | None = None,
+        task: str | ChatMessage | None = None,
         cancellation_token: CancellationToken | None = None,
     ) -> AsyncGenerator[AgentMessage | TaskResult, None]:
         """Run the agent with the given task and return a stream of messages

@@ -87,15 +91,19 @@ async def run_stream(
             cancellation_token = CancellationToken()
         input_messages: List[ChatMessage] = []
         output_messages: List[AgentMessage] = []
-        if isinstance(task, str):
+        if task is None:
+            pass
+        elif isinstance(task, str):
             text_msg = TextMessage(content=task, source="user")
             input_messages.append(text_msg)
             output_messages.append(text_msg)
             yield text_msg
-        elif isinstance(task, TextMessage | MultiModalMessage):
+        elif isinstance(task, TextMessage | MultiModalMessage | StopMessage | HandoffMessage):
             input_messages.append(task)
             output_messages.append(task)
             yield task
+        else:
+            raise ValueError(f"Invalid task type: {type(task)}")
         async for message in self.on_messages_stream(input_messages, cancellation_token):
             if isinstance(message, Response):
                 yield message.chat_message
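With this change, `run` and `run_stream` accept any `ChatMessage` as the task, including `StopMessage` and `HandoffMessage`. Below is a minimal sketch of the new call pattern, assuming the model-client setup from the `AssistantAgent` docstring above; the `HandoffMessage` fields follow `autogen_agentchat.messages` at this commit:

.. code-block:: python

    import asyncio

    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.messages import HandoffMessage
    from autogen_ext.models import OpenAIChatCompletionClient


    async def main() -> None:
        model_client = OpenAIChatCompletionClient(model="gpt-4o")
        agent = AssistantAgent(name="assistant", model_client=model_client)
        # The task may now be any ChatMessage (e.g. a handoff from another agent),
        # not just a plain string, TextMessage, or MultiModalMessage.
        result = await agent.run(
            task=HandoffMessage(source="planner", target="assistant", content="Take over and finish the summary.")
        )
        print(result.messages[-1])


    asyncio.run(main())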
@@ -9,7 +9,52 @@


 class CodeExecutorAgent(BaseChatAgent):
-    """An agent that executes code snippets and report the results."""
+    """An agent that extracts and executes code snippets found in received messages and returns the output.
+
+    It is typically used within a team with another agent that generates code snippets to be executed.
+
+    .. note::
+
+        It is recommended that the `CodeExecutorAgent` agent use a Docker container to execute code.
+        This ensures that model-generated code is executed in an isolated environment.
+        To use Docker, your environment must have Docker installed and running.
+        Follow the installation instructions for `Docker <https://docs.docker.com/get-docker/>`_.
+
+    In this example, we show how to set up a `CodeExecutorAgent` agent that uses the
+    :py:class:`~autogen_ext.code_executors.DockerCommandLineCodeExecutor`
+    to execute code snippets in a Docker container. The `work_dir` parameter indicates
+    where all executed files are first saved locally before being executed in the Docker container.
+
+    .. code-block:: python

+        from autogen_agentchat.agents import CodeExecutorAgent
+        from autogen_agentchat.messages import TextMessage
+        from autogen_core.base import CancellationToken
+        from autogen_ext.code_executors import DockerCommandLineCodeExecutor
+
+
+        async def run_code_executor_agent() -> None:
+            # Create a code executor agent that uses a Docker container to execute code.
+            code_executor = DockerCommandLineCodeExecutor(work_dir="coding")
+            await code_executor.start()
+            code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
+
+            # Run the agent with a given code snippet.
+            task = TextMessage(
+                content='''Here is some code
+        ```python
+        print('Hello world')
+        ```
+        ''',
+                source="user",
+            )
+            response = await code_executor_agent.on_messages([task], CancellationToken())
+            print(response.chat_message)
+
+            # Stop the code executor.
+            await code_executor.stop()
+
+
+        # Use asyncio.run(run_code_executor_agent()) when running in a script.
+        await run_code_executor_agent()
+    """

     def __init__(
         self,
@@ -3,7 +3,7 @@

 from autogen_core.base import CancellationToken

-from ..messages import AgentMessage, MultiModalMessage, TextMessage
+from ..messages import AgentMessage, ChatMessage


 @dataclass

@@ -23,7 +23,7 @@ class TaskRunner(Protocol):
     async def run(
         self,
         *,
-        task: str | TextMessage | MultiModalMessage | None = None,
+        task: str | ChatMessage | None = None,
         cancellation_token: CancellationToken | None = None,
     ) -> TaskResult:
         """Run the task and return the result.

@@ -36,7 +36,7 @@ async def run(
     def run_stream(
         self,
         *,
-        task: str | TextMessage | MultiModalMessage | None = None,
+        task: str | ChatMessage | None = None,
         cancellation_token: CancellationToken | None = None,
     ) -> AsyncGenerator[AgentMessage | TaskResult, None]:
         """Run the task and produce a stream of messages and the final result
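Both `run` and `run_stream` now share the `str | ChatMessage | None` task signature. Since `TaskRunner` is a `Protocol`, any class with matching methods satisfies it; here is a minimal sketch of such a class (the `EchoRunner` name and its echo-only behavior are hypothetical, and `TaskResult` is assumed importable from `autogen_agentchat.base` as elsewhere in this commit):

.. code-block:: python

    from typing import AsyncGenerator, List

    from autogen_agentchat.base import TaskResult
    from autogen_agentchat.messages import AgentMessage, ChatMessage, TextMessage
    from autogen_core.base import CancellationToken


    class EchoRunner:
        """Hypothetical TaskRunner: echoes the task text back as a single message."""

        async def run(
            self,
            *,
            task: str | ChatMessage | None = None,
            cancellation_token: CancellationToken | None = None,
        ) -> TaskResult:
            # Collect the stream and return its final TaskResult.
            messages: List[AgentMessage] = []
            async for item in self.run_stream(task=task, cancellation_token=cancellation_token):
                if isinstance(item, TaskResult):
                    return item
                messages.append(item)
            return TaskResult(messages=messages)

        async def run_stream(
            self,
            *,
            task: str | ChatMessage | None = None,
            cancellation_token: CancellationToken | None = None,
        ) -> AsyncGenerator[AgentMessage | TaskResult, None]:
            # Accept both plain strings and ChatMessage tasks, mirroring the protocol.
            text = task if isinstance(task, str) else "" if task is None else str(task.content)
            msg = TextMessage(content=text, source="echo")
            yield msg
            yield TaskResult(messages=[msg])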
@@ -1,17 +1,38 @@
+import os
 import sys
 import time
 from typing import AsyncGenerator, List

+from autogen_core.components import Image
 from autogen_core.components.models import RequestUsage

-from autogen_agentchat.base import TaskResult
+from autogen_agentchat.base import Response, TaskResult
 from autogen_agentchat.messages import AgentMessage, MultiModalMessage


-async def Console(stream: AsyncGenerator[AgentMessage | TaskResult, None]) -> None:
-    """Consume the stream from :meth:`~autogen_agentchat.teams.Team.run_stream`
-    and print the messages to the console."""
+def _is_running_in_iterm() -> bool:
+    return os.getenv("TERM_PROGRAM") == "iTerm.app"
+
+
+def _is_output_a_tty() -> bool:
+    return sys.stdout.isatty()
+
+
+async def Console(
+    stream: AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None],
+    *,
+    no_inline_images: bool = False,
+) -> None:
+    """Consume the stream from :meth:`~autogen_agentchat.base.Team.run_stream`
+    or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`
+    and print the messages to the console.
+
+    Args:
+        stream (AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None]): Stream to render.
+        no_inline_images (bool, optional): If the terminal is iTerm2, images are rendered inline by default. Set this to True to disable inline rendering. Defaults to False.
+    """
+
+    render_image_iterm = _is_running_in_iterm() and _is_output_a_tty() and not no_inline_images
     start_time = time.time()
     total_usage = RequestUsage(prompt_tokens=0, completion_tokens=0)
     async for message in stream:

@@ -26,23 +47,56 @@ async def Console(stream: AsyncGenerator[AgentMessage | TaskResult, None]) -> None:
                 f"Duration: {duration:.2f} seconds\n"
             )
             sys.stdout.write(output)
+        elif isinstance(message, Response):
+            duration = time.time() - start_time
+
+            # Print final response.
+            output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{_message_to_str(message.chat_message, render_image_iterm=render_image_iterm)}\n"
+            if message.chat_message.models_usage:
+                output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n"
+                total_usage.completion_tokens += message.chat_message.models_usage.completion_tokens
+                total_usage.prompt_tokens += message.chat_message.models_usage.prompt_tokens
+            sys.stdout.write(output)
+
+            # Print summary.
+            if message.inner_messages is not None:
+                num_inner_messages = len(message.inner_messages)
+            else:
+                num_inner_messages = 0
+            output = (
+                f"{'-' * 10} Summary {'-' * 10}\n"
+                f"Number of inner messages: {num_inner_messages}\n"
+                f"Total prompt tokens: {total_usage.prompt_tokens}\n"
+                f"Total completion tokens: {total_usage.completion_tokens}\n"
+                f"Duration: {duration:.2f} seconds\n"
+            )
+            sys.stdout.write(output)
         else:
-            output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message)}\n"
+            output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message, render_image_iterm=render_image_iterm)}\n"
             if message.models_usage:
                 output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n"
                 total_usage.completion_tokens += message.models_usage.completion_tokens
                 total_usage.prompt_tokens += message.models_usage.prompt_tokens
             sys.stdout.write(output)


-def _message_to_str(message: AgentMessage) -> str:
+# iTerm2 image rendering protocol: https://iterm2.com/documentation-images.html
+def _image_to_iterm(image: Image) -> str:
+    image_data = image.to_base64()
+    return f"\033]1337;File=inline=1:{image_data}\a\n"
+
+
+def _message_to_str(message: AgentMessage, *, render_image_iterm: bool = False) -> str:
     if isinstance(message, MultiModalMessage):
         result: List[str] = []
         for c in message.content:
             if isinstance(c, str):
                 result.append(c)
             else:
-                result.append("<image>")
+                if render_image_iterm:
+                    result.append(_image_to_iterm(c))
+                else:
+                    result.append("<image>")
         return "\n".join(result)
     else:
         return f"{message.content}"
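For reference, a short sketch of driving the updated `Console` from a streaming run, reusing the `AssistantAgent` and model-client setup shown earlier in this commit:

.. code-block:: python

    import asyncio

    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.task import Console
    from autogen_ext.models import OpenAIChatCompletionClient


    async def main() -> None:
        model_client = OpenAIChatCompletionClient(model="gpt-4o")
        agent = AssistantAgent(name="assistant", model_client=model_client)
        # Console prints each streamed message, per-message token usage, and a
        # final summary; inline image rendering is disabled even under iTerm2.
        await Console(agent.run_stream(task="Say hello."), no_inline_images=True)


    asyncio.run(main())

The same call works with :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`, since `Console` now also accepts a stream that ends with a `Response`.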