From 85ea6e967360fda21bf368fcc1c5a2ef3b53a645 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 15 Nov 2024 16:33:42 -0500 Subject: [PATCH 01/21] Initial autogen-core seeds --- .../autogen-core/samples/common/adas/adas.py | 479 ++++++++ .../samples/common/adas/adas_prompt.py | 1048 +++++++++++++++++ .../autogen-core/samples/common/adas/utils.py | 296 +++++ 3 files changed, 1823 insertions(+) create mode 100644 python/packages/autogen-core/samples/common/adas/adas.py create mode 100644 python/packages/autogen-core/samples/common/adas/adas_prompt.py create mode 100644 python/packages/autogen-core/samples/common/adas/utils.py diff --git a/python/packages/autogen-core/samples/common/adas/adas.py b/python/packages/autogen-core/samples/common/adas/adas.py new file mode 100644 index 000000000000..3a1c6bad78cf --- /dev/null +++ b/python/packages/autogen-core/samples/common/adas/adas.py @@ -0,0 +1,479 @@ + + + + + +import argparse +import asyncio +import os +import logging +import json +import re +import uuid +import pickle +from dataclasses import dataclass +from typing import Dict, List, Union +from collections import namedtuple +from concurrent.futures import ThreadPoolExecutor +from tqdm import tqdm +import threading +import random + +from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent +from autogen_core.base import AgentId, AgentType, AgentRuntime, CancellationToken, MessageContext, TopicId +from autogen_core.components import RoutedAgent, default_subscription, message_handler +from autogen_core.components.models import ( + AssistantMessage, + ChatCompletionClient, + LLMMessage, + SystemMessage, + UserMessage, +) + +from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core.components import DefaultTopicId +from autogen_core.components.models import OpenAIChatCompletionClient +from autogen_core.components.tools import FunctionTool, PythonCodeExecutionTool, ToolSchema +from autogen_core.components.tool_agent import ToolAgent, tool_agent_caller_loop +from autogen_ext.code_executors import DockerCommandLineCodeExecutor #, extract_markdown_code_blocks +from autogen_core.components.code_executor import CodeBlock, CodeExecutor, extract_markdown_code_blocks +from autogen_magentic_one.utils import LogHandler +from autogen_core.application.logging import EVENT_LOGGER_NAME + +# TODO fix imports +import sys +sys.path.append("/home/andyye/autogen/python/packages/autogen-core/samples/") +from common.utils import get_chat_completion_client_from_envs + +from adas_prompt import get_init_archive, get_prompt, get_reflexion_prompt +from utils import random_id, bootstrap_confidence_interval, load_drop, drop_metric + + +logging.basicConfig(level=logging.WARNING) +logging.getLogger("autogen_core").setLevel(logging.DEBUG) + +Info = namedtuple('Info', ['name', 'author', 'content', 'iteration_idx']) + +SEARCHING_MODE = True + + + +@dataclass +class CodeWritingTask: + task: str + + +@dataclass +class CodeWritingResult: + task: str + code: str + review: str + + +@dataclass +class CodeReviewTask: + session_id: str + code_writing_task: str + code_writing_scratchpad: str + code: str + + +@dataclass +class CodeReviewResult: + review: str + session_id: str + approved: bool + + +@dataclass +class ADASTask: + task: str + +@dataclass +class ADASResult: + result: str + +@dataclass +class ReflectTask: + session_id: str + task: str + thought: str + + +@dataclass +class LLMMessageList: + llm_message_list: List[LLMMessage] + + +@dataclass +class SimpleReflectAgentResponse: + 
json_content: Dict[str, str]
+    # content: str
+
+
+@dataclass
+class LLMAgentBaseTask:
+    system_message: LLMMessage
+    instruction: LLMMessage
+    input_infos: List[Info]
+    iteration_idx: int
+    output_fields: List[str]
+    role: str
+
+
+@dataclass
+class LLMAgentBaseResponse:
+    output: str
+
+
+# An agent that makes a direct call to the model and returns JSON.
+class SimpleReflectAgent(RoutedAgent):
+    def __init__(self, description: str, model_client: ChatCompletionClient, system_prompt: str) -> None:
+        super().__init__(description)
+        self._system_messages: List[LLMMessage] = [
+            SystemMessage(
+                content=system_prompt,
+            )
+        ]
+        self._chat_history: List[LLMMessage] = []
+        self._model_client = model_client
+        self._cnt = 0
+
+    @message_handler
+    async def handle_task(self, message: LLMMessageList, ctx: MessageContext) -> SimpleReflectAgentResponse:
+        # Accumulate the incoming messages in the chat history, then query the
+        # model with the full system prompt + history context.
+        self._chat_history.extend(message.llm_message_list)
+        model_result = await self._model_client.create(self._system_messages + self._chat_history)
+        self._cnt += 1
+        assert isinstance(model_result.content, str)
+        json_content = json.loads(model_result.content)
+        return SimpleReflectAgentResponse(json_content=json_content)
+
+
+@dataclass
+class Message:
+    content: str
+
+
+@default_subscription
+class Assistant(RoutedAgent):
+    def __init__(self, model_client: ChatCompletionClient) -> None:
+        super().__init__("An assistant agent.")
+        self._model_client = model_client
+        self._chat_history: List[LLMMessage] = [
+            SystemMessage(
+                content="""Write Python script in markdown block, and it will be executed.
+Always save figures to file in the current directory. 
Do not use plt.show()""", + ) + ] + + @message_handler + async def handle_message(self, message: Message, ctx: MessageContext) -> None: + self._chat_history.append(UserMessage(content=message.content, source="user")) + result = await self._model_client.create(self._chat_history) + print(f"\n{'-'*80}\nAssistant:\n{result.content}") + self._chat_history.append(AssistantMessage(content=result.content, source="assistant")) # type: ignore + await self.publish_message(Message(content=result.content), DefaultTopicId()) # type: ignore + + +@default_subscription +class Executor(RoutedAgent): + def __init__(self, code_executor: CodeExecutor) -> None: + super().__init__("An executor agent.") + self._code_executor = code_executor + + @message_handler + async def handle_message(self, message: Message, ctx: MessageContext) -> None: + code_blocks = extract_markdown_code_blocks(message.content) + if code_blocks: + result = await self._code_executor.execute_code_blocks( + code_blocks, cancellation_token=ctx.cancellation_token + ) + print(f"\n{'-'*80}\nExecutor:\n{result.output}") + await self.publish_message(Message(content=result.output), DefaultTopicId()) + + +class AgentSystem(): + def __init__(self) -> None: + pass + +def generate_task(input_infos) -> str: + + # construct input infos text + input_infos_text = '' + for input_info in input_infos: + if isinstance(input_info, Info): + (field_name, author, content, iteration_idx) = input_info + else: + continue + + if field_name == 'task': + input_infos_text += f'# Your Task:\n{content}\n\n' + elif iteration_idx != -1: + # input_infos_text += f'### {field_name} #{iteration_idx + 1} by {author}:\n{content}\n\n' + input_infos_text += f'### {field_name} #{iteration_idx + 1}:\n{content}\n\n' + else: + # input_infos_text += f'### {field_name} by {author}:\n{content}\n\n' + input_infos_text += f'### {field_name}:\n{content}\n\n' + + prompt = input_infos_text + "# Instruction: \n" + return prompt + +def evaluate_forward_fn(args, forward_str): + # dynamically define forward() + # modified from https://github.com/luchris429/DiscoPOP/blob/main/scripts/launch_evo.py + namespace = {} + print(f"forward str {forward_str}") + exec(forward_str, globals(), namespace) + names = list(namespace.keys()) + if len(names) != 1: + raise AssertionError(f"{len(names)} things in namespace. 
Please only provide 1")
+    func = namespace[names[0]]
+    if not callable(func):
+        raise AssertionError(f"{func} is not callable")
+    setattr(AgentSystem, "forward", func)
+
+    # Shuffle the validation set with a fixed seed for reproducibility.
+    examples = load_drop(args.data_filename)[1:-1]  # The first and last examples are reserved for few-shot prompting.
+    random.seed(args.shuffle_seed)
+    random.shuffle(examples)
+
+    if SEARCHING_MODE:
+        examples = examples[:args.valid_size] * args.n_repeat
+    else:
+        examples = examples[args.valid_size:args.valid_size + args.test_size] * args.n_repeat
+
+    questions = [example['inputs'] for example in examples]
+    answers = [example['targets'] for example in examples]
+
+    print(f"problem length: {len(examples)}")
+    max_workers = min(len(examples), args.max_workers) if args.multiprocessing else 1
+
+    task_queue = []
+    for q in questions:
+        taskInfo = Info('task', 'User', q, -1)
+        task_queue.append((taskInfo, AgentSystem()))
+
+    def call_forward(agent_task_queue):
+        taskInfo, agent = agent_task_queue
+        task = generate_task([taskInfo])
+
+        # For magentic one using the create_completion_client_from_env() helper
+        # export CHAT_COMPLETION_PROVIDER='azure'
+        agent_model_kwargs = {}
+
+        result = agent.forward(task, agent_model_kwargs)
+        return result
+
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        results = list(tqdm(executor.map(call_forward, task_queue), total=len(task_queue)))
+
+    acc_list = []
+    for q_idx, res in enumerate(results):
+        try:
+            if isinstance(res, Info):
+                extracted_answer = res.content
+            else:
+                extracted_answer = res
+            correct_answers = answers[q_idx]
+            em_score, f1_score = drop_metric(extracted_answer, correct_answers)
+        except Exception as e:
+            print(f"Error scoring question {q_idx}: {e}")
+            acc_list.append(0)
+            continue
+
+        acc_list.append(f1_score)
+
+    print(f"f1: {bootstrap_confidence_interval(acc_list)}")
+    return acc_list
+
+
+@default_subscription
+class ADASAgent(RoutedAgent):
+    """An agent that performs ADAS (Automated Design of Agentic Systems)."""
+
+    def __init__(self,
+                 model_client: ChatCompletionClient,
+                 # system_prompt: str,
+                 # evaluate_agent_type: str,
+                 reflect_agent_type: str,
+                 executor_agent_type: str,
+                 args,
+                 archive
+    ) -> None:
+        super().__init__("An agent that searches for new agent designs.")
+        # self._evaluate_agent_id = AgentId(evaluate_agent_type, self.id.key)
+        self._reflect_agent_id = AgentId(reflect_agent_type, self.id.key)
+        self._executor_agent_id = AgentId(executor_agent_type, self.id.key)
+        self._args = args
+        self._archive = archive
+        self._model_client = model_client
+        self._session_memory: Dict[str, List[ADASTask | ADASResult]] = {}
+
+    @message_handler
+    async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None:
+        # Store the messages in a temporary memory for this request only.
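+        # A fresh UUID keys this request's entry so that concurrent ADAS
+        # searches do not interleave their session histories.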
+        session_id = str(uuid.uuid4())
+        self._session_memory.setdefault(session_id, []).append(message)
+
+        # Load the archive of previously discovered agents, or fall back to the seed archive.
+        file_path = os.path.join(self._args.save_dir, f"{self._args.expr_name}_run_archive.json")
+        if os.path.exists(file_path):
+            with open(file_path, 'r') as json_file:
+                archive = json.load(json_file)
+            if "generation" in archive[-1] and isinstance(archive[-1]['generation'], int):
+                start = archive[-1]['generation']
+            else:
+                start = 0
+        else:
+            archive = get_init_archive()
+            start = 0
+
+        for solution in archive:
+            if 'fitness' in solution:
+                continue
+
+            solution['generation'] = "initial"
+            print(f"============Initial Archive: {solution['name']}=================")
+            try:
+                acc_list = evaluate_forward_fn(self._args, solution["code"])
+            except Exception as e:
+                print("Error while evaluating the initial archive:")
+                print(e)
+                continue
+
+            fitness_str = bootstrap_confidence_interval(acc_list)
+            solution['fitness'] = fitness_str
+
+            # Save the updated archive.
+            os.makedirs(os.path.dirname(file_path), exist_ok=True)
+            with open(file_path, 'w') as json_file:
+                json.dump(archive, json_file, indent=4)
+
+        # Search loop: propose a new agent, refine it with two reflexion rounds, then evaluate it.
+        for n in range(start, self._args.n_generation):
+            print(f"============Generation {n + 1}=================")
+            msg_list = [UserMessage(content=message.task, source=self.metadata["type"])]
+            try:
+                response = await self.send_message(LLMMessageList(msg_list), self._reflect_agent_id)
+                Reflexion_prompt_1, Reflexion_prompt_2 = get_reflexion_prompt(self._archive[-1] if n > 0 else None)
+
+                # Reflexion 1
+                next_solution = response.json_content
+                new_messages = [
+                    AssistantMessage(content=str(next_solution), source=self.metadata["type"]),
+                    UserMessage(content=Reflexion_prompt_1, source=self.metadata["type"]),
+                ]
+                response = await self.send_message(LLMMessageList(new_messages), self._reflect_agent_id)
+
+                # Reflexion 2
+                next_solution = response.json_content
+                new_messages = [
+                    AssistantMessage(content=str(next_solution), source=self.metadata["type"]),
+                    UserMessage(content=Reflexion_prompt_2, source=self.metadata["type"]),
+                ]
+                response = await self.send_message(LLMMessageList(new_messages), self._reflect_agent_id)
+            except Exception as e:
+                print("Error while generating a new solution with the LLM:")
+                print(e)
+                continue
+
+            # TODO: Evaluate code
+            next_solution = response.json_content
+            print(f"final {str(next_solution)}")
+            acc_list = evaluate_forward_fn(self._args, next_solution["code"])
+
+            # TODO: Maybe not... instantiate many agents to run eval.
+            # acc_list = await self.send_message(EvaluateTask(), self._evaluate_agent_id)
+
+
+async def main(args) -> None:
+    runtime = SingleThreadedAgentRuntime()
+    client = get_chat_completion_client_from_envs(model="gpt-4o-mini")
+    archive = get_init_archive()
+    system_prompt, prompt = get_prompt(archive)
+
+    # Create the reflect agent
+    await SimpleReflectAgent.register(
+        runtime, "simple_reflect_agent", lambda: SimpleReflectAgent(
+            description='Simple Reflect Agent',
+            model_client=client,
+            system_prompt=system_prompt,
+        )
+    )
+
+    # Create the ADAS agent that drives the search.
+    await ADASAgent.register(
+        runtime, "adas_agent", lambda: ADASAgent(
+            model_client=client,
+            args=args,
+            archive=archive,
+            reflect_agent_type='simple_reflect_agent',
+            executor_agent_type='executor',
+        )
+    )
+
+    runtime.start()
+
+    # Publish an initial message to trigger the ADAS search to start.
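+    # ADASAgent is decorated with @default_subscription, so publishing to the
+    # default topic (rather than sending directly) lets the runtime route the task.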
+ await runtime.publish_message( + message=ADASTask(task=prompt), + topic_id=DefaultTopicId(), + ) + + # Keep processing messages until idle. + await runtime.stop_when_idle() + + +# python packages/autogen-core/samples/common/adas/adas.py --data_filename=/home/andyye/ADAS/dataset/drop_v0_dev.jsonl.gz --valid_size=1 +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Run ADAS") + parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.") + parser.add_argument('--data_filename', type=str, default="dataset/drop_v0_dev.jsonl.gz") + parser.add_argument('--valid_size', type=int, default=128) + parser.add_argument('--test_size', type=int, default=800) + parser.add_argument('--shuffle_seed', type=int, default=0) + parser.add_argument('--n_repeat', type=int, default=1) + parser.add_argument('--multiprocessing', action='store_true', default=True) + parser.add_argument('--max_workers', type=int, default=48) + parser.add_argument('--debug', action='store_true', default=True) + parser.add_argument('--save_dir', type=str, default='results/') + parser.add_argument('--expr_name', type=str, default="drop_gpt3.5_results") + parser.add_argument('--n_generation', type=int, default=30) + parser.add_argument('--debug_max', type=int, default=3) + parser.add_argument('--model', + type=str, + default='gpt-4o-2024-05-13', + choices=['gpt-4-turbo-2024-04-09', 'gpt-3.5-turbo-0125', 'gpt-4o-2024-05-13']) + args = parser.parse_args() + if args.verbose: + logging.basicConfig(level=logging.WARNING) + logging.getLogger("autogen_core").setLevel(logging.DEBUG) + handler = logging.FileHandler("adas.log") + logging.getLogger("autogen_core").addHandler(handler) + + asyncio.run(main(args)) diff --git a/python/packages/autogen-core/samples/common/adas/adas_prompt.py b/python/packages/autogen-core/samples/common/adas/adas_prompt.py new file mode 100644 index 000000000000..79a1342895b7 --- /dev/null +++ b/python/packages/autogen-core/samples/common/adas/adas_prompt.py @@ -0,0 +1,1048 @@ +import json + +EXAMPLE = { + "thought": "**Insights:**\nYour insights on what should be the next interesting agent.\n**Overall Idea:**\nyour reasoning and the overall concept behind the agent design.\n**Implementation:**\ndescribe the implementation step by step.", + "name": "Name of your proposed agent", + "code": """def forward(self, taskInfo): + # Your code here + return answer +""" +} + +# COT = { +# "thought": "By encouraging the LLM to think step by step rather than directly outputting an answer, chain-of-thought reasoning enables complex problem-solving through intermediate steps. This practice improves the model's ability to handle tasks that require deeper reasoning and provides insight into its decision-making process.", +# "name": "Chain-of-Thought", +# "code": """def forward(self, taskInfo): +# # Instruction for the Chain-of-Thought (CoT) approach +# # It is an important practice that allows the LLM to think step by step before solving the task. +# cot_instruction = "Please think step by step and then solve the task." + +# # Instantiate a new LLM agent specifically for CoT +# # To allow LLM thinking before answering, we need to set an additional output field 'thinking'. 
+# cot_agent = LLMAgentBase(['thinking', 'answer'], 'Chain-of-Thought Agent') + +# # Prepare the inputs for the CoT agent +# # The input should be a list of Info, and the first one is often the taskInfo +# cot_agent_inputs = [taskInfo] + +# # Get the response from the CoT agent +# thinking, answer = cot_agent(cot_agent_inputs, cot_instruction) + +# # Return only the final answer +# return answer +# """ +# } + + +COT = { + "thought": "By encouraging the LLM to think step by step rather than directly outputting an answer, chain-of-thought reasoning enables complex problem-solving through intermediate steps. This practice improves the model's ability to handle tasks that require deeper reasoning and provides insight into its decision-making process.", + "name": "Chain-of-Thought", + "code": """def forward(self, task, agent_model_kwargs): + import asyncio + import logging + import json + from dataclasses import dataclass + import sys + from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core.base import AgentId, AgentRuntime, MessageContext + from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, DefaultSubscription + from autogen_core.components.models import ( + ChatCompletionClient, + LLMMessage, + SystemMessage, + UserMessage, + ) + from autogen_ext.models import AzureOpenAIChatCompletionClient + from typing import List + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + + token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") + + # Create an AzureOpenAI model client. + model_client = AzureOpenAIChatCompletionClient( + model=agent_model_kwargs['model'], + api_version=agent_model_kwargs['api_version'], + azure_endpoint=agent_model_kwargs['azure_endpoint'], + azure_ad_token_provider=token_provider, + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, + ) + + # Define message types as data classes + @dataclass + class ChainOfThoughtTask: + task: str + + + @dataclass + class FinalResult: + result: str + + + # Define the Chain-of-Thought Agent + class ChainOfThoughtAgent(RoutedAgent): + def __init__(self, description: str, + model_client: ChatCompletionClient, + system_prompt: str, + instruction: str, + ) -> None: + super().__init__(description) + self._system_messages: List[LLMMessage] = [ + SystemMessage( + content=system_prompt, + ) + ] + self._model_client = model_client + self._instruction = instruction + + @message_handler + async def handle_task(self, message: ChainOfThoughtTask, ctx: MessageContext) -> None: + + logging.info(f"{self._description} received message: {message.task}") + user_prompt = message.task + "\\n" + self._instruction + msgs = self._system_messages + [UserMessage(content=user_prompt, source=self.metadata["type"])] + model_result = await self._model_client.create(msgs) + assert isinstance(model_result.content, str) + + await self.publish_message( + message=FinalResult(model_result.content), + topic_id=DefaultTopicId(), + ) + + + # Define the main function to set up and run the agent system + async def main(): + + queue = asyncio.Queue[FinalResult]() + async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResult, ctx: MessageContext) -> None: + await queue.put(message) + + # Initialize the agent runtime + runtime = SingleThreadedAgentRuntime() + + # Create the chain-of-thought agent + agent_id = AgentId("COTAgent", "default") + cot_instruction = 
"Please think step by step and then solve the task." + await ChainOfThoughtAgent.register( + runtime, "COTAgent", lambda: ChainOfThoughtAgent( + description='Chain-of-Thought Agent', + model_client=model_client, + system_prompt="You are a helpful assistant. Directly answer the question. Keep it very concise.", + instruction=cot_instruction, + ) + ) + await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [DefaultSubscription()]) + + runtime.start() + initial_message = ChainOfThoughtTask(task=task) + await runtime.send_message(initial_message, agent_id) # publish_message + await runtime.stop_when_idle() + + # Return the first answer from the queue + return (await queue.get()).result + + return asyncio.run(main()) +""" +} + +COT_SC = { + "thought": "While an LLM can arrive at the correct answer, its reasoning may vary. By repeatedly asking the same question with high temperature settings, we can generate different reasoning paths. We then combine multiple answers from these Chain-of-Thought (CoT) agents to produce a more accurate final answer through ensembling.", + "name": "Self-Consistency with Chain-of-Thought", + "code": """def forward(self, task, agent_model_kwargs): + import asyncio + import logging + import json + from dataclasses import dataclass + import sys + from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core.base import AgentId, AgentRuntime, MessageContext + from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, DefaultSubscription + from autogen_core.components.models import ( + ChatCompletionClient, + LLMMessage, + SystemMessage, + UserMessage, + ) + from typing import List + from autogen_ext.models import AzureOpenAIChatCompletionClient + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + + token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") + + # Create an AzureOpenAI model client. + model_client = AzureOpenAIChatCompletionClient( + model=agent_model_kwargs['model'], + api_version=agent_model_kwargs['api_version'], + azure_endpoint=agent_model_kwargs['azure_endpoint'], + azure_ad_token_provider=token_provider, + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, + ) + + @dataclass + class WorkerTask: + task: str + previous_results: List[str] + + + @dataclass + class WorkerTaskResult: + result: str + + + @dataclass + class UserTask: + task: str + + + @dataclass + class FinalResult: + result: str + + + class WorkerAgent(RoutedAgent): + def __init__( + self, + model_client: ChatCompletionClient, + instruction: str, + ) -> None: + super().__init__(description="Worker Agent") + self._model_client = model_client + self._instruction = instruction + + @message_handler + async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult: + user_prompt = message.task + "\\n" + self._instruction + + if message.previous_results: + # If previous results are provided, we need to synthesize them to create a single prompt. + # system_prompt = "You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. 
Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:" + system_prompt = "Given all the solutions, reason over them carefully and provide a final answer." + system_prompt += "\\n" + "\\n\\n".join([f"{i+1}. {r}" for i, r in enumerate(message.previous_results)]) + model_result = await self._model_client.create( + [SystemMessage(system_prompt), UserMessage(content=user_prompt, source="user")] + ) + else: + # If no previous results are provided, we can simply pass the user query to the model. + model_result = await self._model_client.create([UserMessage(content=user_prompt, source="user")]) + assert isinstance(model_result.content, str) + print(f"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}") + return WorkerTaskResult(result=model_result.content) + + + class OrchestratorAgent(RoutedAgent): + def __init__( + self, + model_client: ChatCompletionClient, + worker_agent_types: List[str], + num_layers: int, + ) -> None: + super().__init__(description="Aggregator Agent") + self._model_client = model_client + self._worker_agent_types = worker_agent_types + self._num_layers = num_layers + + + @message_handler + async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult: + print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}") + # Create task for the first layer. + worker_task = WorkerTask(task=message.task, previous_results=[]) + # Iterate over layers. + for i in range(self._num_layers): + # Assign workers for this layer. + worker_ids = [ + AgentId(worker_type, f"{self.id.key}/layer_{i}/worker_{j}") + for j, worker_type in enumerate(self._worker_agent_types) + ] + # Dispatch tasks to workers. + print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}") + results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids]) + print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}") + # Prepare task for the next layer. + worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results]) + # Perform final aggregation. + print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation") + # system_prompt = "You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:" + system_prompt = "Given all the above solutions, reason over them carefully and provide a final answer." + system_prompt += "\\n" + "\\n\\n".join([f"{i+1}. 
{r}" for i, r in enumerate(worker_task.previous_results)]) + model_result = await self._model_client.create( + [SystemMessage(system_prompt), UserMessage(content=message.task, source="user")] + ) + assert isinstance(model_result.content, str) + return FinalResult(result=model_result.content) + + # Define the main function to set up and run the agent system + async def main(): + + # Initialize the agent runtime + runtime = SingleThreadedAgentRuntime() + + cot_instruction = "Please think step by step and then solve the task." + await WorkerAgent.register( + runtime, "worker", lambda: WorkerAgent(model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"), instruction=cot_instruction) + ) + await OrchestratorAgent.register( + runtime, + "orchestrator", + lambda: OrchestratorAgent( + model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"), worker_agent_types=["worker"] * 5, num_layers=1 + ), + ) + + runtime.start() + result = await runtime.send_message(UserTask(task=task), AgentId("orchestrator", "default")) + return result.result + + return asyncio.run(main()) +""" +} + +Reflexion = { + "thought": "To enhance its performance, an LLM can iteratively improve its answer based on feedback. By reflecting on its previous attempts and incorporating feedback, the model can refine its reasoning and provide a more accurate solution.", + "name": "Self-Refine (Reflexion)", + "code": '''def forward(self, task, agent_model_kwargs): + import asyncio + import json + import logging + import re + import sys + import uuid + from dataclasses import dataclass + from typing import Dict, List, Union + from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime + from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription + from autogen_core.components.models import ( + AssistantMessage, + ChatCompletionClient, + LLMMessage, + SystemMessage, + UserMessage, + ) + from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent + from autogen_ext.models import AzureOpenAIChatCompletionClient + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + + token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") + + # Create an AzureOpenAI model client. + model_client = AzureOpenAIChatCompletionClient( + model=agent_model_kwargs['model'], + api_version=agent_model_kwargs['api_version'], + azure_endpoint=agent_model_kwargs['azure_endpoint'], + azure_ad_token_provider=token_provider, + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, + ) + + @dataclass + class WritingTask: + task: str + + + @dataclass + class WritingResult: + task: str + answer: str + review: str + + + @dataclass + class ReviewTask: + session_id: str + writing_task: str + answer_scratchpad: str + answer: str + + + @dataclass + class ReviewResult: + review: str + session_id: str + approved: bool + + + @default_subscription + class WorkerAgent(RoutedAgent): + "An agent that performs writing tasks." + + def __init__(self, + model_client: ChatCompletionClient, + instruction: str, + ) -> None: + super().__init__("A helpful assistant") + self._system_messages: List[LLMMessage] = [ + SystemMessage( + content="""You are a helpful assistant. Work with the critic to improve your answer. + Make sure to directly answer the question. Keep it very concise. 
+
+                    Respond using the following format:
+
+                    Thoughts: <Your comments>
+                    Answer: <Your answer>
+                    """,
+                )
+            ]
+            self._model_client = model_client
+            self._session_memory: Dict[str, List[WritingTask | ReviewTask | ReviewResult]] = {}
+            self._instruction = instruction
+
+        @message_handler
+        async def handle_writing_task(self, message: WritingTask, ctx: MessageContext) -> None:
+            # Store the messages in a temporary memory for this request only.
+            session_id = str(uuid.uuid4())
+            self._session_memory.setdefault(session_id, []).append(message)
+            # Generate a response using the chat completion API.
+            response = await self._model_client.create(
+                self._system_messages + [UserMessage(content=message.task + self._instruction, source=self.metadata["type"])],
+                cancellation_token=ctx.cancellation_token,
+            )
+            assert isinstance(response.content, str)
+            # Extract the answer from the response.
+            answer = self._extract_answer(response.content)
+            if answer is None:
+                raise ValueError("Answer not found.")
+            # Create a review task.
+            review_task = ReviewTask(
+                session_id=session_id,
+                writing_task=message.task,
+                answer_scratchpad=response.content,
+                answer=answer,
+            )
+            # Store the review task in the session memory.
+            self._session_memory[session_id].append(review_task)
+            # Publish a review task.
+            await self.publish_message(review_task, topic_id=TopicId("default", self.id.key))
+
+        @message_handler
+        async def handle_review_result(self, message: ReviewResult, ctx: MessageContext) -> None:
+            # Store the review result in the session memory.
+            self._session_memory[message.session_id].append(message)
+            # Obtain the request from previous messages.
+            review_request = next(
+                m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, ReviewTask)
+            )
+            assert review_request is not None
+            # Check if the answer is approved.
+            if message.approved:
+                # Publish the writing result.
+                await self.publish_message(
+                    WritingResult(
+                        answer=review_request.answer,
+                        task=review_request.writing_task,
+                        review=message.review,
+                    ),
+                    topic_id=TopicId("result", self.id.key),
+                )
+                print("Writing Result:")
+                print("-" * 80)
+                print(f"Task:\\n{review_request.writing_task}")
+                print("-" * 80)
+                print(f"Answer:\\n{review_request.answer}")
+                print("-" * 80)
+                print(f"Review:\\n{message.review}")
+                print("-" * 80)
+            else:
+                # Create a list of LLM messages to send to the model.
+                messages: List[LLMMessage] = [*self._system_messages]
+                for m in self._session_memory[message.session_id]:
+                    if isinstance(m, ReviewResult):
+                        messages.append(UserMessage(content=m.review, source="Reviewer"))
+                    elif isinstance(m, ReviewTask):
+                        messages.append(AssistantMessage(content=m.answer_scratchpad, source="Worker"))
+                    elif isinstance(m, WritingTask):
+                        messages.append(UserMessage(content=m.task, source="User"))
+                    else:
+                        raise ValueError(f"Unexpected message type: {m}")
+                # Generate a revision using the chat completion API.
+                response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)
+                assert isinstance(response.content, str)
+                # Extract the answer from the response.
+                answer = self._extract_answer(response.content)
+                if answer is None:
+                    raise ValueError("Answer not found.")
+                # Create a new review task.
+                review_task = ReviewTask(
+                    session_id=message.session_id,
+                    writing_task=review_request.writing_task,
+                    answer_scratchpad=response.content,
+                    answer=answer,
+                )
+                # Store the review task in the session memory.
+                self._session_memory[message.session_id].append(review_task)
+                # Publish a new review task.
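+                # The write/review loop repeats until the reviewer approves, at
+                # which point the approved branch above publishes a WritingResult.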
+                await self.publish_message(review_task, topic_id=TopicId("default", self.id.key))
+
+
+        def _extract_answer(self, text: str) -> Union[str, None]:
+            pattern = "(?<=Answer: ).*"
+            # Search for the answer pattern in the text.
+            match = re.search(pattern, text, re.DOTALL)
+            # Return the extracted answer if a match is found.
+            if match:
+                return match.group(0)
+            return None
+
+    @default_subscription
+    class ReviewerAgent(RoutedAgent):
+        """An agent that critiques tasks."""
+
+        def __init__(self, model_client: ChatCompletionClient) -> None:
+            super().__init__("A critic agent.")
+            self._system_messages: List[LLMMessage] = [
+                SystemMessage(
+                    content="""You are a critic. Review answers and point out where they might be wrong.
+                    Respond using the following JSON format:
+                    {
+                        "correctness": "<Your comments>",
+                        "approval": "<APPROVE or REVISE>",
+                        "suggested_changes": "<Your comments>"
+                    }
+                    """,
+                )
+            ]
+            self._session_memory: Dict[str, List[ReviewTask | ReviewResult]] = {}
+            self._model_client = model_client
+
+        @message_handler
+        async def handle_review_task(self, message: ReviewTask, ctx: MessageContext) -> None:
+            # Format the prompt for the review.
+            # Gather the previous feedback if available.
+            previous_feedback = ""
+            if message.session_id in self._session_memory:
+                previous_review = next(
+                    (m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, ReviewResult)),
+                    None,
+                )
+                if previous_review is not None:
+                    previous_feedback = previous_review.review
+            # Store the messages in a temporary memory for this request only.
+            self._session_memory.setdefault(message.session_id, []).append(message)
+            prompt = f"""The problem statement is: {message.writing_task}
+            The answer is:
+            ```
+            {message.answer}
+            ```
+
+            Previous feedback:
+            {previous_feedback}
+
+            Please review the answer. If previous feedback was provided, see if it was addressed.
+            """
+            # Generate a response using the chat completion API.
+            response = await self._model_client.create(
+                self._system_messages + [UserMessage(content=prompt, source=self.metadata["type"])],
+                cancellation_token=ctx.cancellation_token,
+                json_output=True,
+            )
+            assert isinstance(response.content, str)
+            # TODO: use structured generation library e.g. guidance to ensure the response is in the expected format.
+            # Parse the response JSON.
+            review = json.loads(response.content)
+            # Construct the review text.
+            review_text = "Review:\\n" + "\\n".join([f"{k}: {v}" for k, v in review.items()])
+            approved = review["approval"].lower().strip() == "approve"
+            result = ReviewResult(
+                review=review_text,
+                session_id=message.session_id,
+                approved=approved,
+            )
+            # Store the review result in the session memory.
+            self._session_memory[message.session_id].append(result)
+            # Publish the review result.
+            await self.publish_message(result, topic_id=TopicId("default", self.id.key))
+
+
+    # Define the main function to set up and run the agent system
+    async def main():
+        queue = asyncio.Queue[WritingResult]()
+        async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingResult, ctx: MessageContext) -> None:
+            await queue.put(message)
+
+        runtime = SingleThreadedAgentRuntime()
+        await ReviewerAgent.register(
+            runtime, "ReviewerAgent", lambda: ReviewerAgent(model_client=model_client)
+        )
+        cot_instruction = "Please think step by step and then solve the task."
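+        # Register the worker under a fixed type name; the runtime constructs the
+        # instance lazily when the first message arrives.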
+        await WorkerAgent.register(
+            runtime, "WorkerAgent", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction)
+        )
+        result_topic = TypeSubscription(topic_type="result", agent_type="output_result")
+        await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])
+
+        runtime.start()
+        await runtime.publish_message(
+            message=WritingTask(task=task),
+            topic_id=DefaultTopicId(),
+        )
+
+        # Keep processing messages until idle.
+        await runtime.stop_when_idle()
+
+        # Return the answer from the queue
+        return (await queue.get()).answer
+
+    return asyncio.run(main())
+'''
+}
+
+LLM_debate = {
+    "thought": "By letting different LLMs debate with each other, we can leverage their diverse perspectives to find better solutions for tasks.",
+    "name": "LLM Debate",
+    "code": """def forward(self, taskInfo):
+    # Instruction for initial reasoning
+    debate_initial_instruction = "Please think step by step and then solve the task."
+
+    # Instruction for debating and updating the solution based on other agents' solutions
+    debate_instruction = "Given solutions to the problem from other agents, consider their opinions as additional advice. Please think carefully and provide an updated answer."
+
+    # Initialize debate agents with different roles and a moderate temperature for varied reasoning
+    debate_agents = [LLMAgentBase(['thinking', 'answer'], 'Debate Agent', temperature=0.8, role=role) for role in ['Reading Comprehension Specialist', 'Logical Reasoning Strategist', 'Multidisciplinary Knowledge Integrator']]
+
+    # Instruction for final decision-making based on all debates and solutions
+    final_decision_instruction = "Given all the above thinking and answers, reason over them carefully and provide a final answer."
+    final_decision_agent = LLMAgentBase(['thinking', 'answer'], 'Final Decision Agent', temperature=0.1)
+
+    max_round = 2 # Maximum number of debate rounds
+    all_thinking = [[] for _ in range(max_round)]
+    all_answer = [[] for _ in range(max_round)]
+
+    # Perform debate rounds
+    for r in range(max_round):
+        for i in range(len(debate_agents)):
+            if r == 0:
+                thinking, answer = debate_agents[i]([taskInfo], debate_initial_instruction)
+            else:
+                input_infos = [taskInfo] + [all_thinking[r-1][i]] + all_thinking[r-1][:i] + all_thinking[r-1][i+1:]
+                thinking, answer = debate_agents[i](input_infos, debate_instruction)
+            all_thinking[r].append(thinking)
+            all_answer[r].append(answer)
+
+    # Make the final decision based on all debate results and solutions
+    thinking, answer = final_decision_agent([taskInfo] + all_thinking[max_round-1] + all_answer[max_round-1], final_decision_instruction)
+    return answer
+"""
+}
+
+Take_a_step_back = {"thought": "Let the LLM first think about the principles involved in solving this task, which could be helpful. By understanding the underlying principles, the model can better reason through the problem and provide a more accurate solution.",
+                    "name": "Step-back Abstraction",
+                    "code": """def forward(self, taskInfo):
+    # Instruction for understanding the principles involved in the task
+    principle_instruction = "What are the physics, chemistry or biology principles and concepts involved in solving this task? First think step by step. Then list all involved principles and explain them."
+
+    # Instruction for solving the task based on the principles
+    cot_instruction = "Given the question and the involved principle behind the question, think step by step and then solve the task."
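+    # Two-stage pipeline: the principle agent's outputs are fed as extra context
+    # Infos to the CoT agent below, grounding the final answer in them.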
+ + # Instantiate LLM agents + principle_agent = LLMAgentBase(['thinking', 'principle'], 'Principle Agent') + cot_agent = LLMAgentBase(['thinking', 'answer'], 'Chain-of-Thought Agent') + + # Get the principles involved in the task + thinking, principle = principle_agent([taskInfo], principle_instruction) + + # Use the principles to solve the task + thinking, answer = cot_agent([taskInfo, thinking, principle], cot_instruction) + return answer +""" + } + +QD = {"thought": "Similar to Quality-Diversity methods, let LLM generate multiple diverse interesting solutions could help. By encouraging the model to explore different reasoning paths, we can increase the chances of finding the best solution.", + "name": "Quality-Diversity", + "code": """def forward(self, taskInfo): + # Instruction for initial reasoning + cot_initial_instruction = "Please think step by step and then solve the task." + + # Instruction for giving diverse answers + qd_instruction = "Given previous attempts, try to come up with another interesting way to solve the task." + cot_agent = LLMAgentBase(['thinking', 'answer'], 'Chain-of-Thought Agent') + + # Instruction for final decision-making based on collected reasoning and answers + final_decision_instruction = "Given all the above solutions, reason over them carefully and provide a final answer." + final_decision_agent = LLMAgentBase(['thinking', 'answer'], 'Final Decision Agent', temperature=0.1) + + N_max = 3 # Maximum number of attempts + + # Initial attempt + cot_inputs = [taskInfo] + possible_answers = [] + thinking, answer = cot_agent(cot_inputs, cot_initial_instruction, 0) + + # Add the answer to the list of possible answers + possible_answers.extend([thinking, answer]) + + for i in range(N_max): + # Reflect on previous attempts and generate another interesting answer + cot_inputs.extend([thinking, answer]) + + # Generate another interesting answer + thinking, answer = cot_agent(cot_inputs, qd_instruction, i + 1) + possible_answers.extend([thinking, answer]) + + # Make the final decision based on all generated answers + thinking, answer = final_decision_agent([taskInfo] + possible_answers, final_decision_instruction) + return answer +""" + } + +Role_Assignment = {"thought": "Similar to Auto-GPT and expert prompting, we can use dynamic control flow in the design to let the agent decide what expert we should use.", + "name": "Dynamic Assignment of Roles", + "code": """def forward(self, taskInfo): + # Instruction for step-by-step reasoning + cot_instruction = "Please think step by step and then solve the task." + expert_agents = [LLMAgentBase(['thinking', 'answer'], 'Expert Agent', role=role) for role in ['Reading Comprehension Specialist', 'Logical Reasoning Strategist', 'Multidisciplinary Knowledge Integrator', 'Helpful Assistant']] + + # Instruction for routing the task to the appropriate expert + routing_instruction = "Given the task, please choose an Expert to answer the question. Choose from: Math Professor, Grade School Teacher, Math Enthusiast." 
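+    # NOTE: the keyword matching below keys off 'professor', 'teacher', and
+    # 'enthusiast' in the router's choice, so the options named in
+    # routing_instruction must keep those words; any other choice falls back to
+    # the Helpful Assistant expert.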
+ routing_agent = LLMAgentBase(['choice'], 'Routing agent') + + # Get the choice of expert to route the task + choice = routing_agent([taskInfo], routing_instruction)[0] + + if 'professor' in choice.content.lower(): + expert_id = 0 + elif 'teacher' in choice.content.lower(): + expert_id = 1 + elif 'enthusiast' in choice.content.lower(): + expert_id = 2 + else: + expert_id = 3 # Default to helpful assistant + + thinking, answer = expert_agents[expert_id]([taskInfo], cot_instruction) + return answer +""" + } + +system_prompt = """You are a helpful assistant. Make sure to return in a WELL-FORMED JSON object.""" + +base = """# Overview +You are an expert machine learning researcher testing various agentic systems. Your objective is to design building blocks such as prompts and control flows within these systems to solve complex tasks. Your aim is to design an optimal agent performing well on the Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs (DROP), which assesses the ability to perform discrete reasoning and comprehend detailed information across multiple paragraphs. + +## An example question from DROP: + +You will be asked to read a passage and answer a question. +Passage: +Non-nationals make up more than half of the population of Bahrain, with immigrants making up about 55% of the overall population. Of those, the vast majority come from South and Southeast Asia: according to various media reports and government statistics dated between 2005-2009 roughly 290,000 Indians, 125,000 Bangladeshis, 45,000 Pakistanis, 45,000 Filipinos, and 8,000 Indonesians.\nQuestion: What two nationalities had the same number of people living in Bahrain between 2005-2009? +Answer [Not Given]: +Pakistanis and Filipinos + + +# The utility code: + +```python +from collections import namedtuple +from typing import Union +import numpy as np +import json + +import openai +import backoff +from utils import random_id + +# Initialize the OpenAI client +client = openai.OpenAI() + +# Named tuple for holding task information +Info = namedtuple('Info', ['name', 'author', 'content', 'iteration_idx']) + +# Format instructions for LLM response +FORMAT_INST = lambda request_keys: f"Reply EXACTLY with the following JSON format.\n{str(request_keys)}\nDO NOT MISS ANY FIELDS AND MAKE SURE THE JSON FORMAT IS CORRECT!\n" + +# Description of the role for the LLM +ROLE_DESC = lambda role: f"You are a {role}." + +@backoff.on_exception(backoff.expo, openai.RateLimitError) +def get_json_response_from_gpt(msg, model, system_message, temperature=0.5): + \""" + Function to get JSON response from GPT model. + + Args: + - msg (str): The user message. + - model (str): The model to use. + - system_message (str): The system message. + - temperature (float): Sampling temperature. + + Returns: + - dict: The JSON response. + \""" + response = client.chat.completions.create( + model=model, + messages=[ + {"role": "system", "content": system_message}, + {"role": "user", "content": msg}, + ], + temperature=temperature, + max_tokens=1024, + stop=None, + response_format={"type": "json_object"} + ) + content = response.choices[0].message.content + json_dict = json.loads(content) + return json_dict + +class LLMAgentBase: + \""" + Base class for an LLM agent. + + Attributes: + - output_fields (list): Fields expected in the output. + - agent_name (str): Name of the agent. + - role (str): Role description for the agent. + - model (str): Model to be used. (option. Keep it default.) + - temperature (float): Sampling temperature. 
+ - id (str): Unique identifier for the agent instance. + \""" + + def __init__(self, output_fields: list, agent_name: str, role='helpful assistant', model='gpt-3.5-turbo-0125', temperature=0.5) -> None: + self.output_fields = output_fields + self.agent_name = agent_name + self.role = role + self.model = model + self.temperature = temperature + self.id = random_id() + + def generate_prompt(self, input_infos, instruction) -> str: + \""" + Generates a prompt for the LLM. + + Args: + - input_infos (list): List of input information. + - instruction (str): Instruction for the task. + + Returns: + - tuple: System prompt and user prompt. + + An example of a generated prompt: + "" + You are a helpful assistant. + + # Output Format: + Reply EXACTLY with the following JSON format. + ... + + # Your Task: + You will be given some number of paired example inputs and outputs. The outputs ... + + ### thinking #1 by Chain-of-Thought Agent hkFo (yourself): + ... + + ### code #1 by Chain-of-Thought Agent hkFo (yourself): + ... + + ### answer by Chain-of-Thought Agent hkFo's code evaluator:... + + + # Instruction: + Please think step by step and then solve the task by writing the code. + "" + \""" + output_fields_and_description = {key: f"Your {key}." if not 'answer' in key else f"Your {key}. Return ONLY the alphabet choice, i.e. A or B or C or D." for key in self.output_fields} + system_prompt = ROLE_DESC(self.role) + "\n\n" + FORMAT_INST(output_fields_and_description) + + input_infos_text = '' + for input_info in input_infos: + if isinstance(input_info, Info): + (field_name, author, content, iteration_idx) = input_info + else: + continue + if author == self.__repr__(): + author += ' (yourself)' + if field_name == 'task': + input_infos_text += f'# Your Task:\n{content}\n\n' + elif iteration_idx != -1: + input_infos_text += f'### {field_name} #{iteration_idx+1} by {author}:\n{content}\n\n' + else: + input_infos_text += f'### {field_name} by {author}:\n{content}\n\n' + + prompt = input_infos_text + instruction + return system_prompt, prompt + + def query(self, input_infos: list, instruction, iteration_idx=-1) -> list[Info]: + \""" + Queries the LLM with provided input information and instruction. + + Args: + - input_infos (list): List of input information. + - instruction (str): Instruction for the task. + - iteration_idx (int): Iteration index for the task. + + Returns: + - output_infos (list[Info]): Output information. + \""" + system_prompt, prompt = self.generate_prompt(input_infos, instruction) + response_json = get_json_response_from_gpt(prompt, self.model, system_prompt, self.temperature) + + output_infos = [] + for key, value in response_json.items(): + info = Info(key, self.__repr__(), value, iteration_idx) + output_infos.append(info) + return output_infos + + def __repr__(self): + return f"{self.agent_name} {self.id}" + + def __call__(self, input_infos: list, instruction, iteration_idx=-1): + # Note: + # The output of the LLM is a list of Info. If you are only querying one output, you should access it with [0]. + # It is a good practice to always include 'thinking' in the output. + return self.query(input_infos, instruction, iteration_idx=iteration_idx) + +class AgentArchitecture: + \""" + Fill in your code here. + \""" + def forward(self, taskInfo) -> Union[Info, str]: + \""" + Placeholder method for processing task information. + + Args: + - taskInfo (Info): Task information. + + Returns: + - Answer (Union[Info, str]): Your FINAL Answer. Return either a namedtuple Info or a string of answers. 
+ \""" + pass +``` +# Discovered architecture archive +Here is the archive of the discovered architectures: + +[ARCHIVE] + +The fitness value is the median and 95% Bootstrap Confidence Interval of the correct rate on a validation question set. Your GOAL is to maximize the "fitness". + +# Output Instruction and Example: +The first key should be ("thought"), and it should capture your thought process for designing the next function. In the "thought" section, first reason about what should be the next interesting agent to try, then describe your reasoning and the overall concept behind the agent design, and finally detail the implementation steps. +The second key ("name") corresponds to the name of your next agent architecture. +Finally, the last key ("code") corresponds to the exact “forward()” function in Python code that you would like to try. You must write a COMPLETE CODE in "code": Your code will be part of the entire project, so please implement complete, reliable, reusable code snippets. + +Here is an example of the output format for the next agent architecture: + +[EXAMPLE] + +You must use the exact function interface used above. You need to specify the instruction, input information, and the required output fields for various LLM agents to do their specific part of the architecture. +Also, it could be helpful to set the LLM’s role and temperature to further control the LLM’s response. Note that the LLMAgentBase() will automatically parse the output and return a list of “Infos”. You can get the content by Infos.content. +DO NOT FORGET the taskInfo input to LLM if you think it is needed, otherwise LLM will not know about the task. + +## WRONG Implementation examples: +Here are some mistakes you may make: + +1. This is WRONG: ``` +feedback, correct = critic_agent([taskInfo, thinking, answer], critic_instruction, i) +feedback_info = verifier_agent([taskInfo, Info('feedback', 'Critic Agent', thinking, 0)], verification_instruction) +``` +It is wrong to use "Info('feedback', 'Critic Agent', thinking, 0)". The returned "feedback" from LLMAgentBase is already Info. + +2. This is WRONG: ``` +# Debugging: Log the generated answer +print('Generated Answer:', ...) +feedback_info = verifier_agent([taskInfo, Info('feedback', 'Critic Agent', thinking, 0)], verification_instruction) +if len(feedback_info) < 3: # Check if feedback_info has enough elements + return 'Error: Feedback info incomplete' +``` +First, the len(feedback_info) will not work. +Second, you should never return an error message. You should always return the best answer you can get. +Third, you should never print anything in the code. +Lastly, again, DO NOT CREATE Info object by yourself. + +3. This is WRONG: ``` +all_thinking = [] +all_answers = [] +for agent, role in zip(agents, roles): + outputs = agent([taskInfo], independent_reasoning_instruction.format(role=role)) + all_thinking.append(outputs[0].content) + all_answers.append(outputs[1].content) + +# Aggregate the reasoning paths and answers +aggregated_thinking = '\n'.join(all_thinking) +aggregated_answers = '\n'.join(all_answers) +``` +You SHOULD NOT extract the content from the Info object by yourself. You should use the Info object directly. If you want to aggregate the content, you should just put those Info objects into a list and then use the list as input to the next LLM agent. + +4. 
This is WRONG: ``` +reasoning_agent = LLMAgentBase(['thinking', 'answer'], 'Reasoning Agent') +response_infos = reasoning_agent([taskInfo] + ..., reasoning_instruction) + +# Extract the final answer from the response_infos +for info in response_infos: + if info.name == 'final_answer': + return info +# Fallback if no answer is found +return Info('answer', 'Final Decision Agent', 'No answer generated.', 0) +``` +You should not extract the final answer by yourself. You SHOULD directly return the answer Info. Also, you should always return the best answer you can get. +CORRECT example: ``` +reasoning_agent = LLMAgentBase(['thinking', 'answer'], 'Reasoning Agent') +thinking, answer = reasoning_agent([taskInfo] + ..., reasoning_instruction) +return answer +``` + +# Your task +You are deeply familiar with prompting techniques and the agent works from the literature. Your goal is to maximize the specified performance metrics by proposing interestingly new agents. +Observe the discovered agents carefully and think about what insights, lessons, or stepping stones can be learned from them. +Be creative when thinking about the next interesting agent to try. You are encouraged to draw inspiration from related agent papers or academic papers from other research areas. +Use the knowledge from the archive and inspiration from academic literature to propose the next interesting agentic system design. +THINK OUTSIDE THE BOX. +""" + +Reflexion_prompt_1 = f""""[EXAMPLE]Carefully review the proposed new architecture and reflect on the following points: + +1. **Interestingness**: Assess whether your proposed architecture is interesting or innovative compared to existing methods in the archive. If you determine that the proposed architecture is not interesting, suggest a new architecture that addresses these shortcomings. +- Make sure to check the difference between the proposed architecture and previous attempts. +- Compare the proposal and the architectures in the archive CAREFULLY, including their actual differences in the implementation. +- Decide whether the current architecture is innovative. +- USE CRITICAL THINKING! + +2. **Implementation Mistakes**: Identify any mistakes you may have made in the implementation. Review the code carefully, debug any issues you find, and provide a corrected version. REMEMBER checking "## WRONG Implementation examples" in the prompt. + +3. **Improvement**: Based on the proposed architecture, suggest improvements in the detailed implementation that could increase its performance or effectiveness. In this step, focus on refining and optimizing the existing implementation without altering the overall design framework, except if you want to propose a different architecture if the current is not interesting. +- Observe carefully about whether the implementation is actually doing what it is supposed to do. +- Check if there is redundant code or unnecessary steps in the implementation. Replace them with effective implementation. +- Try to avoid the implementation being too similar to the previous agent. + +And then, you need to improve or revise the implementation, or implement the new proposed architecture based on the reflection. + +Your response should be organized as follows: + +"reflection": Provide your thoughts on the interestingness of the architecture, identify any mistakes in the implementation, and suggest improvements. + +"thought": Revise your previous proposal or propose a new architecture if necessary, using the same format as the example response. 
+ +"name": Provide a name for the revised or new architecture. (Don't put words like "new" or "improved" in the name.) + +"code": Provide the corrected code or an improved implementation. Make sure you actually implement your fix and improvement in this code. +""" + +Reflexion_prompt_2 = """Using the tips in "## WRONG Implementation examples" section, revise the code further. +Your response should be organized as follows: +Put your new reflection thinking in "reflection". Repeat the previous "thought" and "name", and update the corrected version of the code in "code". +""" + + +def get_init_archive(): + # return [COT]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] + # return [COT_SC]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] + return [Reflexion]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] + + + +def get_prompt(current_archive, adaptive=False): + archive_str = ",\n".join([json.dumps(sol) for sol in current_archive]) + archive_str = f"[{archive_str}]" + prompt = base.replace("[ARCHIVE]", archive_str) + prompt = prompt.replace("[EXAMPLE]", json.dumps(EXAMPLE)) + + return system_prompt, prompt + + +def get_reflexion_prompt(prev_example): + prev_example_str = "Here is the previous agent you tried:\n" + json.dumps(prev_example) + "\n\n" + r1 = Reflexion_prompt_1.replace("[EXAMPLE]", prev_example_str) if prev_example else Reflexion_prompt_1.replace("[EXAMPLE]", "") + return r1, Reflexion_prompt_2 diff --git a/python/packages/autogen-core/samples/common/adas/utils.py b/python/packages/autogen-core/samples/common/adas/utils.py new file mode 100644 index 000000000000..5876c6127900 --- /dev/null +++ b/python/packages/autogen-core/samples/common/adas/utils.py @@ -0,0 +1,296 @@ +# https://github.com/openai/simple-evals/blob/main/drop_eval.py +""" +DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs +Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, Matt Gardner +https://arxiv.org/abs/1903.00161 +""" + +import gzip +import json +import random +import re +import string +from typing import Any, Dict, List, Set, Tuple, Union + +import numpy as np +from scipy.optimize import linear_sum_assignment + + +def _remove_articles(text: str) -> str: + regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) + return re.sub(regex, " ", text) + + +def _white_space_fix(text: str) -> str: + return " ".join(text.split()) + + +EXCLUDE = set(string.punctuation) + + +def _remove_punc(text: str) -> str: + if not _is_number(text): + return "".join(ch for ch in text if ch not in EXCLUDE) + else: + return text + + +def _lower(text: str) -> str: + return text.lower() + + +def _tokenize(text: str) -> List[str]: + return re.split(" |-", text) + + +def _normalize_answer(text: str) -> str: + """Lower text and remove punctuation, articles and extra whitespace.""" + + parts = [ + _white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token))))) + for token in _tokenize(text) + ] + parts = [part for part in parts if part.strip()] + normalized = " ".join(parts).strip() + return normalized + + +def _is_number(text: str) -> bool: + try: + float(text) + return True + except ValueError: + return False + + +def _normalize_number(text: str) -> str: + if _is_number(text): + return str(float(text)) + else: + return text + + +def _answer_to_bags( + answer: Union[str, List[str], Tuple[str, ...]] +) -> Tuple[List[str], List[Set[str]]]: + if isinstance(answer, (list, tuple)): + raw_spans = answer + 
else: + raw_spans = [answer] + normalized_spans: List[str] = [] + token_bags = [] + for raw_span in raw_spans: + normalized_span = _normalize_answer(raw_span) + normalized_spans.append(normalized_span) + token_bags.append(set(normalized_span.split())) + return normalized_spans, token_bags + + +def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]: + """ + Takes gold and predicted answer sets and first finds the optimal 1-1 alignment + between them and gets maximum metric values over all the answers. + """ + scores = np.zeros([len(gold), len(predicted)]) + for gold_index, gold_item in enumerate(gold): + for pred_index, pred_item in enumerate(predicted): + if _match_numbers_if_present(gold_item, pred_item): + scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item) + row_ind, col_ind = linear_sum_assignment(-scores) + + max_scores = np.zeros([max(len(gold), len(predicted))]) + for row, column in zip(row_ind, col_ind): + max_scores[row] = max(max_scores[row], scores[row, column]) + return max_scores + + +def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float: + intersection = len(gold_bag.intersection(predicted_bag)) + if not predicted_bag: + precision = 1.0 + else: + precision = intersection / float(len(predicted_bag)) + if not gold_bag: + recall = 1.0 + else: + recall = intersection / float(len(gold_bag)) + f1 = ( + (2 * precision * recall) / (precision + recall) + if not (precision == 0.0 and recall == 0.0) + else 0.0 + ) * 100 + return f1 + + +def _match_numbers_if_present(gold_bag: Set[str], predicted_bag: Set[str]) -> bool: + gold_numbers = set() + predicted_numbers = set() + for word in gold_bag: + if _is_number(word): + gold_numbers.add(word) + for word in predicted_bag: + if _is_number(word): + predicted_numbers.add(word) + if (not gold_numbers) or gold_numbers.intersection(predicted_numbers): + return True + return False + + +def get_drop_metrics( + predicted: Union[str, List[str], Tuple[str, ...]], gold: Union[str, List[str], Tuple[str, ...]] +) -> Tuple[float, float]: + """ + Takes a predicted answer and a gold answer (that are both either a string or a list of + strings), and returns exact match and the DROP F1 metric for the prediction. If you are + writing a script for evaluating objects in memory (say, the output of predictions during + validation, or while training), this is the function you want to call, after using + :func:`answer_json_to_strings` when reading the gold answer from the released data file. + """ + predicted_bags = _answer_to_bags(predicted) + gold_bags = _answer_to_bags(gold) + + if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len(gold_bags[0]): + exact_match = 1.0 + else: + exact_match = 0.0 + + f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) + f1 = np.mean(f1_per_bag) + f1 = round(f1, 2) + return exact_match, f1 + + +def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: + """ + Takes an answer JSON blob from the DROP data release and converts it into strings used for + evaluation. 
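+
+    For example, {"number": "12"} yields (("12",), "number"); a one-element "spans"
+    list yields ((span,), "span"); and a multi-element "spans" list yields
+    ((span1, span2, ...), "spans").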
+ """ + if "number" in answer and answer["number"]: + return tuple([str(answer["number"])]), "number" + elif "spans" in answer and answer["spans"]: + return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans" + elif "date" in answer: + return ( + tuple( + [ + "{0} {1} {2}".format( + answer["date"]["day"], answer["date"]["month"], answer["date"]["year"] + ).strip() + ] + ), + "date", + ) + else: + raise ValueError( + f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}" + ) + + +def answer_json_to_string(answer_json): + return json.dumps(answer_json_to_strings(answer_json)) + + +def normalize(s: str) -> str: + """Lower text and remove punctuation, articles and extra whitespace.""" + s = s.lower() + exclude = set(string.punctuation) + s = "".join(char for char in s if char not in exclude) + s = re.sub(r"\b(a|an|the)\b", " ", s) + s = " ".join(s.split()) + return s + + +def fuzzy_match(s1: str, s2: str) -> bool: + s1 = normalize(s1) + s2 = normalize(s2) + + if s1 == "" or s2 == "": + return s1 == s2 + + return s1 in s2 or s2 in s1 + + +def drop_metric(sample: str, reference: list[str]) -> Tuple[float, float]: + em_scores = [] + f1_scores = [] + for answer in reference: + if answer.strip() != "": + em, f1 = get_drop_metrics(sample, answer) + em_scores.append(em) + f1_scores.append(f1) + return (max(em_scores), max(f1_scores)) + + +def load_drop(file_path): + with gzip.open(file_path, mode="rb") as f: + test_samples = [json.loads(line) for line in f] + prompt = """You will be asked to read a passage and answer a question.\n""" + few_shot_prompt = """You will be asked to read a passage and answer a question. + +# Examples: +Passage: As of the census of 2000, there were 952 people, 392 households, and 241 families residing in the village. The population density was 952.9 people per square mile (367.6/km²). There were 449 housing units at an average density of 449.4 per square mile (173.4/km²). The racial makeup of the village was 96.11% White (U.S. Census), 0.95% African American (U.S. Census) or Race (United States Census), 0.11% Native American (U.S. Census), 0.11% Asian (U.S. Census), 0.21% from Race (United States Census), and 2.52% from two or more races. 1.05% of the population were Hispanics in the United States or Latino (U.S. Census) of any race.\nQuestion: How many more people, in terms of percentage, were from two or more races compared to being solely Native American or solely Asian?\nAnswer: 2.3 + +# Your Task +--- + +""" + examples = [] + for sample in test_samples: + sample['inputs'] = few_shot_prompt + sample['context'] + sample['targets'] = sample["ref_text"].split("|") + examples.append(sample) + return examples + + +def random_id(length=4): + characters = string.ascii_letters + string.digits # includes both upper/lower case letters and numbers + random_id = ''.join(random.choices(characters, k=length)) + return random_id + + +def bootstrap_confidence_interval(data, num_bootstrap_samples=100000, confidence_level=0.95): + """ + Calculate the bootstrap confidence interval for the mean of 1D accuracy data. + Also returns the median of the bootstrap means. + + Args: + - data (list or array of float): 1D list or array of data points. + - num_bootstrap_samples (int): Number of bootstrap samples. + - confidence_level (float): The desired confidence level (e.g., 0.95 for 95%). + + Returns: + - str: Formatted string with 95% confidence interval and median as percentages with one decimal place. 
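+
+    Example (bootstrap output varies from run to run; values are illustrative):
+    >>> bootstrap_confidence_interval([78.0, 82.5, 80.0], num_bootstrap_samples=1000)
+    '95% Bootstrap Confidence Interval: (78.4%, 81.9%), Median: 80.2%'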
+    """
+    # Convert data to a numpy array for easier manipulation
+    data = np.array(data)
+
+    # List to store the means of bootstrap samples
+    bootstrap_means = []
+
+    # Generate bootstrap samples and compute the mean for each sample
+    for _ in range(num_bootstrap_samples):
+        # Resample with replacement
+        bootstrap_sample = np.random.choice(data, size=len(data), replace=True)
+        # Compute the mean of the bootstrap sample
+        bootstrap_mean = np.mean(bootstrap_sample)
+        bootstrap_means.append(bootstrap_mean)
+
+    # Convert bootstrap_means to a numpy array for percentile calculation
+    bootstrap_means = np.array(bootstrap_means)
+
+    # Compute the lower and upper percentiles for the confidence interval
+    # (np.percentile expects percentiles in the 0-100 range, not fractions)
+    lower_percentile = (1.0 - confidence_level) / 2.0
+    upper_percentile = 1.0 - lower_percentile
+    ci_lower = np.percentile(bootstrap_means, lower_percentile * 100)
+    ci_upper = np.percentile(bootstrap_means, upper_percentile * 100)
+
+    # Compute the median of the bootstrap means
+    median = np.median(bootstrap_means)
+
+    # Scores are assumed to be on a 0-100 scale already (e.g., DROP F1), so no further conversion is needed
+    ci_lower_percent = ci_lower
+    ci_upper_percent = ci_upper
+    median_percent = median
+
+    # Return the formatted string with confidence interval and median
+    return f"95% Bootstrap Confidence Interval: ({ci_lower_percent:.1f}%, {ci_upper_percent:.1f}%), Median: {median_percent:.1f}%"

From 16b642017eb0feac692eed582f99d7c653d5fb8c Mon Sep 17 00:00:00 2001
From: root
Date: Wed, 20 Nov 2024 16:42:45 -0500
Subject: [PATCH 02/21] wip

---
 .../autogen-core/samples/common/adas/adas.py  | 287 ++++-----
 .../samples/common/adas/adas_prompt.py        | 544 +++++++++++-------
 2 files changed, 474 insertions(+), 357 deletions(-)

diff --git a/python/packages/autogen-core/samples/common/adas/adas.py b/python/packages/autogen-core/samples/common/adas/adas.py
index 3a1c6bad78cf..7bd4cc2b7791 100644
--- a/python/packages/autogen-core/samples/common/adas/adas.py
+++ b/python/packages/autogen-core/samples/common/adas/adas.py
@@ -18,10 +18,15 @@
 from tqdm import tqdm
 import threading
 import random
+import numpy as np
+import requests
+from github import Github
 
-from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
-from autogen_core.base import AgentId, AgentType, AgentRuntime, CancellationToken, MessageContext, TopicId
 from autogen_core.components import RoutedAgent, default_subscription, message_handler
+from autogen_core.application import SingleThreadedAgentRuntime
+from autogen_core.base import AgentId, AgentType, AgentRuntime, CancellationToken, MessageContext, TopicId
+from autogen_core.components import DefaultTopicId
+from autogen_core.components.code_executor import CodeBlock, CodeExecutor, extract_markdown_code_blocks
 from autogen_core.components.models import (
     AssistantMessage,
     ChatCompletionClient,
@@ -29,16 +34,10 @@
     SystemMessage,
     UserMessage,
 )
-
-from autogen_core.application import SingleThreadedAgentRuntime
-from autogen_core.components import DefaultTopicId
-from autogen_core.components.models import OpenAIChatCompletionClient
-from autogen_core.components.tools import FunctionTool, PythonCodeExecutionTool, ToolSchema
 from autogen_core.components.tool_agent import ToolAgent, tool_agent_caller_loop
+from autogen_core.components.tools import FunctionTool, PythonCodeExecutionTool, ToolSchema
 from autogen_ext.code_executors import DockerCommandLineCodeExecutor #, extract_markdown_code_blocks
 from autogen_magentic_one.utils import 
LogHandler -from autogen_core.application.logging import EVENT_LOGGER_NAME # TODO fix imports import sys @@ -57,32 +56,43 @@ SEARCHING_MODE = True +def read_github_file(url): + response = requests.get(url) + if response.status_code == 200: + return response.text + else: + return None -@dataclass -class CodeWritingTask: - task: str - - -@dataclass -class CodeWritingResult: - task: str - code: str - review: str +def print_repo_contents(repo, path="", indent=""): + contents = repo.get_contents(path) + documentation = [] + for content_file in contents: + if content_file.type == "dir": + documentation.extend(print_repo_contents(repo, content_file.path, indent + "│ ")) + else: + if content_file.download_url.endswith('.md'): + print(f"Reading file from {content_file.download_url}") + f = read_github_file(content_file.download_url) + documentation.append("Title: " + content_file.name + "\nContents:\n" + f) + return documentation -@dataclass -class CodeReviewTask: - session_id: str - code_writing_task: str - code_writing_scratchpad: str - code: str +def get_autogen_documentation(): + repo_name = "microsoft/autogen" + directory_name = "python/packages/autogen-core/docs/src/user-guide/core-user-guide" + g = Github() -@dataclass -class CodeReviewResult: - review: str - session_id: str - approved: bool + subdirectories = ['core-concepts', 'framework'] + documentation = [] + for subdir in subdirectories: + try: + repo = g.get_repo(repo_name) + documentation.extend(print_repo_contents(repo, directory_name + '/'+ subdir)) + except Exception as e: + print(f"Error: {e}") + print(f"Found {len(documentation)} pages of documentation") + return documentation @dataclass @@ -126,80 +136,11 @@ class LLMAgentBaseResponse: output: str -# An agent that makes a direct call to the model, and returns json -class SimpleReflectAgent(RoutedAgent): - def __init__(self, description: str, model_client: ChatCompletionClient, system_prompt: str) -> None: - super().__init__(description) - self._system_messages: List[LLMMessage] = [ - SystemMessage( - content=system_prompt, - ) - ] - self._chat_history: List[LLMMessage] = [] - self._model_client = model_client - self._cnt = 0 - - @message_handler - async def handle_task(self, message: LLMMessageList, ctx: MessageContext) -> SimpleReflectAgentResponse: - # logging.info(f"{self._description} received message: {message}") - # import pdb; pdb.set_trace() - # model_result = await self._model_client.create( - # self._system_messages + self._chat_history + message.llm_message_list - # ) - print(f"llm_message_list {len(message.llm_message_list)}") - self._chat_history.extend(message.llm_message_list) - - print(f"-----cnt {self._cnt}") - print(f"chat history {len(self._chat_history)}") - self._cnt += 1 - assert isinstance(model_result.content, str) - json_content = json.loads(model_result.content) - return SimpleReflectAgentResponse(json_content=json_content) - - @dataclass class Message: content: str -@default_subscription -class Assistant(RoutedAgent): - def __init__(self, model_client: ChatCompletionClient) -> None: - super().__init__("An assistant agent.") - self._model_client = model_client - self._chat_history: List[LLMMessage] = [ - SystemMessage( - content="""Write Python script in markdown block, and it will be executed. -Always save figures to file in the current directory. 
Do not use plt.show()""", - ) - ] - - @message_handler - async def handle_message(self, message: Message, ctx: MessageContext) -> None: - self._chat_history.append(UserMessage(content=message.content, source="user")) - result = await self._model_client.create(self._chat_history) - print(f"\n{'-'*80}\nAssistant:\n{result.content}") - self._chat_history.append(AssistantMessage(content=result.content, source="assistant")) # type: ignore - await self.publish_message(Message(content=result.content), DefaultTopicId()) # type: ignore - - -@default_subscription -class Executor(RoutedAgent): - def __init__(self, code_executor: CodeExecutor) -> None: - super().__init__("An executor agent.") - self._code_executor = code_executor - - @message_handler - async def handle_message(self, message: Message, ctx: MessageContext) -> None: - code_blocks = extract_markdown_code_blocks(message.content) - if code_blocks: - result = await self._code_executor.execute_code_blocks( - code_blocks, cancellation_token=ctx.cancellation_token - ) - print(f"\n{'-'*80}\nExecutor:\n{result.output}") - await self.publish_message(Message(content=result.output), DefaultTopicId()) - - class AgentSystem(): def __init__(self) -> None: pass @@ -269,9 +210,6 @@ def call_forward(agent_task_queue): task = generate_task([taskInfo]) # For magentic one using the create_completion_client_from_env() helper - # export CHAT_COMPLETION_PROVIDER='azure' - - agent_model_kwargs = {} result = agent.forward(task, agent_model_kwargs) @@ -297,7 +235,6 @@ def call_forward(agent_task_queue): acc_list.append(f1_score) print(f"f1: {bootstrap_confidence_interval(acc_list)}") - import pdb; pdb.set_trace() return acc_list @@ -308,10 +245,7 @@ class ADASAgent(RoutedAgent): def __init__(self, model_client: ChatCompletionClient, - # system_prompt: str, - # evaluate_agent_type: str, - reflect_agent_type: str, - executor_agent_type: str, + system_prompt: str, args, archive ) -> None: @@ -321,14 +255,45 @@ def __init__(self, # content=system_prompt, # ) # ] - # self._evaluate_agent_id = AgentId(evaluate_agent_type, self.id.key) - self._reflect_agent_id = AgentId(reflect_agent_type, self.id.key) - self._executor_agent_id = AgentId(executor_agent_type, self.id.key) + self._args = args self._archive = archive self._model_client = model_client self._session_memory: Dict[str, List[ADASTask | ADASResult]] = {} + # TODO(yeandy): Add this as a proper Tool https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/tools.html + # pip install pygithub + self._documentation = get_autogen_documentation() + + self._system_messages: List[LLMMessage] = [ + SystemMessage( + content=system_prompt('\n'.join(self._documentation)), + ) + ] + self._chat_history: List[LLMMessage] = [] + self._model_client = model_client + self._cnt = 0 + + @message_handler + async def handle_task(self, message: LLMMessageList, ctx: MessageContext) -> SimpleReflectAgentResponse: + logging.info(f"{self._description} received message: {message}") + model_result = await self._model_client.create( + # self._system_messages + self._chat_history + message.llm_message_list + self._system_messages + message.llm_message_list + + ) + print(f"llm_message_list {len(message.llm_message_list)}") + # self._chat_history.extend(message.llm_message_list) + + print(f"-----cnt {self._cnt}") + # print(f"chat history {len(self._chat_history)}") + self._cnt += 1 + assert isinstance(model_result.content, str) + print(f"model_result.content {model_result.content}") + json_content = 
json.loads(model_result.content) + print(f"finish converting to json") + return SimpleReflectAgentResponse(json_content=json_content) + @message_handler async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None: # Store the messages in a temporary memory for this request only. @@ -369,48 +334,102 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None with open(file_path, 'w') as json_file: json.dump(archive, json_file, indent=4) - import pdb; pdb.set_trace() # Initial prompt for n in range(start, args.n_generation): print(f"============Generation {n + 1}=================") msg_list = [UserMessage(content=message.task, source=self.metadata["type"])] import pdb; pdb.set_trace() try: - response = await self.send_message(LLMMessageList(msg_list), self._reflect_agent_id) + response = await self.send_message(LLMMessageList(msg_list), self.id) + next_solution = response.json_content Reflexion_prompt_1, Reflexion_prompt_2 = get_reflexion_prompt(self._archive[-1] if n > 0 else None) + print(f"Reflexion_prompt_1 {Reflexion_prompt_1}") + print(f"Reflexion_prompt_2 {Reflexion_prompt_2}") + print(f"@@After initial prompt {response}") # Reflexion 1 - next_solution = response.json_content - new_messages = [ + # new_messages = [ + # AssistantMessage(content=str(next_solution), source=self.metadata["type"]), + # UserMessage(content=Reflexion_prompt_1, source=self.metadata["type"]), + # ] + new_messages = msg_list + [ AssistantMessage(content=str(next_solution), source=self.metadata["type"]), UserMessage(content=Reflexion_prompt_1, source=self.metadata["type"]), ] - response = await self.send_message(LLMMessageList(new_messages), AgentId('simple_reflect_agent', self.id.key)) + response = await self.send_message(LLMMessageList(new_messages), self.id) + next_solution = response.json_content + print(f"@@After Reflexion_prompt_1 {response}") # Reflexion 2 - next_solution = response.json_content - new_messages = [ + # new_messages = [ + # AssistantMessage(content=str(next_solution), source=self.metadata["type"]), + # UserMessage(content=Reflexion_prompt_2, source=self.metadata["type"]), + # ] + new_messages = new_messages + [ AssistantMessage(content=str(next_solution), source=self.metadata["type"]), UserMessage(content=Reflexion_prompt_2, source=self.metadata["type"]), ] - response = await self.send_message(LLMMessageList(new_messages), AgentId('simple_reflect_agent', self.id.key)) + response = await self.send_message(LLMMessageList(new_messages), self.id) + next_solution = response.json_content + # next_solution = {'reflection': 'The previous code attempted to implement an ensemble approach with additional confidence estimation, but there were errors that needed addressing. Specifically:\n1. **Incorrect Use of `publish_message`:** The previously provided code misuses `self.publish_message()` in a context where the function signature might be misleading, as it requires `None` as its return.\n2. **Improper Handling of Topics and Message Types:** The correct usage for publishing and handling message types is essential, utilizing the proper `TopicId` syntax.\n3. **Incorrect Method for Calculating Confidence:** The confidence estimation implementation was overly simplistic, which could lead to skewed results. 
\n\nThe revised implementation corrects these issues and ensures compliance with best practices.', 'thought': '**Insights:**\nThe next iteration of the agent should refine on the concept of diversified reasoning by incorporating evaluative mechanisms within Worker Agents to self-assess their response confidence and determine when consensus should be approached collaboratively.\n\n**Overall Idea:**\nThe architecture can further benefit from introducing adaptive learning patterns, where Worker Agents adjust their reasoning strategies dynamically based on prior task ratings or other metadata. This enables a feedback loop that improves over time.\n\n**Implementation:**\n- Modify Worker Agents to give confidence ratings in their output.\n- Integrate an orchestrator that places more weight on outputs with higher confidence when synthesizing results.\n- Ensure message handling aligns with idiomatic usage of message types and topics, using `TopicId` properly.', 'name': 'Adaptive Diverse Ensemble', 'code': 'def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from collections import Counter\n from autogen_core.base import MessageContext, AgentId, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, message_handler, ClosureAgent, TypeSubscription\n from autogen_core.components.models import ChatCompletionClient, LLMMessage, SystemMessage, UserMessage\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\'model\'],\n api_version=model_client_kwargs[\'api_version\'],\n azure_endpoint=model_client_kwargs[\'azure_endpoint\'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True,\n },\n )\n\n @dataclass\n class DiverseThoughtTask:\n task: str\n\n @dataclass\n class DiverseThoughtResult:\n result: str\n confidence: float\n\n # Define Diverse Worker Agent\n class DiverseWorkerAgent(RoutedAgent):\n def __init__(self, description: str, model_client: ChatCompletionClient, instruction: str) -> None:\n super().__init__(description)\n self._model_client = model_client\n self._instruction = instruction\n\n @message_handler\n async def handle_task(self, message: DiverseThoughtTask, ctx: MessageContext) -> None:\n user_prompt = message.task + "\\n" + self._instruction\n model_result = await self._model_client.create([UserMessage(content=user_prompt, source="worker_agent")])\n confidence = self.estimate_confidence(model_result.content)\n assert isinstance(model_result.content, str)\n await self.publish_message(DiverseThoughtResult(result=model_result.content, confidence=confidence), \n topic_id=TopicId("worker_results", self.id.key))\n\n def estimate_confidence(self, text: str) -> float:\n # Improved placeholder for actual confidence estimation method\n # Here, we can use sentiment analysis or other processing as an example\n return min(1.0, max(0.0, len(text) / 100.0))\n\n # Orchestrator Agent for Consensus\n class OrchestratorAgent(RoutedAgent):\n def __init__(self) -> None:\n super().__init__("Orchestrator for Diverse Thoughts")\n\n @message_handler\n 
async def handle_task(self, message: DiverseThoughtTask, ctx: MessageContext) -> None:\n worker_ids = [AgentId("worker_1", ctx.id.key), AgentId("worker_2", ctx.id.key), AgentId("worker_3", ctx.id.key)]\n results = await asyncio.gather(*[self.send_message(message, worker_id) for worker_id in worker_ids])\n combined_result = self.evaluate_results(results)\n await self.publish_message(DiverseThoughtResult(result=combined_result, confidence=1.0), \n topic_id=TopicId("diverse_result", "orchestrator"))\n\n def evaluate_results(self, results: List[DiverseThoughtResult]) -> str:\n # Implement advanced evaluation, here just demonstrating a weighted result selection based on confidence\n confidences = Counter()\n for res in results:\n confidences[res.result] += res.confidence\n return max(confidences, key=confidences.get)\n\n async def main():\n # Create a queue to collect final answers\n queue = asyncio.Queue[DiverseThoughtResult]()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: DiverseThoughtResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Register workers with various strategies\n strategies = ["utilize strict logical reasoning", "incorporate probabilistic reasoning", "focus on evidence-based reasoning"]\n for i, strat in enumerate(strategies, start=1):\n await DiverseWorkerAgent.register(\n runtime, f"worker_{i}", lambda strat=strat: DiverseWorkerAgent(\n description=f"Diverse Worker {i}", model_client=model_client, instruction=strat\n )\n )\n\n # Register Orchestrator\n await OrchestratorAgent.register(runtime, "orchestrator")\n\n # Create closure agent to collect final output result\n result_topic = TypeSubscription(topic_type="diverse_result", agent_type="output_result")\n await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])\n\n # Start the runtime, and publish the first message\n runtime.start()\n await runtime.publish_message(\n message=DiverseThoughtTask(task=task),\n topic_id=TopicId("diverse", "orchestrator")\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the first answer from the queue\n return (await queue.get()).result\n\n return asyncio.run(main())\n'} + print(f"@@After Reflexion_prompt_2 {next_solution}") except Exception as e: # import pdb; pdb.set_trace() print("During LLM generate new solution:") print(e) + import pdb; pdb.set_trace() + n -= 1 continue - # TODO: Evaluate code - next_solution = response.json_content - print(f"final {str(next_solution)}") - import pdb; pdb.set_trace() - acc_list = evaluate_forward_fn(args, next_solution["code"]) - import pdb; pdb.set_trace() - - print("asdf") - # TODO: Maybe not... instantiate many agents to run eval. - # acc_list = await self.send_message(EvaluateTask(), self._evaluate_agent_id) + import pdb; pdb.set_trace() + acc_list = [] + for _ in range(args.debug_max): + print(f"DEBUGGING") + try: + acc_list = evaluate_forward_fn(args, next_solution["code"]) + if np.mean(acc_list) < 0.01 and SEARCHING_MODE: + raise Exception("All 0 accuracy") + break + except Exception as e: + print("During evaluation:") + print(e) + next_solution = response.json_content + # new_messages = [ + # AssistantMessage(content=str(next_solution), source=self.metadata["type"]), + # UserMessage(content=f"Error during evaluation:\n{e}\nCarefully consider where you went wrong in your latest implementation. 
Using insights from previous attempts, try to debug the current code to implement the same thought. Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", source=self.metadata["type"]), + # ] + new_messages = new_messages + [ + AssistantMessage(content=str(next_solution), source=self.metadata["type"]), + UserMessage(content=f"Error during evaluation:\n{e}\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", source=self.metadata["type"]), + ] + try: + response = await self.send_message(LLMMessageList(new_messages), self.id) + next_solution = response.json_content + except Exception as e: + print("During LLM generate new solution:") + print(e) + import pdb; pdb.set_trace() + continue + continue + if not acc_list: + n -= 1 + continue + + import pdb; pdb.set_trace() + fitness_str = bootstrap_confidence_interval(acc_list) + next_solution['fitness'] = fitness_str + next_solution['generation'] = n + 1 + if 'debug_thought' in next_solution: + del next_solution['debug_thought'] + if 'reflection' in next_solution: + del next_solution['reflection'] + archive.append(next_solution) + + # save results + os.makedirs(os.path.dirname(file_path), exist_ok=True) + with open(file_path, 'w') as json_file: + json.dump(archive, json_file, indent=4) async def main(args) -> None: runtime = SingleThreadedAgentRuntime() @@ -418,22 +437,12 @@ async def main(args) -> None: archive = get_init_archive() system_prompt, prompt = get_prompt(archive) - # Create the reflect agent - await SimpleReflectAgent.register( - runtime, "simple_reflect_agent", lambda: SimpleReflectAgent( - description='Simple Reflect Agent', - model_client=client, - system_prompt=system_prompt, - ) - ) - await ADASAgent.register( runtime, "adas_agent", lambda: ADASAgent( model_client=client, + system_prompt=system_prompt, args=args, archive=archive, - reflect_agent_type='simple_reflect_agent', - executor_agent_type='executor', ) ) diff --git a/python/packages/autogen-core/samples/common/adas/adas_prompt.py b/python/packages/autogen-core/samples/common/adas/adas_prompt.py index 79a1342895b7..0b577fbf2409 100644 --- a/python/packages/autogen-core/samples/common/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/common/adas/adas_prompt.py @@ -37,7 +37,7 @@ COT = { "thought": "By encouraging the LLM to think step by step rather than directly outputting an answer, chain-of-thought reasoning enables complex problem-solving through intermediate steps. This practice improves the model's ability to handle tasks that require deeper reasoning and provides insight into its decision-making process.", "name": "Chain-of-Thought", - "code": """def forward(self, task, agent_model_kwargs): + "code": """def forward(self, task, model_client_kwargs): import asyncio import logging import json @@ -60,9 +60,9 @@ # Create an AzureOpenAI model client. 
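     # (Note: assumes Azure AD credentials are available in the environment; the
     # token_provider defined above exchanges them for bearer tokens scoped to
     # cognitiveservices.azure.com.)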
model_client = AzureOpenAIChatCompletionClient( - model=agent_model_kwargs['model'], - api_version=agent_model_kwargs['api_version'], - azure_endpoint=agent_model_kwargs['azure_endpoint'], + model=model_client_kwargs['model'], + api_version=model_client_kwargs['api_version'], + azure_endpoint=model_client_kwargs['azure_endpoint'], azure_ad_token_provider=token_provider, model_capabilities={ "vision": True, @@ -116,6 +116,7 @@ async def handle_task(self, message: ChainOfThoughtTask, ctx: MessageContext) -> # Define the main function to set up and run the agent system async def main(): + # Create a queue to collect final answer queue = asyncio.Queue[FinalResult]() async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResult, ctx: MessageContext) -> None: await queue.put(message) @@ -134,11 +135,15 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResul instruction=cot_instruction, ) ) + # Create closure agent to collect final output result await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [DefaultSubscription()]) + # Start the runtime, and publish the first message runtime.start() initial_message = ChainOfThoughtTask(task=task) await runtime.send_message(initial_message, agent_id) # publish_message + + # Keep processing messages until idle. await runtime.stop_when_idle() # Return the first answer from the queue @@ -151,7 +156,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResul COT_SC = { "thought": "While an LLM can arrive at the correct answer, its reasoning may vary. By repeatedly asking the same question with high temperature settings, we can generate different reasoning paths. We then combine multiple answers from these Chain-of-Thought (CoT) agents to produce a more accurate final answer through ensembling.", "name": "Self-Consistency with Chain-of-Thought", - "code": """def forward(self, task, agent_model_kwargs): + "code": """def forward(self, task, model_client_kwargs): import asyncio import logging import json @@ -174,9 +179,9 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResul # Create an AzureOpenAI model client. model_client = AzureOpenAIChatCompletionClient( - model=agent_model_kwargs['model'], - api_version=agent_model_kwargs['api_version'], - azure_endpoint=agent_model_kwargs['azure_endpoint'], + model=model_client_kwargs['model'], + api_version=model_client_kwargs['api_version'], + azure_endpoint=model_client_kwargs['azure_endpoint'], azure_ad_token_provider=token_provider, model_capabilities={ "vision": True, @@ -284,20 +289,24 @@ async def main(): # Initialize the agent runtime runtime = SingleThreadedAgentRuntime() + # Create the agents cot_instruction = "Please think step by step and then solve the task." 
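     # A single "worker" agent type is registered below; the orchestrator fans the task
     # out to five instances of it (worker_agent_types=["worker"] * 5) and ensembles
     # their answers across num_layers=1 aggregation layer.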
await WorkerAgent.register( - runtime, "worker", lambda: WorkerAgent(model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"), instruction=cot_instruction) + runtime, "worker", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction) ) await OrchestratorAgent.register( runtime, "orchestrator", lambda: OrchestratorAgent( - model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"), worker_agent_types=["worker"] * 5, num_layers=1 + model_client=model_client, worker_agent_types=["worker"] * 5, num_layers=1 ), ) + # Start the runtime, and publish the first message runtime.start() result = await runtime.send_message(UserTask(task=task), AgentId("orchestrator", "default")) + + # Return the result return result.result return asyncio.run(main()) @@ -307,7 +316,7 @@ async def main(): Reflexion = { "thought": "To enhance its performance, an LLM can iteratively improve its answer based on feedback. By reflecting on its previous attempts and incorporating feedback, the model can refine its reasoning and provide a more accurate solution.", "name": "Self-Refine (Reflexion)", - "code": '''def forward(self, task, agent_model_kwargs): + "code": '''def forward(self, task, model_client_kwargs): import asyncio import json import logging @@ -334,9 +343,9 @@ async def main(): # Create an AzureOpenAI model client. model_client = AzureOpenAIChatCompletionClient( - model=agent_model_kwargs['model'], - api_version=agent_model_kwargs['api_version'], - azure_endpoint=agent_model_kwargs['azure_endpoint'], + model=model_client_kwargs['model'], + api_version=model_client_kwargs['api_version'], + azure_endpoint=model_client_kwargs['azure_endpoint'], azure_ad_token_provider=token_provider, model_capabilities={ "vision": True, @@ -409,8 +418,6 @@ async def handle_writing_task(self, message: WritingTask, ctx: MessageContext) - assert isinstance(response.content, str) # Extract the answer from the response. answer = self._extract_answer(response.content) - if answer is None: - raise ValueError("Answer not found.") # Create a review task. review_task = ReviewTask( session_id=session_id, @@ -468,8 +475,6 @@ async def handle_review_result(self, message: ReviewResult, ctx: MessageContext) assert isinstance(response.content, str) # Extract the answer from the response. answer = self._extract_answer(response.content) - if answer is None: - raise ValueError("Answer not found.") # Create a new review task. review_task = ReviewTask( session_id=message.session_id, @@ -564,21 +569,27 @@ async def handle_review_task(self, message: ReviewTask, ctx: MessageContext) -> # Define the main function to set up and run the agent system async def main(): + # Create a queue to collect final answer queue = asyncio.Queue[WritingResult]() async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingResult, ctx: MessageContext) -> None: await queue.put(message) + # Initialize the agent runtime runtime = SingleThreadedAgentRuntime() + + # Create agents await ReviewerAgent.register( - runtime, "ReviewerAgent", lambda: ReviewerAgent(model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini")) + runtime, "ReviewerAgent", lambda: ReviewerAgent(model_client=model_client) ) cot_instruction = "Please think step by step and then solve the task." 
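     # The worker drafts an answer and the reviewer critiques it; the two agents
     # iterate until the review is approved and a WritingResult is published.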
await WorkerAgent.register(
-        runtime, "WorkerAgent", lambda: WorkerAgent(model_client=get_chat_completion_client_from_envs(model="gpt-4o-mini"), instruction=cot_instruction)
+        runtime, "WorkerAgent", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction)
     )
+    # Create closure agent to collect final output result
     result_topic = TypeSubscription(topic_type="result", agent_type="output_result")
     await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])
 
+    # Start the runtime, and publish the first message
     runtime.start()
     await runtime.publish_message(
         message=WritingTask(task=task),
@@ -588,7 +599,8 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes
     # Keep processing messages until idle.
     await runtime.stop_when_idle()
 
-    # Return the answer from the queue
+    # Return the first answer from the queue
+    print(f"queue {queue}")
     return (await queue.get()).answer
 
 return asyncio.run(main())
@@ -721,7 +733,13 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes
 """
 }
 
-system_prompt = """You are a helpful assistant. Make sure to return in a WELL-FORMED JSON object."""
+system_prompt = lambda formatted_documentation: f"""You are a helpful assistant. You have an expert understanding of the AutoGen framework and how to use the Python API. The API documentation is as follows:
+
+{formatted_documentation}
+
+This is the end of the documentation.
+
+Make sure to return in a WELL-FORMED JSON object. Do not add any code blocks around the JSON object."""
 
 base = """# Overview
 You are an expert machine learning researcher testing various agentic systems. Your objective is to design building blocks such as prompts and control flows within these systems to solve complex tasks. Your aim is to design an optimal agent performing well on the Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs (DROP), which assesses the ability to perform discrete reasoning and comprehend detailed information across multiple paragraphs.
@@ -738,176 +756,23 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes
 # The utility code:
 
 ```python
-from collections import namedtuple
-from typing import Union
-import numpy as np
-import json
-
-import openai
-import backoff
-from utils import random_id
-
-# Initialize the OpenAI client
-client = openai.OpenAI()
-# Named tuple for holding task information
 Info = namedtuple('Info', ['name', 'author', 'content', 'iteration_idx'])
-# Format instructions for LLM response
-FORMAT_INST = lambda request_keys: f"Reply EXACTLY with the following JSON format.\n{str(request_keys)}\nDO NOT MISS ANY FIELDS AND MAKE SURE THE JSON FORMAT IS CORRECT!\n"
-
-# Description of the role for the LLM
-ROLE_DESC = lambda role: f"You are a {role}."
-
-@backoff.on_exception(backoff.expo, openai.RateLimitError)
-def get_json_response_from_gpt(msg, model, system_message, temperature=0.5):
-    \"""
-    Function to get JSON response from GPT model.
-
-    Args:
-    - msg (str): The user message.
-    - model (str): The model to use.
-    - system_message (str): The system message.
-    - temperature (float): Sampling temperature.
-
-    Returns:
-    - dict: The JSON response. 
- \""" - response = client.chat.completions.create( - model=model, - messages=[ - {"role": "system", "content": system_message}, - {"role": "user", "content": msg}, - ], - temperature=temperature, - max_tokens=1024, - stop=None, - response_format={"type": "json_object"} - ) - content = response.choices[0].message.content - json_dict = json.loads(content) - return json_dict - -class LLMAgentBase: - \""" - Base class for an LLM agent. - - Attributes: - - output_fields (list): Fields expected in the output. - - agent_name (str): Name of the agent. - - role (str): Role description for the agent. - - model (str): Model to be used. (option. Keep it default.) - - temperature (float): Sampling temperature. - - id (str): Unique identifier for the agent instance. - \""" - - def __init__(self, output_fields: list, agent_name: str, role='helpful assistant', model='gpt-3.5-turbo-0125', temperature=0.5) -> None: - self.output_fields = output_fields - self.agent_name = agent_name - self.role = role - self.model = model - self.temperature = temperature - self.id = random_id() - - def generate_prompt(self, input_infos, instruction) -> str: - \""" - Generates a prompt for the LLM. - - Args: - - input_infos (list): List of input information. - - instruction (str): Instruction for the task. - - Returns: - - tuple: System prompt and user prompt. - - An example of a generated prompt: - "" - You are a helpful assistant. - - # Output Format: - Reply EXACTLY with the following JSON format. - ... - - # Your Task: - You will be given some number of paired example inputs and outputs. The outputs ... - - ### thinking #1 by Chain-of-Thought Agent hkFo (yourself): - ... - - ### code #1 by Chain-of-Thought Agent hkFo (yourself): - ... - - ### answer by Chain-of-Thought Agent hkFo's code evaluator:... - - - # Instruction: - Please think step by step and then solve the task by writing the code. - "" - \""" - output_fields_and_description = {key: f"Your {key}." if not 'answer' in key else f"Your {key}. Return ONLY the alphabet choice, i.e. A or B or C or D." for key in self.output_fields} - system_prompt = ROLE_DESC(self.role) + "\n\n" + FORMAT_INST(output_fields_and_description) - - input_infos_text = '' - for input_info in input_infos: - if isinstance(input_info, Info): - (field_name, author, content, iteration_idx) = input_info - else: - continue - if author == self.__repr__(): - author += ' (yourself)' - if field_name == 'task': - input_infos_text += f'# Your Task:\n{content}\n\n' - elif iteration_idx != -1: - input_infos_text += f'### {field_name} #{iteration_idx+1} by {author}:\n{content}\n\n' - else: - input_infos_text += f'### {field_name} by {author}:\n{content}\n\n' - - prompt = input_infos_text + instruction - return system_prompt, prompt - - def query(self, input_infos: list, instruction, iteration_idx=-1) -> list[Info]: - \""" - Queries the LLM with provided input information and instruction. - - Args: - - input_infos (list): List of input information. - - instruction (str): Instruction for the task. - - iteration_idx (int): Iteration index for the task. - - Returns: - - output_infos (list[Info]): Output information. 
- \""" - system_prompt, prompt = self.generate_prompt(input_infos, instruction) - response_json = get_json_response_from_gpt(prompt, self.model, system_prompt, self.temperature) - - output_infos = [] - for key, value in response_json.items(): - info = Info(key, self.__repr__(), value, iteration_idx) - output_infos.append(info) - return output_infos - - def __repr__(self): - return f"{self.agent_name} {self.id}" - - def __call__(self, input_infos: list, instruction, iteration_idx=-1): - # Note: - # The output of the LLM is a list of Info. If you are only querying one output, you should access it with [0]. - # It is a good practice to always include 'thinking' in the output. - return self.query(input_infos, instruction, iteration_idx=iteration_idx) - class AgentArchitecture: \""" Fill in your code here. \""" - def forward(self, taskInfo) -> Union[Info, str]: + def forward(self, task, model_client_kwargs) -> str: \""" Placeholder method for processing task information. Args: - - taskInfo (Info): Task information. + - task (Info): Task information. + - model_client_kwargs (Dict): Information for the AzureOpenAIChatCompletionClient Returns: - - Answer (Union[Info, str]): Your FINAL Answer. Return either a namedtuple Info or a string of answers. + - Answer (str): Your FINAL Answer. Return a string of answers. \""" pass ``` @@ -928,61 +793,300 @@ def forward(self, taskInfo) -> Union[Info, str]: [EXAMPLE] You must use the exact function interface used above. You need to specify the instruction, input information, and the required output fields for various LLM agents to do their specific part of the architecture. -Also, it could be helpful to set the LLM’s role and temperature to further control the LLM’s response. Note that the LLMAgentBase() will automatically parse the output and return a list of “Infos”. You can get the content by Infos.content. -DO NOT FORGET the taskInfo input to LLM if you think it is needed, otherwise LLM will not know about the task. +Also, it could be helpful to set the LLM’s role to further control the LLM’s response. +DO NOT FORGET the `task` input to LLM if you think it is needed, otherwise LLM will not know about the task. ## WRONG Implementation examples: Here are some mistakes you may make: 1. This is WRONG: ``` -feedback, correct = critic_agent([taskInfo, thinking, answer], critic_instruction, i) -feedback_info = verifier_agent([taskInfo, Info('feedback', 'Critic Agent', thinking, 0)], verification_instruction) + +@default_subscription +class WorkerAgent(RoutedAgent): + def __init__(self): + pass + + @message_handler + async def handle_writing_task(self, message: WritingTask, ctx: MessageContext) -> None: + pass + +async def main(): + # Create a queue to collect final answer + queue = asyncio.Queue[FinalResult]() + async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResult, ctx: MessageContext) -> None: + await queue.put(message) + + runtime = SingleThreadedAgentRuntime() + await ReviewerAgent.register( + runtime, "ReviewerAgent", lambda: ReviewerAgent(model_client=model_client) + ) + cot_instruction = "Please think step by step and then solve the task." 
+ await WorkerAgent.register( + runtime, "WorkerAgent", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction) + ) + # Create closure agent to collect final output result + await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [DefaultSubscription()]) + + runtime.start() + await runtime.publish_message( + message=WritingTask(task=task), + topic_id=DefaultTopicId(), + ) + + # Keep processing messages until idle. + await runtime.stop_when_idle() + + # Return the answer from the queue + return (await queue.get()).answer + +return asyncio.run(main()) ``` -It is wrong to use "Info('feedback', 'Critic Agent', thinking, 0)". The returned "feedback" from LLMAgentBase is already Info. +Because the WorkerAgent is subscribed to the `@default_subscription` topic, then there will be conflicts for the ClosureAgent to collect the WritingResult from the same default subscription. Create a new topic using TypeSubscription(topic_type="result", agent_type="output_result") to make this work. 2. This is WRONG: ``` -# Debugging: Log the generated answer -print('Generated Answer:', ...) -feedback_info = verifier_agent([taskInfo, Info('feedback', 'Critic Agent', thinking, 0)], verification_instruction) -if len(feedback_info) < 3: # Check if feedback_info has enough elements - return 'Error: Feedback info incomplete' +async def main(): + + # Initialize the agent runtime + runtime = SingleThreadedAgentRuntime() + + # Create the agents + cot_instruction = "Please think step by step and then solve the task." + await WorkerAgent.register( + runtime, "worker", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction) + ) + await OrchestratorAgent.register( + runtime, + "orchestrator", + lambda: OrchestratorAgent( + model_client=model_client, worker_agent_types=["worker"] * 5, num_layers=1 + ), + ) + + # Start the runtime, and publish the first message + runtime.start() + result = await runtime.send_message(UserTask(task=task), AgentId("orchestrator", "default")) + + # Return the result + return result.result + +return main() ``` -First, the len(feedback_info) will not work. -Second, you should never return an error message. You should always return the best answer you can get. -Third, you should never print anything in the code. -Lastly, again, DO NOT CREATE Info object by yourself. +The `main()` function needs to be called with `asyncio.run(main())` 3. 
This is WRONG: ``` -all_thinking = [] -all_answers = [] -for agent, role in zip(agents, roles): - outputs = agent([taskInfo], independent_reasoning_instruction.format(role=role)) - all_thinking.append(outputs[0].content) - all_answers.append(outputs[1].content) - -# Aggregate the reasoning paths and answers -aggregated_thinking = '\n'.join(all_thinking) -aggregated_answers = '\n'.join(all_answers) +# Define the Chain-of-Thought Agent +class ChainOfThoughtAgent(RoutedAgent): + def __init__(self, description: str, + model_client: ChatCompletionClient, + system_prompt: str, + instruction: str, + ) -> None: + super().__init__(description) + self._system_messages: List[LLMMessage] = [ + SystemMessage( + content=system_prompt, + ) + ] + self._model_client = model_client + self._instruction = instruction + + @message_handler + async def handle_task(self, message: ChainOfThoughtTask, ctx: MessageContext) -> FinalResult: + + logging.info(f"{self._description} received message: {message.task}") + user_prompt = message.task + "\\n" + self._instruction + msgs = self._system_messages + [UserMessage(content=user_prompt, source=self.metadata["type"])] + model_result = await self._model_client.create(msgs) + assert isinstance(model_result.content, str) + + await self.publish_message( + message=FinalResult(model_result.content), + topic_id=DefaultTopicId(), + ) ``` -You SHOULD NOT extract the content from the Info object by yourself. You should use the Info object directly. If you want to aggregate the content, you should just put those Info objects into a list and then use the list as input to the next LLM agent. +Any call with `self.publish_message()` will always return None, so make sure to set the output type of the `handle_task` function as `None`. Example: `async def handle_task(self, message: ChainOfThoughtTask, ctx: MessageContext) -> None:`. 4. This is WRONG: ``` -reasoning_agent = LLMAgentBase(['thinking', 'answer'], 'Reasoning Agent') -response_infos = reasoning_agent([taskInfo] + ..., reasoning_instruction) +class OrchestratorAgent(RoutedAgent): + def __init__( + self, + model_client: ChatCompletionClient, + worker_agent_types: List[str], + num_layers: int, + ) -> None: + super().__init__(description="Aggregator Agent") + self._model_client = model_client + self._worker_agent_types = worker_agent_types + self._num_layers = num_layers + + + @message_handler + async def handle_task(self, message: UserTask, ctx: MessageContext) -> None: + print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}") + # Create task for the first layer. + worker_task = WorkerTask(task=message.task, previous_results=[]) + # Iterate over layers. + for i in range(self._num_layers): + # Assign workers for this layer. + worker_ids = [ + AgentId(worker_type, f"{self.id.key}/layer_{i}/worker_{j}") + for j, worker_type in enumerate(self._worker_agent_types) + ] + # Dispatch tasks to workers. + print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}") + results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids]) + print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}") + # Prepare task for the next layer. + worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results]) + # Perform final aggregation. 
+ print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation") + # system_prompt = "You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:" + system_prompt = "Given all the above solutions, reason over them carefully and provide a final answer." + system_prompt += "\\n" + "\\n\\n".join([f"{i+1}. {r}" for i, r in enumerate(worker_task.previous_results)]) + model_result = await self._model_client.create( + [SystemMessage(system_prompt), UserMessage(content=message.task, source="user")] + ) + assert isinstance(model_result.content, str) + return FinalResult(result=model_result.content) +``` +Directly returning a message dataclass `FinalResult` requires setting the return type of the `handle_task` function to return `FinalResult`. Example: `async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:`. + +5. This is WRONG: ``` + # Main orchestration + async def main(): + runtime = SingleThreadedAgentRuntime() + + # Register agents + await RetrieverAgent.register(runtime, "retriever_agent") + await ValidatorAgent.register(runtime, "validator_agent") + await ReasoningAgent.register(runtime, "reasoning_agent", lambda: ReasoningAgent(model_client=model_client)) + + # Start runtime + runtime.start() + task_data = task.content if isinstance(task, Info) else task # Assuming task contains raw question + await runtime.publish_message(task_data, AgentId("retriever_agent", "default")) + + # Stop when idle + await runtime.stop_when_idle() + + return asyncio.run(main()) +``` +The first argument into `publish_message` or `send_message` should not be an `Info` object or any other object. It must be a Message dataclass, which has the format similar to: ``` +@dataclass +class Message: + content: str +``` + +6. This is WRONG: ``` +await ctx.publish(AdaptiveResult(result=response.content), topic_id=ctx.default_topic_id()) +``` +Publishing should be called with `self.publish_message()`. + +7. This is WRONG: ``` +await ClosureAgent.register(runtime, "final_collection", collect_final_result, subscriptions=[TypeSubscription("consensus_result", "consensus_agent")]) +``` +The argument passed to `subscriptions` should not be a list. It should be a lambda function to a list. For example: ``` +await ClosureAgent.register(runtime, "final_collection", collect_final_result, subscriptions=lambda: [TypeSubscription("consensus_result", "consensus_agent")]) +``` + +8. This is WRONG: ``` +await runtime.publish_message(Task(content='What is the highest mountain in the world?'), topic_id=TypeSubscription("initial_task", "worker_agent").topic_id()) +``` +The `topic_id` needs to be a `TopicId` with or `DefaultTopicId` object. 
For example: ``` +await runtime.publish_message(Task(content='What is the highest mountain in the world?'), topic_id=TopicId(topic_type, source=self.id.key)) +``` +or ``` +await runtime.publish_message(Task(content='What is the highest mountain in the world?'), topic_id=TopicId(user_topic_type, source=session_id)) +``` +or ``` +await runtime.publish_message(Task(content='What is the highest mountain in the world?'), topic_id=DefaultTopicId()) +``` + +9. This is WRONG: ``` +await OrchestratorAgent.register(runtime, "orchestrator") +``` +You will encounter this error "TypeError: BaseAgent.register() missing 1 required positional argument: 'factory'". The correct solution is: ``` +await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent()) +``` + +10. This is WRONG: ``` +class OrchestratorAgent(RoutedAgent): + pass -# Extract the final answer from the response_infos -for info in response_infos: - if info.name == 'final_answer': - return info -# Fallback if no answer is found -return Info('answer', 'Final Decision Agent', 'No answer generated.', 0) +async def main(): + await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent()) + + await runtime.publish_message( + message=DiverseThoughtTask(task='What is the most creative art medium?'), + topic_id=TopicId("diverse", "orchestrator") + ) +``` +You must register subscriptions with the agent runtime through the `add_subscription` method. +``` +async def main(): + await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent()) + await runtime.add_subscription(TypeSubscription("orchestrator_type", "orchestrator")) + + await runtime.publish_message( + message=DiverseThoughtTask(task='What is the most creative art medium?'), + topic_id=TopicId(type="orchestrator_type") + ) ``` -You should not extract the final answer by yourself. You SHOULD directly return the answer Info. Also, you should always return the best answer you can get. -CORRECT example: ``` -reasoning_agent = LLMAgentBase(['thinking', 'answer'], 'Reasoning Agent') -thinking, answer = reasoning_agent([taskInfo] + ..., reasoning_instruction) -return answer +Now, you can publish directly to a specific topic through the runtime. + +## CORRECT Implementation examples: +Here are some correct patterns you should follow: + +1. This is CORRECT: ``` +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") + +# Create an AzureOpenAI model client. +model_client = AzureOpenAIChatCompletionClient( + model=model_client_kwargs['model'], + api_version=model_client_kwargs['api_version'], + azure_endpoint=model_client_kwargs['azure_endpoint'], + azure_ad_token_provider=token_provider, + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, +) ``` +This creates the model client using the `model_client_kwargs` dictionary. + +2.
This is CORRECT: ``` + async def main(): + # Create a queue to collect final answer + queue = asyncio.Queue[WritingResult]() + async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingResult, ctx: MessageContext) -> None: + await queue.put(message) + + # Initialize the agent runtime + runtime = SingleThreadedAgentRuntime() + + # Create agents + + # Create closure agent to collect final output result + result_topic = TypeSubscription(topic_type="result", agent_type="output_result") + await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + + # Start the runtime, and publish the first message + runtime.start() + await runtime.publish_message() + + # Keep processing messages until idle. + await runtime.stop_when_idle() + + # Return the first answer from the queue + print(f"queue {queue}") + return (await queue.get()).answer + + return asyncio.run(main()) +``` +This is the format for the `main` function. Make sure that when creating a `ClosureAgent`, you have created `queue` from which you can call `return (await queue.get()).answer` at the very end of the `main` function. The datatype of the Queue should be the final message that the agent system publishes to indicate that the system is terminating. +The `result_topic` should have a unique `topic_type`, which can be called "result". # Your task You are deeply familiar with prompting techniques and the agent works from the literature. Your goal is to maximize the specified performance metrics by proposing interestingly new agents. @@ -992,6 +1096,9 @@ def forward(self, taskInfo) -> Union[Info, str]: THINK OUTSIDE THE BOX. """ +# Documentation: https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/docs/src/user-guide/core-user-guide + + Reflexion_prompt_1 = f""""[EXAMPLE]Carefully review the proposed new architecture and reflect on the following points: 1. **Interestingness**: Assess whether your proposed architecture is interesting or innovative compared to existing methods in the archive. If you determine that the proposed architecture is not interesting, suggest a new architecture that addresses these shortcomings. 
@@ -1029,7 +1136,8 @@ def forward(self, taskInfo) -> Union[Info, str]: def get_init_archive(): # return [COT]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] # return [COT_SC]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] - return [Reflexion]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] + # return [Reflexion]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] + return [COT, COT_SC, Reflexion] # LLM_debate, Take_a_step_back, QD, Role_Assignment] From 4b4a713620cc4893622de92ad7bad06d24bf6ca8 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 22 Nov 2024 15:31:58 -0500 Subject: [PATCH 03/21] update --- .../autogen-core/samples/common/adas/adas.py | 2 +- .../samples/common/adas/adas_prompt.py | 330 ++++++++++++++++-- 2 files changed, 302 insertions(+), 30 deletions(-) diff --git a/python/packages/autogen-core/samples/common/adas/adas.py b/python/packages/autogen-core/samples/common/adas/adas.py index 7bd4cc2b7791..ff7c21666c33 100644 --- a/python/packages/autogen-core/samples/common/adas/adas.py +++ b/python/packages/autogen-core/samples/common/adas/adas.py @@ -71,7 +71,7 @@ def print_repo_contents(repo, path="", indent=""): if content_file.type == "dir": documentation.extend(print_repo_contents(repo, content_file.path, indent + "│ ")) else: - if content_file.download_url.endswith('.md'): + if content_file.download_url.endswith('.md') or content_file.download_url.endswith('.ipynb'): print(f"Reading file from {content_file.download_url}") f = read_github_file(content_file.download_url) documentation.append("Title: " + content_file.name + "\nContents:\n" + f) diff --git a/python/packages/autogen-core/samples/common/adas/adas_prompt.py b/python/packages/autogen-core/samples/common/adas/adas_prompt.py index 0b577fbf2409..11889c088883 100644 --- a/python/packages/autogen-core/samples/common/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/common/adas/adas_prompt.py @@ -610,39 +610,262 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes LLM_debate = { "thought": "By letting different LLMs debate with each other, we can leverage their diverse perspectives to find better solutions for tasks.", "name": "LLM Debate", - "code": """def forward(self, taskInfo): - # Instruction for initial reasoning - debate_initial_instruction = "Please think step by step and then solve the task." 
+ "code": '''def forward(self, task, model_client_kwargs): + import asyncio + import json + import logging + import re + import sys + import uuid + from dataclasses import dataclass + from typing import Dict, List, Union + from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime + from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription + from autogen_core.components.models import ( + AssistantMessage, + ChatCompletionClient, + LLMMessage, + SystemMessage, + UserMessage, + ) + from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent + from autogen_ext.models import AzureOpenAIChatCompletionClient + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + + token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") - # Instruction for debating and updating the solution based on other agents' solutions - debate_instruction = "Given solutions to the problem from other agents, consider their opinions as additional advice. Please think carefully and provide an updated answer." + # Create an AzureOpenAI model client. + model_client = AzureOpenAIChatCompletionClient( + model=model_client_kwargs['model'], + api_version=model_client_kwargs['api_version'], + azure_endpoint=model_client_kwargs['azure_endpoint'], + azure_ad_token_provider=token_provider, + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, + ) - # Initialize debate agents with different roles and a moderate temperature for varied reasoning - debate_agents = [LLMAgentBase(['thinking', 'answer'], 'Debate Agent', temperature=0.8, role=role) for role in ['Reading Comprehension Specialist', 'Logical Reasoning Strategist', 'Multidisciplinary Knowledge Integrator']] + @dataclass + class Question: + content: str - # Instruction for final decision-making based on all debates and solutions - final_decision_instruction = "Given all the above thinking and answers, reason over them carefully and provide a final answer." - final_decision_agent = LLMAgentBase(['thinking', 'answer'], 'Final Decision Agent', temperature=0.1) - max_round = 2 # Maximum number of debate rounds - all_thinking = [[] for _ in range(max_round)] - all_answer = [[] for _ in range(max_round)] + @dataclass + class Answer: + content: str + + + @dataclass + class SolverRequest: + content: str + question: str + + + @dataclass + class IntermediateSolverResponse: + content: str + question: str + answer: str + round: int + + + @dataclass + class FinalSolverResponse: + answer: str + + @default_subscription + class Solver(RoutedAgent): + def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neighbors: int, max_round: int) -> None: + super().__init__("A debator.") + self._topic_type = topic_type + self._model_client = model_client + self._num_neighbors = num_neighbors + self._history: List[LLMMessage] = [] + self._buffer: Dict[int, List[IntermediateSolverResponse]] = {} + self._system_messages = [ + SystemMessage( + ( + "You are a helpful assistant with expertise in reasoning. " + "Your task is to assist in solving a reasoning problem by providing " + "a clear and detailed solution. Limit your output within 100 words, " + "and your final answer should be a single string." 
+ ) + ) + ] + self._round = 0 + self._max_round = max_round - # Perform debate rounds - for r in range(max_round): - for i in range(len(debate_agents)): - if r == 0: - thinking, answer = debate_agents[i]([taskInfo], debate_initial_instruction) + @message_handler + async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None: + # Add the question to the memory. + self._history.append(UserMessage(content=message.content, source="user")) + # Make an inference using the model. + model_result = await self._model_client.create(self._system_messages + self._history) + assert isinstance(model_result.content, str) + # Add the response to the memory. + self._history.append(AssistantMessage(content=model_result.content, source=self.metadata["type"])) + print(f"{'-'*80}\\nSolver {self.id} round {self._round}:\\n{model_result.content}") + # Increment the counter. + self._round += 1 + if self._round == self._max_round: + # If the counter reaches the maximum round, publishes a final response. + await self.publish_message(FinalSolverResponse(answer=model_result.content), topic_id=DefaultTopicId()) else: - input_infos = [taskInfo] + [all_thinking[r-1][i]] + all_thinking[r-1][:i] + all_thinking[r-1][i+1:] - thinking, answer = debate_agents[i](input_infos, debate_instruction) - all_thinking[r].append(thinking) - all_answer[r].append(answer) - - # Make the final decision based on all debate results and solutions - thinking, answer = final_decision_agent([taskInfo] + all_thinking[max_round-1] + all_answer[max_round-1], final_decision_instruction) - return answer -""" + # Publish intermediate response to the topic associated with this solver. + print("publish IntermediateSolverResponse") + await self.publish_message( + IntermediateSolverResponse( + content=model_result.content, + question=message.question, + answer=model_result.content, + round=self._round, + ), + topic_id=DefaultTopicId(type=self._topic_type), + ) + + @message_handler + async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None: + # Add neighbor's response to the buffer. + self._buffer.setdefault(message.round, []).append(message) + # Check if all neighbors have responded. + if len(self._buffer[message.round]) == self._num_neighbors: + print( + f"{'-'*80}\\nSolver {self.id} round {message.round}:\\nReceived all responses from {self._num_neighbors} neighbors." + ) + # Prepare the prompt for the next question. + prompt = "These are the solutions to the problem from other agents:\\n" + for resp in self._buffer[message.round]: + prompt += f"One agent solution: {resp.content}\\n" + prompt += ( + "Using the solutions from other agents as additional information, " + "can you provide your answer to the problem? " + f"The original problem is {message.question}. " + "Your final answer should be a single string." + ) + # Send the question to the agent itself to solve. + await self.send_message(SolverRequest(content=prompt, question=message.question), self.id) + # Clear the buffer. 
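+ # (The buffered neighbor responses for this round were already folded into the follow-up SolverRequest above, so they can be dropped.)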
+ self._buffer.pop(message.round) + + + @default_subscription + class Aggregator(RoutedAgent): + def __init__(self, num_solvers: int) -> None: + super().__init__("Aggregator") + self._num_solvers = num_solvers + self._buffer: List[FinalSolverResponse] = [] + + @message_handler + async def handle_question(self, message: Question, ctx: MessageContext) -> None: + print(f"{'-'*80}\\nAggregator {self.id} received question:\\n{message.content}") + prompt = ( + f"Can you solve the following problem?\\n{message.content}\\n" + "Explain your reasoning. Your final answer should be a single string." + ) + print(f"{'-'*80}\\nAggregator {self.id} publishes initial solver request.") + await self.publish_message(SolverRequest(content=prompt, question=message.content), topic_id=DefaultTopicId()) + + @message_handler + async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None: + self._buffer.append(message) + if len(self._buffer) == self._num_solvers: + print(f"{'-'*80}\\nAggregator {self.id} received all final answers from {self._num_solvers} solvers.") + # Find the majority answer. + answers = [resp.answer for resp in self._buffer] + majority_answer = max(set(answers), key=answers.count) + # Publish the aggregated response. + await self.publish_message(Answer(content=majority_answer), topic_id=TopicId("result", self.id.key)) + # Clear the responses. + self._buffer.clear() + print(f"{'-'*80}\\nAggregator {self.id} publishes final answer:\\n{majority_answer}") + + + # Define the main function to set up and run the agent system + async def main(): + queue = asyncio.Queue[Answer]() + async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None: + await queue.put(message) + + runtime = SingleThreadedAgentRuntime() + await Solver.register( + runtime, + "SolverA", + lambda: Solver( + model_client=model_client, + topic_type="SolverA", + num_neighbors=2, + max_round=3, + ), + ) + await Solver.register( + runtime, + "SolverB", + lambda: Solver( + model_client=model_client, + topic_type="SolverB", + num_neighbors=2, + max_round=3, + ), + ) + await Solver.register( + runtime, + "SolverC", + lambda: Solver( + model_client=model_client, + topic_type="SolverC", + num_neighbors=2, + max_round=3, + ), + ) + await Solver.register( + runtime, + "SolverD", + lambda: Solver( + model_client=model_client, + topic_type="SolverD", + num_neighbors=2, + max_round=3, + ), + ) + await Aggregator.register(runtime, "Aggregator", lambda: Aggregator(num_solvers=4)) + + # Subscriptions for topic published to by SolverA. + await runtime.add_subscription(TypeSubscription("SolverA", "SolverD")) + await runtime.add_subscription(TypeSubscription("SolverA", "SolverB")) + + # Subscriptions for topic published to by SolverB. + await runtime.add_subscription(TypeSubscription("SolverB", "SolverA")) + await runtime.add_subscription(TypeSubscription("SolverB", "SolverC")) + + # Subscriptions for topic published to by SolverC. + await runtime.add_subscription(TypeSubscription("SolverC", "SolverB")) + await runtime.add_subscription(TypeSubscription("SolverC", "SolverD")) + + # Subscriptions for topic published to by SolverD. + await runtime.add_subscription(TypeSubscription("SolverD", "SolverC")) + await runtime.add_subscription(TypeSubscription("SolverD", "SolverA")) + + # All solvers and the aggregator subscribe to the default topic. 
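+ # (No explicit add_subscription call is needed for this: the @default_subscription decorator on Solver and Aggregator already subscribes them to the default topic.)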
+ + result_topic = TypeSubscription(topic_type="result", agent_type="output_result") + await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + + runtime.start() + await runtime.publish_message(Question(content=task), DefaultTopicId()) + + # Keep processing messages until idle. + await runtime.stop_when_idle() + + # Return the answer from the queue + res = (await queue.get()).content + print(f"res {res}") + return res + + return asyncio.run(main()) +''' } Take_a_step_back = {"thought": "Let LLM first think about the principles involved in solving this task which could be helpful. By understanding the underlying principles, the model can better reason through the problem and provide a more accurate solution.", @@ -1032,8 +1255,57 @@ async def main(): topic_id=TopicId(type="orchestrator_type") ) ``` +Or use the `type_subscription()` class decorator on the agent. +``` +@type_subscription(topic_type="orchestrator_type") +class OrchestratorAgent(RoutedAgent): + pass + +async def main(): + await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent()) + + await runtime.publish_message( + message=DiverseThoughtTask(task='What is the most creative art medium?'), + topic_id=TopicId(type="orchestrator_type") + ) +``` Now, you can publish directly to a specific topic through the runtime. +11. This is WRONG: ``` +class OrchestratorAgent(RoutedAgent): + pass + +async def main(): + await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent()) + + await runtime.publish_message( + message=DiverseThoughtTask(task='What is the most creative art medium?'), + topic_id=DefaultTopicId() + ) +``` +When there is a single scope of publishing, that is, all agents publish and subscribe to all broadcasted messages, we can use the convenience classes `DefaultTopicId` and `default_subscription()` to simplify our code. +Use the `default_subscription` class decorator on the agent. +``` +@default_subscription +class OrchestratorAgent(RoutedAgent): + pass + +async def main(): + await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent()) + + await runtime.publish_message( + message=DiverseThoughtTask(task='What is the most creative art medium?'), + topic_id=DefaultTopicId() + ) +``` + +12. This is WRONG: ``` +await runtime.publish_message(DiverseThoughtTask(task='Who is the most creative composer?'), AgentId("consensus_agent", "default")) +``` +`publish_message` must publish to a topic, not to an `AgentId`. Use `TopicId` or `DefaultTopicId`.
For example: ``` +await runtime.publish_message(DiverseThoughtTask(task='Who is the most creative composer?'), TopicId("consensus_agent", "default")) +``` + ## CORRECT Implementation examples: Here are some correct patterns you should follow: @@ -1137,8 +1409,8 @@ def get_init_archive(): # return [COT]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] # return [COT_SC]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] # return [Reflexion]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] - return [COT, COT_SC, Reflexion] # LLM_debate, Take_a_step_back, QD, Role_Assignment] - + # return [COT, COT_SC, Reflexion] # LLM_debate, Take_a_step_back, QD, Role_Assignment] + return [LLM_debate] def get_prompt(current_archive, adaptive=False): From dc872f4552e75b416bcddac6c41f8607d8bbf4b1 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Dec 2024 09:55:06 -0500 Subject: [PATCH 04/21] Add reflection prompts; Clean up --- .../autogen-core/samples/common/adas/adas.py | 380 +++++++++--------- .../samples/common/adas/adas_prompt.py | 257 ++++++++---- 2 files changed, 378 insertions(+), 259 deletions(-) diff --git a/python/packages/autogen-core/samples/common/adas/adas.py b/python/packages/autogen-core/samples/common/adas/adas.py index ff7c21666c33..2e44ff2c478b 100644 --- a/python/packages/autogen-core/samples/common/adas/adas.py +++ b/python/packages/autogen-core/samples/common/adas/adas.py @@ -1,32 +1,28 @@ +""" +To run, type +`python packages/autogen-core/samples/common/adas/adas.py --data_filename=` - - - +""" import argparse import asyncio -import os -import logging import json -import re +import logging +import os +import random +import time import uuid -import pickle +import numpy as np from dataclasses import dataclass -from typing import Dict, List, Union +from typing import Dict, List from collections import namedtuple from concurrent.futures import ThreadPoolExecutor from tqdm import tqdm -import threading -import random -import numpy as np -import requests -from github import Github from autogen_core.components import RoutedAgent, default_subscription, message_handler from autogen_core.application import SingleThreadedAgentRuntime -from autogen_core.base import AgentId, AgentType, AgentRuntime, CancellationToken, MessageContext, TopicId +from autogen_core.base import MessageContext from autogen_core.components import DefaultTopicId -from autogen_core.components.code_executor import CodeBlock, CodeExecutor, extract_markdown_code_blocks from autogen_core.components.models import ( AssistantMessage, ChatCompletionClient, @@ -34,13 +30,10 @@ SystemMessage, UserMessage, ) -from autogen_core.components.tool_agent import ToolAgent, tool_agent_caller_loop -from autogen_core.components.tools import FunctionTool, PythonCodeExecutionTool, ToolSchema -from autogen_ext.code_executors import DockerCommandLineCodeExecutor #, extract_markdown_code_blocks -from autogen_magentic_one.utils import LogHandler # TODO fix imports import sys + sys.path.append("/home/andyye/autogen/python/packages/autogen-core/samples/") from common.utils import get_chat_completion_client_from_envs @@ -51,64 +44,20 @@ logging.basicConfig(level=logging.WARNING) logging.getLogger("autogen_core").setLevel(logging.DEBUG) -Info = namedtuple('Info', ['name', 'author', 'content', 'iteration_idx']) +Info = namedtuple("Info", ["name", "author", "content", "iteration_idx"]) SEARCHING_MODE = True -def read_github_file(url): - response = requests.get(url) - if response.status_code == 
200: - return response.text - else: - return None - - -def print_repo_contents(repo, path="", indent=""): - contents = repo.get_contents(path) - documentation = [] - for content_file in contents: - if content_file.type == "dir": - documentation.extend(print_repo_contents(repo, content_file.path, indent + "│ ")) - else: - if content_file.download_url.endswith('.md') or content_file.download_url.endswith('.ipynb'): - print(f"Reading file from {content_file.download_url}") - f = read_github_file(content_file.download_url) - documentation.append("Title: " + content_file.name + "\nContents:\n" + f) - return documentation - - -def get_autogen_documentation(): - repo_name = "microsoft/autogen" - directory_name = "python/packages/autogen-core/docs/src/user-guide/core-user-guide" - g = Github() - - subdirectories = ['core-concepts', 'framework'] - documentation = [] - for subdir in subdirectories: - try: - repo = g.get_repo(repo_name) - documentation.extend(print_repo_contents(repo, directory_name + '/'+ subdir)) - except Exception as e: - print(f"Error: {e}") - print(f"Found {len(documentation)} pages of documentation") - return documentation - - @dataclass class ADASTask: task: str + @dataclass class ADASResult: result: str -@dataclass -class ReflectTask: - session_id: str - task: str - thought: str - @dataclass class LLMMessageList: @@ -131,42 +80,39 @@ class LLMAgentBaseTask: role: str -@dataclass -class LLMAgentBaseResponse: - output: str - - @dataclass class Message: content: str -class AgentSystem(): +class AgentSystem: def __init__(self) -> None: pass + def generate_task(input_infos) -> str: # construct input infos text - input_infos_text = '' + input_infos_text = "" for input_info in input_infos: if isinstance(input_info, Info): (field_name, author, content, iteration_idx) = input_info else: continue - if field_name == 'task': - input_infos_text += f'# Your Task:\n{content}\n\n' + if field_name == "task": + input_infos_text += f"# Your Task:\n{content}\n\n" elif iteration_idx != -1: # input_infos_text += f'### {field_name} #{iteration_idx + 1} by {author}:\n{content}\n\n' - input_infos_text += f'### {field_name} #{iteration_idx + 1}:\n{content}\n\n' + input_infos_text += f"### {field_name} #{iteration_idx + 1}:\n{content}\n\n" else: # input_infos_text += f'### {field_name} by {author}:\n{content}\n\n' - input_infos_text += f'### {field_name}:\n{content}\n\n' + input_infos_text += f"### {field_name}:\n{content}\n\n" prompt = input_infos_text + "# Instruction: \n" return prompt + def evaluate_forward_fn(args, forward_str): # dynamically define forward() # modified from https://github.com/luchris429/DiscoPOP/blob/main/scripts/launch_evo.py @@ -182,24 +128,28 @@ def evaluate_forward_fn(args, forward_str): setattr(AgentSystem, "forward", func) # set seed 0 for valid set - examples = load_drop(args.data_filename)[1:-1] # first one and the last one is for few-shot examples + examples = load_drop(args.data_filename)[ + 1:-1 + ] # first one and the last one is for few-shot examples random.seed(args.shuffle_seed) random.shuffle(examples) if SEARCHING_MODE: - examples = examples[:args.valid_size] * args.n_repeat + examples = examples[: args.valid_size] * args.n_repeat else: - examples = examples[args.valid_size:args.valid_size + args.test_size] * args.n_repeat + examples = ( + examples[args.valid_size : args.valid_size + args.test_size] * args.n_repeat + ) - questions = [example['inputs'] for example in examples] - answers = [example['targets'] for example in examples] + questions = 
[example["inputs"] for example in examples] + answers = [example["targets"] for example in examples] print(f"problem length: {len(examples)}") max_workers = min(len(examples), args.max_workers) if args.multiprocessing else 1 task_queue = [] for q in questions: - taskInfo = Info('task', 'User', q, -1) + taskInfo = Info("task", "User", q, -1) task_queue.append((taskInfo, AgentSystem())) # agentSystem = AgentSystem() @@ -213,10 +163,15 @@ def call_forward(agent_task_queue): agent_model_kwargs = {} result = agent.forward(task, agent_model_kwargs) + if args.thread_sleep: + print(f"Sleeping for {args.thread_sleep}") + time.sleep(args.thread_sleep) return result with ThreadPoolExecutor(max_workers=max_workers) as executor: - results = list(tqdm(executor.map(call_forward, task_queue), total=len(task_queue))) + results = list( + tqdm(executor.map(call_forward, task_queue), total=len(task_queue)) + ) acc_list = [] for q_idx, res in enumerate(results): @@ -226,7 +181,9 @@ def call_forward(agent_task_queue): else: extracted_answer = res correct_answers = answers[q_idx] - print(f"extracted_answer {extracted_answer}, correct_answers {correct_answers}") + print( + f"extracted_answer {extracted_answer}, correct_answers {correct_answers}" + ) em_score, f1_score = drop_metric(extracted_answer, correct_answers) except Exception as e: acc_list.append(0) @@ -238,60 +195,45 @@ def call_forward(agent_task_queue): return acc_list - @default_subscription class ADASAgent(RoutedAgent): """An agent that performs ADAS.""" - def __init__(self, - model_client: ChatCompletionClient, - system_prompt: str, - args, - archive - ) -> None: + def __init__( + self, model_client: ChatCompletionClient, system_prompt: str, args, archive + ) -> None: super().__init__("An agent searching agent.") - # self._system_messages: List[LLMMessage] = [ - # SystemMessage( - # content=system_prompt, - # ) - # ] - self._args = args self._archive = archive self._model_client = model_client self._session_memory: Dict[str, List[ADASTask | ADASResult]] = {} - # TODO(yeandy): Add this as a proper Tool https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/tools.html - # pip install pygithub - self._documentation = get_autogen_documentation() - self._system_messages: List[LLMMessage] = [ - SystemMessage( - content=system_prompt('\n'.join(self._documentation)), + # SystemMessage is not allowed in o1-preview API. 
+ # SystemMessage( + AssistantMessage( + content=system_prompt, + source=self.id.type, ) ] self._chat_history: List[LLMMessage] = [] self._model_client = model_client - self._cnt = 0 @message_handler - async def handle_task(self, message: LLMMessageList, ctx: MessageContext) -> SimpleReflectAgentResponse: + async def handle_task( + self, message: LLMMessageList, ctx: MessageContext + ) -> SimpleReflectAgentResponse: + print(f"Meta-Agent making a LLM call...") logging.info(f"{self._description} received message: {message}") model_result = await self._model_client.create( - # self._system_messages + self._chat_history + message.llm_message_list self._system_messages + message.llm_message_list - ) - print(f"llm_message_list {len(message.llm_message_list)}") - # self._chat_history.extend(message.llm_message_list) - print(f"-----cnt {self._cnt}") - # print(f"chat history {len(self._chat_history)}") - self._cnt += 1 assert isinstance(model_result.content, str) - print(f"model_result.content {model_result.content}") + print(f"Model client result: {model_result.content}") + print("Loading the json string of the content...") json_content = json.loads(model_result.content) - print(f"finish converting to json") + print("Finished loading the json string of the content") return SimpleReflectAgentResponse(json_content=json_content) @message_handler @@ -303,10 +245,12 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None # Process archive file_path = os.path.join(args.save_dir, f"{args.expr_name}_run_archive.json") if os.path.exists(file_path): - with open(file_path, 'r') as json_file: + with open(file_path, "r") as json_file: archive = json.load(json_file) - if "generation" in archive[-1] and isinstance(archive[-1]['generation'], int): - start = archive[-1]['generation'] + if "generation" in archive[-1] and isinstance( + archive[-1]["generation"], int + ): + start = archive[-1]["generation"] else: start = 0 else: @@ -314,10 +258,10 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None start = 0 for solution in archive: - if 'fitness' in solution: + if "fitness" in solution: continue - solution['generation'] = "initial" + solution["generation"] = "initial" print(f"============Initial Archive: {solution['name']}=================") try: acc_list = evaluate_forward_fn(args, solution["code"]) @@ -327,65 +271,108 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None continue fitness_str = bootstrap_confidence_interval(acc_list) - solution['fitness'] = fitness_str + solution["fitness"] = fitness_str # save results os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'w') as json_file: + with open(file_path, "w") as json_file: json.dump(archive, json_file, indent=4) - + # Initial prompt for n in range(start, args.n_generation): print(f"============Generation {n + 1}=================") - msg_list = [UserMessage(content=message.task, source=self.metadata["type"])] - import pdb; pdb.set_trace() + + # Set prompt with updated archive (for n > 0) + _, prompt = get_prompt(archive) + + msg_list = [UserMessage(content=prompt, source=self.metadata["type"])] try: response = await self.send_message(LLMMessageList(msg_list), self.id) next_solution = response.json_content - Reflexion_prompt_1, Reflexion_prompt_2 = get_reflexion_prompt(self._archive[-1] if n > 0 else None) - print(f"Reflexion_prompt_1 {Reflexion_prompt_1}") - print(f"Reflexion_prompt_2 {Reflexion_prompt_2}") - print(f"@@After initial prompt 
{response}") + ( + reflexion_prompt_1, + reflexion_prompt_2, + reflexion_prompt_3, + reflexion_prompt_4, + ) = get_reflexion_prompt(self._archive[-1] if n > 0 else None) + print(f"--After initial prompt {response}") # Reflexion 1 - # new_messages = [ - # AssistantMessage(content=str(next_solution), source=self.metadata["type"]), - # UserMessage(content=Reflexion_prompt_1, source=self.metadata["type"]), - # ] new_messages = msg_list + [ - AssistantMessage(content=str(next_solution), source=self.metadata["type"]), - UserMessage(content=Reflexion_prompt_1, source=self.metadata["type"]), + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=reflexion_prompt_1, source=self.metadata["type"] + ), ] - response = await self.send_message(LLMMessageList(new_messages), self.id) + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) next_solution = response.json_content - print(f"@@After Reflexion_prompt_1 {response}") + print(f"--After reflexion_prompt_1 {response}") # Reflexion 2 - # new_messages = [ - # AssistantMessage(content=str(next_solution), source=self.metadata["type"]), - # UserMessage(content=Reflexion_prompt_2, source=self.metadata["type"]), - # ] new_messages = new_messages + [ - AssistantMessage(content=str(next_solution), source=self.metadata["type"]), - UserMessage(content=Reflexion_prompt_2, source=self.metadata["type"]), + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=reflexion_prompt_2, source=self.metadata["type"] + ), ] - response = await self.send_message(LLMMessageList(new_messages), self.id) + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) next_solution = response.json_content - # next_solution = {'reflection': 'The previous code attempted to implement an ensemble approach with additional confidence estimation, but there were errors that needed addressing. Specifically:\n1. **Incorrect Use of `publish_message`:** The previously provided code misuses `self.publish_message()` in a context where the function signature might be misleading, as it requires `None` as its return.\n2. **Improper Handling of Topics and Message Types:** The correct usage for publishing and handling message types is essential, utilizing the proper `TopicId` syntax.\n3. **Incorrect Method for Calculating Confidence:** The confidence estimation implementation was overly simplistic, which could lead to skewed results. \n\nThe revised implementation corrects these issues and ensures compliance with best practices.', 'thought': '**Insights:**\nThe next iteration of the agent should refine on the concept of diversified reasoning by incorporating evaluative mechanisms within Worker Agents to self-assess their response confidence and determine when consensus should be approached collaboratively.\n\n**Overall Idea:**\nThe architecture can further benefit from introducing adaptive learning patterns, where Worker Agents adjust their reasoning strategies dynamically based on prior task ratings or other metadata. 
This enables a feedback loop that improves over time.\n\n**Implementation:**\n- Modify Worker Agents to give confidence ratings in their output.\n- Integrate an orchestrator that places more weight on outputs with higher confidence when synthesizing results.\n- Ensure message handling aligns with idiomatic usage of message types and topics, using `TopicId` properly.', 'name': 'Adaptive Diverse Ensemble', 'code': 'def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from collections import Counter\n from autogen_core.base import MessageContext, AgentId, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, message_handler, ClosureAgent, TypeSubscription\n from autogen_core.components.models import ChatCompletionClient, LLMMessage, SystemMessage, UserMessage\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\'model\'],\n api_version=model_client_kwargs[\'api_version\'],\n azure_endpoint=model_client_kwargs[\'azure_endpoint\'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True,\n },\n )\n\n @dataclass\n class DiverseThoughtTask:\n task: str\n\n @dataclass\n class DiverseThoughtResult:\n result: str\n confidence: float\n\n # Define Diverse Worker Agent\n class DiverseWorkerAgent(RoutedAgent):\n def __init__(self, description: str, model_client: ChatCompletionClient, instruction: str) -> None:\n super().__init__(description)\n self._model_client = model_client\n self._instruction = instruction\n\n @message_handler\n async def handle_task(self, message: DiverseThoughtTask, ctx: MessageContext) -> None:\n user_prompt = message.task + "\\n" + self._instruction\n model_result = await self._model_client.create([UserMessage(content=user_prompt, source="worker_agent")])\n confidence = self.estimate_confidence(model_result.content)\n assert isinstance(model_result.content, str)\n await self.publish_message(DiverseThoughtResult(result=model_result.content, confidence=confidence), \n topic_id=TopicId("worker_results", self.id.key))\n\n def estimate_confidence(self, text: str) -> float:\n # Improved placeholder for actual confidence estimation method\n # Here, we can use sentiment analysis or other processing as an example\n return min(1.0, max(0.0, len(text) / 100.0))\n\n # Orchestrator Agent for Consensus\n class OrchestratorAgent(RoutedAgent):\n def __init__(self) -> None:\n super().__init__("Orchestrator for Diverse Thoughts")\n\n @message_handler\n async def handle_task(self, message: DiverseThoughtTask, ctx: MessageContext) -> None:\n worker_ids = [AgentId("worker_1", ctx.id.key), AgentId("worker_2", ctx.id.key), AgentId("worker_3", ctx.id.key)]\n results = await asyncio.gather(*[self.send_message(message, worker_id) for worker_id in worker_ids])\n combined_result = self.evaluate_results(results)\n await self.publish_message(DiverseThoughtResult(result=combined_result, confidence=1.0), \n topic_id=TopicId("diverse_result", "orchestrator"))\n\n def evaluate_results(self, results: List[DiverseThoughtResult]) -> str:\n # Implement 
advanced evaluation, here just demonstrating a weighted result selection based on confidence\n confidences = Counter()\n for res in results:\n confidences[res.result] += res.confidence\n return max(confidences, key=confidences.get)\n\n async def main():\n # Create a queue to collect final answers\n queue = asyncio.Queue[DiverseThoughtResult]()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: DiverseThoughtResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Register workers with various strategies\n strategies = ["utilize strict logical reasoning", "incorporate probabilistic reasoning", "focus on evidence-based reasoning"]\n for i, strat in enumerate(strategies, start=1):\n await DiverseWorkerAgent.register(\n runtime, f"worker_{i}", lambda strat=strat: DiverseWorkerAgent(\n description=f"Diverse Worker {i}", model_client=model_client, instruction=strat\n )\n )\n\n # Register Orchestrator\n await OrchestratorAgent.register(runtime, "orchestrator")\n\n # Create closure agent to collect final output result\n result_topic = TypeSubscription(topic_type="diverse_result", agent_type="output_result")\n await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])\n\n # Start the runtime, and publish the first message\n runtime.start()\n await runtime.publish_message(\n message=DiverseThoughtTask(task=task),\n topic_id=TopicId("diverse", "orchestrator")\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the first answer from the queue\n return (await queue.get()).result\n\n return asyncio.run(main())\n'} - print(f"@@After Reflexion_prompt_2 {next_solution}") + print(f"--After reflexion_prompt_2 {next_solution}") + + # Reflexion 3 + new_messages = new_messages + [ + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=reflexion_prompt_3, source=self.metadata["type"] + ), + ] + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) + next_solution = response.json_content + print(f"--After reflexion_prompt_3 {next_solution}") + + # Reflexion 4 + new_messages = new_messages + [ + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=reflexion_prompt_4, source=self.metadata["type"] + ), + ] + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) + next_solution = response.json_content + print(f"--After reflexion_prompt_4 {next_solution}") + + # next_solution = {'reflection': 'Upon reviewing the code and the official API documentation, I noticed that the "AzureOpenAIChatCompletionClient" requires the "azure_deployment" parameter, which was missing in the code. According to the documentation, we need to provide "azure_deployment" along with "model", "api_version", and "azure_endpoint". I have updated the code to include the "azure_deployment" parameter when creating the model client. Additionally, I ensured that all other parameters and imports align with the official API documentation.', 'thought': '**Insights:**\nDecomposing complex questions into simpler sub-questions can improve reasoning accuracy by allowing the model to focus on one aspect at a time.\n\n**Overall Idea:**\nImplement an agent system where a `DecomposerAgent` breaks down the main question into sub-questions. 
`SolverAgents` answer these sub-questions based on the provided passage, and a `ComposerAgent` combines the sub-answers to produce the final answer.\n\n**Implementation:**\n- Define a `DecomposerAgent` that decomposes the main question into sub-questions and distributes them.\n- Define `SolverAgents` that answer sub-questions based on the provided passage.\n- Define a `ComposerAgent` that collects sub-answers and composes the final answer.\n- Use appropriate message classes and ensure correct message passing and subscriptions.\n- The `ComposerAgent` publishes the final answer to the default topic, which is collected by a `ClosureAgent`.', 'name': 'Question Decomposition Agent', 'code': 'def forward(self, task, model_client_kwargs) -> str:\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components import DefaultTopicId, default_subscription, RoutedAgent, message_handler, ClosureAgent\n from autogen_core.components.models import AssistantMessage, UserMessage, SystemMessage, ChatCompletionClient\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from autogen_core.application import SingleThreadedAgentRuntime\n\n # Create the Azure OpenAI model client\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n model_client = AzureOpenAIChatCompletionClient(\n azure_deployment=model_client_kwargs["azure_deployment"],\n model=model_client_kwargs["model"],\n api_version=model_client_kwargs["api_version"],\n azure_endpoint=model_client_kwargs["azure_endpoint"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True,\n },\n )\n\n @dataclass\n class Task:\n content: str\n\n @dataclass\n class SubQuestion:\n question: str\n sub_question: str\n\n @dataclass\n class SubQuestionList:\n sub_questions: List[str]\n\n @dataclass\n class SubAnswer:\n sub_question: str\n answer: str\n\n @dataclass\n class FinalAnswer:\n answer: str\n\n @default_subscription\n class DecomposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__("Decomposer Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are an expert at decomposing complex questions into simpler sub-questions that can be answered individually."\n )\n )\n ]\n\n @message_handler\n async def handle_task(self, message: Task, ctx: MessageContext) -> None:\n user_message = UserMessage(\n content=(\n f"Decompose the following question into a list of sub-questions that can help answer the main question:\\n{message.content}"\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n # Assuming the model lists the sub-questions in numbered format\n sub_questions = [sq.strip() for sq in response.content.strip().split(\'\\n\') if sq.strip()]\n for sq in sub_questions:\n await self.publish_message(\n SubQuestion(question=message.content, sub_question=sq),\n topic_id=DefaultTopicId(),\n )\n # Send the list of sub-questions to the ComposerAgent\n await self.publish_message(\n SubQuestionList(sub_questions=sub_questions),\n 
topic_id=DefaultTopicId(),\n )\n\n @default_subscription\n class SolverAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, passage: str) -> None:\n super().__init__("Solver Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are a helpful assistant that answers questions based on the provided passage. "\n "Provide concise and accurate answers."\n )\n )\n ]\n self._passage = passage\n\n @message_handler\n async def handle_sub_question(self, message: SubQuestion, ctx: MessageContext) -> None:\n user_message = UserMessage(\n content=(\n f"Passage:\\n{self._passage}\\n\\nQuestion:\\n{message.sub_question}\\n\\nAnswer the question based on the passage."\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n await self.publish_message(\n SubAnswer(sub_question=message.sub_question, answer=response.content),\n topic_id=DefaultTopicId(),\n )\n\n @default_subscription\n class ComposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__("Composer Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are a helpful assistant that composes a final answer based on answers to sub-questions."\n )\n )\n ]\n self._num_sub_questions = 0\n self._sub_answers = []\n\n @message_handler\n async def handle_sub_question_list(self, message: SubQuestionList, ctx: MessageContext) -> None:\n self._num_sub_questions = len(message.sub_questions)\n\n @message_handler\n async def handle_sub_answer(self, message: SubAnswer, ctx: MessageContext) -> None:\n self._sub_answers.append(message)\n if len(self._sub_answers) == self._num_sub_questions:\n # All sub-answers have been collected\n # Compose the final answer\n sub_answers_text = \'\\n\'.join(\n f"Sub-question: {sa.sub_question}\\nAnswer: {sa.answer}" for sa in self._sub_answers\n )\n user_message = UserMessage(\n content=(\n f"Based on the following sub-questions and their answers, compose a final comprehensive answer to the main question.\\n{sub_answers_text}"\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n await self.publish_message(\n FinalAnswer(answer=response.content),\n topic_id=DefaultTopicId(),\n )\n\n async def main():\n queue = asyncio.Queue()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await DecomposerAgent.register(runtime, "decomposer_agent", lambda: DecomposerAgent(model_client))\n await SolverAgent.register(runtime, "solver_agent", lambda: SolverAgent(model_client, passage=task))\n await ComposerAgent.register(runtime, "composer_agent", lambda: ComposerAgent(model_client))\n\n # ClosureAgent to collect the final answer\n await ClosureAgent.register(runtime, "output_result", output_result)\n\n runtime.start()\n\n # Publish the task to the DecomposerAgent\n await runtime.publish_message(\n Task(content=task),\n topic_id=DefaultTopicId(),\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the answer from the queue\n 
final_answer = (await queue.get()).answer\n return final_answer\n\n return asyncio.run(main())'} + # next_solution = {'reflection': 'Upon reviewing the code and the official API documentation, I noticed that the "AzureOpenAIChatCompletionClient" requires the "azure_deployment" parameter, which was missing in the code. According to the documentation, we need to provide "azure_deployment" along with "model", "api_version", and "azure_endpoint". I have updated the code to include the "azure_deployment" parameter when creating the model client. Additionally, I ensured that all other parameters and imports align with the official API documentation.', 'thought': '**Insights:**\nDecomposing complex questions into simpler sub-questions can improve reasoning accuracy by allowing the model to focus on one aspect at a time.\n\n**Overall Idea:**\nImplement an agent system where a `DecomposerAgent` breaks down the main question into sub-questions. `SolverAgents` answer these sub-questions based on the provided passage, and a `ComposerAgent` combines the sub-answers to produce the final answer.\n\n**Implementation:**\n- Define a `DecomposerAgent` that decomposes the main question into sub-questions and distributes them.\n- Define `SolverAgents` that answer sub-questions based on the provided passage.\n- Define a `ComposerAgent` that collects sub-answers and composes the final answer.\n- Use appropriate message classes and ensure correct message passing and subscriptions.\n- The `ComposerAgent` publishes the final answer to the default topic, which is collected by a `ClosureAgent`.', 'name': 'Question Decomposition Agent', 'code': 'def forward(self, task, model_client_kwargs) -> str:\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components import DefaultTopicId, default_subscription, RoutedAgent, message_handler, ClosureAgent\n from autogen_core.components.models import AssistantMessage, UserMessage, SystemMessage, ChatCompletionClient\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from autogen_core.application import SingleThreadedAgentRuntime\n\n # Create the Azure OpenAI model client\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs["model"],\n api_version=model_client_kwargs["api_version"],\n azure_endpoint=model_client_kwargs["azure_endpoint"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True,\n },\n )\n\n @dataclass\n class Task:\n content: str\n\n @dataclass\n class SubQuestion:\n question: str\n sub_question: str\n\n @dataclass\n class SubQuestionList:\n sub_questions: List[str]\n\n @dataclass\n class SubAnswer:\n sub_question: str\n answer: str\n\n @dataclass\n class FinalAnswer:\n answer: str\n\n @default_subscription\n class DecomposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__("Decomposer Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are an expert at decomposing complex questions into simpler sub-questions that can be answered individually."\n )\n )\n ]\n\n @message_handler\n async def handle_task(self, message: Task, ctx: 
MessageContext) -> None:\n user_message = UserMessage(\n content=(\n f"Decompose the following question into a list of sub-questions that can help answer the main question:\\n{message.content}"\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n # Assuming the model lists the sub-questions in numbered format\n sub_questions = [sq.strip() for sq in response.content.strip().split(\'\\n\') if sq.strip()]\n for sq in sub_questions:\n await self.publish_message(\n SubQuestion(question=message.content, sub_question=sq),\n topic_id=DefaultTopicId(),\n )\n # Send the list of sub-questions to the ComposerAgent\n await self.publish_message(\n SubQuestionList(sub_questions=sub_questions),\n topic_id=DefaultTopicId(),\n )\n\n @default_subscription\n class SolverAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, passage: str) -> None:\n super().__init__("Solver Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are a helpful assistant that answers questions based on the provided passage. "\n "Provide concise and accurate answers."\n )\n )\n ]\n self._passage = passage\n\n @message_handler\n async def handle_sub_question(self, message: SubQuestion, ctx: MessageContext) -> None:\n user_message = UserMessage(\n content=(\n f"Passage:\\n{self._passage}\\n\\nQuestion:\\n{message.sub_question}\\n\\nAnswer the question based on the passage."\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n await self.publish_message(\n SubAnswer(sub_question=message.sub_question, answer=response.content),\n topic_id=DefaultTopicId(),\n )\n\n @default_subscription\n class ComposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__("Composer Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are a helpful assistant that composes a final answer based on answers to sub-questions."\n )\n )\n ]\n self._num_sub_questions = 0\n self._sub_answers = []\n\n @message_handler\n async def handle_sub_question_list(self, message: SubQuestionList, ctx: MessageContext) -> None:\n self._num_sub_questions = len(message.sub_questions)\n\n @message_handler\n async def handle_sub_answer(self, message: SubAnswer, ctx: MessageContext) -> None:\n self._sub_answers.append(message)\n if len(self._sub_answers) == self._num_sub_questions:\n # All sub-answers have been collected\n # Compose the final answer\n sub_answers_text = \'\\n\'.join(\n f"Sub-question: {sa.sub_question}\\nAnswer: {sa.answer}" for sa in self._sub_answers\n )\n user_message = UserMessage(\n content=(\n f"Based on the following sub-questions and their answers, compose a final comprehensive answer to the main question.\\n{sub_answers_text}"\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n await self.publish_message(\n FinalAnswer(answer=response.content),\n topic_id=DefaultTopicId(),\n )\n\n async def main():\n queue = asyncio.Queue()\n async def output_result(_runtime: 
AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await DecomposerAgent.register(runtime, "decomposer_agent", lambda: DecomposerAgent(model_client))\n await SolverAgent.register(runtime, "solver_agent", lambda: SolverAgent(model_client, passage=task))\n await ComposerAgent.register(runtime, "composer_agent", lambda: ComposerAgent(model_client))\n\n # ClosureAgent to collect the final answer\n await ClosureAgent.register(runtime, "output_result", output_result)\n\n runtime.start()\n\n # Publish the task to the DecomposerAgent\n await runtime.publish_message(\n Task(content=task),\n topic_id=DefaultTopicId(),\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the answer from the queue\n final_answer = (await queue.get()).answer\n return final_answer\n\n return asyncio.run(main())'} + + # next_solution = {'reflection': '**Reflection:**\n\nAfter reviewing the "## WRONG Implementation examples" section, specifically example 8, I realized that for the `ClosureAgent` to receive the final `Answer` message correctly, the topic source in `publish_message` must match the agent key of the `ClosureAgent`. In the previous code, the `ReasoningAgent` published to `TopicId("result", self.id.type)` where `self.id.type` is `"reasoning_agent"`, but the `ClosureAgent` was registered with the agent key `"output_result"`. This mismatch would prevent the message from being delivered to the `ClosureAgent`.\n\nTo fix this, I adjusted the `ReasoningAgent` to publish the final `Answer` to `TopicId("result", "output_result")`, ensuring that the topic source matches the agent key of the `ClosureAgent`, which is `"output_result"`. This follows the correct pattern outlined in the examples and ensures that the message is correctly routed to the `ClosureAgent`.\n', 'thought': '**Insights:**\nBy parallelizing the generation of multiple reasoning paths, we can improve the efficiency of the agent. Additionally, ensuring that the agents and subscriptions are correctly set up will avoid common mistakes.\n\n**Overall Idea:**\nThe improved agent will generate multiple reasoning paths concurrently and aggregate the final answers to select the most common one. 
This enhances efficiency without altering the overall design.\n\n**Implementation:**\n- Use `asyncio.gather` to run multiple model calls concurrently in the `ReasoningAgent`.\n- Ensure that the agents and subscriptions are correctly registered.\n- Confirm that the `ClosureAgent` collects the final answer accurately.', 'name': 'Self-Consistency Chain-of-Thought Agent', 'code': 'def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n from collections import Counter\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import MessageContext, AgentId, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs["model"],\n api_version=model_client_kwargs["api_version"],\n azure_endpoint=model_client_kwargs["azure_endpoint"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True\n },\n )\n\n @dataclass\n class Question:\n content: str\n\n @dataclass\n class Answer:\n content: str\n\n @default_subscription\n class ReasoningAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, num_samples: int) -> None:\n super().__init__("Reasoning Agent")\n self._model_client = model_client\n self._num_samples = num_samples\n\n @message_handler\n async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n async def generate_answer() -> str:\n prompt = ("Please solve the following problem step-by-step:\\n\\n"\n f"{message.content}\\n\\n"\n "Your final answer should be a single string.")\n messages = [\n SystemMessage("You are a helpful assistant for solving reasoning problems."),\n UserMessage(content=prompt, source="user"),\n ]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n return response.content.strip()\n\n # Generate the reasoning paths concurrently\n tasks = [generate_answer() for _ in range(self._num_samples)]\n final_answers = await asyncio.gather(*tasks)\n\n # Aggregate the final answers and select the most common one\n most_common_answer = Counter(final_answers).most_common(1)[0][0]\n\n # Publish the final answer, ensuring topic source matches ClosureAgent\'s agent key\n await self.publish_message(Answer(content=most_common_answer), topic_id=TopicId("result", "output_result"))\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n await ReasoningAgent.register(runtime, "reasoning_agent", lambda: ReasoningAgent(model_client, num_samples=5))\n\n result_topic = TypeSubscription(topic_type="result", agent_type="output_result")\n await ClosureAgent.register(runtime, 
"output_result", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n await runtime.publish_message(Question(content=task), DefaultTopicId())\n\n await runtime.stop_when_idle()\n\n return (await queue.get()).content\n\n return asyncio.run(main())'} + # next_solution = {'reflection': '**Reflection:**\n\nAfter reviewing the "## WRONG Implementation examples" section, specifically example 8, I realized that for the `ClosureAgent` to receive the final `Answer` message correctly, the topic source in `publish_message` must match the agent key of the `ClosureAgent`. In the previous code, the `ReasoningAgent` published to `TopicId("result", self.id.type)` where `self.id.type` is `"reasoning_agent"`, but the `ClosureAgent` was registered with the agent key `"output_result"`. This mismatch would prevent the message from being delivered to the `ClosureAgent`.\n\nTo fix this, I adjusted the `ReasoningAgent` to publish the final `Answer` to `TopicId("result", "output_result")`, ensuring that the topic source matches the agent key of the `ClosureAgent`, which is `"output_result"`. This follows the correct pattern outlined in the examples and ensures that the message is correctly routed to the `ClosureAgent`.\n', 'thought': '**Insights:**\nBy parallelizing the generation of multiple reasoning paths, we can improve the efficiency of the agent. Additionally, ensuring that the agents and subscriptions are correctly set up will avoid common mistakes.\n\n**Overall Idea:**\nThe improved agent will generate multiple reasoning paths concurrently and aggregate the final answers to select the most common one. This enhances efficiency without altering the overall design.\n\n**Implementation:**\n- Use `asyncio.gather` to run multiple model calls concurrently in the `ReasoningAgent`.\n- Ensure that the agents and subscriptions are correctly registered.\n- Confirm that the `ClosureAgent` collects the final answer accurately.', 'name': 'Self-Consistency Chain-of-Thought Agent', 'code': 'def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n from collections import Counter\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import MessageContext, AgentId, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs["model"],\n api_version=model_client_kwargs["api_version"],\n azure_endpoint=model_client_kwargs["azure_endpoint"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True\n },\n )\n\n @dataclass\n class Question:\n content: str\n\n @dataclass\n class Answer:\n content: str\n\n @default_subscription\n class ReasoningAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, num_samples: int) -> None:\n super().__init__("Reasoning Agent")\n 
self._model_client = model_client\n self._num_samples = num_samples\n\n @message_handler\n async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n async def generate_answer() -> str:\n prompt = ("Please solve the following problem step-by-step:\\n\\n"\n f"{message.content}\\n\\n"\n "Your final answer should be a single string.")\n messages = [\n SystemMessage("You are a helpful assistant for solving reasoning problems."),\n UserMessage(content=prompt, source="user"),\n ]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n return response.content.strip()\n\n # Generate the reasoning paths concurrently\n tasks = [generate_answer() for _ in range(self._num_samples)]\n final_answers = await asyncio.gather(*tasks)\n\n # Aggregate the final answers and select the most common one\n most_common_answer = Counter(final_answers).most_common(1)[0][0]\n\n # Publish the final answer, ensuring topic source matches ClosureAgent\'s agent key\n await self.publish_message(Answer(content=most_common_answer), topic_id=TopicId("result", "output_result"))\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n await ReasoningAgent.register(runtime, "reasoning_agent", lambda: ReasoningAgent(model_client, num_samples=5))\n\n result_topic = TypeSubscription(topic_type="result", agent_type="output_result")\n await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n await runtime.publish_message(Question(content=task), DefaultTopicId())\n\n res = (await queue.get()).content\n\n await runtime.stop()\n\n return res\n\n return asyncio.run(main())'} except Exception as e: - # import pdb; pdb.set_trace() - print("During LLM generate new solution:") + print("Exception occured during the generation of new solution:") print(e) - import pdb; pdb.set_trace() n -= 1 continue - import pdb; pdb.set_trace() acc_list = [] for _ in range(args.debug_max): - print(f"DEBUGGING") + print("Evaluate code of newly generated solution. Debug loop...") try: + print(next_solution["code"]) acc_list = evaluate_forward_fn(args, next_solution["code"]) if np.mean(acc_list) < 0.01 and SEARCHING_MODE: raise Exception("All 0 accuracy") @@ -393,59 +380,62 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None except Exception as e: print("During evaluation:") print(e) - next_solution = response.json_content - # new_messages = [ - # AssistantMessage(content=str(next_solution), source=self.metadata["type"]), - # UserMessage(content=f"Error during evaluation:\n{e}\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", source=self.metadata["type"]), - # ] new_messages = new_messages + [ - AssistantMessage(content=str(next_solution), source=self.metadata["type"]), - UserMessage(content=f"Error during evaluation:\n{e}\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. 
Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", source=self.metadata["type"]), + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=f"Error during evaluation:\n{e}\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", + source=self.metadata["type"], + ), ] try: - response = await self.send_message(LLMMessageList(new_messages), self.id) + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) next_solution = response.json_content except Exception as e: print("During LLM generate new solution:") print(e) - import pdb; pdb.set_trace() continue continue if not acc_list: n -= 1 continue - import pdb; pdb.set_trace() fitness_str = bootstrap_confidence_interval(acc_list) - next_solution['fitness'] = fitness_str - next_solution['generation'] = n + 1 + next_solution["fitness"] = fitness_str + next_solution["generation"] = n + 1 - if 'debug_thought' in next_solution: - del next_solution['debug_thought'] - if 'reflection' in next_solution: - del next_solution['reflection'] + if "debug_thought" in next_solution: + del next_solution["debug_thought"] + if "reflection" in next_solution: + del next_solution["reflection"] archive.append(next_solution) # save results os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, 'w') as json_file: + with open(file_path, "w") as json_file: json.dump(archive, json_file, indent=4) + async def main(args) -> None: runtime = SingleThreadedAgentRuntime() client = get_chat_completion_client_from_envs(model="gpt-4o-mini") archive = get_init_archive() - system_prompt, prompt = get_prompt(archive) + system_prompt, prompt = get_prompt(archive) await ADASAgent.register( - runtime, "adas_agent", lambda: ADASAgent( + runtime, + "adas_agent", + lambda: ADASAgent( model_client=client, system_prompt=system_prompt, args=args, archive=archive, - ) + ), ) - + runtime.start() # Publish an initial message to trigger the ADAS search to start. 
@@ -458,26 +448,38 @@ async def main(args) -> None: await runtime.stop_when_idle() -# python packages/autogen-core/samples/common/adas/adas.py --data_filename=/home/andyye/ADAS/dataset/drop_v0_dev.jsonl.gz --valid_size=1 if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run ADAS") - parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.") - parser.add_argument('--data_filename', type=str, default="dataset/drop_v0_dev.jsonl.gz") - parser.add_argument('--valid_size', type=int, default=128) - parser.add_argument('--test_size', type=int, default=800) - parser.add_argument('--shuffle_seed', type=int, default=0) - parser.add_argument('--n_repeat', type=int, default=1) - parser.add_argument('--multiprocessing', action='store_true', default=True) - parser.add_argument('--max_workers', type=int, default=48) - parser.add_argument('--debug', action='store_true', default=True) - parser.add_argument('--save_dir', type=str, default='results/') - parser.add_argument('--expr_name', type=str, default="drop_gpt3.5_results") - parser.add_argument('--n_generation', type=int, default=30) - parser.add_argument('--debug_max', type=int, default=3) - parser.add_argument('--model', - type=str, - default='gpt-4o-2024-05-13', - choices=['gpt-4-turbo-2024-04-09', 'gpt-3.5-turbo-0125', 'gpt-4o-2024-05-13']) + parser.add_argument( + "--verbose", action="store_true", help="Enable verbose logging." + ) + parser.add_argument( + "--data_filename", type=str, default="dataset/drop_v0_dev.jsonl.gz" + ) + parser.add_argument("--valid_size", type=int, default=128) + parser.add_argument("--test_size", type=int, default=800) + parser.add_argument("--shuffle_seed", type=int, default=0) + parser.add_argument("--n_repeat", type=int, default=1) + parser.add_argument("--multiprocessing", action="store_true", default=True) + parser.add_argument("--max_workers", type=int, default=48) + parser.add_argument("--debug", action="store_true", default=True) + parser.add_argument("--save_dir", type=str, default="results/") + parser.add_argument("--expr_name", type=str, default="drop_gpt3.5_results") + parser.add_argument("--n_generation", type=int, default=30) + parser.add_argument("--debug_max", type=int, default=3) + parser.add_argument( + "--thread_sleep", + type=int, + default=0, + help="Amount of time to sleep between new threads." 
+ "This is to mitigate any errors due to request limits with Azure or AutoGen", + ) + parser.add_argument( + "--model", + type=str, + default="gpt-4o-2024-05-13", + choices=["gpt-4-turbo-2024-04-09", "gpt-3.5-turbo-0125", "gpt-4o-2024-05-13"], + ) args = parser.parse_args() if args.verbose: logging.basicConfig(level=logging.WARNING) diff --git a/python/packages/autogen-core/samples/common/adas/adas_prompt.py b/python/packages/autogen-core/samples/common/adas/adas_prompt.py index 11889c088883..3af71df5a779 100644 --- a/python/packages/autogen-core/samples/common/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/common/adas/adas_prompt.py @@ -1,4 +1,6 @@ import json +import requests +from github import Github EXAMPLE = { "thought": "**Insights:**\nYour insights on what should be the next interesting agent.\n**Overall Idea:**\nyour reasoning and the overall concept behind the agent design.\n**Implementation:**\ndescribe the implementation step by step.", @@ -6,33 +8,59 @@ "code": """def forward(self, taskInfo): # Your code here return answer -""" +""", } -# COT = { -# "thought": "By encouraging the LLM to think step by step rather than directly outputting an answer, chain-of-thought reasoning enables complex problem-solving through intermediate steps. This practice improves the model's ability to handle tasks that require deeper reasoning and provides insight into its decision-making process.", -# "name": "Chain-of-Thought", -# "code": """def forward(self, taskInfo): -# # Instruction for the Chain-of-Thought (CoT) approach -# # It is an important practice that allows the LLM to think step by step before solving the task. -# cot_instruction = "Please think step by step and then solve the task." -# # Instantiate a new LLM agent specifically for CoT -# # To allow LLM thinking before answering, we need to set an additional output field 'thinking'. 
-# cot_agent = LLMAgentBase(['thinking', 'answer'], 'Chain-of-Thought Agent') +def read_github_file(url): + response = requests.get(url) + if response.status_code == 200: + return response.text + else: + return None -# # Prepare the inputs for the CoT agent -# # The input should be a list of Info, and the first one is often the taskInfo -# cot_agent_inputs = [taskInfo] -# # Get the response from the CoT agent -# thinking, answer = cot_agent(cot_agent_inputs, cot_instruction) +def print_repo_contents(repo, path="", indent=""): + contents = repo.get_contents(path) + documentation = [] + for content_file in contents: + if content_file.type == "dir": + documentation.extend( + print_repo_contents(repo, content_file.path, indent + "│ ") + ) + else: + if content_file.download_url.endswith( + ".md" + ) or content_file.download_url.endswith(".ipynb"): + print(f"Reading file from {content_file.download_url}") + f = read_github_file(content_file.download_url) + documentation.append( + "Title: " + content_file.name + "\nContents:\n" + f + ) + return documentation + + +# TODO: pip install pygithub +def get_autogen_documentation(): + repo_name = "microsoft/autogen" + directory_name = "python/packages/autogen-core/docs/src/user-guide/core-user-guide" + g = Github() + + subdirectories = ["core-concepts", "framework"] + documentation = [] + for subdir in subdirectories: + try: + repo = g.get_repo(repo_name) + documentation.extend( + print_repo_contents(repo, directory_name + "/" + subdir) + ) + except Exception as e: + print(f"Error: {e}") + print(f"Found {len(documentation)} pages of documentation") + return documentation -# # Return only the final answer -# return answer -# """ -# } +DOCUMENTATION = "\n".join(get_autogen_documentation()) COT = { "thought": "By encouraging the LLM to think step by step rather than directly outputting an answer, chain-of-thought reasoning enables complex problem-solving through intermediate steps. This practice improves the model's ability to handle tasks that require deeper reasoning and provides insight into its decision-making process.", @@ -150,7 +178,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResul return (await queue.get()).result return asyncio.run(main()) -""" +""", } COT_SC = { @@ -310,7 +338,7 @@ async def main(): return result.result return asyncio.run(main()) -""" +""", } Reflexion = { @@ -604,13 +632,13 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes return (await queue.get()).answer return asyncio.run(main()) -''' +''', } LLM_debate = { "thought": "By letting different LLMs debate with each other, we can leverage their diverse perspectives to find better solutions for tasks.", "name": "LLM Debate", - "code": '''def forward(self, task, model_client_kwargs): + "code": """def forward(self, task, model_client_kwargs): import asyncio import json import logging @@ -860,17 +888,17 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct await runtime.stop_when_idle() # Return the answer from the queue - res = (await queue.get()).content - print(f"res {res}") - return res + return (await queue.get()).content return asyncio.run(main()) -''' +""", } -Take_a_step_back = {"thought": "Let LLM first think about the principles involved in solving this task which could be helpful. 
By understanding the underlying principles, the model can better reason through the problem and provide a more accurate solution.", - "name": "Step-back Abstraction", - "code": """def forward(self, taskInfo): +# TODO(yeandy): Take a Step Back currently not used as a seed in the archive. Refactor using the AutoGen API +Take_a_step_back = { + "thought": "Let LLM first think about the principles involved in solving this task which could be helpful. By understanding the underlying principles, the model can better reason through the problem and provide a more accurate solution.", + "name": "Step-back Abstraction", + "code": """def forward(self, taskInfo): # Instruction for understanding the principles involved in the task principle_instruction = "What are the physics, chemistry or biology principles and concepts involved in solving this task? First think step by step. Then list all involved principles and explain them." @@ -887,12 +915,14 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct # Use the principles to solve the task thinking, answer = cot_agent([taskInfo, thinking, principle], cot_instruction) return answer -""" - } +""", +} -QD = {"thought": "Similar to Quality-Diversity methods, let LLM generate multiple diverse interesting solutions could help. By encouraging the model to explore different reasoning paths, we can increase the chances of finding the best solution.", - "name": "Quality-Diversity", - "code": """def forward(self, taskInfo): +# TODO(yeandy): QD currently not used as a seed in the archive. Refactor using the AutoGen API +QD = { + "thought": "Similar to Quality-Diversity methods, let LLM generate multiple diverse interesting solutions could help. By encouraging the model to explore different reasoning paths, we can increase the chances of finding the best solution.", + "name": "Quality-Diversity", + "code": """def forward(self, taskInfo): # Instruction for initial reasoning cot_initial_instruction = "Please think step by step and then solve the task." @@ -925,12 +955,14 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct # Make the final decision based on all generated answers thinking, answer = final_decision_agent([taskInfo] + possible_answers, final_decision_instruction) return answer -""" - } +""", +} -Role_Assignment = {"thought": "Similar to Auto-GPT and expert prompting, we can use dynamic control flow in the design to let the agent decide what expert we should use.", - "name": "Dynamic Assignment of Roles", - "code": """def forward(self, taskInfo): +# TODO(yeandy): Role Assignment currently not used as a seed in the archive. Refactor using the AutoGen API +Role_Assignment = { + "thought": "Similar to Auto-GPT and expert prompting, we can use dynamic control flow in the design to let the agent decide what expert we should use.", + "name": "Dynamic Assignment of Roles", + "code": """def forward(self, taskInfo): # Instruction for step-by-step reasoning cot_instruction = "Please think step by step and then solve the task." 
expert_agents = [LLMAgentBase(['thinking', 'answer'], 'Expert Agent', role=role) for role in ['Reading Comprehension Specialist', 'Logical Reasoning Strategist', 'Multidisciplinary Knowledge Integrator', 'Helpful Assistant']] @@ -953,16 +985,11 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct thinking, answer = expert_agents[expert_id]([taskInfo], cot_instruction) return answer -""" - } - -system_prompt = lambda formatted_documentation: f"""You are a helpful assistant. You have an expert understanding of the AutoGen framework, and how to use the Python API. The API documentation are as follows: - -{formatted_documentation} +""", +} -This is the end of the documentation. -Make sure to return in a WELL-FORMED JSON object. Do not add any code blocks around the JSON object.""" +system_prompt = """You are a helpful assistant. Make sure to return in a WELL-FORMED JSON object. Do not add any code blocks around the JSON object.""" base = """# Overview You are an expert machine learning researcher testing various agentic systems. Your objective is to design building blocks such as prompts and control flows within these systems to solve complex tasks. Your aim is to design an optimal agent performing well on the Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs (DROP), which assesses the ability to perform discrete reasoning and comprehend detailed information across multiple paragraphs. @@ -1007,9 +1034,9 @@ def forward(self, task, model_client_kwargs) -> str: The fitness value is the median and 95% Bootstrap Confidence Interval of the correct rate on a validation question set. Your GOAL is to maximize the "fitness". # Output Instruction and Example: -The first key should be ("thought"), and it should capture your thought process for designing the next function. In the "thought" section, first reason about what should be the next interesting agent to try, then describe your reasoning and the overall concept behind the agent design, and finally detail the implementation steps. +The first key should be ("thought"), and it should capture your thought process for designing the next function. In the "thought" section, first reason about what should be the next interesting agent to try, then describe your reasoning and the overall concept behind the agent design, and finally detail the implementation steps. Make sure to talk about the agent(s) that are supposed to start and end the system. The second key ("name") corresponds to the name of your next agent architecture. -Finally, the last key ("code") corresponds to the exact “forward()” function in Python code that you would like to try. You must write a COMPLETE CODE in "code": Your code will be part of the entire project, so please implement complete, reliable, reusable code snippets. +The last key ("code") corresponds to the exact “forward()” function in Python code that you would like to try. You must write a COMPLETE CODE in "code": Your code will be part of the entire project, so please implement complete, reliable, reusable code snippets. Here is an example of the output format for the next agent architecture: @@ -1213,6 +1240,31 @@ class Message: ``` 8. 
This is WRONG: ``` +async def main(): + queue = asyncio.Queue[FinalDecision]() + + async def output_result(_runtime, _id, message, _ctx): + await queue.put(message) + + # Closure Agent for collecting results + result_topic = TopicId("result", "output_result") + await runtime.add_subscription(TypeSubscription("result", "output_result")) + await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) +``` +The function `output_result` that the `ClosureAgent` uses must follow this signature: ``` +async def main(): + async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None: + await queue.put(message) + + # Closure Agent for collecting results + result_topic = TopicId("result", "output_result") + await runtime.add_subscription(TypeSubscription("result", "output_result")) + await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) +``` +where the type of `message` can be whatever dataclass is used by the agent publishing the final message. In this case, it is the `Answer` dataclass. +Additionally, the ClosureAgent MUST subscribe to a `result_topic` called `TopicId("result", "output_result")` using this line `await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])`. And the agent that publishes the final answer MUST publish to the `topic_id=TopicId("result", self.id.type)`. + +9. This is WRONG: ``` await runtime.publish_message(Task(content='What is the highest mountain in the world?'), topic_id=TypeSubscription("initial_task", "worker_agent").topic_id()) ``` The `topic_id` needs to be a `TopicId` or `DefaultTopicId` object. For example: ``` @@ -1225,14 +1277,14 @@ class Message: await runtime.publish_message(Task(content='What is the highest mountain in the world?'), topic_id=DefaultTopicId()) ``` -8. This is WRONG: ``` +10. This is WRONG: ``` await OrchestratorAgent.register(runtime, "orchestrator") ``` You will encounter this error "TypeError: BaseAgent.register() missing 1 required positional argument: 'factory'". The correct solution is: ``` await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent()) ``` -9 This is WRONG: ``` +11. This is WRONG: ``` class OrchestratorAgent(RoutedAgent): pass @@ -1271,7 +1323,7 @@ async def main(): ``` Now, you can publish directly to a specific topic through the runtime. -10. This is WRONG: ``` +12. This is WRONG: ``` class OrchestratorAgent(RoutedAgent): pass @@ -1299,7 +1351,7 @@ async def main(): ) ``` -11. This is WRONG: ``` +13. This is WRONG: ``` await runtime.publish_message(DiverseThoughtTask(task='Who is the most creative composer?'), AgentId("consensus_agent", "default")) ``` The `publish_message` should publish to a topic. Use `TopicId` or `DefaultTopicId`. For example: ``` @@ -1326,7 +1378,7 @@ async def main(): }, ) ``` -Creating the model client using the model_client_kwargs dictionary. +Creating the model client using the model_client_kwargs dictionary. Do not modify this part. 2. This is CORRECT: ``` async def main(): @@ -1358,7 +1410,47 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes return asyncio.run(main()) ``` This is the format for the `main` function. Make sure that when creating a `ClosureAgent`, you have created `queue` from which you can call `return (await queue.get()).answer` at the very end of the `main` function. 
The datatype of the Queue should be the final message that the agent system publishes to indicate that the system is terminating. -The `result_topic` should have a unique `topic_type`, which can be called "result". +The `result_topic` should have a unique `topic_type`, which should be called "result". The agent that publishes the final message MUST publish to the same topic_id + +3. This is CORRECT: ``` +@default_subscription +class Coordinator(RoutedAgent): + + @message_handler + async def handle_response(self, message: SolverResponse, ctx: MessageContext) -> None: + self._buffer[message.reasoning_type] = message + if len(self._buffer) >= self._num_solvers: + selected_answer = self.decide_based_on_type(self._buffer, message.reasoning_type) + await self.publish_message(Answer(content=selected_answer), topic_id=TopicId('result', self.id.type)) + +async def main(): + queue = asyncio.Queue[Answer]() + + async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None: + await queue.put(message) + + runtime = SingleThreadedAgentRuntime() + + await DeductiveSolver.register(runtime, "deductive_solver", lambda: DeductiveSolver(model_client)) + await InductiveSolver.register(runtime, "inductive_solver", lambda: InductiveSolver(model_client)) + await AbductiveSolver.register(runtime, "abductive_solver", lambda: AbductiveSolver(model_client)) + await Coordinator.register(runtime, "coordinator", lambda: Coordinator(num_solvers=3)) + + result_topic = TypeSubscription(topic_type="result", agent_type="output_result") + await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + + runtime.start() + await runtime.publish_message(Question(content=task, reasoning_type='general'), DefaultTopicId()) + + await runtime.stop_when_idle() + + return (await queue.get()).content +``` +The `Coordinator` agent is publishing the final message. It publishes to the topic_id object `TopicId('result', self.id.type)`, where the `type` is `result`, and the `source` is `self.id.type`. This matches the result topic `TypeSubscription(topic_type="result", agent_type="output_result")`, which the `ClosureAgent` subscribes to. Importantly, the `topic_type="result"` matches the topic type "result" used in `publish_message` by the Coordinator agent. +In other words, the ClosureAgent MUST subscribe to a `result_topic` called `TopicId("result", "output_result")` using this line `await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])`. And the agent that publishes the final answer MUST publish to the `topic_id=TopicId("result", self.id.type)`. + +## Documentation +[DOCUMENTATION] # Your task You are deeply familiar with prompting techniques and the agent works from the literature. Your goal is to maximize the specified performance metrics by proposing interestingly new agents. @@ -1366,9 +1458,9 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes Be creative when thinking about the next interesting agent to try. You are encouraged to draw inspiration from related agent papers or academic papers from other research areas. Use the knowledge from the archive and inspiration from academic literature to propose the next interesting agentic system design. THINK OUTSIDE THE BOX. -""" -# Documentation: https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/docs/src/user-guide/core-user-guide +Make sure to return in a WELL-FORMED JSON object. 
Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. +""" Reflexion_prompt_1 = f""""[EXAMPLE]Carefully review the proposed new architecture and reflect on the following points: @@ -1379,7 +1471,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes - Decide whether the current architecture is innovative. - USE CRITICAL THINKING! -2. **Implementation Mistakes**: Identify any mistakes you may have made in the implementation. Review the code carefully, debug any issues you find, and provide a corrected version. REMEMBER checking "## WRONG Implementation examples" in the prompt. +2. **Implementation Mistakes**: Identify any mistakes you may have made in the implementation. Review the code carefully, debug any issues you find, and provide a corrected version. REMEMBER checking "## Documentation" in the prompt. 3. **Improvement**: Based on the proposed architecture, suggest improvements in the detailed implementation that could increase its performance or effectiveness. In this step, focus on refining and optimizing the existing implementation without altering the overall design framework, except if you want to propose a different architecture if the current is not interesting. - Observe carefully about whether the implementation is actually doing what it is supposed to do. @@ -1390,27 +1482,46 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes Your response should be organized as follows: -"reflection": Provide your thoughts on the interestingness of the architecture, identify any mistakes in the implementation, and suggest improvements. +"reflection": Provide your thoughts on the interestingness of the architecture, identify any mistakes in the implementation, and suggest improvements. Make sure to note which agent should publish the last message. "thought": Revise your previous proposal or propose a new architecture if necessary, using the same format as the example response. "name": Provide a name for the revised or new architecture. (Don't put words like "new" or "improved" in the name.) "code": Provide the corrected code or an improved implementation. Make sure you actually implement your fix and improvement in this code. + +Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. """ Reflexion_prompt_2 = """Using the tips in "## WRONG Implementation examples" section, revise the code further. Your response should be organized as follows: Put your new reflection thinking in "reflection". Repeat the previous "thought" and "name", and update the corrected version of the code in "code". 
+ +Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. +""" + +Reflexion_prompt_3 = """Using the tips in "## CORRECT Implementation examples" section, revise the code further. +Your response should be organized as follows: +Put your new reflection thinking in "reflection". Repeat the previous "thought" and "name", and update the corrected version of the code in "code". + +Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. +""" + +Reflexion_prompt_4 = """Using the official API documentation in "## Documentation" section, revise the code further. +Your response should be organized as follows: +Put your new reflection thinking in "reflection". Repeat the previous "thought" and "name", and update the corrected version of the code in "code". + +Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. 
""" def get_init_archive(): - # return [COT]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] - # return [COT_SC]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] - # return [Reflexion]#, COT_SC, Reflexion, LLM_debate, Take_a_step_back, QD, Role_Assignment] - # return [COT, COT_SC, Reflexion] # LLM_debate, Take_a_step_back, QD, Role_Assignment] - return [LLM_debate] + return [ + COT, + COT_SC, + Reflexion, + LLM_debate, + ] # TODO: Take_a_step_back, QD, Role_Assignment def get_prompt(current_archive, adaptive=False): @@ -1418,11 +1529,17 @@ def get_prompt(current_archive, adaptive=False): archive_str = f"[{archive_str}]" prompt = base.replace("[ARCHIVE]", archive_str) prompt = prompt.replace("[EXAMPLE]", json.dumps(EXAMPLE)) - + prompt = prompt.replace("[DOCUMENTATION]", json.dumps(DOCUMENTATION)) return system_prompt, prompt def get_reflexion_prompt(prev_example): - prev_example_str = "Here is the previous agent you tried:\n" + json.dumps(prev_example) + "\n\n" - r1 = Reflexion_prompt_1.replace("[EXAMPLE]", prev_example_str) if prev_example else Reflexion_prompt_1.replace("[EXAMPLE]", "") - return r1, Reflexion_prompt_2 + prev_example_str = ( + "Here is the previous agent you tried:\n" + json.dumps(prev_example) + "\n\n" + ) + r1 = ( + Reflexion_prompt_1.replace("[EXAMPLE]", prev_example_str) + if prev_example + else Reflexion_prompt_1.replace("[EXAMPLE]", "") + ) + return r1, Reflexion_prompt_2, Reflexion_prompt_3, Reflexion_prompt_4 From 3e88e7f4597e671606c1e6a8462e762534a83cdd Mon Sep 17 00:00:00 2001 From: root Date: Wed, 4 Dec 2024 13:34:16 -0500 Subject: [PATCH 05/21] Clean up; Add README --- .../autogen-core/samples/adas/README.md | 301 +++++++++++ .../autogen-core/samples/adas/adas.py | 465 +++++++++++++++++ .../samples/{common => }/adas/adas_prompt.py | 227 +++++++- .../autogen-core/samples/adas/utils.py | 62 +++ .../adas/utils.py => adas/utils_drop.py} | 113 ++-- .../autogen-core/samples/common/adas/adas.py | 490 ------------------ 6 files changed, 1092 insertions(+), 566 deletions(-) create mode 100644 python/packages/autogen-core/samples/adas/README.md create mode 100644 python/packages/autogen-core/samples/adas/adas.py rename python/packages/autogen-core/samples/{common => }/adas/adas_prompt.py (86%) create mode 100644 python/packages/autogen-core/samples/adas/utils.py rename python/packages/autogen-core/samples/{common/adas/utils.py => adas/utils_drop.py} (75%) delete mode 100644 python/packages/autogen-core/samples/common/adas/adas.py diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/packages/autogen-core/samples/adas/README.md new file mode 100644 index 000000000000..fe590a41e3cc --- /dev/null +++ b/python/packages/autogen-core/samples/adas/README.md @@ -0,0 +1,301 @@ +# User Guide for using ADAS in AutoGen + +## Motivation + +The Automated Design of Agentic Systems [paper](https://arxiv.org/pdf/2408.08435) introduces a way to automatically create powerful agentic system designs. This is motivated by the observation that in the field of machine learning, hand-designed solutions are often replaced by learned solutions over time. + +We intend to implement this concept using the AutoGen framework, with the intention of discovering novel systems built directly with the AutoGen API. + +## Background + +### Summary + +ADAS uses a meta-agent to generate creative and novel agent systems. 
Base agent systems (also known as building blocks) are defined entirely as code, which an LLM (powering the meta-agent) reads and interprets. The LLM then writes new code that defines a novel agent system, which can hopefully be more powerful at accomplishing the task at hand.
+
+### Key Concepts
+
+- **Agent System:** A software system designed to perform tasks autonomously. It can include one or more agents (which we will refer to as base agents) and should be able to complete a task end-to-end (E2E), from receiving input to producing a final output. Examples of an Agent System include “Chain-of-Thought” reasoning and planning, or “Self-Reflection”.
+- **Building block:** A fundamental component or module that can be used to construct more complex systems. These building blocks are the basic units that can be combined and recombined in various ways to create different agentic systems. Effective building blocks include “Chain-of-Thought” reasoning and planning, or “Self-Reflection”.
+- **Base agent:** The agent(s) within an Agent System that interact with each other using the event-based messaging protocol defined by the AutoGen 0.4 API, and try to accomplish the task as defined by the benchmark.
+- **Foundation Models (FMs):** Used as modules within agentic systems for tasks requiring flexible reasoning and planning. Examples include GPT-3.5, GPT-4o, Claude-Sonnet, Llama-70B, Gemini, etc.
+- **Compound Agent System:** A complex system composed of multiple simpler agentic systems or building blocks. These individual components work together to perform more sophisticated tasks than they could individually. By combining building blocks, one can create a more powerful and versatile agentic system capable of handling a wide range of tasks and challenges.
+- **Meta Agent Search:** An algorithm where a meta-agent iteratively programs new agents, tests their performance, and refines them based on an archive of previous discoveries.
+- **Archive:** A file containing a list of 1) seed Agent Systems (Chain-of-Thought, Self-Reflection, etc.), which are manually defined, or 2) Agent Systems discovered by the meta-agent.
+- **Meta Agent:** The agent that, given context of the benchmark and the archive of Agent Systems, tries to write code for novel Agent Systems.
+
+### Methodology
+
+- **Search Space:** Agents are defined in code, allowing the discovery of any possible agentic system.
+- **Evaluation:** In the original ADAS paper, the meta-agent evaluates new agents on tasks across multiple domains, including coding, science, and math. We can adapt our code/dataset/evaluation to suit our purposes.
+
+### Performance
+
+In the original paper, the discovered agents significantly outperformed state-of-the-art hand-designed agents, demonstrating robustness and generality across domains.
+
+To see the results of early experiments with ADAS in AutoGen, please see the Results section.
+
+## ADAS in AutoGen
+
+We have refactored the building block Agent Systems found in the original ADAS code to run using the AutoGen API. Specifically, we decided to implement these Agent Systems at the AutoGen-Core level of abstraction (rather than at the AutoGen-AgentChat level).
+
+The vision for going down this path is that the meta-agent can design, using AutoGen-Core building blocks, a new (multi-)agent system, which, if proven useful (after a period of testing and adoption by the team), can be incorporated into the official AgentChat API.
+
+See this document for more on the design tradeoffs between the AutoGen-Core and AutoGen-AgentChat APIs.
+
+### 4 manually crafted Agent Systems serving as the seeds to the archive
+- More will be added over time
+
+### Prompt to Meta-Agent
+
+- Instructions: Generate novel code with the name and thought of the new system
+- Output and formatting requirements: Must be JSON, with `thought`, `name`, `code` (with the `forward` function)
+- Examples of how to use or not use the AutoGen-Core API
+  - Wrong ways to use the AutoGen-Core API
+  - Correct ways to use the AutoGen-Core API
+- Historical context (archive) of previous Agent Systems
+- Documentation from the official AutoGen website. Currently we only parse .md and .ipynb files from the [core-concepts](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/core-concepts/index.html) and [framework](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/index.html) sections.
+
+### Meta-Agent does 5 iterations of LLM calls to create and edit code
+
+- The original prompt contains all the instructions (generate novel code with the name and thought of the new system), output and formatting requirements, examples of how to use or not use the API, and historical context (archive) of previous Agent Systems.
+- 4 rounds of reflection:
+  - Round 1 to reflect on interestingness, implementation mistakes, and improvement
+  - Round 2 to revise based on the tips in the Wrong Implementation section of the original prompt
+  - Round 3 to revise based on the tips in the Correct Implementation section
+  - Round 4 to revise based on the tips from the official API documentation
+
+### Meta-Agent will try again fresh if it encounters (code compilation) errors when trying to execute
+
+An example of an exception is the following:
+```
+Error during evaluation:\nClosureAgent.register() takes 4 positional arguments but 5 positional arguments (and 1 keyword-only argument) were given\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", source='adas_agent')
+```
+
+Note: The `adas.py` script can still get stuck even if the code of the agent system compiles, because the agent system itself can hang when a message is published to a topic that no agent subscribes to. See this [section](#the-code-that-the-meta-agent-generates-compiles-but-hangs-during-execution) under the Troubleshooting section for details on how to address this issue.
+
+### Notable arguments to the script
+
+Please see the `adas.py` file for details of all available settings.
+
+- `data_filename`: the full path of the dataset location
+- `benchmark_specific_utils_file`: benchmark-specific utility file to load the dataset and also evaluate the outputs. This file must contain the `load_dataset` and `compute_metrics` functions.
+- `meta_agent_model_config`: JSON string of the `AzureOpenAIChatCompletionClient` settings for the Meta-Agent.
+- `base_agent_model_config`: JSON string of the `AzureOpenAIChatCompletionClient` settings for the Base Agent.
+- `n_generation`: number of generations of new agents that the meta-agent tries to discover
+- `expr_name`: name of the output file containing both the original/seed and newly generated agent systems, as well as their fitness scores.
+- `max_workers`: the number of threads to spin up in a `ThreadPoolExecutor`, to parallelize the execution of the particular Agent System that is currently being evaluated.
+
+## QuickStart
+
+### Install AutoGen-Core API
+
+Follow the instructions here: [Installation — AutoGen](https://github.com/microsoft/autogen.git). Here is a summary:
+
+```bash
+python3 -m venv .venv
+source .venv/bin/activate
+
+# Install package at latest dev tag
+pip install 'autogen-core==0.4.0.dev6'
+# Or install in editable mode if you are modifying/testing AutoGen code at the same time
+# git clone -b yeandy_adas https://github.com/yeandy/autogen.git
+cd autogen/python
+pip install -e packages/autogen-core
+```
+
+### Agent System code definitions
+The ADAS framework will run E2E agent systems that are defined entirely inside a Python function. The function that encapsulates this logic is called `forward`, and it takes two arguments, `task` and `model_client_kwargs`. It looks like this:
+
+```python
+def forward(self, task, model_client_kwargs):
+    # Agent logic
+    result = model_client.create(task, ...)
+    return result
+```
+The first argument is called `task` (represented as a string), which includes the prompt and input data. For example, the string could look like this:
+```
+You will be asked to read a passage and answer a question.
+
+# Examples:
+Passage: As of the census of 2000, there were 952 people, 392 households, and 241 families residing in the village. The population density was 952.9 people per square mile (367.6/km²). There were 449 housing units at an average density of 449.4 per square mile (173.4/km²). The racial makeup of the village was 96.11% White (U.S. Census), 0.95% African American (U.S. Census) or Race (United States Census), 0.11% Native American (U.S. Census), 0.11% Asian (U.S. Census), 0.21% from Race (United States Census), and 2.52% from two or more races. 1.05% of the population were Hispanics in the United States or Latino (U.S. Census) of any race.
+Question: How many more people, in terms of percentage, were from two or more races compared to being solely Native American or solely Asian?
+Answer: 2.3
+
+# Your Task
+---
+```
+
+The second argument, `model_client_kwargs`, is a dictionary that contains information about what type of LLM to use for the base agents. For example, it could look like this. To pass this information to the script, please see the following section on passing a JSON string of this information as a flag to the `adas.py` command.
+```python
+agent_model_kwargs = {
+    'api_version': '2024-08-01-preview',
+    'azure_endpoint': 'https://-aoai1.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2024-08-01-preview',
+    'model_capabilities': {'function_calling': True, 'json_output': True, 'vision': True},
+    'azure_ad_token_provider': 'DEFAULT',
+    'model': 'gpt-35-turbo'
+}
+```
+Finally, the output of this `forward` function should be the answer that the agent system comes up with.
+
+There are several agent systems, along with their entire code inside their `forward` functions, already seeded in the archive. You can find this in the `adas_prompt.py` file.
+
+#### Adding new agent systems (optional)
+If you want to seed additional agent systems to the archive, follow the pattern used by the existing seed agent systems, as shown in the sketch below. Make sure to include `name`, `thought`, and `code`. Additionally, make sure to add the new system to the `get_init_archive()` function inside the `adas.py` file.
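+
+For illustration, here is a minimal sketch of what a new seed entry could look like. The dictionary keys mirror the existing seeds; the entry name, prompt text, and agent behavior are hypothetical, and the `forward` body reuses the same Azure model-client boilerplate that the seeds in `adas_prompt.py` already share:
+
+```python
+# Hypothetical seed entry; follow the same structure as the existing seeds (COT, COT_SC, etc.).
+MY_DIRECT_AGENT = {
+    "thought": "A minimal baseline: ask the model to answer the task directly.",
+    "name": "Direct Answer Agent",
+    "code": """def forward(self, task, model_client_kwargs):
+    import asyncio
+    from autogen_core.components.models import SystemMessage, UserMessage
+    from autogen_ext.models import AzureOpenAIChatCompletionClient
+    from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+
+    token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
+
+    # Create the Azure OpenAI model client, same pattern as the other seeds.
+    model_client = AzureOpenAIChatCompletionClient(
+        model=model_client_kwargs["model"],
+        api_version=model_client_kwargs["api_version"],
+        azure_endpoint=model_client_kwargs["azure_endpoint"],
+        azure_ad_token_provider=token_provider,
+        model_capabilities={"vision": True, "function_calling": True, "json_output": True},
+    )
+
+    async def main():
+        # Single direct call to the model; the returned string is the system's answer.
+        response = await model_client.create([
+            SystemMessage("You are a helpful assistant."),
+            UserMessage(content=task, source="user"),
+        ])
+        assert isinstance(response.content, str)
+        return response.content
+
+    return asyncio.run(main())
+""",
+}
+```
+
+Because the model-client boilerplate is identical across seeds, new entries can copy it unchanged and only vary the agent logic inside `main()`.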
+
+Note: If you add a new agent system after you’ve started generating new Agent Systems (next [section](#generate-new-agent-systems)), the meta-agent will not pick up the new seed agent system. This is because it first looks for the results file defined by the `expr_name` flag and uses that file as the agent archive, instead of the `adas_prompt.py` file.
+
+### Generate new Agent Systems
+#### Prepare your dataset
+First download your dataset locally.
+
+Then create a copy of the file called `utils_benchmark_template.py`, and name it with a suffix corresponding to your benchmark. For example, see `utils_drop.py`. Place this under the `adas` directory.
+
+Under the `load_dataset` function, add logic to load in your dataset. Do any preprocessing that is required, such as adding instructions or any few-shot examples to the actual input data.
+
+```python
+# utils_my_benchmark.py
+def load_dataset(filename: str) -> List[Dict[str, Any]]:
+    df = pd.read_csv(filename)
+    data = [
+        {"inputs": "Your job is to solve this math problem: " + inputs, "targets": targets}
+        for inputs, targets in zip(df["inputs"], df["targets"])
+    ]
+    return data
+```
+#### Prepare your evaluation function
+In the same `utils_my_benchmark.py` file, add logic to the `compute_metrics` function to evaluate the ground truth against the predictions made by the agent system.
+
+```python
+# utils_my_benchmark.py
+def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]:
+    return list(np.square(np.subtract(np.array(predictions), np.array(labels))))
+```
+### Choose the LLMs
+#### Choose the LLM for the meta-agent
+Our recommendation is GPT-4o, but you can choose whatever model you have access to on Azure.
+
+o1-preview is also reported to be great at writing code, and we suggest you try that if you have access. Caveat: the beta version has [limitations](https://platform.openai.com/docs/guides/reasoning/beta-limitations#beta-limitations) such as not supporting `SystemMessage`s.
+
+This should be passed as a JSON string to the `meta_agent_model_config` flag.
+```bash
+--meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://andyye-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}'
+```
+#### Choose the LLM for the base agents used within the agent system
+The paper authors use GPT-3.5 (for cost purposes), but we recommend GPT-4o for better quality.
+
+This should be passed as a JSON string to the `base_agent_model_config` flag.
+
### Run ADAS
```bash
python packages/autogen-core/samples/adas/adas.py \
    --data_filename=/home//ADAS/dataset/drop_v0_dev.jsonl.gz \
    --n_generation=150 \
    --expr_name=drop_o1_preview_meta_gpt4o_base_results \
    --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}' \
    --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06"}' \
    --benchmark_specific_utils_file='/home//autogen/python/packages/autogen-core/samples/adas/utils_drop.py'
```
You can increase the number of generations that the meta-agent tries to create. Note that if a generated Agent System fails to compile, that generation number is skipped (a potential bug, or at least confusing behavior). You can also increase the number of threads used to evaluate the agent system at any time.
```bash
python3 adas.py --n_generation 50 --max_workers 10
```
## Results for DROP benchmark
### Best Agent System that the Meta-Agent discovered
TODO

See this [section]() for the full list of discovered Agent Systems.

### Performance with different LLMs
These are the results on the DROP benchmark for different combinations of the LLMs used by the Meta-Agent and the Base Agents:

| Meta-Agent | Base Agent |
|------------|------------|
| o1-preview | GPT-4o     |
| GPT-3.5    | o1-preview |
| TODO       | TODO       |
| TODO       | TODO       |
| GPT-4o     | TODO       |
| TODO       | TODO       |
| TODO       | TODO       |

## Troubleshooting
### Exceeding the token limit
If you are exceeding the token rate limit on your Azure AI Studio deployment, try one of the following mitigations.

#### Increase the Rate Limit in Azure AI Studio
TODO: Insert image

#### Set `max_workers=1`
This can be passed as an argument to `adas.py`.

#### Add sleep time after each thread runs the `forward` function
Setting it to 10 seconds is a good place to start:
```bash
adas.py --thread_sleep=10
```
### The code that the meta-agent generates does not compile
You may observe issues related to incorrect JSON or string formatting. These can occur after the meta-agent returns from any of its 5 prompts (the initial prompt plus the four reflexion prompts). For example:
```
Expecting ',' delimiter: line 5 column 11 (char 1626)
```
Other errors you may see include "unterminated string literal", etc. In these cases, the meta-agent hits an exception when trying to dynamically execute the generated code, and it retries the whole generation process.

You shouldn't need to do anything. Just note that because the meta-agent starts over, it will take longer to generate a new Agent System. Additionally, the script will increment the `Generation` number, so you may see some generations being skipped in the results file. (This behavior could probably be altered to make more sense to the user.)
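For reference, the "Expecting ',' delimiter" message is a standard `json.JSONDecodeError`. A minimal sketch of how such a failure surfaces when the raw meta-agent response is parsed (`raw_response` is a hypothetical, deliberately truncated example, not actual meta-agent output):

```python
import json

# Hypothetical truncated response: the closing brace is missing.
raw_response = '{"thought": "...", "name": "...", "code": "def forward(self, task, model_client_kwargs): ..."'

try:
    next_solution = json.loads(raw_response)
except json.JSONDecodeError as e:
    # Prints something like: Expecting ',' delimiter: line 1 column 96 (char 95)
    print(f"Malformed JSON from meta-agent: {e}")
```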
### The code that the meta-agent generates does compile, but hangs during execution

The Agent System code compiles with no issue, but the system hangs during execution of the AutoGen code. There are a few possible causes and solutions.

#### Messages published to topics to which no Agents are subscribed
```
INFO:autogen_core:Calling message handler for output_result with message type FinalAnswer published by coordinator_agent/default
ERROR:autogen_core:Error processing publish message
Traceback (most recent call last):
  File "/home/andyye/autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 372, in _process_publish
    agent = await self._get_agent(agent_id)
  File "/home/andyye/autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 620, in _get_agent
    raise LookupError(f"Agent with name {agent_id.type} not found.")
LookupError: Agent with name output_result not found.
```
The easiest solution is to terminate the program with `Ctrl + \`, and then rerun the `adas.py` script.

#### Certain Agent Systems will hang if `max_workers` is not equal to 1

If `max_workers` is not equal to 1, multiple threads are spun up to run the agent systems in parallel on the validation dataset. This has been observed to cause hangs for the LLM_debate Agent System. The solution is to terminate the program, set `max_workers=1`, and rerun with this setting just while that agent system is being executed and evaluated. Once it has finished evaluating, you can terminate again and resume without `max_workers=1`.

The reason for this is unknown.

#### If you want to debug
1. Copy the code that the meta-agent produces.
2. Put it in triple quotes (`"""def forward():\n    pass"""`) and print that string.
3. Copy what is printed to the console into a new file, and run it yourself to assist with debugging (see the sketch below).
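A minimal sketch of that workflow; `generated_code` is a hypothetical stand-in for the string you copied from the meta-agent's output:

```python
# Hypothetical debugging helper for the three steps above.
generated_code = """def forward(self, task, model_client_kwargs):
    pass"""

print(generated_code)  # step 2: print the triple-quoted string

# Step 3: save the printed source to a standalone file so you can run
# and debug it directly, e.g. `python generated_forward.py`.
with open("generated_forward.py", "w") as f:
    f.write(generated_code + "\n")
```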
+
### Event loop is closed

If you see something like this during execution of the Agent System code, it should be fine:
```
INFO:autogen_core:Calling message handler for reasoning_agent with message type Question published by Unknown
ERROR:asyncio:Task exception was never retrieved
future: exception=RuntimeError('Event loop is closed')>
Traceback (most recent call last):
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpx/_client.py", line 2031, in aclose
    await self._transport.aclose()
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpx/_transports/default.py", line 389, in aclose
    await self._pool.aclose()
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 313, in aclose
    await self._close_connections(closing_connections)
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 305, in _close_connections
    await connection.aclose()
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection.py", line 171, in aclose
    await self._connection.aclose()
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/http11.py", line 265, in aclose
    await self._network_stream.aclose()
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_backends/anyio.py", line 55, in aclose
    await self._stream.aclose()
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/anyio/streams/tls.py", line 202, in aclose
    await self.transport_stream.aclose()
  File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 1202, in aclose
    self._transport.close()
  File "/usr/lib/python3.10/asyncio/selector_events.py", line 706, in close
    self._loop.call_soon(self._call_connection_lost, None)
  File "/usr/lib/python3.10/asyncio/base_events.py", line 753, in call_soon
    self._check_closed()
  File "/usr/lib/python3.10/asyncio/base_events.py", line 515, in _check_closed
    raise RuntimeError('Event loop is closed')
RuntimeError: Event loop is closed
INFO:autogen_core.events:{"prompt_tokens": 720, "completion_tokens": 104, "type": "LLMCall"}
INFO:autogen_core.events:{"prompt_tokens": 720, "completion_tokens": 149, "type": "LLMCall"}
INFO:autogen_core.events:{"prompt_tokens": 720, "completion_tokens": 149, "type": "LLMCall"}
INFO:autogen_core.events:{"prompt_tokens": 720, "completion_tokens": 149, "type": "LLMCall"}
INFO:autogen_core.events:{"prompt_tokens": 720, "completion_tokens": 170, "type": "LLMCall"}
INFO:autogen_core:Publishing message of type Answer to all subscribers: {'content': 'To solve this problem, we need to carefully read the passage and identify the distances of the field goals mentioned in each quarter:\n\n1. Sebastian Janikowski got a 38-yard field goal.\n2. Jason Elam got a 23-yard field goal.\n3. Jason Elam got a 20-yard field goal.\n\nWe need to find out how many field goals of the game were longer than 40 yards. Only one field goal was longer than 40 yards, which was Sebastian Janikowski's 52-yard field goal attempt in overtime, but it didn't count because it was no good.\n\nTherefore, the number of field goals longer than 40 yards in the game is 0.\n\nSo the final answer is "0".'}
INFO:autogen_core:Calling message handler for output_result with message type Answer published by reasoning_agent/default
```

The thread still seems to finish with no issue and will still return an answer.

The reason for this is unknown.
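One plausible (unconfirmed) explanation is that an async resource, here the underlying `httpx` client, is torn down only after the event loop that created it has already exited. For reference, the usual way to avoid this class of warning in plain `asyncio`/`httpx` code is to close async clients inside the loop that created them. A minimal sketch of that pattern, not a change that has been made to this sample:

```python
import asyncio

import httpx


async def main() -> int:
    # The async context manager closes the client before asyncio.run()
    # tears down the event loop, so no teardown runs on a closed loop.
    async with httpx.AsyncClient() as client:
        response = await client.get("https://example.com")
        return response.status_code


print(asyncio.run(main()))
```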
+ +## Ongoing work + +- Finish adding Quality-Diversity, Role_Assignment, and Take_A_Step_Back Agent Systems to the archive +- Improve prompts to the meta-agent to reduce code errors +- Add extra_create_args options such as `temperature`, `max_completion_tokens`, `top_p` in the model client `create()`. i.e. `extra_create_args={"temperature": 0.0}` + +## Appendix + diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py new file mode 100644 index 000000000000..fcc6b7ab5a4b --- /dev/null +++ b/python/packages/autogen-core/samples/adas/adas.py @@ -0,0 +1,465 @@ +""" +To run, type +`python packages/autogen-core/samples/common/adas/adas.py --data_filename=` + +""" + +import argparse +import asyncio +import importlib +import json +import logging +import os +import random +import time +import uuid +from collections import namedtuple +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from typing import Dict, List + +import numpy as np +from adas_prompt import get_init_archive, get_prompt, get_reflexion_prompt +from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core.base import MessageContext +from autogen_core.components import DefaultTopicId, RoutedAgent, default_subscription, message_handler +from autogen_core.components.models import ( + AssistantMessage, + ChatCompletionClient, + LLMMessage, + SystemMessage, + UserMessage, +) +from autogen_ext.models import AzureOpenAIChatCompletionClient +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from tqdm import tqdm +from utils import bootstrap_confidence_interval + +logging.basicConfig(level=logging.WARNING) +logging.getLogger("autogen_core").setLevel(logging.DEBUG) + +Info = namedtuple("Info", ["name", "author", "content", "iteration_idx"]) + +SEARCHING_MODE = True + + +@dataclass +class ADASTask: + task: str + + +@dataclass +class LLMMessageList: + llm_message_list: List[LLMMessage] + + +@dataclass +class LLMResponse: + json_content: Dict[str, str] + + +class AgentSystem: + def __init__(self) -> None: + pass + + +def generate_task(input_infos) -> str: + # construct input infos text + input_infos_text = "" + for input_info in input_infos: + if isinstance(input_info, Info): + (field_name, author, content, iteration_idx) = input_info + else: + continue + + if field_name == "task": + input_infos_text += f"# Your Task:\n{content}\n\n" + elif iteration_idx != -1: + input_infos_text += f"### {field_name} #{iteration_idx + 1}:\n{content}\n\n" + else: + input_infos_text += f"### {field_name}:\n{content}\n\n" + + prompt = input_infos_text + "# Instruction: \n" + return prompt + + +def evaluate_forward_fn(arguments, forward_str): + # Dynamically import benchmark-specific module given the path to the python file. + # File must contain load_dataset and compute_metrics functions + print(f"Loading functions from {arguments.benchmark_specific_utils_file}") + spec = importlib.util.spec_from_file_location("module_name", arguments.benchmark_specific_utils_file) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + # dynamically define forward() + # modified from https://github.com/luchris429/DiscoPOP/blob/main/scripts/launch_evo.py + namespace = {} + print(f"forward str {forward_str}") + exec(forward_str, globals(), namespace) + names = list(namespace.keys()) + if len(names) != 1: + raise AssertionError(f"{len(names)} things in namespace. 
Please only provide 1") + func = namespace[names[0]] + if not callable(func): + raise AssertionError(f"{func} is not callable") + AgentSystem.forward = func + + # set seed 0 for valid set + examples = module.load_dataset(arguments.data_filename)[ + 1:-1 + ] # first one and the last one is for few-shot examples + random.seed(arguments.shuffle_seed) + random.shuffle(examples) + + if SEARCHING_MODE: + examples = examples[: arguments.valid_size] * arguments.n_repeat + else: + examples = ( + examples[arguments.valid_size : arguments.valid_size + arguments.test_size] * arguments.n_repeat + ) + + questions = [example["inputs"] for example in examples] + answers = [example["targets"] for example in examples] + + print(f"problem length: {len(examples)}") + max_workers = min(len(examples), arguments.max_workers) if arguments.multiprocessing else 1 + + task_queue = [] + for q in questions: + taskInfo = Info("task", "User", q, -1) + task_queue.append((taskInfo, AgentSystem())) + + def call_forward(agent_task_queue): + taskInfo, agent = agent_task_queue + print(f"taskInfo {taskInfo}") + task = generate_task([taskInfo]) + + result = agent.forward(task, arguments.base_agent_model_config) + if arguments.thread_sleep: + print(f"Sleeping for {arguments.thread_sleep}") + time.sleep(arguments.thread_sleep) + return result + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + results = list( + tqdm(executor.map(call_forward, task_queue), total=len(task_queue)) + ) + + acc_list = module.compute_metrics(results, answers) + + print(f"f1: {bootstrap_confidence_interval(acc_list)}") + return acc_list + + +@default_subscription +class ADASAgent(RoutedAgent): + """An agent that performs ADAS.""" + + def __init__( + self, model_client: ChatCompletionClient, system_prompt: str, args, archive + ) -> None: + super().__init__("An agent searching agent.") + self._args = args + self._archive = archive + self._model_client = model_client + self._session_memory: Dict[str, List[ADASTask]] = {} + + self._system_messages: List[LLMMessage] = [ + # SystemMessage is not allowed in o1-preview API. + # SystemMessage( + AssistantMessage( + content=system_prompt, + source=self.id.type, + ) + ] + self._chat_history: List[LLMMessage] = [] + self._model_client = model_client + + @message_handler + async def handle_task( + self, message: LLMMessageList, ctx: MessageContext + ) -> LLMResponse: + print("Meta-Agent making a LLM call...") + logging.info(f"{self._description} received message: {message}") + model_result = await self._model_client.create( + self._system_messages + message.llm_message_list + ) + + assert isinstance(model_result.content, str) + print(f"Model client result: {model_result.content}") + print("Loading the json string of the content...") + json_content = json.loads(model_result.content) + print("Finished loading the json string of the content") + return LLMResponse(json_content=json_content) + + @message_handler + async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None: + # Store the messages in a temporary memory for this request only. 
+ session_id = str(uuid.uuid4()) + self._session_memory.setdefault(session_id, []).append(message) + + # Process archive + file_path = os.path.join(self._args.save_dir, f"{self._args.expr_name}_run_archive.json") + if os.path.exists(file_path): + with open(file_path, "r") as json_file: # noqa: ASYNC101 + archive = json.load(json_file) + if "generation" in archive[-1] and isinstance( + archive[-1]["generation"], int + ): + start = archive[-1]["generation"] + else: + start = 0 + else: + archive = get_init_archive() + start = 0 + + for solution in archive: + if "fitness" in solution: + continue + + solution["generation"] = "initial" + print(f"============Initial Archive: {solution['name']}=================") + try: + acc_list = evaluate_forward_fn(self._args, solution["code"]) + except Exception as e: + print("During evaluating initial archive:") + print(e) + continue + + fitness_str = bootstrap_confidence_interval(acc_list) + solution["fitness"] = fitness_str + + # save results + os.makedirs(os.path.dirname(file_path), exist_ok=True) + with open(file_path, "w") as json_file: # noqa: ASYNC101 + json.dump(archive, json_file, indent=4) + + # Initial prompt + for n in range(start, self._args.n_generation): + print(f"============Generation {n + 1}=================") + + # Set prompt with updated archive (for n > 0) + _, prompt = get_prompt(archive) + + msg_list = [UserMessage(content=prompt, source=self.metadata["type"])] + try: + response = await self.send_message(LLMMessageList(msg_list), self.id) + next_solution = response.json_content + ( + reflexion_prompt_1, + reflexion_prompt_2, + reflexion_prompt_3, + reflexion_prompt_4, + ) = get_reflexion_prompt(self._archive[-1] if n > 0 else None) + print(f"--After initial prompt {response}") + + # Reflexion 1 + new_messages = msg_list + [ + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=reflexion_prompt_1, source=self.metadata["type"] + ), + ] + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) + next_solution = response.json_content + print(f"--After reflexion_prompt_1 {response}") + + # Reflexion 2 + new_messages = new_messages + [ + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=reflexion_prompt_2, source=self.metadata["type"] + ), + ] + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) + next_solution = response.json_content + print(f"--After reflexion_prompt_2 {next_solution}") + + # Reflexion 3 + new_messages = new_messages + [ + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=reflexion_prompt_3, source=self.metadata["type"] + ), + ] + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) + next_solution = response.json_content + print(f"--After reflexion_prompt_3 {next_solution}") + + # Reflexion 4 + new_messages = new_messages + [ + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=reflexion_prompt_4, source=self.metadata["type"] + ), + ] + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) + next_solution = response.json_content + print(f"--After reflexion_prompt_4 {next_solution}") + except Exception as e: + print("Exception occured during the generation of new solution:") + print(e) + n -= 1 + continue + + acc_list = [] + for _ in range(self._args.debug_max): + print("Evaluate code of newly 
generated solution. Debug loop...") + try: + print(next_solution["code"]) + acc_list = evaluate_forward_fn(self._args, next_solution["code"]) + if np.mean(acc_list) < 0.01 and SEARCHING_MODE: + raise Exception("All 0 accuracy") + break + except Exception as e: + print("During evaluation:") + print(e) + new_messages = new_messages + [ + AssistantMessage( + content=str(next_solution), source=self.metadata["type"] + ), + UserMessage( + content=f"Error during evaluation:\n{e}\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", + source=self.metadata["type"], + ), + ] + try: + response = await self.send_message( + LLMMessageList(new_messages), self.id + ) + next_solution = response.json_content + except Exception as e: + print("During LLM generate new solution:") + print(e) + continue + continue + if not acc_list: + n -= 1 + continue + + fitness_str = bootstrap_confidence_interval(acc_list) + next_solution["fitness"] = fitness_str + next_solution["generation"] = n + 1 + + if "debug_thought" in next_solution: + del next_solution["debug_thought"] + if "reflection" in next_solution: + del next_solution["reflection"] + archive.append(next_solution) + + # save results + os.makedirs(os.path.dirname(file_path), exist_ok=True) + with open(file_path, "w") as json_file: # noqa: ASYNC101 + json.dump(archive, json_file, indent=4) + + +async def main(arguments) -> None: + token_provider = get_bearer_token_provider( + DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + ) + # Create an AzureOpenAI model client. + client = AzureOpenAIChatCompletionClient( + model= arguments.meta_agent_model_config["model"], + api_version=arguments.meta_agent_model_config["api_version"], + azure_endpoint=arguments.meta_agent_model_config["azure_endpoint"], + azure_ad_token_provider=token_provider, + model_capabilities=arguments.meta_agent_model_config["model_capabilities"], + ) + + runtime = SingleThreadedAgentRuntime() + + archive = get_init_archive() + system_prompt, prompt = get_prompt(archive) + + await ADASAgent.register( + runtime, + "adas_agent", + lambda: ADASAgent( + model_client=client, + system_prompt=system_prompt, + args=arguments, + archive=archive, + ), + ) + + runtime.start() + + # Publish an initial message to trigger the ADAS search to start. + await runtime.publish_message( + message=ADASTask(task=prompt), + topic_id=DefaultTopicId(), + ) + + # Keep processing messages until idle. + await runtime.stop_when_idle() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Run ADAS") + parser.add_argument( + "--verbose", action="store_true", help="Enable verbose logging." 
+ ) + parser.add_argument( + "--data_filename", type=str, default="dataset/drop_v0_dev.jsonl.gz" + ) + parser.add_argument("--valid_size", type=int, default=128) + parser.add_argument("--test_size", type=int, default=800) + parser.add_argument("--shuffle_seed", type=int, default=0) + parser.add_argument("--n_repeat", type=int, default=1) + parser.add_argument("--multiprocessing", action="store_true", default=True) + parser.add_argument("--max_workers", type=int, default=48) + parser.add_argument("--save_dir", type=str, default="results/") + parser.add_argument("--expr_name", type=str, default="drop_gpt3.5_results") + parser.add_argument("--n_generation", type=int, default=30) + parser.add_argument("--debug_max", type=int, default=3) + parser.add_argument( + "--thread_sleep", + type=int, + default=0, + help="Amount of time to sleep between new threads." + "This is to mitigate any errors due to request limits with Azure or AutoGen", + ) + parser.add_argument( + "--benchmark_specific_utils_file", + type=str, + default="/home/andyye/autogen/python/packages/autogen-core/samples/adas/utils_drop.py", + help="File must contain load_dataset and compute_metrics functions." + ) + parser.add_argument( + "--meta_agent_model_config", + type=str, + default="{}", + help="JSON string of the AzureOpenAIChatCompletionClient settings for the Meta-Agent." + ) + parser.add_argument( + "--base_agent_model_config", + type=str, + default="{}", + help="JSON string of the AzureOpenAIChatCompletionClient settings for the Base Agent." + ) + arguments = parser.parse_args() + arguments.base_agent_model_config = json.loads(arguments.base_agent_model_config) + arguments.meta_agent_model_config = json.loads(arguments.meta_agent_model_config) + if arguments.verbose: + logging.basicConfig(level=logging.WARNING) + logging.getLogger("autogen_core").setLevel(logging.DEBUG) + handler = logging.FileHandler("adas.log") + logging.getLogger("autogen_core").addHandler(handler) + + asyncio.run(main(arguments)) diff --git a/python/packages/autogen-core/samples/common/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py similarity index 86% rename from python/packages/autogen-core/samples/common/adas/adas_prompt.py rename to python/packages/autogen-core/samples/adas/adas_prompt.py index 3af71df5a779..1f059175fad6 100644 --- a/python/packages/autogen-core/samples/common/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/adas/adas_prompt.py @@ -1,4 +1,5 @@ import json + import requests from github import Github @@ -894,6 +895,194 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct """, } +Tree_of_thought = { + "thought": "By using a tree search strategy, the model can explore multiple branches of thoughts, where at any step of the problem, multiple independent thoughts are generated and evaluated to find the most useful ones.", + "name": "Tree of Thought", + "code": """def forward(self, task, model_client_kwargs): + import asyncio + import logging + from dataclasses import dataclass + from typing import List, Dict, Any + from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId + from autogen_core.components import default_subscription, RoutedAgent, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId + from autogen_core.components.models import ( + ChatCompletionClient, + SystemMessage, + UserMessage, + AssistantMessage, + LLMMessage, + ) + from autogen_ext.models import 
AzureOpenAIChatCompletionClient + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + from autogen_core.application.logging import TRACE_LOGGER_NAME + + # Configure logging as per documentation + logging.basicConfig(level=logging.WARNING) + logger = logging.getLogger(TRACE_LOGGER_NAME) + logger.setLevel(logging.INFO) + token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") + + # Create an AzureOpenAI model client. + model_client = AzureOpenAIChatCompletionClient( + model=model_client_kwargs['model'], + api_version=model_client_kwargs['api_version'], + azure_endpoint=model_client_kwargs['azure_endpoint'], + azure_ad_token_provider=token_provider, + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, + ) + + @dataclass + class Message: + content: str + + @dataclass + class FinalAnswer: + answer: str + + @default_subscription + class TreeOfThoughtsAgent(RoutedAgent): + def __init__(self, model_client: ChatCompletionClient, max_depth: int = 3, beam_width: int = 3): + super().__init__("TreeOfThoughtsAgent") + self._model_client = model_client + self._max_depth = max_depth + self._beam_width = beam_width + self._system_messages = [ + SystemMessage( + content="You are a helpful assistant who reasons step-by-step to solve complex problems.") + ] + + async def generate_thoughts(self, prompt: List[LLMMessage], num_thoughts: int, cancellation_token) -> List[str]: + # Generate multiple thoughts using the model + thoughts = [] + # Create multiple async tasks to generate thoughts in parallel + tasks = [] + for _ in range(num_thoughts): + tasks.append(self._model_client.create( + prompt, + extra_create_args={"temperature": 1.0}, + cancellation_token=cancellation_token, + )) + responses = await asyncio.gather(*tasks) + for response in responses: + thoughts.append(response.content.strip()) + return thoughts + + async def evaluate_thoughts(self, thoughts: List[str], ctx: MessageContext) -> List[str]: + # Batch evaluation of thoughts + eval_prompt = [ + SystemMessage(content="You are an assistant that evaluates reasoning steps for solving a problem."), + UserMessage( + content=f"Evaluate the following thoughts for their usefulness in solving the problem. Rank them from most useful to least useful and provide the rankings.\\n\\nThoughts:\\n" + "\\n".join( + [f"{i+1}. 
{t}" for i, t in enumerate(thoughts)]), + source="user" + ) + ] + eval_response = await self._model_client.create( + eval_prompt, + cancellation_token=ctx.cancellation_token, + ) + # Parse the response to extract rankings + rankings_text = eval_response.content.strip() + # For simplicity, assume the model outputs the rankings as a list of numbers + rankings = [] + for line in rankings_text.split('\\n'): + line = line.strip() + if line and line[0].isdigit(): + rankings.append(int(line[0]) - 1) # Subtract 1 to get index + # Select top-k thoughts + best_thoughts = [thoughts[i] for i in rankings[:self._beam_width]] + return best_thoughts + + @message_handler + async def handle_message(self, message: Message, ctx: MessageContext) -> None: + logger.info(f"Received task: {message.content}") + initial_prompt = self._system_messages + [UserMessage(content=message.content, source="user")] + tree = [[]] # Initialize the tree with an empty path + for depth in range(self._max_depth): + new_branches = [] + logger.info(f"Depth {depth+1}") + for path in tree: + # Build the prompt up to this point + prompt = initial_prompt.copy() + for thought in path: + prompt.append(AssistantMessage(content=thought, source="assistant")) + # Generate thoughts + thoughts = await self.generate_thoughts(prompt, self._beam_width, ctx.cancellation_token) + logger.info(f"Generated thoughts: {thoughts}") + # Evaluate thoughts + best_thoughts = await self.evaluate_thoughts(thoughts, ctx) + logger.info(f"Best thoughts: {best_thoughts}") + # Expand tree with best thoughts + for thought in best_thoughts: + new_path = path + [thought] + new_branches.append(new_path) + # Update tree with new branches + if not new_branches: + logger.info("No more branches to expand.") + break # No more thoughts to expand + tree = new_branches + # After reaching max depth, select the best path + # For simplicity, select the first path + best_path = tree[0] + final_answer = best_path[-1] + logger.info(f"Final answer: {final_answer}") + # Publish the final answer + await self.publish_message( + FinalAnswer(answer=final_answer), + topic_id=TopicId(type="result", source=self.id.key) + ) + + # Main function + async def main(): + # Create a queue to collect the final answer + queue = asyncio.Queue[FinalAnswer]() + + async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None: + await queue.put(message) + + # Initialize runtime + runtime = SingleThreadedAgentRuntime() + + # Register TreeOfThoughtsAgent + await TreeOfThoughtsAgent.register( + runtime, + "TreeOfThoughtsAgent", + lambda: TreeOfThoughtsAgent(model_client) + ) + + # Register ClosureAgent with agent key matching self.id.key (default is "default") + result_topic = TypeSubscription(topic_type="result", agent_type="output_result") + await ClosureAgent.register( + runtime, + "output_result", + output_result, + subscriptions=lambda: [result_topic] + ) + + # Start the runtime + runtime.start() + + # Publish initial message to TreeOfThoughtsAgent + await runtime.publish_message( + Message(content=task), + topic_id=DefaultTopicId() + ) + + # Wait until idle + await runtime.stop_when_idle() + + # Return the final answer + final_message = await queue.get() + return final_message.answer + return asyncio.run(main()) +""", +} + # TODO(yeandy): Take a Step Back currently not used as a seed in the archive. Refactor using the AutoGen API Take_a_step_back = { "thought": "Let LLM first think about the principles involved in solving this task which could be helpful. 
By understanding the underlying principles, the model can better reason through the problem and provide a more accurate solution.", @@ -1358,6 +1547,13 @@ async def main(): await runtime.publish_message(DiverseThoughtTask(task='Who is the most creative composer?'), TopicId("consensus_agent", "default")) ``` +14. This is WRONG: ``` +UserMessage(content=content) +``` +Two arguments are required: The message, as well as the source. Make sure to add the source as the second argument. For example: ``` +UserMessage(content=content, source=self.metadata["type"]) +``` + ## CORRECT Implementation examples: Here are some correct patterns you should follow: @@ -1409,7 +1605,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes return asyncio.run(main()) ``` -This is the format for the `main` function. Make sure that when creating a `ClosureAgent`, you have created `queue` from which you can call `return (await queue.get()).answer` at the very end of the `main` function. The datatype of the Queue should be the final message that the agent system publishes to indicate that the system is terminating. +This is the format for the `main` function. Make sure that when creating a `ClosureAgent`, you have created `queue` from which you can call `return (await queue.get()).answer` at the very end of the `main` function. The datatype of the Queue should be the final message that the agent system publishes to indicate that the system is terminating. The `result_topic` should have a unique `topic_type`, which should be called "result". The agent that publishes the final message MUST publish to the same topic_id 3. This is CORRECT: ``` @@ -1459,13 +1655,12 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct Use the knowledge from the archive and inspiration from academic literature to propose the next interesting agentic system design. THINK OUTSIDE THE BOX. -Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. +Make sure to return in a WELL-FORMED JSON object. Key and values of all JSON entries must be enclosed in double quotes " or \"\"\", and not single quotes \'. To reduce any JSON parsing errors when using the `json.loads()` function, there should be not be any multiline strings in the JSON object. Use the newline character `\n` if necessary. Additionally, to reduce any JSON parsing errors when using the `json.loads()` function, the JSON object itself cannot be multiline. It must be a single line. Finally, do not add any code blocks around the JSON object. """ +Reflexion_prompt_1 = """"[EXAMPLE]Carefully review the proposed new architecture and reflect on the following points: -Reflexion_prompt_1 = f""""[EXAMPLE]Carefully review the proposed new architecture and reflect on the following points: - -1. **Interestingness**: Assess whether your proposed architecture is interesting or innovative compared to existing methods in the archive. 
If you determine that the proposed architecture is not interesting, suggest a new architecture that addresses these shortcomings. +1. **Interestingness**: Assess whether your proposed architecture is interesting or innovative compared to existing methods in the archive. If you determine that the proposed architecture is not interesting, suggest a new architecture that addresses these shortcomings. - Make sure to check the difference between the proposed architecture and previous attempts. - Compare the proposal and the architectures in the archive CAREFULLY, including their actual differences in the implementation. - Decide whether the current architecture is innovative. @@ -1490,38 +1685,40 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct "code": Provide the corrected code or an improved implementation. Make sure you actually implement your fix and improvement in this code. -Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. +Make sure to return in a WELL-FORMED JSON object. Key and values of all JSON entries must be enclosed in double quotes " or \"\"\", and not single quotes \'. To reduce any JSON parsing errors when using the `json.loads()` function, there should be not be any multiline strings in the JSON object. Use the newline character `\n` if necessary. Additionally, to reduce any JSON parsing errors when using the `json.loads()` function, the JSON object itself cannot be multiline. It must be a single line. Finally, do not add any code blocks around the JSON object. """ Reflexion_prompt_2 = """Using the tips in "## WRONG Implementation examples" section, revise the code further. Your response should be organized as follows: Put your new reflection thinking in "reflection". Repeat the previous "thought" and "name", and update the corrected version of the code in "code". -Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. +Make sure to return in a WELL-FORMED JSON object. Key and values of all JSON entries must be enclosed in double quotes " or \"\"\", and not single quotes \'. To reduce any JSON parsing errors when using the `json.loads()` function, there should be not be any multiline strings in the JSON object. Use the newline character `\n` if necessary. Additionally, to reduce any JSON parsing errors when using the `json.loads()` function, the JSON object itself cannot be multiline. It must be a single line. Finally, do not add any code blocks around the JSON object. 
""" Reflexion_prompt_3 = """Using the tips in "## CORRECT Implementation examples" section, revise the code further. Your response should be organized as follows: Put your new reflection thinking in "reflection". Repeat the previous "thought" and "name", and update the corrected version of the code in "code". -Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. +Make sure to return in a WELL-FORMED JSON object. Key and values of all JSON entries must be enclosed in double quotes " or \"\"\", and not single quotes \'. To reduce any JSON parsing errors when using the `json.loads()` function, there should be not be any multiline strings in the JSON object. Use the newline character `\n` if necessary. Additionally, to reduce any JSON parsing errors when using the `json.loads()` function, the JSON object itself cannot be multiline. It must be a single line. Finally, do not add any code blocks around the JSON object. """ Reflexion_prompt_4 = """Using the official API documentation in "## Documentation" section, revise the code further. Your response should be organized as follows: Put your new reflection thinking in "reflection". Repeat the previous "thought" and "name", and update the corrected version of the code in "code". -Make sure to return in a WELL-FORMED JSON object. Key and values of all entries must be enclosed in double quotes " or \"\"\", and not single quotes '. Additionally, any multiline string in the JSON object should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. The JSON object itself may also be multiline, and should contain the newline character `\n` to reduce any JSON parsing errors when using the `json.loads()` function. Finally, do not add any code blocks around the JSON object. +Make sure to return in a WELL-FORMED JSON object. Key and values of all JSON entries must be enclosed in double quotes " or \"\"\", and not single quotes \'. To reduce any JSON parsing errors when using the `json.loads()` function, there should be not be any multiline strings in the JSON object. Use the newline character `\n` if necessary. Additionally, to reduce any JSON parsing errors when using the `json.loads()` function, the JSON object itself cannot be multiline. It must be a single line. Finally, do not add any code blocks around the JSON object. 
""" def get_init_archive(): - return [ - COT, - COT_SC, - Reflexion, - LLM_debate, - ] # TODO: Take_a_step_back, QD, Role_Assignment + return [Tree_of_thought] + # return [ + # COT, + # COT_SC, + # Reflexion, + # LLM_debate, + # Tree_of_thought, + # ] # TODO: Take_a_step_back, QD, Role_Assignment def get_prompt(current_archive, adaptive=False): diff --git a/python/packages/autogen-core/samples/adas/utils.py b/python/packages/autogen-core/samples/adas/utils.py new file mode 100644 index 000000000000..1a7591286474 --- /dev/null +++ b/python/packages/autogen-core/samples/adas/utils.py @@ -0,0 +1,62 @@ +""" +Benchmark-agnostic utilities +""" + +import random +import string + +import numpy as np + + +def random_id(length=4): + characters = string.ascii_letters + string.digits # includes both upper/lower case letters and numbers + random_id = "".join(random.choices(characters, k=length)) + return random_id + + +def bootstrap_confidence_interval(data, num_bootstrap_samples=100000, confidence_level=0.95): + """ + Calculate the bootstrap confidence interval for the mean of 1D accuracy data. + Also returns the median of the bootstrap means. + + Args: + - data (list or array of float): 1D list or array of data points. + - num_bootstrap_samples (int): Number of bootstrap samples. + - confidence_level (float): The desired confidence level (e.g., 0.95 for 95%). + + Returns: + - str: Formatted string with 95% confidence interval and median as percentages with one decimal place. + """ + # Convert data to a numpy array for easier manipulation + data = np.array(data) + + # List to store the means of bootstrap samples + bootstrap_means = [] + + # Generate bootstrap samples and compute the mean for each sample + for _ in range(num_bootstrap_samples): + # Resample with replacement + bootstrap_sample = np.random.choice(data, size=len(data), replace=True) + # Compute the mean of the bootstrap sample + bootstrap_mean = np.mean(bootstrap_sample) + bootstrap_means.append(bootstrap_mean) + + # Convert bootstrap_means to a numpy array for percentile calculation + bootstrap_means = np.array(bootstrap_means) + + # Compute the lower and upper percentiles for the confidence interval + lower_percentile = (1.0 - confidence_level) / 2.0 + upper_percentile = 1.0 - lower_percentile + ci_lower = np.percentile(bootstrap_means, lower_percentile) + ci_upper = np.percentile(bootstrap_means, upper_percentile) + + # Compute the median of the bootstrap means + median = np.median(bootstrap_means) + + # Convert to percentages and format to one decimal place + ci_lower_percent = ci_lower + ci_upper_percent = ci_upper + median_percent = median + + # Return the formatted string with confidence interval and median + return f"95% Bootstrap Confidence Interval: ({ci_lower_percent:.1f}%, {ci_upper_percent:.1f}%), Median: {median_percent:.1f}%" diff --git a/python/packages/autogen-core/samples/common/adas/utils.py b/python/packages/autogen-core/samples/adas/utils_drop.py similarity index 75% rename from python/packages/autogen-core/samples/common/adas/utils.py rename to python/packages/autogen-core/samples/adas/utils_drop.py index 5876c6127900..617797f81156 100644 --- a/python/packages/autogen-core/samples/common/adas/utils.py +++ b/python/packages/autogen-core/samples/adas/utils_drop.py @@ -1,5 +1,7 @@ -# https://github.com/openai/simple-evals/blob/main/drop_eval.py -""" +"""Utility file for DROP benchmark. 
+ +https://github.com/openai/simple-evals/blob/main/drop_eval.py + DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, Matt Gardner https://arxiv.org/abs/1903.00161 @@ -210,7 +212,7 @@ def fuzzy_match(s1: str, s2: str) -> bool: return s1 in s2 or s2 in s1 -def drop_metric(sample: str, reference: list[str]) -> Tuple[float, float]: +def compute_drop_metrics(sample: str, reference: list[str]) -> Tuple[float, float]: em_scores = [] f1_scores = [] for answer in reference: @@ -221,10 +223,53 @@ def drop_metric(sample: str, reference: list[str]) -> Tuple[float, float]: return (max(em_scores), max(f1_scores)) -def load_drop(file_path): +def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]: + """ + Calculates the score based on a list of predictions and labels. + + Args: + predictions: A list of predictions that the agent system predicts + and returns as its final answer. + labels: A list of ground truth labels from the dataset. + + Returns: + A list of metrics, where each corresponds to the computed score for each prediction. + """ + acc_list = [] + for q_idx, res in enumerate(predictions): + try: + correct_answers = labels[q_idx] + print( + f"extracted_answer {res}, correct_answers {correct_answers}" + ) + em_score, f1_score = compute_drop_metrics(res, correct_answers) + except Exception: + acc_list.append(0) + continue + + acc_list.append(f1_score) + return acc_list + + +def load_dataset(file_path: str) -> List[Dict[str, Any]]: + """ + Loads in a dataset, with both input and targets, based on a file path. + + Args: + file_path: A string representing the path of the dataset. + + Returns: + A list of dicts, where each dict has 'input' and 'targets' keys + corresponding to the input and ground truth labels, respectively. + + The 'input' should be a string containing the task instruction, + (optional) few-shot contexts, and the actual input data. + + The 'output' can be of any data type. Note that this will be used + in the `compute_metrics` function that benchmark uses. + """ with gzip.open(file_path, mode="rb") as f: test_samples = [json.loads(line) for line in f] - prompt = """You will be asked to read a passage and answer a question.\n""" few_shot_prompt = """You will be asked to read a passage and answer a question. # Examples: @@ -236,61 +281,7 @@ def load_drop(file_path): """ examples = [] for sample in test_samples: - sample['inputs'] = few_shot_prompt + sample['context'] - sample['targets'] = sample["ref_text"].split("|") + sample["inputs"] = few_shot_prompt + sample["context"] + sample["targets"] = sample["ref_text"].split("|") examples.append(sample) return examples - - -def random_id(length=4): - characters = string.ascii_letters + string.digits # includes both upper/lower case letters and numbers - random_id = ''.join(random.choices(characters, k=length)) - return random_id - - -def bootstrap_confidence_interval(data, num_bootstrap_samples=100000, confidence_level=0.95): - """ - Calculate the bootstrap confidence interval for the mean of 1D accuracy data. - Also returns the median of the bootstrap means. - - Args: - - data (list or array of float): 1D list or array of data points. - - num_bootstrap_samples (int): Number of bootstrap samples. - - confidence_level (float): The desired confidence level (e.g., 0.95 for 95%). - - Returns: - - str: Formatted string with 95% confidence interval and median as percentages with one decimal place. 
- """ - # Convert data to a numpy array for easier manipulation - data = np.array(data) - - # List to store the means of bootstrap samples - bootstrap_means = [] - - # Generate bootstrap samples and compute the mean for each sample - for _ in range(num_bootstrap_samples): - # Resample with replacement - bootstrap_sample = np.random.choice(data, size=len(data), replace=True) - # Compute the mean of the bootstrap sample - bootstrap_mean = np.mean(bootstrap_sample) - bootstrap_means.append(bootstrap_mean) - - # Convert bootstrap_means to a numpy array for percentile calculation - bootstrap_means = np.array(bootstrap_means) - - # Compute the lower and upper percentiles for the confidence interval - lower_percentile = (1.0 - confidence_level) / 2.0 - upper_percentile = 1.0 - lower_percentile - ci_lower = np.percentile(bootstrap_means, lower_percentile) - ci_upper = np.percentile(bootstrap_means, upper_percentile) - - # Compute the median of the bootstrap means - median = np.median(bootstrap_means) - - # Convert to percentages and format to one decimal place - ci_lower_percent = ci_lower - ci_upper_percent = ci_upper - median_percent = median - - # Return the formatted string with confidence interval and median - return f"95% Bootstrap Confidence Interval: ({ci_lower_percent:.1f}%, {ci_upper_percent:.1f}%), Median: {median_percent:.1f}%" diff --git a/python/packages/autogen-core/samples/common/adas/adas.py b/python/packages/autogen-core/samples/common/adas/adas.py deleted file mode 100644 index 2e44ff2c478b..000000000000 --- a/python/packages/autogen-core/samples/common/adas/adas.py +++ /dev/null @@ -1,490 +0,0 @@ -""" -To run, type -`python packages/autogen-core/samples/common/adas/adas.py --data_filename=` - -""" - -import argparse -import asyncio -import json -import logging -import os -import random -import time -import uuid -import numpy as np -from dataclasses import dataclass -from typing import Dict, List -from collections import namedtuple -from concurrent.futures import ThreadPoolExecutor -from tqdm import tqdm - -from autogen_core.components import RoutedAgent, default_subscription, message_handler -from autogen_core.application import SingleThreadedAgentRuntime -from autogen_core.base import MessageContext -from autogen_core.components import DefaultTopicId -from autogen_core.components.models import ( - AssistantMessage, - ChatCompletionClient, - LLMMessage, - SystemMessage, - UserMessage, -) - -# TODO fix imports -import sys - -sys.path.append("/home/andyye/autogen/python/packages/autogen-core/samples/") -from common.utils import get_chat_completion_client_from_envs - -from adas_prompt import get_init_archive, get_prompt, get_reflexion_prompt -from utils import random_id, bootstrap_confidence_interval, load_drop, drop_metric - - -logging.basicConfig(level=logging.WARNING) -logging.getLogger("autogen_core").setLevel(logging.DEBUG) - -Info = namedtuple("Info", ["name", "author", "content", "iteration_idx"]) - -SEARCHING_MODE = True - - -@dataclass -class ADASTask: - task: str - - -@dataclass -class ADASResult: - result: str - - -@dataclass -class LLMMessageList: - llm_message_list: List[LLMMessage] - - -@dataclass -class SimpleReflectAgentResponse: - json_content: Dict[str, str] - # content: str - - -@dataclass -class LLMAgentBaseTask: - system_message: LLMMessage - instruction: LLMMessage - input_infos: List[Info] - iteration_idx: int - output_fields: List[str] - role: str - - -@dataclass -class Message: - content: str - - -class AgentSystem: - def __init__(self) -> None: - pass - - 
-def generate_task(input_infos) -> str: - - # construct input infos text - input_infos_text = "" - for input_info in input_infos: - if isinstance(input_info, Info): - (field_name, author, content, iteration_idx) = input_info - else: - continue - - if field_name == "task": - input_infos_text += f"# Your Task:\n{content}\n\n" - elif iteration_idx != -1: - # input_infos_text += f'### {field_name} #{iteration_idx + 1} by {author}:\n{content}\n\n' - input_infos_text += f"### {field_name} #{iteration_idx + 1}:\n{content}\n\n" - else: - # input_infos_text += f'### {field_name} by {author}:\n{content}\n\n' - input_infos_text += f"### {field_name}:\n{content}\n\n" - - prompt = input_infos_text + "# Instruction: \n" - return prompt - - -def evaluate_forward_fn(args, forward_str): - # dynamically define forward() - # modified from https://github.com/luchris429/DiscoPOP/blob/main/scripts/launch_evo.py - namespace = {} - print(f"forward str {forward_str}") - exec(forward_str, globals(), namespace) - names = list(namespace.keys()) - if len(names) != 1: - raise AssertionError(f"{len(names)} things in namespace. Please only provide 1") - func = namespace[names[0]] - if not callable(func): - raise AssertionError(f"{func} is not callable") - setattr(AgentSystem, "forward", func) - - # set seed 0 for valid set - examples = load_drop(args.data_filename)[ - 1:-1 - ] # first one and the last one is for few-shot examples - random.seed(args.shuffle_seed) - random.shuffle(examples) - - if SEARCHING_MODE: - examples = examples[: args.valid_size] * args.n_repeat - else: - examples = ( - examples[args.valid_size : args.valid_size + args.test_size] * args.n_repeat - ) - - questions = [example["inputs"] for example in examples] - answers = [example["targets"] for example in examples] - - print(f"problem length: {len(examples)}") - max_workers = min(len(examples), args.max_workers) if args.multiprocessing else 1 - - task_queue = [] - for q in questions: - taskInfo = Info("task", "User", q, -1) - task_queue.append((taskInfo, AgentSystem())) - - # agentSystem = AgentSystem() - - def call_forward(agent_task_queue): - taskInfo, agent = agent_task_queue - print(f"taskInfo {taskInfo}") - task = generate_task([taskInfo]) - - # For magentic one using the create_completion_client_from_env() helper - agent_model_kwargs = {} - - result = agent.forward(task, agent_model_kwargs) - if args.thread_sleep: - print(f"Sleeping for {args.thread_sleep}") - time.sleep(args.thread_sleep) - return result - - with ThreadPoolExecutor(max_workers=max_workers) as executor: - results = list( - tqdm(executor.map(call_forward, task_queue), total=len(task_queue)) - ) - - acc_list = [] - for q_idx, res in enumerate(results): - try: - if isinstance(res, Info): - extracted_answer = res.content - else: - extracted_answer = res - correct_answers = answers[q_idx] - print( - f"extracted_answer {extracted_answer}, correct_answers {correct_answers}" - ) - em_score, f1_score = drop_metric(extracted_answer, correct_answers) - except Exception as e: - acc_list.append(0) - continue - - acc_list.append(f1_score) - - print(f"f1: {bootstrap_confidence_interval(acc_list)}") - return acc_list - - -@default_subscription -class ADASAgent(RoutedAgent): - """An agent that performs ADAS.""" - - def __init__( - self, model_client: ChatCompletionClient, system_prompt: str, args, archive - ) -> None: - super().__init__("An agent searching agent.") - self._args = args - self._archive = archive - self._model_client = model_client - self._session_memory: Dict[str, List[ADASTask | 
ADASResult]] = {} - - self._system_messages: List[LLMMessage] = [ - # SystemMessage is not allowed in o1-preview API. - # SystemMessage( - AssistantMessage( - content=system_prompt, - source=self.id.type, - ) - ] - self._chat_history: List[LLMMessage] = [] - self._model_client = model_client - - @message_handler - async def handle_task( - self, message: LLMMessageList, ctx: MessageContext - ) -> SimpleReflectAgentResponse: - print(f"Meta-Agent making a LLM call...") - logging.info(f"{self._description} received message: {message}") - model_result = await self._model_client.create( - self._system_messages + message.llm_message_list - ) - - assert isinstance(model_result.content, str) - print(f"Model client result: {model_result.content}") - print("Loading the json string of the content...") - json_content = json.loads(model_result.content) - print("Finished loading the json string of the content") - return SimpleReflectAgentResponse(json_content=json_content) - - @message_handler - async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None: - # Store the messages in a temporary memory for this request only. - session_id = str(uuid.uuid4()) - self._session_memory.setdefault(session_id, []).append(message) - - # Process archive - file_path = os.path.join(args.save_dir, f"{args.expr_name}_run_archive.json") - if os.path.exists(file_path): - with open(file_path, "r") as json_file: - archive = json.load(json_file) - if "generation" in archive[-1] and isinstance( - archive[-1]["generation"], int - ): - start = archive[-1]["generation"] - else: - start = 0 - else: - archive = get_init_archive() - start = 0 - - for solution in archive: - if "fitness" in solution: - continue - - solution["generation"] = "initial" - print(f"============Initial Archive: {solution['name']}=================") - try: - acc_list = evaluate_forward_fn(args, solution["code"]) - except Exception as e: - print("During evaluating initial archive:") - print(e) - continue - - fitness_str = bootstrap_confidence_interval(acc_list) - solution["fitness"] = fitness_str - - # save results - os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, "w") as json_file: - json.dump(archive, json_file, indent=4) - - # Initial prompt - for n in range(start, args.n_generation): - print(f"============Generation {n + 1}=================") - - # Set prompt with updated archive (for n > 0) - _, prompt = get_prompt(archive) - - msg_list = [UserMessage(content=prompt, source=self.metadata["type"])] - try: - response = await self.send_message(LLMMessageList(msg_list), self.id) - next_solution = response.json_content - ( - reflexion_prompt_1, - reflexion_prompt_2, - reflexion_prompt_3, - reflexion_prompt_4, - ) = get_reflexion_prompt(self._archive[-1] if n > 0 else None) - print(f"--After initial prompt {response}") - - # Reflexion 1 - new_messages = msg_list + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=reflexion_prompt_1, source=self.metadata["type"] - ), - ] - response = await self.send_message( - LLMMessageList(new_messages), self.id - ) - next_solution = response.json_content - print(f"--After reflexion_prompt_1 {response}") - - # Reflexion 2 - new_messages = new_messages + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=reflexion_prompt_2, source=self.metadata["type"] - ), - ] - response = await self.send_message( - LLMMessageList(new_messages), self.id - ) - next_solution = 
response.json_content - print(f"--After reflexion_prompt_2 {next_solution}") - - # Reflexion 3 - new_messages = new_messages + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=reflexion_prompt_3, source=self.metadata["type"] - ), - ] - response = await self.send_message( - LLMMessageList(new_messages), self.id - ) - next_solution = response.json_content - print(f"--After reflexion_prompt_3 {next_solution}") - - # Reflexion 4 - new_messages = new_messages + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=reflexion_prompt_4, source=self.metadata["type"] - ), - ] - response = await self.send_message( - LLMMessageList(new_messages), self.id - ) - next_solution = response.json_content - print(f"--After reflexion_prompt_4 {next_solution}") - - # next_solution = {'reflection': 'Upon reviewing the code and the official API documentation, I noticed that the "AzureOpenAIChatCompletionClient" requires the "azure_deployment" parameter, which was missing in the code. According to the documentation, we need to provide "azure_deployment" along with "model", "api_version", and "azure_endpoint". I have updated the code to include the "azure_deployment" parameter when creating the model client. Additionally, I ensured that all other parameters and imports align with the official API documentation.', 'thought': '**Insights:**\nDecomposing complex questions into simpler sub-questions can improve reasoning accuracy by allowing the model to focus on one aspect at a time.\n\n**Overall Idea:**\nImplement an agent system where a `DecomposerAgent` breaks down the main question into sub-questions. `SolverAgents` answer these sub-questions based on the provided passage, and a `ComposerAgent` combines the sub-answers to produce the final answer.\n\n**Implementation:**\n- Define a `DecomposerAgent` that decomposes the main question into sub-questions and distributes them.\n- Define `SolverAgents` that answer sub-questions based on the provided passage.\n- Define a `ComposerAgent` that collects sub-answers and composes the final answer.\n- Use appropriate message classes and ensure correct message passing and subscriptions.\n- The `ComposerAgent` publishes the final answer to the default topic, which is collected by a `ClosureAgent`.', 'name': 'Question Decomposition Agent', 'code': 'def forward(self, task, model_client_kwargs) -> str:\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components import DefaultTopicId, default_subscription, RoutedAgent, message_handler, ClosureAgent\n from autogen_core.components.models import AssistantMessage, UserMessage, SystemMessage, ChatCompletionClient\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from autogen_core.application import SingleThreadedAgentRuntime\n\n # Create the Azure OpenAI model client\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n model_client = AzureOpenAIChatCompletionClient(\n azure_deployment=model_client_kwargs["azure_deployment"],\n model=model_client_kwargs["model"],\n api_version=model_client_kwargs["api_version"],\n azure_endpoint=model_client_kwargs["azure_endpoint"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": 
True,\n "function_calling": True,\n "json_output": True,\n },\n )\n\n @dataclass\n class Task:\n content: str\n\n @dataclass\n class SubQuestion:\n question: str\n sub_question: str\n\n @dataclass\n class SubQuestionList:\n sub_questions: List[str]\n\n @dataclass\n class SubAnswer:\n sub_question: str\n answer: str\n\n @dataclass\n class FinalAnswer:\n answer: str\n\n @default_subscription\n class DecomposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__("Decomposer Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are an expert at decomposing complex questions into simpler sub-questions that can be answered individually."\n )\n )\n ]\n\n @message_handler\n async def handle_task(self, message: Task, ctx: MessageContext) -> None:\n user_message = UserMessage(\n content=(\n f"Decompose the following question into a list of sub-questions that can help answer the main question:\\n{message.content}"\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n # Assuming the model lists the sub-questions in numbered format\n sub_questions = [sq.strip() for sq in response.content.strip().split(\'\\n\') if sq.strip()]\n for sq in sub_questions:\n await self.publish_message(\n SubQuestion(question=message.content, sub_question=sq),\n topic_id=DefaultTopicId(),\n )\n # Send the list of sub-questions to the ComposerAgent\n await self.publish_message(\n SubQuestionList(sub_questions=sub_questions),\n topic_id=DefaultTopicId(),\n )\n\n @default_subscription\n class SolverAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, passage: str) -> None:\n super().__init__("Solver Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are a helpful assistant that answers questions based on the provided passage. 
"\n "Provide concise and accurate answers."\n )\n )\n ]\n self._passage = passage\n\n @message_handler\n async def handle_sub_question(self, message: SubQuestion, ctx: MessageContext) -> None:\n user_message = UserMessage(\n content=(\n f"Passage:\\n{self._passage}\\n\\nQuestion:\\n{message.sub_question}\\n\\nAnswer the question based on the passage."\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n await self.publish_message(\n SubAnswer(sub_question=message.sub_question, answer=response.content),\n topic_id=DefaultTopicId(),\n )\n\n @default_subscription\n class ComposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__("Composer Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are a helpful assistant that composes a final answer based on answers to sub-questions."\n )\n )\n ]\n self._num_sub_questions = 0\n self._sub_answers = []\n\n @message_handler\n async def handle_sub_question_list(self, message: SubQuestionList, ctx: MessageContext) -> None:\n self._num_sub_questions = len(message.sub_questions)\n\n @message_handler\n async def handle_sub_answer(self, message: SubAnswer, ctx: MessageContext) -> None:\n self._sub_answers.append(message)\n if len(self._sub_answers) == self._num_sub_questions:\n # All sub-answers have been collected\n # Compose the final answer\n sub_answers_text = \'\\n\'.join(\n f"Sub-question: {sa.sub_question}\\nAnswer: {sa.answer}" for sa in self._sub_answers\n )\n user_message = UserMessage(\n content=(\n f"Based on the following sub-questions and their answers, compose a final comprehensive answer to the main question.\\n{sub_answers_text}"\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n await self.publish_message(\n FinalAnswer(answer=response.content),\n topic_id=DefaultTopicId(),\n )\n\n async def main():\n queue = asyncio.Queue()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await DecomposerAgent.register(runtime, "decomposer_agent", lambda: DecomposerAgent(model_client))\n await SolverAgent.register(runtime, "solver_agent", lambda: SolverAgent(model_client, passage=task))\n await ComposerAgent.register(runtime, "composer_agent", lambda: ComposerAgent(model_client))\n\n # ClosureAgent to collect the final answer\n await ClosureAgent.register(runtime, "output_result", output_result)\n\n runtime.start()\n\n # Publish the task to the DecomposerAgent\n await runtime.publish_message(\n Task(content=task),\n topic_id=DefaultTopicId(),\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the answer from the queue\n final_answer = (await queue.get()).answer\n return final_answer\n\n return asyncio.run(main())'} - # next_solution = {'reflection': 'Upon reviewing the code and the official API documentation, I noticed that the "AzureOpenAIChatCompletionClient" requires the "azure_deployment" parameter, which was missing in the code. 
According to the documentation, we need to provide "azure_deployment" along with "model", "api_version", and "azure_endpoint". I have updated the code to include the "azure_deployment" parameter when creating the model client. Additionally, I ensured that all other parameters and imports align with the official API documentation.', 'thought': '**Insights:**\nDecomposing complex questions into simpler sub-questions can improve reasoning accuracy by allowing the model to focus on one aspect at a time.\n\n**Overall Idea:**\nImplement an agent system where a `DecomposerAgent` breaks down the main question into sub-questions. `SolverAgents` answer these sub-questions based on the provided passage, and a `ComposerAgent` combines the sub-answers to produce the final answer.\n\n**Implementation:**\n- Define a `DecomposerAgent` that decomposes the main question into sub-questions and distributes them.\n- Define `SolverAgents` that answer sub-questions based on the provided passage.\n- Define a `ComposerAgent` that collects sub-answers and composes the final answer.\n- Use appropriate message classes and ensure correct message passing and subscriptions.\n- The `ComposerAgent` publishes the final answer to the default topic, which is collected by a `ClosureAgent`.', 'name': 'Question Decomposition Agent', 'code': 'def forward(self, task, model_client_kwargs) -> str:\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components import DefaultTopicId, default_subscription, RoutedAgent, message_handler, ClosureAgent\n from autogen_core.components.models import AssistantMessage, UserMessage, SystemMessage, ChatCompletionClient\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from autogen_core.application import SingleThreadedAgentRuntime\n\n # Create the Azure OpenAI model client\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs["model"],\n api_version=model_client_kwargs["api_version"],\n azure_endpoint=model_client_kwargs["azure_endpoint"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True,\n },\n )\n\n @dataclass\n class Task:\n content: str\n\n @dataclass\n class SubQuestion:\n question: str\n sub_question: str\n\n @dataclass\n class SubQuestionList:\n sub_questions: List[str]\n\n @dataclass\n class SubAnswer:\n sub_question: str\n answer: str\n\n @dataclass\n class FinalAnswer:\n answer: str\n\n @default_subscription\n class DecomposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__("Decomposer Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are an expert at decomposing complex questions into simpler sub-questions that can be answered individually."\n )\n )\n ]\n\n @message_handler\n async def handle_task(self, message: Task, ctx: MessageContext) -> None:\n user_message = UserMessage(\n content=(\n f"Decompose the following question into a list of sub-questions that can help answer the main question:\\n{message.content}"\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await 
self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n # Assuming the model lists the sub-questions in numbered format\n sub_questions = [sq.strip() for sq in response.content.strip().split(\'\\n\') if sq.strip()]\n for sq in sub_questions:\n await self.publish_message(\n SubQuestion(question=message.content, sub_question=sq),\n topic_id=DefaultTopicId(),\n )\n # Send the list of sub-questions to the ComposerAgent\n await self.publish_message(\n SubQuestionList(sub_questions=sub_questions),\n topic_id=DefaultTopicId(),\n )\n\n @default_subscription\n class SolverAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, passage: str) -> None:\n super().__init__("Solver Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are a helpful assistant that answers questions based on the provided passage. "\n "Provide concise and accurate answers."\n )\n )\n ]\n self._passage = passage\n\n @message_handler\n async def handle_sub_question(self, message: SubQuestion, ctx: MessageContext) -> None:\n user_message = UserMessage(\n content=(\n f"Passage:\\n{self._passage}\\n\\nQuestion:\\n{message.sub_question}\\n\\nAnswer the question based on the passage."\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n await self.publish_message(\n SubAnswer(sub_question=message.sub_question, answer=response.content),\n topic_id=DefaultTopicId(),\n )\n\n @default_subscription\n class ComposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__("Composer Agent")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(\n content=(\n "You are a helpful assistant that composes a final answer based on answers to sub-questions."\n )\n )\n ]\n self._num_sub_questions = 0\n self._sub_answers = []\n\n @message_handler\n async def handle_sub_question_list(self, message: SubQuestionList, ctx: MessageContext) -> None:\n self._num_sub_questions = len(message.sub_questions)\n\n @message_handler\n async def handle_sub_answer(self, message: SubAnswer, ctx: MessageContext) -> None:\n self._sub_answers.append(message)\n if len(self._sub_answers) == self._num_sub_questions:\n # All sub-answers have been collected\n # Compose the final answer\n sub_answers_text = \'\\n\'.join(\n f"Sub-question: {sa.sub_question}\\nAnswer: {sa.answer}" for sa in self._sub_answers\n )\n user_message = UserMessage(\n content=(\n f"Based on the following sub-questions and their answers, compose a final comprehensive answer to the main question.\\n{sub_answers_text}"\n ),\n source="user"\n )\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages=messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n await self.publish_message(\n FinalAnswer(answer=response.content),\n topic_id=DefaultTopicId(),\n )\n\n async def main():\n queue = asyncio.Queue()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await DecomposerAgent.register(runtime, "decomposer_agent", lambda: DecomposerAgent(model_client))\n await 
SolverAgent.register(runtime, "solver_agent", lambda: SolverAgent(model_client, passage=task))\n await ComposerAgent.register(runtime, "composer_agent", lambda: ComposerAgent(model_client))\n\n # ClosureAgent to collect the final answer\n await ClosureAgent.register(runtime, "output_result", output_result)\n\n runtime.start()\n\n # Publish the task to the DecomposerAgent\n await runtime.publish_message(\n Task(content=task),\n topic_id=DefaultTopicId(),\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the answer from the queue\n final_answer = (await queue.get()).answer\n return final_answer\n\n return asyncio.run(main())'} - - # next_solution = {'reflection': '**Reflection:**\n\nAfter reviewing the "## WRONG Implementation examples" section, specifically example 8, I realized that for the `ClosureAgent` to receive the final `Answer` message correctly, the topic source in `publish_message` must match the agent key of the `ClosureAgent`. In the previous code, the `ReasoningAgent` published to `TopicId("result", self.id.type)` where `self.id.type` is `"reasoning_agent"`, but the `ClosureAgent` was registered with the agent key `"output_result"`. This mismatch would prevent the message from being delivered to the `ClosureAgent`.\n\nTo fix this, I adjusted the `ReasoningAgent` to publish the final `Answer` to `TopicId("result", "output_result")`, ensuring that the topic source matches the agent key of the `ClosureAgent`, which is `"output_result"`. This follows the correct pattern outlined in the examples and ensures that the message is correctly routed to the `ClosureAgent`.\n', 'thought': '**Insights:**\nBy parallelizing the generation of multiple reasoning paths, we can improve the efficiency of the agent. Additionally, ensuring that the agents and subscriptions are correctly set up will avoid common mistakes.\n\n**Overall Idea:**\nThe improved agent will generate multiple reasoning paths concurrently and aggregate the final answers to select the most common one. 
This enhances efficiency without altering the overall design.\n\n**Implementation:**\n- Use `asyncio.gather` to run multiple model calls concurrently in the `ReasoningAgent`.\n- Ensure that the agents and subscriptions are correctly registered.\n- Confirm that the `ClosureAgent` collects the final answer accurately.', 'name': 'Self-Consistency Chain-of-Thought Agent', 'code': 'def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n from collections import Counter\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import MessageContext, AgentId, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs["model"],\n api_version=model_client_kwargs["api_version"],\n azure_endpoint=model_client_kwargs["azure_endpoint"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True\n },\n )\n\n @dataclass\n class Question:\n content: str\n\n @dataclass\n class Answer:\n content: str\n\n @default_subscription\n class ReasoningAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, num_samples: int) -> None:\n super().__init__("Reasoning Agent")\n self._model_client = model_client\n self._num_samples = num_samples\n\n @message_handler\n async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n async def generate_answer() -> str:\n prompt = ("Please solve the following problem step-by-step:\\n\\n"\n f"{message.content}\\n\\n"\n "Your final answer should be a single string.")\n messages = [\n SystemMessage("You are a helpful assistant for solving reasoning problems."),\n UserMessage(content=prompt, source="user"),\n ]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n return response.content.strip()\n\n # Generate the reasoning paths concurrently\n tasks = [generate_answer() for _ in range(self._num_samples)]\n final_answers = await asyncio.gather(*tasks)\n\n # Aggregate the final answers and select the most common one\n most_common_answer = Counter(final_answers).most_common(1)[0][0]\n\n # Publish the final answer, ensuring topic source matches ClosureAgent\'s agent key\n await self.publish_message(Answer(content=most_common_answer), topic_id=TopicId("result", "output_result"))\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n await ReasoningAgent.register(runtime, "reasoning_agent", lambda: ReasoningAgent(model_client, num_samples=5))\n\n result_topic = TypeSubscription(topic_type="result", agent_type="output_result")\n await ClosureAgent.register(runtime, 
"output_result", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n await runtime.publish_message(Question(content=task), DefaultTopicId())\n\n await runtime.stop_when_idle()\n\n return (await queue.get()).content\n\n return asyncio.run(main())'} - # next_solution = {'reflection': '**Reflection:**\n\nAfter reviewing the "## WRONG Implementation examples" section, specifically example 8, I realized that for the `ClosureAgent` to receive the final `Answer` message correctly, the topic source in `publish_message` must match the agent key of the `ClosureAgent`. In the previous code, the `ReasoningAgent` published to `TopicId("result", self.id.type)` where `self.id.type` is `"reasoning_agent"`, but the `ClosureAgent` was registered with the agent key `"output_result"`. This mismatch would prevent the message from being delivered to the `ClosureAgent`.\n\nTo fix this, I adjusted the `ReasoningAgent` to publish the final `Answer` to `TopicId("result", "output_result")`, ensuring that the topic source matches the agent key of the `ClosureAgent`, which is `"output_result"`. This follows the correct pattern outlined in the examples and ensures that the message is correctly routed to the `ClosureAgent`.\n', 'thought': '**Insights:**\nBy parallelizing the generation of multiple reasoning paths, we can improve the efficiency of the agent. Additionally, ensuring that the agents and subscriptions are correctly set up will avoid common mistakes.\n\n**Overall Idea:**\nThe improved agent will generate multiple reasoning paths concurrently and aggregate the final answers to select the most common one. This enhances efficiency without altering the overall design.\n\n**Implementation:**\n- Use `asyncio.gather` to run multiple model calls concurrently in the `ReasoningAgent`.\n- Ensure that the agents and subscriptions are correctly registered.\n- Confirm that the `ClosureAgent` collects the final answer accurately.', 'name': 'Self-Consistency Chain-of-Thought Agent', 'code': 'def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n from collections import Counter\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import MessageContext, AgentId, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs["model"],\n api_version=model_client_kwargs["api_version"],\n azure_endpoint=model_client_kwargs["azure_endpoint"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n "vision": True,\n "function_calling": True,\n "json_output": True\n },\n )\n\n @dataclass\n class Question:\n content: str\n\n @dataclass\n class Answer:\n content: str\n\n @default_subscription\n class ReasoningAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, num_samples: int) -> None:\n super().__init__("Reasoning Agent")\n 
self._model_client = model_client\n self._num_samples = num_samples\n\n @message_handler\n async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n async def generate_answer() -> str:\n prompt = ("Please solve the following problem step-by-step:\\n\\n"\n f"{message.content}\\n\\n"\n "Your final answer should be a single string.")\n messages = [\n SystemMessage("You are a helpful assistant for solving reasoning problems."),\n UserMessage(content=prompt, source="user"),\n ]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n return response.content.strip()\n\n # Generate the reasoning paths concurrently\n tasks = [generate_answer() for _ in range(self._num_samples)]\n final_answers = await asyncio.gather(*tasks)\n\n # Aggregate the final answers and select the most common one\n most_common_answer = Counter(final_answers).most_common(1)[0][0]\n\n # Publish the final answer, ensuring topic source matches ClosureAgent\'s agent key\n await self.publish_message(Answer(content=most_common_answer), topic_id=TopicId("result", "output_result"))\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n await ReasoningAgent.register(runtime, "reasoning_agent", lambda: ReasoningAgent(model_client, num_samples=5))\n\n result_topic = TypeSubscription(topic_type="result", agent_type="output_result")\n await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n await runtime.publish_message(Question(content=task), DefaultTopicId())\n\n res = (await queue.get()).content\n\n await runtime.stop()\n\n return res\n\n return asyncio.run(main())'} - except Exception as e: - print("Exception occured during the generation of new solution:") - print(e) - n -= 1 - continue - - acc_list = [] - for _ in range(args.debug_max): - print("Evaluate code of newly generated solution. Debug loop...") - try: - print(next_solution["code"]) - acc_list = evaluate_forward_fn(args, next_solution["code"]) - if np.mean(acc_list) < 0.01 and SEARCHING_MODE: - raise Exception("All 0 accuracy") - break - except Exception as e: - print("During evaluation:") - print(e) - new_messages = new_messages + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=f"Error during evaluation:\n{e}\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. 
Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", - source=self.metadata["type"], - ), - ] - try: - response = await self.send_message( - LLMMessageList(new_messages), self.id - ) - next_solution = response.json_content - except Exception as e: - print("During LLM generate new solution:") - print(e) - continue - continue - if not acc_list: - n -= 1 - continue - - fitness_str = bootstrap_confidence_interval(acc_list) - next_solution["fitness"] = fitness_str - next_solution["generation"] = n + 1 - - if "debug_thought" in next_solution: - del next_solution["debug_thought"] - if "reflection" in next_solution: - del next_solution["reflection"] - archive.append(next_solution) - - # save results - os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, "w") as json_file: - json.dump(archive, json_file, indent=4) - - -async def main(args) -> None: - runtime = SingleThreadedAgentRuntime() - client = get_chat_completion_client_from_envs(model="gpt-4o-mini") - archive = get_init_archive() - system_prompt, prompt = get_prompt(archive) - - await ADASAgent.register( - runtime, - "adas_agent", - lambda: ADASAgent( - model_client=client, - system_prompt=system_prompt, - args=args, - archive=archive, - ), - ) - - runtime.start() - - # Publish an initial message to trigger the ADAS search to start. - await runtime.publish_message( - message=ADASTask(task=prompt), - topic_id=DefaultTopicId(), - ) - - # Keep processing messages until idle. - await runtime.stop_when_idle() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Run ADAS") - parser.add_argument( - "--verbose", action="store_true", help="Enable verbose logging." - ) - parser.add_argument( - "--data_filename", type=str, default="dataset/drop_v0_dev.jsonl.gz" - ) - parser.add_argument("--valid_size", type=int, default=128) - parser.add_argument("--test_size", type=int, default=800) - parser.add_argument("--shuffle_seed", type=int, default=0) - parser.add_argument("--n_repeat", type=int, default=1) - parser.add_argument("--multiprocessing", action="store_true", default=True) - parser.add_argument("--max_workers", type=int, default=48) - parser.add_argument("--debug", action="store_true", default=True) - parser.add_argument("--save_dir", type=str, default="results/") - parser.add_argument("--expr_name", type=str, default="drop_gpt3.5_results") - parser.add_argument("--n_generation", type=int, default=30) - parser.add_argument("--debug_max", type=int, default=3) - parser.add_argument( - "--thread_sleep", - type=int, - default=0, - help="Amount of time to sleep between new threads." 
- "This is to mitigate any errors due to request limits with Azure or AutoGen", - ) - parser.add_argument( - "--model", - type=str, - default="gpt-4o-2024-05-13", - choices=["gpt-4-turbo-2024-04-09", "gpt-3.5-turbo-0125", "gpt-4o-2024-05-13"], - ) - args = parser.parse_args() - if args.verbose: - logging.basicConfig(level=logging.WARNING) - logging.getLogger("autogen_core").setLevel(logging.DEBUG) - handler = logging.FileHandler("adas.log") - logging.getLogger("autogen_core").addHandler(handler) - - asyncio.run(main(args)) From 78ff8e19a1b2dcf67af51fd4b752a94150f8bff4 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 5 Dec 2024 10:38:02 -0500 Subject: [PATCH 06/21] Update documentation --- .../autogen-core/samples/adas/README.md | 124 +++++++++++------- .../autogen-core/samples/adas/adas.py | 114 +++++----------- .../autogen-core/samples/adas/adas_prompt.py | 35 ++--- .../docs/azure_ai_studio_edit_deployment.png | Bin 0 -> 128648 bytes .../samples/adas/utils_benchmark_template.py | 42 ++++++ .../autogen-core/samples/adas/utils_drop.py | 29 +--- 6 files changed, 174 insertions(+), 170 deletions(-) create mode 100644 python/packages/autogen-core/samples/adas/docs/azure_ai_studio_edit_deployment.png create mode 100644 python/packages/autogen-core/samples/adas/utils_benchmark_template.py diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/packages/autogen-core/samples/adas/README.md index fe590a41e3cc..f76d09761808 100644 --- a/python/packages/autogen-core/samples/adas/README.md +++ b/python/packages/autogen-core/samples/adas/README.md @@ -2,9 +2,9 @@ ## Motivation -The Automated Design of Agentic Systems [paper](https://arxiv.org/pdf/2408.08435) introduces a way to automatically create powerful agentic system designs. This is motivated by the observation that in the field of machine learning, hand-designed solutions are often replaced by learned solutions over time. +The Automated Design of Agentic Systems (ADAS) [paper](https://arxiv.org/pdf/2408.08435) introduces a way to automatically create powerful agentic system designs. This is motivated by the observation that in the field of machine learning, hand-designed solutions are often replaced by learned solutions over time. -We intend to implement this concept using the AutoGen framework, with the intention of discovering novel systems built directly with the AutoGen API. +We have implemented this concept using the AutoGen framework, with the intention of discovering novel systems built directly with the AutoGen API. ## Background @@ -14,13 +14,13 @@ ADAS uses a meta-agent to generate creative and novel agent systems. Base agent ### Key Concepts -- **Agent System:** A software system designed to perform tasks autonomously. It can include one or more agents --which we will refer to as base agents-- and should be able to complete a task end-to-end (E2E), from receiving input, and producing a final output. Examples of an Agent System include “Chain-of-Thought" reasoning and planning, or “Self-Reflection”. -- **Building block:** A fundamental component or module that can be used to construct more complex systems. These building blocks are the basic units that can be combined and recombined in various ways to create different agentic systems. Effective building blocks include “Chain-of-Thought" reasoning and planning, or “Self-Reflection”. +- **Agent System:** A software system designed to perform tasks autonomously. 
It can include one or more agents --which we will refer to as base agents-- and should be able to complete a task end-to-end (E2E), from receiving input to producing a final output. Examples of an Agent System include "Chain-of-Thought" reasoning and planning, or "Self-Reflection".
+- **Building block:** A fundamental component or module that can be used to construct more complex systems. These building blocks are the basic units that can be combined and recombined in various ways to create different agentic systems. Effective building blocks include "Chain-of-Thought" reasoning and planning, or "Self-Reflection".
- **Base agent:** The agent(s) within an Agent System that interact with each other using the event-based / messaging protocol defined by the AutoGen 0.4 API, and try to accomplish the task as defined by the benchmark.
- **Foundation Models (FMs):** Used as modules within agentic systems for tasks requiring flexible reasoning and planning. Examples include GPT-3.5, GPT-4o, Claude-Sonnet, Llama-70B, Gemini, etc.
- **Compound Agent System:** A complex system composed of multiple simpler agentic systems or building blocks. These individual components work together to perform more sophisticated tasks than they could individually. By combining building blocks, one can create a more powerful and versatile agentic system capable of handling a wide range of tasks and challenges.
- **Meta Agent Search:** An algorithm where a meta-agent iteratively programs new agents, tests their performance, and refines them based on an archive of previous discoveries.
-- **Archive:** A file containing a list of 1) seed Agent Systems (Chain-of-Thought, Self-Reflection, etc.) which are manually defined, or 1) Agent Systems discovered by the meta-agent.
+- **Archive:** A list of 1) seed Agent Systems (Chain-of-Thought, Self-Reflection, etc.) which are manually defined, or 2) Agent Systems discovered by the meta-agent.
- **Meta Agent:** The agent that, given context of the benchmark and archive Agent Systems, tries to write code for novel Agent Systems.

### Methodology
@@ -32,15 +32,15 @@ ADAS uses a meta-agent to generate creative and novel agent systems. Base agent

In the original paper, the discovered agents significantly outperformed state-of-the-art hand-designed agents, demonstrating robustness and generality across domains.

-To see the results of early experiments with ADAS in AutoGen, please see the Results section.
+To see the [results](#results-for-drop-benchmark) of early experiments with ADAS in AutoGen, please see the Results section.

## ADAS in AutoGen

-We have refactored the building block Agent Systems found in the original ADAS code to run using the AutoGen API. Specifically, we decided to implement these Agent Systems at the AutoGen-Core level of abstraction (rather than at the AutoGen-Agentchat level).
+We have refactored the building block Agent Systems found in the original ADAS code to run using the AutoGen API. Specifically, we decided to implement these Agent Systems at the `AutoGen-Core` level of abstraction (rather than at the `AutoGen-AgentChat` level).
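To make that level of abstraction concrete, here is a minimal sketch of a base agent written against the `AutoGen-Core` API, using the same `RoutedAgent` / `@message_handler` / `publish_message` pattern as the seed Agent Systems in this sample (the `EchoAgent`, `Task`, and `Result` names are illustrative only):

```python
from dataclasses import dataclass

from autogen_core.base import MessageContext
from autogen_core.components import DefaultTopicId, RoutedAgent, default_subscription, message_handler


@dataclass
class Task:
    content: str


@dataclass
class Result:
    content: str


@default_subscription
class EchoAgent(RoutedAgent):
    """A trivial base agent: receives a Task and publishes a Result."""

    def __init__(self) -> None:
        super().__init__("A minimal echo agent.")

    @message_handler
    async def handle_task(self, message: Task, ctx: MessageContext) -> None:
        # Publish the reply to the default topic so any other subscribed
        # agent (or a ClosureAgent) can consume it.
        await self.publish_message(Result(content=f"Echo: {message.content}"), topic_id=DefaultTopicId())
```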
-The vision for going down this path is that the meta-agent can design, using AutoGen-Core building blocks, a new (multi-)agent system, which if proven useful (after going through a period of testing/adoption by the team), be incorporated into the official AgentChat API.
+The vision for going down this path is that the meta-agent can design, using `AutoGen-Core` building blocks, a new (multi-)agent system which, if proven useful (after a period of testing and adoption by the team), can be incorporated into the official `AgentChat` API.

See this document for more on the design tradeoffs between the AutoGen-Core and `AutoGen-AgentChat` APIs.

### 4 manually crafted Agent Systems serving as the seeds to the archive

- More will be added over time

### Prompt to Meta-Agent

- Instructions: Generate novel code with the name and thought of the new system
-- Output and formatting requirements: Must be JSON, with `thought`, `name`, `code` (with the `forward` function)
+- Output and formatting requirements: Must be JSON, with `thought`, `name`, `code` (which must be written inside a `forward` function)
- Examples of how to use or not use the AutoGen-Core API
  - Wrong ways to use the AutoGen-Core API
  - Correct ways to use the AutoGen-Core API
- Historical context (archive) of previous Agent Systems.
-  - Documentation from official AutoGen website. Currently only parsing .md and .ipynb files from the [core-concepts](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/core-concepts/index.html) and [framework](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/index.html) sections
+- Documentation from the official AutoGen website. Currently only parsing `.md` and `.ipynb` files from the [core-concepts](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/core-concepts/index.html) and [framework](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/index.html) sections.

### Meta-Agent does 5 iterations of LLM calls to create and edit code

-- The original prompt contains all the instructions (generate novel code with name and thought of the new system), output and formatting requirements, examples of how to use or not use the API, and historical context (archive) of previous Agent Systems.
+- The initial prompt to the meta-agent contains all the instructions (generate novel code with name and thought of the new system), output and formatting requirements, examples of how to use or not use the API, and historical context (archive) of previous Agent Systems.
- 4 rounds of reflection:
  - Round 1 to reflect on interestingness, implementation mistakes, and improvement
-  - Round 2 to revise based on the tips from the Wrong Implementation section in the original prompt
-  - Round 3 to revise based on the tips of Correct Implementation
+  - Round 2 to revise based on the tips from the "Wrong Implementation" section in the original prompt
+  - Round 3 to revise based on the tips from the "Correct Implementation" section in the original prompt
  - Round 4 to revise based on the tips from the official API documentation

### Meta-Agent will try again fresh if it encounters (code compilation) errors when trying to execute

An example of an exception is the following:
```
Error during evaluation:\nClosureAgent.register() takes 4 positional arguments but 5 positional arguments (and 1 keyword-only argument) were given\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'", source='adas_agent')
```
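For reference, the meta-agent's reply is parsed with `json.loads` into a plain dictionary. A heavily abbreviated, hypothetical sketch of its shape is shown below; real entries (with full `forward` implementations) are saved to `<expr_name>_run_archive.json`, and the `debug_thought` field only appears when the meta-agent is asked to debug a failed attempt.

```python
# Hypothetical, abbreviated sketch of the meta-agent's parsed JSON reply.
# All field values here are placeholders.
next_solution = {
    "reflection": "Checked the implementation against the API documentation and fixed the ClosureAgent registration.",
    "thought": "**Insights:** Decomposing complex questions into simpler sub-questions can improve accuracy...",
    "name": "Question Decomposition Agent",
    "code": "def forward(self, task, model_client_kwargs) -> str:\n    ...",
    # Only present on debug retries; adas.py deletes it (and 'reflection')
    # before appending the entry to the archive.
    "debug_thought": "The previous run hung because the final answer was published to a topic with no subscriber.",
}
```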
+The current behavior is that the meta-agent will start over the generation sequence (using the 5 steps of LLM calls).

-Note: The `adas.py` script can still get stuck even if the code of the agent system compiles, but the agent system itself hangs due to a message being published to a topic that is not used. See this [section](#the-code-that-the-meta-agent-does-not-compile) under the Troubleshooting section for details on how to address this issue.
+Note: The `adas.py` script can still get stuck even if the code of the agent system compiles. This can occur because the agent system itself hangs due to a message being published to a topic that is not used. See this [section](#the-code-that-the-meta-agent-does-not-compile) under the Troubleshooting section for details on how to address this issue.

### Notable arguments to the script

Please see the `adas.py` file for details of all available settings.

- `data_filename`: the name or full path of the dataset location
-- `benchmark_specific_utils_file`: Benchmark-specific utility file to load the dataset and also evaluate the outputs. This file must contain the load_dataset and compute_metrics functions
-- `meta_agent_model_config`: JSON string of the AzureOpenAIChatCompletionClient settings for the Meta-Agent.
-- `base_agent_model_config`: JSON string of the AzureOpenAIChatCompletionClient settings for the Base Agent.
+- `benchmark_specific_utils_file`: Benchmark-specific utility file to load the dataset and also evaluate the outputs. This file must contain the `load_dataset` and `compute_metrics` functions.
+- `meta_agent_model_config`: JSON string of the `AzureOpenAIChatCompletionClient` settings for the Meta-Agent.
+- `base_agent_model_config`: JSON string of the `AzureOpenAIChatCompletionClient` settings for the Base Agent.
- `n_generation`: number of generations of new agents that the meta-agent tries to discover
- `expr_name`: name of the output file containing both the original/seed and newly generated agent systems, as well as their fitness scores.
-- `max_workers`: the number of threads to spin up in a ThreadPoolExecutor, to parallelize the execution of the particular Agent System that is currently being evaluated.
+- `max_workers`: the number of threads to spin up in a `ThreadPoolExecutor`, to parallelize the execution of the particular Agent System that is currently being evaluated.

## QuickStart

Follow the instructions here: [Installation — AutoGen](https://github.com/micr

python3 -m venv .venv
source .venv/bin/activate

+# Clone the ADAS repo to be able to use the DROP dataset for the sample.
+# Not required if you do not plan on evaluating with DROP, and intend to run with your own dataset / benchmark.
+git clone https://github.com/ShengranHu/ADAS.git && cd ..
+
# Install package at latest dev tag
-pip install 'autogen-core==0.4.0.dev6'
-# Or install in editable mode if you are modifying/testing AutoGen code at the same time
-# git clone -b yeandy_adas https://github.com/yeandy/autogen.git
+pip install 'autogen-core==0.4.0.dev7'
+
+# Clone the AutoGen package, and switch to the branch with the adas script.
+git clone -b yeandy_adas https://github.com/yeandy/autogen.git
cd autogen/python
-pip install -e packages/autogen-core
```

### Agent System code definitions
@@ -146,7 +151,7 @@ Note: If you add a new agent system after you’ve started generating new Agent

#### Prepare your dataset
First download your dataset locally.
-Then create a copy the file called `utils_benchmark_template.py`, and name it with a suffix corresponding to your benchmark. For example, see `utils_drop.py`. Place this under the `adas` directory.
+Then create a copy of the file called `utils_benchmark_template.py`, and name it with a suffix corresponding to your benchmark. For example, see `utils_drop.py`. Place this under the `adas` directory. This file will later be passed to the `benchmark_specific_utils_file` flag when running the script.

Under the `load_dataset` function, add logic to load in your dataset. Do any preprocessing that is required, such as adding instructions or any few-shot examples with the actual input data.
@@ -184,6 +189,7 @@ This should be passed as a JSON string to the `base_agent_model_config` flag.
```
### Run ADAS
```bash
+# For DROP benchmark
python packages/autogen-core/samples/adas/adas.py \
--data_filename=/home//ADAS/dataset/drop_v0_dev.jsonl.gz \
--n_generation=150 \
--expr_name=drop_o1_preview_meta_gpt4o_base_results \
--meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}' \
--base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06"}' \
--benchmark_specific_utils_file='/home//autogen/python/packages/autogen-core/samples/adas/utils_drop.py'
+
+# For your own benchmark
+python packages/autogen-core/samples/adas/adas.py \
+ --data_filename=/home//my_benchmark_data.csv \
+ --n_generation=150 \
+ --expr_name=drop_o1_preview_meta_gpt4o_base_results \
+ --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}' \
+ --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06"}' \
+ --benchmark_specific_utils_file='/home//autogen/python/packages/autogen-core/samples/adas/utils_my_benchmark.py'
```
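For reference, the sketch below shows the shape such a `benchmark_specific_utils_file` is expected to have: `adas.py` reads the `inputs`/`targets` keys of each example returned by `load_dataset` (reserving the first and last examples for few-shot prompting), and aggregates the list of per-example scores returned by `compute_metrics` into a bootstrap confidence interval. The file format and field names used here (`question`, `answer`, exact-match scoring) are placeholders; adapt them to your benchmark, and see `utils_benchmark_template.py` and `utils_drop.py` for the real templates.

```python
# utils_my_benchmark.py -- illustrative sketch only.
import json
from typing import Any, Dict, List


def load_dataset(data_filename: str) -> List[Dict[str, Any]]:
    """Load the benchmark as a list of examples with 'inputs' and 'targets' keys.

    Note: adas.py slices off the first and last examples for few-shot prompts,
    so include them in the returned list.
    """
    with open(data_filename) as f:
        raw = [json.loads(line) for line in f]  # assumes a JSONL file
    return [{"inputs": ex["question"], "targets": ex["answer"]} for ex in raw]


def compute_metrics(results: List[str], answers: List[Any]) -> List[float]:
    """Return one score per example; adas.py turns this list into a fitness
    string via bootstrap_confidence_interval."""
    return [
        1.0 if str(pred).strip().lower() == str(gold).strip().lower() else 0.0
        for pred, gold in zip(results, answers)
    ]
```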
-You can increase the number of generations for the meta-agent to try creating. Note that if there is any compilation error, the count of the generation will be skipped. (Potential bug, or at least confusing behavior). You can also increase the number of threads to run evaluation of the agent system at any time.
+You can also increase the number of generations that the meta-agent will try to create. Note that if there is any compilation error, the count of the generation will be skipped. (Potential bug, or at least confusing behavior.)
+
+You can also increase or decrease the number of threads used to run evaluation of the agent system at any time. Note: There is currently some behavior (bug?) where if `max_workers` is not 1, the code hangs for certain systems. See this [section](#certain-agent-systems-will-hang-if-max_workers-is-not-equal-to-1) for details.
```bash
-python3 adas.py --n_generations 50 --max_workers 10
+python3 adas.py --n_generation 100 --max_workers 1
```

## Results for DROP benchmark

### Best Agent System that the Meta-Agent discovered
+See this [section](#all-agent-systems-that-the-meta-agent-discovered-for-drop) for the full list of discovered Agent Systems.
+#### Meta-Agent used o1-preview, and Base Agents used GPT-3.5
+```
TODO
-
-See this [section]() for the full list of discovered Agent Systems.
+```
+#### Meta-Agent used o1-preview, and Base Agents used GPT-4o
+```
+TODO
+```

### Performance with different LLMs
These are the results for the DROP benchmark, broken down by which LLM the Meta-Agent and the Base Agents use.
-```
-Meta-Agent | Base Agent
------------|-----------
-O1-preview | GPT4o
-GPT3.5 | O1-preview
-TODO | TODO
-TODO | TODO
-GPT4o | TODO
-TODO | TODO
-TODO | TODO
-```
+| **Meta-Agent \ Base Agent** | **o1-preview** | **GPT-4o** | **GPT-3.5** |
+|------------------------------|----------------|------------|-------------|
+| **o1-preview**               | TODO           | TODO       | TODO        |
+| **GPT-4o**                   | TODO           | TODO       | TODO        |
+

## Troubleshooting

### Exceed token limit
-If you are exceeding the token rate limit on your Azure AI Studio deployment
+If you are exceeding the token rate limit on your Azure AI Studio deployment, try the following strategies.

#### Increase the Rate Limit in Azure AI Studio
-TODO: Insert image
+Go to your deployment, and update the Tokens per Minute Rate Limit.
+![Update GPT-3.5 deployment](./docs/azure_ai_studio_edit_deployment.png)
+
#### Setting the number of max_workers=1
-This can be an argument passed to `adas.py`
+This can be an argument passed to `adas.py`.
#### Add sleep time after the forward function runs in each thread. Setting it to 10 seconds is a good place to start
```bash
@@ -247,14 +266,14 @@ The code for the Agent System compiles with no issue, but the systems hangs duri
INFO:autogen_core:Calling message handler for output_result with message type FinalAnswer published by coordinator_agent/default
ERROR:autogen_core:Error processing publish message
Traceback (most recent call last):
File "/home/andyye/autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 372, in _process_publish agent = await self._get_agent(agent_id)
-File "/home/andyye/autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 620, in _get_agent raise LookupError(f"Agent with name {agent_id.type} not found.")
+  File "/home/andyye/autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 620, in _get_agent raise LookupError(f"Agent with name {agent_id.type} not found.")
LookupError: Agent with name output_result not found.
```
The easiest solution is to terminate the program with the `Ctrl + \` command, and then rerun the `adas.py` script.
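The usual root cause of this hang is a topic/subscription mismatch: the topic type and topic source that the final agent publishes to must match the `TypeSubscription` topic type and the agent key that the `ClosureAgent` was registered with. Below is a minimal, self-contained sketch of the correct wiring, following the same pattern as the seed Agent Systems in this sample; the agent and message names are illustrative only.

```python
# Minimal sketch of the ClosureAgent wiring that avoids the hang above.
import asyncio
from dataclasses import dataclass

from autogen_core.application import SingleThreadedAgentRuntime
from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId
from autogen_core.components import (
    ClosureAgent,
    DefaultTopicId,
    RoutedAgent,
    TypeSubscription,
    default_subscription,
    message_handler,
)


@dataclass
class Question:
    content: str


@dataclass
class FinalAnswer:
    content: str


@default_subscription
class CoordinatorAgent(RoutedAgent):
    def __init__(self) -> None:
        super().__init__("Answers a question and publishes the final answer.")

    @message_handler
    async def handle_question(self, message: Question, ctx: MessageContext) -> None:
        # The topic source ("output_result") must equal the ClosureAgent's agent
        # key below. Publishing to TopicId("result", self.id.type) instead would
        # leave the FinalAnswer undelivered and the run would hang.
        await self.publish_message(FinalAnswer(content="42"), topic_id=TopicId("result", "output_result"))


async def main() -> None:
    queue: asyncio.Queue[FinalAnswer] = asyncio.Queue()

    async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None:
        await queue.put(message)

    runtime = SingleThreadedAgentRuntime()
    await CoordinatorAgent.register(runtime, "coordinator_agent", lambda: CoordinatorAgent())
    # The agent key "output_result" doubles as the expected topic source.
    result_topic = TypeSubscription(topic_type="result", agent_type="output_result")
    await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])

    runtime.start()
    await runtime.publish_message(Question(content="What is 6 x 7?"), DefaultTopicId())
    await runtime.stop_when_idle()
    print((await queue.get()).content)


asyncio.run(main())
```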
#### Certain Agent Systems will hang if `max_workers` is not equal to 1.

-Another reason is if `max_workers` is not equal to 1. This means we spin up multiple threads to run the agent systems in parallel on the individual validation dataset. This has been overserved for the LLM_debate Agent System. The solution is to terminate the program, set `max_workers=1`, and rerun with this setting just for this agent system during its code execution / evaluation. You can then terminate after this has finished evaluating, and try again without `max_workers=1`.
+Another reason is if `max_workers` is not equal to 1, meaning we spin up multiple threads to run the agent systems in parallel on the individual validation dataset. This has been observed for the `LLM_debate` and `Tree of Thought` Agent Systems. The solution is to terminate the program, set `max_workers=1`, and rerun with this setting just for this agent system during its code execution / evaluation. Unfortunately, this means a longer runtime due to single-threaded execution. You can then terminate after this has finished evaluating, and try again without `max_workers=1`.

The reason for this is unknown.

@@ -299,3 +318,20 @@ The reason for this is unknown.

## Appendix

+### All Agent Systems that the Meta-Agent discovered for DROP
+
+#### Meta-Agent used o1-preview, and Base Agents used GPT-3.5
+```
+TODO
+```
+#### Meta-Agent used o1-preview, and Base Agents used GPT-4o
+
+Tree of Thought
+```
+
+```
+
+Tree of Thought Imp
+```
+
+```
\ No newline at end of file
diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py
index fcc6b7ab5a4b..8308a3653935 100644
--- a/python/packages/autogen-core/samples/adas/adas.py
+++ b/python/packages/autogen-core/samples/adas/adas.py
@@ -105,18 +105,14 @@ def evaluate_forward_fn(arguments, forward_str):
     AgentSystem.forward = func

     # set seed 0 for valid set
-    examples = module.load_dataset(arguments.data_filename)[
-        1:-1
-    ]  # first one and the last one is for few-shot examples
+    examples = module.load_dataset(arguments.data_filename)[1:-1]  # first one and the last one is for few-shot examples
     random.seed(arguments.shuffle_seed)
     random.shuffle(examples)

     if SEARCHING_MODE:
         examples = examples[: arguments.valid_size] * arguments.n_repeat
     else:
-        examples = (
-            examples[arguments.valid_size : arguments.valid_size + arguments.test_size] * arguments.n_repeat
-        )
+        examples = examples[arguments.valid_size : arguments.valid_size + arguments.test_size] * arguments.n_repeat

     questions = [example["inputs"] for example in examples]
     answers = [example["targets"] for example in examples]
@@ -141,9 +137,7 @@ def call_forward(agent_task_queue):
         return result

     with ThreadPoolExecutor(max_workers=max_workers) as executor:
-        results = list(
-            tqdm(executor.map(call_forward, task_queue), total=len(task_queue))
-        )
+        results = list(tqdm(executor.map(call_forward, task_queue), total=len(task_queue)))

     acc_list = module.compute_metrics(results, answers)

@@ -155,9 +149,7 @@ def call_forward(agent_task_queue):
 class ADASAgent(RoutedAgent):
     """An agent that performs ADAS."""

-    def __init__(
-        self, model_client: ChatCompletionClient, system_prompt: str, args, archive
-    ) -> None:
+    def __init__(self, model_client: ChatCompletionClient, system_prompt: str, args, archive) -> None:
         super().__init__("An agent searching agent.")
         self._args = args
         self._archive = archive
@@ -176,14 +168,10 @@ def __init__(
         self._model_client = model_client

     @message_handler
-    async def handle_task(
-        self, message: LLMMessageList, ctx: MessageContext
-    ) -> LLMResponse:
+    async
def handle_task(self, message: LLMMessageList, ctx: MessageContext) -> LLMResponse: print("Meta-Agent making a LLM call...") logging.info(f"{self._description} received message: {message}") - model_result = await self._model_client.create( - self._system_messages + message.llm_message_list - ) + model_result = await self._model_client.create(self._system_messages + message.llm_message_list) assert isinstance(model_result.content, str) print(f"Model client result: {model_result.content}") @@ -201,11 +189,9 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None # Process archive file_path = os.path.join(self._args.save_dir, f"{self._args.expr_name}_run_archive.json") if os.path.exists(file_path): - with open(file_path, "r") as json_file: # noqa: ASYNC101 + with open(file_path, "r") as json_file: # noqa: ASYNC101 archive = json.load(json_file) - if "generation" in archive[-1] and isinstance( - archive[-1]["generation"], int - ): + if "generation" in archive[-1] and isinstance(archive[-1]["generation"], int): start = archive[-1]["generation"] else: start = 0 @@ -231,7 +217,7 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None # save results os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, "w") as json_file: # noqa: ASYNC101 + with open(file_path, "w") as json_file: # noqa: ASYNC101 json.dump(archive, json_file, indent=4) # Initial prompt @@ -255,61 +241,37 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None # Reflexion 1 new_messages = msg_list + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=reflexion_prompt_1, source=self.metadata["type"] - ), + AssistantMessage(content=str(next_solution), source=self.metadata["type"]), + UserMessage(content=reflexion_prompt_1, source=self.metadata["type"]), ] - response = await self.send_message( - LLMMessageList(new_messages), self.id - ) + response = await self.send_message(LLMMessageList(new_messages), self.id) next_solution = response.json_content print(f"--After reflexion_prompt_1 {response}") # Reflexion 2 new_messages = new_messages + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=reflexion_prompt_2, source=self.metadata["type"] - ), + AssistantMessage(content=str(next_solution), source=self.metadata["type"]), + UserMessage(content=reflexion_prompt_2, source=self.metadata["type"]), ] - response = await self.send_message( - LLMMessageList(new_messages), self.id - ) + response = await self.send_message(LLMMessageList(new_messages), self.id) next_solution = response.json_content print(f"--After reflexion_prompt_2 {next_solution}") # Reflexion 3 new_messages = new_messages + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=reflexion_prompt_3, source=self.metadata["type"] - ), + AssistantMessage(content=str(next_solution), source=self.metadata["type"]), + UserMessage(content=reflexion_prompt_3, source=self.metadata["type"]), ] - response = await self.send_message( - LLMMessageList(new_messages), self.id - ) + response = await self.send_message(LLMMessageList(new_messages), self.id) next_solution = response.json_content print(f"--After reflexion_prompt_3 {next_solution}") # Reflexion 4 new_messages = new_messages + [ - AssistantMessage( - content=str(next_solution), source=self.metadata["type"] - ), - UserMessage( - content=reflexion_prompt_4, 

                 # Reflexion 4
                 new_messages = new_messages + [
-                    AssistantMessage(
-                        content=str(next_solution), source=self.metadata["type"]
-                    ),
-                    UserMessage(
-                        content=reflexion_prompt_4, source=self.metadata["type"]
-                    ),
+                    AssistantMessage(content=str(next_solution), source=self.metadata["type"]),
+                    UserMessage(content=reflexion_prompt_4, source=self.metadata["type"]),
                 ]
-                response = await self.send_message(
-                    LLMMessageList(new_messages), self.id
-                )
+                response = await self.send_message(LLMMessageList(new_messages), self.id)
                 next_solution = response.json_content
                 print(f"--After reflexion_prompt_4 {next_solution}")
             except Exception as e:
@@ -331,18 +293,14 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None
                 print("During evaluation:")
                 print(e)
                 new_messages = new_messages + [
-                    AssistantMessage(
-                        content=str(next_solution), source=self.metadata["type"]
-                    ),
+                    AssistantMessage(content=str(next_solution), source=self.metadata["type"]),
                     UserMessage(
                         content=f"Error during evaluation:\n{e}\nCarefully consider where you went wrong in your latest implementation. Using insights from previous attempts, try to debug the current code to implement the same thought. Repeat your previous thought in 'thought', and put your thinking for debugging in 'debug_thought'",
                         source=self.metadata["type"],
                     ),
                 ]
                 try:
-                    response = await self.send_message(
-                        LLMMessageList(new_messages), self.id
-                    )
+                    response = await self.send_message(LLMMessageList(new_messages), self.id)
                     next_solution = response.json_content
                 except Exception as e:
                     print("During LLM generate new solution:")
@@ -365,17 +323,15 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None

         # save results
         os.makedirs(os.path.dirname(file_path), exist_ok=True)
-        with open(file_path, "w") as json_file: # noqa: ASYNC101
+        with open(file_path, "w") as json_file:  # noqa: ASYNC101
             json.dump(archive, json_file, indent=4)


 async def main(arguments) -> None:
-    token_provider = get_bearer_token_provider(
-        DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
-    )
+    token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")

     # Create an AzureOpenAI model client.
     client = AzureOpenAIChatCompletionClient(
-        model= arguments.meta_agent_model_config["model"],
+        model=arguments.meta_agent_model_config["model"],
         api_version=arguments.meta_agent_model_config["api_version"],
         azure_endpoint=arguments.meta_agent_model_config["azure_endpoint"],
         azure_ad_token_provider=token_provider,
@@ -412,12 +368,8 @@ async def main(arguments) -> None:

 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Run ADAS")
-    parser.add_argument(
-        "--verbose", action="store_true", help="Enable verbose logging."
-    )
-    parser.add_argument(
-        "--data_filename", type=str, default="dataset/drop_v0_dev.jsonl.gz"
-    )
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.")
+    parser.add_argument("--data_filename", type=str, default="dataset/drop_v0_dev.jsonl.gz")
     parser.add_argument("--valid_size", type=int, default=128)
     parser.add_argument("--test_size", type=int, default=800)
     parser.add_argument("--shuffle_seed", type=int, default=0)
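The `--meta_agent_model_config` and `--base_agent_model_config` flags in the hunk below take a JSON string. A minimal sketch of the value the Meta-Agent client appears to expect (the keys mirror the `AzureOpenAIChatCompletionClient` call in `main()` above; the concrete values here are placeholders, not tested settings):

```python
import json

# Sketch of a --meta_agent_model_config value; values are placeholders.
meta_agent_model_config = json.loads(
    '{"model": "o1-preview",'
    ' "api_version": "2024-08-01-preview",'
    ' "azure_endpoint": "https://<your-resource>.openai.azure.com/"}'
)
# main() reads exactly these keys when constructing the client.
assert {"model", "api_version", "azure_endpoint"} <= meta_agent_model_config.keys()
```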
- "This is to mitigate any errors due to request limits with Azure or AutoGen", + "This is to mitigate any errors due to request limits with Azure or AutoGen", ) parser.add_argument( "--benchmark_specific_utils_file", type=str, - default="/home/andyye/autogen/python/packages/autogen-core/samples/adas/utils_drop.py", - help="File must contain load_dataset and compute_metrics functions." + default="utils_drop.py", + help="File must contain load_dataset and compute_metrics functions.", ) parser.add_argument( "--meta_agent_model_config", type=str, default="{}", - help="JSON string of the AzureOpenAIChatCompletionClient settings for the Meta-Agent." + help="JSON string of the AzureOpenAIChatCompletionClient settings for the Meta-Agent.", ) parser.add_argument( "--base_agent_model_config", type=str, default="{}", - help="JSON string of the AzureOpenAIChatCompletionClient settings for the Base Agent." + help="JSON string of the AzureOpenAIChatCompletionClient settings for the Base Agent.", ) arguments = parser.parse_args() arguments.base_agent_model_config = json.loads(arguments.base_agent_model_config) diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py index 1f059175fad6..c3ef5ab9f1a4 100644 --- a/python/packages/autogen-core/samples/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/adas/adas_prompt.py @@ -26,18 +26,12 @@ def print_repo_contents(repo, path="", indent=""): documentation = [] for content_file in contents: if content_file.type == "dir": - documentation.extend( - print_repo_contents(repo, content_file.path, indent + "│ ") - ) + documentation.extend(print_repo_contents(repo, content_file.path, indent + "│ ")) else: - if content_file.download_url.endswith( - ".md" - ) or content_file.download_url.endswith(".ipynb"): + if content_file.download_url.endswith(".md") or content_file.download_url.endswith(".ipynb"): print(f"Reading file from {content_file.download_url}") f = read_github_file(content_file.download_url) - documentation.append( - "Title: " + content_file.name + "\nContents:\n" + f - ) + documentation.append("Title: " + content_file.name + "\nContents:\n" + f) return documentation @@ -52,9 +46,7 @@ def get_autogen_documentation(): for subdir in subdirectories: try: repo = g.get_repo(repo_name) - documentation.extend( - print_repo_contents(repo, directory_name + "/" + subdir) - ) + documentation.extend(print_repo_contents(repo, directory_name + "/" + subdir)) except Exception as e: print(f"Error: {e}") print(f"Found {len(documentation)} pages of documentation") @@ -1711,14 +1703,13 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct def get_init_archive(): - return [Tree_of_thought] - # return [ - # COT, - # COT_SC, - # Reflexion, - # LLM_debate, - # Tree_of_thought, - # ] # TODO: Take_a_step_back, QD, Role_Assignment + return [ + COT, + COT_SC, + Reflexion, + LLM_debate, + Tree_of_thought, + ] # TODO: Take_a_step_back, QD, Role_Assignment def get_prompt(current_archive, adaptive=False): @@ -1731,9 +1722,7 @@ def get_prompt(current_archive, adaptive=False): def get_reflexion_prompt(prev_example): - prev_example_str = ( - "Here is the previous agent you tried:\n" + json.dumps(prev_example) + "\n\n" - ) + prev_example_str = "Here is the previous agent you tried:\n" + json.dumps(prev_example) + "\n\n" r1 = ( Reflexion_prompt_1.replace("[EXAMPLE]", prev_example_str) if prev_example diff --git 
diff --git a/python/packages/autogen-core/samples/adas/docs/azure_ai_studio_edit_deployment.png b/python/packages/autogen-core/samples/adas/docs/azure_ai_studio_edit_deployment.png
new file mode 100644
index 0000000000000000000000000000000000000000..00fec68594793153597da7b77a9ded7aa54d293b
GIT binary patch
literal 128648
[base85-encoded binary data for docs/azure_ai_studio_edit_deployment.png omitted]
z*w;VQ2h9F<(NrQl;TLN8-D;n^mu+>*tfTR1#Uh#6W45;AY9_2pxMU`;r7_r7apS*8 zV>R}Y9(0E{w#GoOi`M4W)Zt0<4-Oc!Kbt02!0mw+QWRnF|E1IVamccVlAPP@Z!#gr zv}Jlibcx5M!z*;{S^fLnYn3x+Ca6hR!xls6Pgmb`W37|*IU)7vaSSsV`Sj7zkFOq= z4F-=+tnTE75k2cVnVC@|m_8T6#mbd&d)dPO=~aQmYw6}^+D`i3Vd0*AnyH`1{Unj_ zDPgz_&+RPjki@5_iCD8vN8t(Qu9#mj2 z{^7au<>5S*GxU5~c4_J})9F-t33r9kvV(g_2FpaI{f!Aj|3JTl8=>42eSr5&hdzPCu>Qqf`KFS;CJW4)Gd-Lc?v=?c_#J#xW zOJP^`KTb`&SL3|h*N}}<|28q|lH_`SLw4|x(Zl({=3~)rT4bvO{ONq+Ib{C(%>2er z?ggCNzdGiXrmS(V&6Cr>`p)qRqcJFkPAx8m1L58o`7ON)Lw^N8vE}k4PYjb#yd6}@ zym2A6`}e8Wz8W1gDok2kco;VGvgV^P`#ztqZWhb_1MhxGzY8!EycoBk$o?tEnnqc^ zdDZS#<-S-zW<|Kr3nln~%V$TF=BZ*ZeM94vZXax$E5Z#N?>5sueJM`1IEv>ixgI>@ z&HjUa+$n$8wKDnm`wn5c+_ysHYuq?CHl`P=C5)PUuPa_8oZ3pb;p}rI!pe76$xux) z__y(LQ(az7bt0PE8kV^Cvo{XTZd%_NuZ1Q=Ouo+r$bVOT`ku4Bq!cQ=Pu>xcuq8~H zX<96(W=@C6HE7eB6W`*AC$O!chbR01Pmt83Dl!!3kFJXfGJEO$>p@8YP z*XJi160)TCOuCd>gY6FDnu2&vyuLSdGnZ~Xo+8;oSQp!N-x z**h$66f9PP@@);gcn?-9*`l={)O?!nSkuxlJN3>QX`RS5&X9VoxG%!aijtD zEulMOQqF5GD_4H=(Z6PPE_A=btRnm)ebv>I>tNN3WPDcwN9%j?Ois;kfp=PD*|M#_F58GiI{e93y!RGz@5TU0a`1?{- zYcUSWe_q>Z^Z&OG)?!U~KRDgob$4Sfa|=g&-M%#Xj`^AP*@ zV3oh$YFpsN#>8vj*bEjJ@Z=8Kq9^=dz|)-~V_g2Hg!=JgMO6p#tY5qL%JmDe0dL2azg%b4-112-iBNJ*2veEAZRjOEpDH1rAv zr9i+AnpOovw;yHJxHJIAT!uDS1Ox=FQ@u|>1P?OjEOF;2mzE+kGFVggprY;W?%vqa zf(_&hX*cxkmT1roFTuJ7^)@BQag;&Tm55P_42o6Fps@kz>*e3`QNynefU5$410fEb zRKDVmOMvpJ2AyvLLc%h~33+F?_;jO!GEge1e*1O?fNk!e-6#xtZ|I7ljaw_I6vd^a z@SFWi0?tSy5G4}Oq8R+6cbt9rBKgitoH`NR8b$no) z1vVV+5k8Z2M2r%rsb(BN(;*&PBV?D_3gkqgpma`O|0ML43I@a?3kQc9qcV()fyJMp zQ309(z*$aZ>2pI3_XIqg@i3DIzcz#u^1^qBn`ooW#$_%!GDd z#Nny{%b-q!4qi4;Bz&U54)a0=#ne0lf^gMioSdAHw$vPXaQ-}A_lFN<4xTMks4_De5rIGgnNMXh1#)k@$nrmk2V!Zm;uCUkSeQxU7Z}1 zWCZQ8*(Z`fV-v7DxFA)g2$DHv06Z9L@HOWwmJKO3^yv5nMQfzs*(?+b4a*5y?urf9 zKpwIA4Je0xTD&0&Hq632q)Ti^9K=eux3`=7qCh;v8yJVuqYBV4t*4&XGX`AE0y#1k z`RO2uISooasH6<2T$bVod5Cjd!@MclkA6kC_z>Rq1^Jfca6^uth(hfWe zf3Vx>(BN)$d%Jz5;YgtHlCG{Fzy?r*B5rnS*B^tszckUqE zt@Qn6zCzx6tCLFTmw;kD2h(G5^JWNeHwYa+=R(71NFhVn;2t7rxdX5sg8>3@X;l@C zLy>)qOEEq4*{$~eB?16hKaf@lE3mM#LXz<)1zGJfAFcZUL~gtR|At3M*i!4U;RF&x z4^PP$(bB_$io(LiZT#?oI6=U)87MnpP?a|^f&*>ON#$M5xPQ4qhyxpnIn&>^Nk)aKJ??Q^%yd6P7a=A3Wdy4BLrae^TD7)X60 zd&0-ZrwfAu1_M9`gir|cud0#+6-R$^Hnr0*K`?^iyOmTb>l+)+lZ{vhko`sF4NWbr zsg;!&AfiDFzT;r?6QRoildSvcA8#{ukl%Gh9>Rc_ikg~LQj(^!vJ%y`tM|&FO8^5@ z`P2Y=?$!lJ2qbrT3>pp(e}x>3P!>GTrzxfLDi0B;IJz2X3IrUL2teqcPfE)5Zf zk}?CW0T)d0gQe>2wn~OI$Hzy=4Om2*g zf%-%!=vSo#DFfLE5l%r`2NxhOU8&RGhFKA^mG77;iin^ ztkmxd0PrRX3e#4A*@0_s0kB2T9;DT?UcR&hJ{Pk8P-yo@%N@+E@?kfi*rp9{YY4ssAHE=#;GU=b+rKM^pmh$0s@W!rgcFsVO_STCSrDW7XRx(8*+nSb*8ct%uaT} z%FPzFhB}V`7Y|P}P>Y>*mQ4@VI+&&0SJV2~C%U>&LqR^bmA9^otOsvlQ|cBBHpluy zHQ5T>nUj-~-{Z#=n8tVS@JUEWz*Tq_N=n<97pzAE1N{vmWP+?~6nF;9U+c4=krb%P zN?yF-hlL$F&6d$hYunk`!6L+kMc>X_QX2qG zAggO?roi}2Yspf92N1WVJ$#(=&wiyloQMZ=TL^5l#^GTa&_LV$_E-mWu{cTk;~qa| zJJ<(zq_#uZ2Y3`LSOwEFGgFh37;w_wOgZX`SHw!BKxRo8!E!C4gP?ww0k|^M05et- zK**@43hJ(Fg(6R_sR`dBs?;GGYU*jIp-ji>GtHZ*N73V8@CXQ+K@yzpiGueo8H0p< zBl3*Q%*-iUfH`3wQ~1ytqMt_z7%ji-Y<3XHBnVda2LGSAYv2hj2k$z^pb5G-aoxnPuH|69v;tOUa;R zusT2_Tsf*f)yE_s*YW_T7yN9aL{r09KlAz_SKGqD%mdA@TGw%Qk{SlbVyD;9laSVgLXhXQ{2cU$kdhVPB6p*>NxD=swm9C#B1RPZR(Mn>d_!sr@ z9fF%epb&s2JArB}a_evhEkXDcePA-7ttTmnqqV~Sx(?Ocoh3k>nmaqsqu)bIWOfv< zBFjWgO})CdrUXEF=pr8gH;2A55kQ3FxUb}shwp%!WQJlqO>rg|T#yn8fDJeeV0ST3 z8~@7nXi+)2Q}XiiQUKYbB)|a&$$3T#`AlJ}cD&)U5W9d^g{|BSYXuD|HoJ>AG~guM z=?A&|1VM9ZW)1G;<>l4Q%~o)t=>FiQS%B{cD`~^(s}&qWJcIt`5?Wi2qw7Y75?V0Q z!ZMTtdUP1rXlNSC^z`Y|(y}ros0se~@naSA;z^;}4|GwyZXmKfDjTBSs8d zkrNaKWnqy6vk#eWz-R??UwSzSe%;B2zwYkt?p_CXr91|@*`=Vxi+l~# 
z7Exp`U{dV|LGYq|2iXTZ$^Femwr;!hsf7hRNl8hI;s{Q?A?M_zBo=Vq;3!qWwx;WH zkx@|v1Ei}3h7ssactk|4l@6mq0G(o#btk)9!wAfH>PO}kyD0SF-4 z>Rlm?vdYSKK(0iB`h^uZW^~^OpaRBUZb*)Ul{G{C%-x!?>K$P^WTcw;-oxpYcK=Of z$Bpg>t!2|Ue2Cg?5|$$Sjd3w>ChyBP&NBe(kDl;Do zCUe(MGLD`$C_J{v{yyTAMc9RvApDCe!Z|Sxd7f~OoG^0!5CI`4&$x_E(S!Q&T3mRq zES)wZ?&xWck)KISRJ?z+fOEj!fc9JZIN8f;eXLTuhc_jRVv`s0{%O48>2LV?0-Y!+ z4x3F5@pEaewAdcM1Jj&G^z7T+glZXXq9zSyn3Z!)tFKO^b%k2KV0^a2$*PW%IC8(a z^nOQkK~lN9c>Kv6iNe;WL5z*9Y#$P#$w?)OcE&d}5kw1|2 z5YDG?SfHdC5d8!^csOK6Mnb?6W_H<7HhnS-pMsFQ9zU_vmf*P-*4Rj(U$(}>WSf9h zEpBD2ea|cf<>-;?eTCc$Y`m@6Ki`+Jrwg|~_)a?Pnc+FA$ru+pmBSz+hmQlwOLM0! zyfDp|POv!LW0p$1GPPHA6+c)ehito_`Wx|PN4I}eT#e&dWsWd9b~;;mdI)6Wk-$4D8~7RIC}et)9tzPri-y(8EKUfx3e8-VaYMuWs9 z>q_$vVo%(rA$3nYGkM;G1ao@LfQVpBF5)raF&u5CccFug&a^{csEv$7s-w5n7|yVo z#a((c{*pL^*mY>!kAxy}+JYpPB{<&jT|+_1!hFU2U z-V<)Y-CYE)Qd=fx)80PdHVPE-bY~d3`Z?o6-|*B!Z?41cSyAD(-P`wGw$!O_HWX(V>3BXsGm9 ztm&#?Y_{sF>iW4%UwU3Sb{s`nqlOh@S?6yZdb-EB46qjSxcyXIqzLdyDH5ODkLS-4 z8aQ|xa_?2+@g5)9>vw03&&XqnIiD+AnB4XIF?rgJqSF|PWVn#l_#{K?T8TT~1>5>mQDf91v%Ou1%{K8KhG(1re z@ZC)6!zwLHz*^MEqS*<<)eTdgn!wE^(98X`6X^X7J2$&VlwYQ4P}To64V9GF$<>7! zlf1`z|MU%(i?)r^Rjlh-7?h^SnLgRyT|jC*?J1RR%*=gv+p#^BH>vZT-t?z!t}_~@ z{W*d7x|wIcl;b>6P@B$yKV6Us-Wr{0W5^R4H8y48m5nI6Mv>P2PD$H{AbL4Qoda{l zj`KWuOzsDK-8pMk=N;-F+$*M!7llVCa`@zzOW4vB88#O!ubCKU5_@jF4zM&n{veoF zx`MI<#jEhx{9zaqbgXdl#}K|E=jLMG(O29L<0fI;cN+V91Ift99)@*mC$7J{ER}T3 zkIIB|zvcZD{dOUda$~-#E0eW5N$}mzUKTw>f{DK?nVPV$@DdwNa&97mCnB~u#Y;GRz2Srl7kw&|x%qB~6`S)CxaYhR@DL21IOALOC z{+lH1-ze&;pY<=#`l-~{>@FpEIly9;oimIlF1?D;K*DO9Et*Pj$%-q)(c`q$DnBW@ zmp8k$Z#8&T9~iN2dv8#PTq9Lx=Ab|sB}h$2Tvtx#d0UNhxY10OxgKytX}rMvrO= zz)$O%Xc}ZoNlBr8_kS{{7w~zX{X2Qwa)It0?U%4d75Yp(v z&;bkp;&|HH+S9i1g&DWz6m+xs?h2A=WJ47YvO}`7 z41l{3s;VdQPF;X>#syl>emIKjyGWo!I$b16jAv*J!No%~#wsYNV}gT0=Qfz3_dY5? zFoY0dg(4yy1nhA3yU7(;2{#lTiNOF=ZKT4E1|gq0we{iS`Xm43*)ufeAk%nKzadxK*IZZ0Mdw5{6JKBLN2e0S$ zk8p@aAIENYwxK?6dgy#`(Vu z6N@-ER;N@TuXoWzvsV|I)AyYpe!y?-(LxRDC@j_Z$N#>*5f=xiU%=%yPm!QCqWphU z{|Gk`7(U9_(s24$k)hBE0DSJNlm69-xf+ND1n5W}fd|WZgE~Kl14;gE1l8V}m_Mu+ z!9bkrXH=(=9dNZM1fKANMt1X9jmtSZIl79g#Ds)|&cbpkaH>d@bWHtQ0Sj&mI;Mvs z1Z5(+e;17gx&Pjv!JtzoDng>i|9dr?|E082Xa#3vhZRJL@A9@sW8 zxzhgoI={^BS#HK-`DtVyEB|iDL-om49G(U|RPY2$8Sm9UD2jz&>n}Jd!*aiid`c^} z2gPJYUbCSNU7~*qPcbnnOlVr+(P@Kb7MIRb+5A6)urU>u9kb}_zb`pWc#+q+pTdgt z|Db}E|1IkwXr4`jKOv<}ep331?)8g^)~8ffx!UG6^&&R$b{v+UG~4lt@aa{Ql~wHw z1+=@)qcz^{PXAcM=pF1bL*fwnPw^iTe}`GMz2Bpa=oc8K$t)rAe*b5F3{`a4(i>U+ zI6-*Zf2pJ~uA^J}zW$%LMB~#L2qmEj-`~L_BQ4c?^DjEzf55?9zH%k~E3fv#KnJ$S z73}k8POjynt1$Q=8^uBj6_7Zk|9TM~aTElfKi`Md5xz8hs5*S->*SZ$G~N8&SePqG zNm4#@Vh=8!S6q^C5c1rlO?*Z(j24pK)~47=0(4cI0eUa9r#4E3;CrWzsQH_r2Ul=N z6nzGywNU;%xyr^LPJ|~m=6oN92A{k9_NF*wZrR3((8ttGBJ=C%|7~{8a_=#yUFj(+%lz84ec9zkL_NAL<|L_0>u_uf5jT=pn0CRTE zX*D!>SA2%OsiT9&_|YTybMQMz_-N=qlG0T7nCJ})`*|4@52~w`!IKG+P;201zyi{cu>rejkPj?$L0S04!AWp`m5yRqi&WZXV@*E?j(KVi`xzH#`6hL zQN(b^eXny0s=I99dWpt2j`R)B77rC&Wf2hp41L8(V>uVN^wr_5CDigTb#F{?)0K*n zvJu+PT~K26eEDaDm9k!DXWT5hYBM&X2hM0=EE`o2_yuq=<+h&*NFyVhWHu_RHA;9d zT(B8lrh-Bi5fnTE!Fk9Z9k@8tlAMwb1HmnenzS?wbc_O5KHv;`oscD>X(FUv&?0~# zWpQshhJI0BpV>C3YM`_&cvg`oJ_4QVy1+h>_Y<=IyvbwlV9yf&-PfvRXTm2mQ$0E? 
z5({%vU-XlNcw0(ggjUbU8$(_q1_^`9_wKJm0SG*WvjyGW{)%tohoUOor_Uc9b8#Vb z{r-s#)0cPY76fdQUGb_cxEZa1^_<^fh=T5X11veEr;yFg1FnL|s<{gEgGSPe) zeI|SRBv8;IVJRhixp}3IUrbDn8-8K8TFuH@xl|MxHWK}7Lm{N0LBZR{=OVkA*r?f7 z8;x{F+UL}5;qbTPK%*oAfm4`%1*lHBRr<1X<8RFS@|iq?ym$ADqq5SUKwFps?T0qI zk~$7q{=rQSa>t9zKke#n^_rzGy{W(R1H!lg57q*oqVisy3Fq(z(ml^NHa3pNE(d$5 zcy{ItK1(*1L|duC<Pi!5QQPx>u`{4G;TT;^Di-E12!BMR<7$A-BZz;P;j>l(|bj`M?gn`gaxm z36nRw0jH1M)ARNEO(R)+Ybc9U-uWDHzr7yYWkX#@?X`$w%a{pIE0DoZ8B+4j7?~y1 zd4!=ila$Q;foGZ+zf?KXGK(3t@;ieB3J0s$-ahJt7sAq2LhiIM2Iwb&?HTi{YT~5O?C%{gJXj124Cy9pT6sY!b*CU+3=y_c zvs%x(p!lu^iazv94F?~uw^%`RmbiYnF9qL^l9iB?3sccp+d35m1D*jfB1&g^Zo8g? zWWG^Qh-F(>JbEeAQ=%cyzx1_ayFwoRU%>qE6eRAah3a6mL6E@a0QlGFg+_%Y;Z*7A z>{0JNcDT)IWH|Xs*pG$Dd1biK_tFCc3W^sr%l>m$6JaDj*D%0k3bY=29x|GFWb^B; zn^KirqE^}O=UVd0D9RYVw&5~8H*~F<rj4c?hRF28ghtDcozBX31!+h zDTuNA8jdj5x3{=HGB#}Y>Ol*kxoJqp!sR56GhMjh|h{zc{J6JG`*KuiRg@Jarro61I$=KWn6<7I-J@dtgpB(-^fp_DD@8Mav7l%CD%6rp_GqVZL7WkkD zK!p+V-o;B2Z_QvgR)H z^YfvZh0-l9%q&V+!@BExMkJi&uNeY4c5L=hX7Mftv=rc5o1#zY(TaCnvtg`$y8{kX z;mgZ{0=vAW+#-KX4wL;J(>EDz1xC{EJ!)z?dwXe$4DGuU`G&2WT)v3$2T&`g}?z12WXEEM*_Tl!-YE6&jGzM8T2|&N8r4S1-5e zS7to+Wb82{mcNf-N5PlD34s}gSefmWOG5FDVyp8WOSF5bVJjETl~i3mS26{1m75$T z_V)IDlj9@u$Oc7fvS zH!Ms_yC_C8Y5JCvcF6k3=qO>7IHt6sOv!9K6W1+SKU_svSUEs(#_Wy&hfeYtS0%Fs zGqdm2V&xR4kw)h9j4$_~LyfFNu+HduLDrLb`6>d;vb<2O97YwMYBbHtR#W%Xd zsIe2jVDODB=yJQ@jQ{j0b!k}zl`)-xEN*pGPxo3fXWEyj6iB>Z|Ls{Lbmz+3G80%k zz@^m>4*|PI29iQIqTPnn$H;oSy8>!zf`wR2v6G0PgexFUo)s7v*oR}Xk=OZf_vX!; z04^99b*SO{qN33Kfxi??Yl@F?$8z_jm%EG41JhYf!$OuOp6D**`(`MF|1=@tT7KN_ z&V;0fkFW0fS12x;y`z|$E}ku^|8jv5_lelL_{23W8aHC7hSaWWTs!bQe6EfCrqGQ@ z_}-~CG8PA_x^c^P6BQq^I?SVD4i+{xm4#DSa_^d6MNeFYkwT-MvV`J;K$VwW(#5ZO z(qWO6R=sYEZ{=0iiSJmeZg30>J0$!YH}UcF_h;ttlC-rh_^>W4FGr(bBQg$RF({pH zy8aA?x_o58)bw$kw_AErvQt^Nzt2X1gSigiAwU}iKv?zT=g;@2j?AcE-wjd7;*OVcT{x|K zw+;#`*!OqFm8~iYJxC4-DA^9FWPuHZA7k84ewVs% zF2{=ziH9?5?d(@qUqT)sZBX3Ai*GOfkuj^Sq^&p<&*KuF@ybT&XkTaoLwVl@< z8U1X;<!2N zXZAwhpgomz9og7cH+4x&QWD+E+9Ua=0{p2{-do?rq0ddpT-o^pcY3 z_O*?tC%_h?^0e2E8ezxg`g>>;Q@0B46DAmSwyb{iDyNroOwo$`H$`*Nbq3U(3_5(@)WF@5|pf`aDri}f5>SspHm zOGZUPE_1FacgFUX>B>D{*i3(^@H$6J=x8Kz(^T~Mw~?~xDhiw3z36tZrAQAI3Jz=# zuJY|~UP{+1@G&tiK4W?Cb|%>BDmgttlID>KaZ6}S-&qdf52YtuT=M=kIbD0HHllN$ zk@5G5c{9LRS36B*^ib?UG;RUTzqxlCqP`Hl%-Z0Y;d;qTwWxqlb>_@B>j_(Rkik=0 zZ4Pd({|a&Ac);~;H~u9?-AA?6_kQZQ{@ix2qez=K;aT6u&3Hiy3DdAtD##=F+P-%A zTI)dp-I`9)Q0R%T6>_Zn4>0HdgimavIFRPLNs*&Rblrx=E~^A{%V{{5ct8Eg80%S5fl@ z2U&ll)7e0&;~D?Jl?f>oqbH{s|%(yq0+fAa+kZ7nRM zy^3?`*LQyft6*U|{Vi9h4_vc%e}8N5`dc@gUK@Jg9Z~XovPuXyX*zL#4%HZMhmaho6dNUd2)wnZS@|5QL zExxah1yBENCX+FL9x92HOF{VQ>3GD%?GMdvo*$_>=~WzNwOQ>#98MR%mjC?bD#>HE z&8VBx5d^m-c!<_@9VttG3qCQv6l-q8yV1jK|5eXV$*T6WR~t?GpO0w^62LIxvXokT zM87R^*ZQckv!{^sqk$ssO1k}1N^Qqq8COiCGBz7}p8vV;g&JKKXbjf`NLpwPj?G)p z!H^T@`;*J^C^yt>TyE0F7JvWV)6QLCrgu@EY4=6BW65Jy1?2+|MomHNlQ}wPNPWL2 z%EE)2@qyh8*0<^PIG>RnyHlI1HYB%;kLkTXA~m-k4fHu7I2rxJ(Dg}D#Qkfp%4eG%R=RM_-ffTMAD0T#V`AoT>5&>%DVA zCuT18WykfkAG1<-zEIUYyLJq>MCD!0pXW0dmvco%tmM|QM6sZ)I-jtE7U~i= zhp^jDPbA1Kt_yfl3!n+E}$1Xtd-+p@jFP0^vno2=3D8HI~q0Ka}`onNobrnB~|eN zi?vm!m6@xQ{R{!OR;_S$15s|Hn{KLR(2u(8%OdXyUv1I`F>7F9tPlCrcj;W*1@Tla zk9ZRm7>D^X7rj*ZwQ=Fw4yo#bzAw=$jmD>jUTMd9Dvu&RFH|U!{5mGMuoAi^HGCAe z8>H!eJcxDIfs>M}^7MD_E2e4@6%Q^}C3G_0zc$&!U3i7oUz5f+sF*Ke@&-@i7r#ql~`3$)xc6 z5#WnO!Cjcefx+YjQx)=Q`n~6L7Bde~Vl!nC756W?#_X!hAz&)ttLlBb$KA( zzd9)$hHfv$HMm;?cQ`c#lf@=;t(N4vr3KDyVoK&0@l+ZldKaZ14PcV$4sUL+DAef8?xv#HD}i$^h+=8NsxZm?N+NohCHiOTccer`M|-ML2^Xot-^Qn~GC zhkcs!b#?LT0~SA7>j~a~@K5ASnI^MCK?-$Ij7dP?KnHmoFZ07 zx9M5CR5B&&MEhmrveMkpi4=@={B!dwnpx~S1#0i=L~%rVnUz?d3wzrzTu##H@BTm~ 
za_01!?K=&UD3%sCt{3i1NCg548u@t0DjSVmWa@qC=q3Hg5%cP1wEL6C zr2$2!bT6m5`Fyk_bK_ukP}^(HvmR3nE>_Xfd&wqFk%X}h%(1f1u|Fsm!3^XGsu^5y ztnV8S84}OyJp7P-GlrVTJ!45QLht3RMQQ_GPYnV;_reDa-%RAA15PSVSv|=4V^_oS zpYcF3kK?BpmgpX4&8~&~1t~`3#})d3T>D$QAFhSXn)mp$j#8{^qw!(f9v_-0Ohf5KswPPnENYe-xu)bGV?E9c?n2~J;CB&T0PR=G2w6} zQ~z5CmwS=FoAogH9l5{fhwn2hfhU}&%`UmuU-fgD=wPdGHg>PqXRev|Z7n8T%pK^V zVET~VSjQ_xn}ea>R`fc-t!Zz^HZvb66{mkfu&P|;XNIqn+lt4*M%Vl7H$Rt_C9(1M zr{gZYGF=l6PjKeN+JEp)>1{oe%^iVcM)Gs#kHm3l?p)2hQI%lfV9V-x`H*aJK6*Oe z;a%3_tFBdS!KV&Eppl!)K{_LT3;Uqr$WGO)tUcCQrF?soiEZ46c#F!5+>Hxc%j}ZUp3OTa$}y^=g~4xde|zh9tyiApwGtz38781iq;uw*3&h zoOTOYVG#M&ac(k)L3`EnEk$=vC>z&x@(d=4kjMAzMM;V5ERMBRDXp552Wnp8e3&vH zIdsK~OhOQYg>{iE?uI^3(yHr~^h*lHm%oN9UG`I8)sP!cNWVmLEz6{)cyL2efmc%D zczr)T_h=wF8Cg)NXyq!&a~whE_Z1;TX3RZ)qSIx28?;j%KcVHQ=-35;-H8pWiQDXORi&WO>$bni+p>hrX+~cu|Q1 z&fcV``sVY=DE!~`#CNfO;66GwmDlk@#L0|}P7nm`7pZkIT+Htg3OQ1KAZxU*-Y9XJ zsGh$yelT+P!oh&$zl)4vUesZWhys-F{)$KoYH}w%IsO|0(B7u5JNYM<;9iECyNl0g zxz5&@i^r3;-6yXecR5yD;34vtK9$R-$z>q?>TrYQ#XJt(il#Vw@W5LOJcjwKEg*gV zv0d||&x#Dfp9`h))K&b$xk{z~|AJ9*br4lSO#K03kBb2}$CZLm@BrQ+@vN~yPdriG1yA7*l-XuR<^O>GBGi^Zf8e$?bG70%Z6|BCg+foB;=n6;?7eyQ0$pDt!^Lmxb<~^UtAeTL zys?d4KMoW$rKb~h8GLp1oA#EqmXoF;&Es6H89O;Cqe1(_nKZtrG?X=jyaF3IWvn z+D;B+F@xrIW+%DFUmlxcucN~>ZW|hR_7tk^wmn*XG_2}r$KlO)-%ER8IYaeRg0Qk6 zZd;6ABSVV@rz#|YbOHB#=45!2j;m|Ad%Qld+;|!HYoTXIaA@cWpuNHPKx1fr5`X1% zMTD6z)&q?{5Yq|*@ijmV8`pOEYVX&Iy@33j)yJ$0=XHFyeHOa*qEX5MFhD#nbzU5* zZjeJO?3VU+wH5h?YXEqj0p?($$PI$R!a~>Yj~+cLg(i8j?yHJ9P*6JsK;{;}8>cRy z1)E>MldQ}2QZ7qr7ms*veWcm0>7+YniNF;}yc|zH4>4EegJg9t!Q+DKY7sYIFuw>` zV*NQzK5I|vHvlTkHjKe(kE~3i=P33WDBQ6zSvHsM{Zaje^*4W$oq=0#K@3t~77=-@UN`Q{&2SsM* z3MHUvpoGQUyCT4mg32klF*$H!f``nz+|?c%9Ke1!BT!5)5Yn>ZeZO{`c)}0y8o^MIj2QCtgxM8FBk=VtjYHGv+dO~8pO^W3rp&;Um9-aQFiB3eJ+qu(@$pawbV)lVhRuLw_m zZy}TO4Ri8_@`V5pOFIU)1so0u?Nr%tz?-n6H26Akwhp=S}06(^tiAyv%8= zA$*Lly6LSCgBKMQ3?L&|pU@LM92l}wP#mHTCD48@g$3t!Q2QnYvR|{rf5Ucs^ZNbl z-P&Js&wz{z9KH_N>cI|&u=~)0v>h%+3{NOt&A@`b>g5A?rQ<*jgEX?F=gu;}{R)_< zfeXuEl%W0HCfS>f{r!D5K|xZk+nN%$KcYEoPZYKyx>q%9GgL^2xB~!^V`H1?KO+VG zJ=$mACji|(Z-7BbG^_D1W4HM(tb#Hia6zF&g*EPaccQTJ3hoDC5OdZZiLM4DpEJ@L z^LsST35i&L{XD)MCIeMHJ#tuHfHB8{Mr4@At)*c!g;VwA4x`jiw8z%)w1**U3+wJcdEj3y?Lv*Eh9bdJ#wO!i5Xc`vqKU;(dJkKq3H2XskvyJdr$Z+@nXV z-kiX+L%=x;nJ+CoUDDgHKpGT8s_~!>N@KWaP`a>VhI#r#dm%2FiDvJh@q-KVfnW*Aqi|&1n&feZBpRPgcdoh z3Dvf?ssgW<8R!&khz||x+7hs))ox4F;2I);O^84>&3)le$v~7#par7EiKv{=@MQ*S z6tvJMVSMG&%--St3NY$?Ct+=e78UV>C5{H(Nz33M)eE0(y5;e@NDym`r;k3?I9ZmU zKJXsJ2M;%zQKXq_47Er^kOqKAFwkZY2@+TzAtnMHa6!?DiL@!Zz#&Iwui9r{0?n$} z?T@M>Avrmm^}ExAQfJR1ey)?Vvy!W;*z!b!VBrzWjV0hL5@kI^pb=mQAW2N#-XjLN z*HHaDRp=ieY}0qzb@3w(M4xJ*Bq6VBz@{MLWhGM1Ov~D@2LiLZyA+Tj{}5teWgz+q zs1>5ifpj(g=;GJ6lF(fs3e1PCgJ}aO8K(jF8+ChuSzJYYWhmwYNjM}sJ6p;`5YPpP ztOY!l%h1*_S?qQI#2n}5rQ@^V)|%ZGp<^B{2_qf`O_*`9Nt4V)=S0dE%$xf~41=VL&S^ zDMS{Pl99o_ItpaV)37ID=Yk_f9NcfSvx`L73m>y1S62Yws|4g&r~v`(i&n%M4>TGl z7Z$>R=*cE0SJ$mpZ@@En8&iL<{vPOmy6qY90$w9o>Ic3_AhM7~~dN1q4V)ndMtSTd_4Aw*qX|y?ggC7~nM4!Y5}JKQ4P% za{>yo%bh(v?Lahl0_GfGLKaP40894|2iMsdvDQIS+jVGo9w>CcX>SFn2Mf1HU@L=T zEz8(S7wUv|`PwyXztu{d4`ei8;i3982^iiRsSRa_;~B4!EqOsN73k3>HFt4PriXX~?B-ZEveV!=#>Du>FCxH5@K#4ND!WK*tbI4y`Ce@(d=V zX|=$Gf=KD7YZwTk5H32!Y7ai?x?_olL(E!jU7i1<^pmhaQKSW?laFLyM|N2bdh6jr zJ3Ej%&#{7C(QI-Owj$CXfH@3>QyGhd1U2vrX-bL{pa=($7~SvgwY9aa>G2u|^c`JB zRd-C0g0mE?pu89U>jx!hEnlIf7*fZdegY%dXQt zK0d;*k^%TbgP=KS6~uq}m*Cd%nW3C_URCt#Ql@~%(m7CMEd}Xaz@SfqX@SD0pOgXO znS=c!d=JR!+qZAq3>TjQE^B}J?MpD-)8KBUJFFcY&2RRGt|J!(Y7nXrhXe2&$(|sn zF3@Vry{ORI;OEz3%*Yg3_E%+X0xir?h&v}iAs=CF4iEPof%x7uI?4cs=@0Q0#MHx# 
z`LjW30aZicSm73Zj^lAUkmsjYZEu0VGBlBG1P|P1XfyDV7^<0};NzE`(17oNsf6n^ z&3gecT?P#nD&F_f0mv4dF*dkJwESTwg;4PtH3D(WXMdZjecley(V=#-Fpxuv2$Q?J z?uZf%w0lI~gm)l_JhT);$lqOvt=xtr&p@OA^maAib|3D}kU~(blC7T(K%!|~e?W0U zWE=y26^UaYD~T)}h;YMBLY+~N?*;-X>P(GjcnE+8NbuHYA{x-qV6{hP<+P|M1-!d( z)%qco9@OGom&fRUUV0gV~Z$W0Uz*iSSKfWE5=n@td@ zA%G>ge*1PfTqUcB$Vr4mFTee`M>^TM9ac2*tzc}ctE(sDjm?^%tV>2o83YE66&yFS z_bL|J>O(X#9_weILqJe3Q1e+x^2GL{>sXQV!otQqEhbj}sr0mrj0{}f70!2AMg?nd z2%G{7sKbs0JyHx#A)dx2BR!bMdLU(>bI~8&5eOPg?hJQxf*i9r0XTVTZhkXeDLq;8uYh>ZB0Ih?-4rO%!*AeR9K^?SG6 zS{4wV1A)(;0nngbXxK|mO&tOxQ7LKZ@2xd&-&(;0B2u@qipm;HB4_;xdL*j>-%<*c zVaO(-X3o|3SC{~D7+j@p^SZQD@%r`Cnwpxckm5sO2h_h&xe6Ron2y44N>HS30v2o^ zFnRBKaiNF`D$OV5!EsTA8qoW6n32PY!m)0P8kp$lLY~Xq@0nZ;1qBXdpkP@j!^gt{ zYh{6>hT!o(c=d)G?EgM$aAepv_c0IruQ+A)lEgrxy196lWP!tfDJGqpg+Qw5==a3Y z6i7TvQ^=i^y9mx3%H2q~27*s?=s?R62(;DEClHZLrd#nLNSK25hVB_PsMScm3>5a@ zM8$)&Z9tL=i1er%LRd^p2k5d0yUb~J&fZN-Ohn;K2l(G4V5Viz{AIJKvI6h`F|A>I zA<+U35f3qV(Taq1cQyTjFnnCl=f!(bdp?xSzZ=RlywzmNYTwWDNJfrNs# z;P_cE%fMWTtFAWr7ce~cUzq+&aU-uRi3S&WhmCQI?gH-38{q61#cX|WtZZk_sQz0# zNCRT7Tw9_4f@=TgJjZo`Ua`RJv1;EL>i-D~-~8XB#L-RudHg>Va>@tYkvBoT^7}-v zuR~y!BUTJ-C$`h4F(4)~?UZl*Kj#T8VHp4SLR&m={eNy5v^K!pDP`+h03acw0)?Bo zuRsTh297aE_n}%NG&7S8VnbX~Qc`Cs!()FJ1XyI-8HhUDK}E>nsc!XB^8lre5ft6U}8o^@%x2PTX)yN>gh9W^4L$K{oSBy1| zMf}pxe`K)?2@8x%+96u#E3;CE&M*X^WIz_WKnqK?1GjGrq8D7My z2S{<>OD%?4z~G$M(V+=F#=ds6<9mb{G^`R5MmftR4TmQy2eL2HcyA3FgKWmh@7UPb zNW2Z4EkGOL_(K8z1)|&qXJx*{zvQ~|YmxpDuurX6e4ilL0Y_LA4gxET4B~T|&>0w%4meu0wZWxz0IkY-Wn2!$r{Lby z!Fj{!gN1|&aQFBK7#$f&2re%WhAmtr|6#<}e`Q&ZV&~p9z%dZ8N&LkE0}$w7FmMih z4Ult)*ja%Si=-d`oFnXe=v@n9Hz=k8#RE6Xm5GKJbn*d@Dm}~f+5GBNdTK$lhrmC& zK3?mAS|=caIjAByOCf~>*x*Rt2)b^UmX}k88gL_W1>Koc-pSj-|IR?Sx-dgpUq#&W zJ+kqr0T|G0)J?zzsTw%=KqmmynG<$3IQw%C9>^o?G1$_Gg7+05h=t@9KySuoytYOc z1XTUOrM3W>0Gu)$k<@{Zz6QA?6g)t+0QxQ{LFNpD0c+okvJF3v7LV>FS<-k_UbU)($!l$-`Q=jnl;ZL!AK< z){R@Y*o1`0AXbFf^>N zBjY8ARw>>L{a?(zc{tYZ*EV{W%yY<)Ol6iak<2QRkP;%9m06i0^E`!0LZ)P%2`TfK zd7g)m%tDAV?RDwV*dxIQoREgA zRLElu)3uIN${{hNdz)1*jyQ<#;5`a3>g&@RI#^HmAWt68o-b#?!D4CN^EtI zT7*YLc!Jdyla$m0dNo#C5^NywbkQIGmm}%v#N1Kvc8-TjH5trT=>fY7+*HoN?t(I9 zy#|*t@lkIo#2GS;rATld1G+5>J3B5ae<7+d_ksHw9PGNQ^+A{}7$5+3g+5fMU)tNR zLZJy6hUbei8e}8`DP>J}_pLW_`cU?xl4j=drGxFNL;YGGe1MUrZ)$MzS&{k%Ppxeb zUQ@#M>*T2X<^8pm#wdszMLDP>qPsRp0P(Ad5TjjKRV3V+H$1AZ23v8A|*w%@&-vbQ&~Y-GW#20b!Mqja6G zJMDi1l-XA|`8$khybOgZKB~b0C!02_K*m#VrQ8|L-}@j9_a58}OwXWfz=mfas0|XM zm^E;}A;cs?RX#saaT%d}eX#mp<(FiT%W`!!#N+6(tMU+!1+?rQZRmmXV%m)BT zr`+ily;{nhVIMChZXEfm&1r8xo>kx%@C>vPI6BS9C7KX`0jDj>>%eq`4aRKLGmBOJ zo;{61EC!5+_>|H$<3TP04$+g#!$lCq{5s}f^Hcku?t?)(u)I?c<&jVoa6(Tcn5a0v z`3UAFqtUuh`yw+Dbcly@CAV}OT{D@>mXm!l$9(N@W>&=_`#1_rb@@-`#7_O$I_|_r z;1?xs=)@{|T@1o}O-r)5?zPxsB4lf6{K z6LqSQ>N;)t9;1r)J;Ek?Px2Uiyv&y(6O2pP?F-}*#D246>Hngj`+FZ{x}mBH076`d z0_o~mryv`h0xm8xD1y+7Vm|=M`7Ya52>-+Y+foT865cE4be2^w_xGY4kGp)#RGH;K z=6ZLi)7!R1A4ymH*k6NHq_vBT8(m9vtiV?Dv##R~Ac**Orw&%KIX`((Dj!-11N zyqkW_UxRvy8nGCyp1-;_s$89L3y)+7>*cMQf2chc`(-;ckCn4! zY>e2PU1@@ZxsP}6-=&l-sZUll*MPW2}m)MZMz`E*hH)$BnossbT}VYY6L{Tl9>3aTp|w zq?OqO)iHX(%KJJ~56}AES;U)T5KQD}9O1)6Y%$^t;@k}{+md0&*tngkyEj@}Qq2AK zj0Z5hF-N2w8`|gkIXQX;-Pul3_<{UZTqlq6+;@w4aPMkmVxJm|Sq$#R>ryxl0#$njEwB6s)GLj5v>NfPqdUAUF4H0WD*2CWV z^;^-A!r2%j>ODdu&Iq2|yx;Q)FFD0?-bjN}c<1Zk zfj?>*f`1YcQbUI454Mq?t9`ipVJ32?4i~!BI)VS zX-^!C$6JdTOn!zbabJ-yk+kBcyWR&=$?mnfIP#eGXR^#ut&FalZP9umwGV2^KQJFF z6gggCvrfnJRE~`#g9-P773R&y-6e*EXU8uH-S{WPmblT+lH}nW*PUh0rrf~#E?R@} zmDTIZ|67I-#RJx%KX}~(xeR6)ahS~++%TEHl{iWQNQYtQ^%Y*1|3G+@-L*gVszE_bpJ0`9xEiLlnNle#r zyYHK69Qu86yko~|0=_K!a3+NKzU0Jc*4P&Uy)IY3vxVtIw&ImJnUNkqon087! 
zcydYcs$Z-M>Ki0e{iLyQajt6>pm2GO!=x8^d?4pWnk-jK$yfTec~d{1n>BcNc+y>S z>=D-H=SlvV#o)}AxomzmaK6fPzBuq-&g?1pOLXWkJP=-hLjUhW9shsiA^(%&_5W=x z!}9;gl$NY{PvVT*=N=!Gotw9++egK=;16VbE}k6ok6L9uRvP}tSGuO_;Uh~@>zHzu zOC@U~msNsyOh1Un?0{;81yUv*;#c%l zJNFtI8(-g(e>MS0tu3FlYz>Xc+T#Mx9FuNE0*xnB$|>TDp5f8^syia!OMM=YMTEw| zsf_#&Apby)WB~5+@5o;~2C%H?vmJg}S=oB^O`6cpi)_#u1J60XxVX5Fiv{Wf!mXX1 zO@QMR{LSw)IHFp=*LLoJ!vQ}`Ia1s0d9GFY>=JmIJ>a4RTdXNHV>+lzt~xlB%{Z(hCSIq2$HJ^Vl zaghcbvJ$(+Fi>35!sG3Cku8P4<0-=9hj8ZrmV*x#B+wQ?HfN~x1(?zPlZ5#v zT=SBY@tc}sTlad4HE&nif?~xW-C{0*{d0n{PoS2D9C3DA>@h*!RO z4hTbsa6n6C?}TQr?_;4fn2p|N zD#hE$u2$0<1JC?h{#cnY2E!{reP{pt5S9`-uE5P0J(ykY!sGxiTK;D-EAllN0xj)d zgBvlp=zUT9S_)0?WT?{SVPdFq8Mp{SX`CX0e~b3dua#iv4}U|Rm>RU3!=?#n(76-T zXbtMp&HLd0k+A7z`r$m(;eyd2%_o4tpn8>p06U;e2TzOJJjbt;ip@@_n-~l8M$fM` zd7x~CTTL{X%#^XT`|V3#y_0q{3QkTVf1hyM^917@FV{9WyZW`i9myM@@ zs@MnI0Z|!RQqoZM8zJK`-D(m_TD1RVld9pCT?ae7NocJg^GYM5_xb1B8RuhMiJ@I# z%7`Ea$gg~oXcbe`=E4qOsSulpnpdZ5xKcF31}-Fw(Nzn$e)A?(yuv?jJ7P@$6WvO} z>4umjVA?^iQ{ha7oF8AX-th6$fIx~c2k<1ELT?H#i-055$9CP=9T^!O#-Gc+BnkeK zZSfM?4%ah}DGW7Z zC6I55OWxBL^x(e;p%K-p)Att@l$9GrzCQCtz1Pfm-ktBRQ5QycpwfkYPrsml+QZ5; zkdygBOhBfu)|J0;;9GKR?ekyR{mdEF3R%YuR%s#uyY`mPxCY>&15o@4(Bkpt_CW9t zhCa4FM9OI{&soPS03X=3+}F|R%GZ|zsO38QFGz%N+!LUQ^6rkobk)hzrwPHe8r&2E zvM@Q!XN64KJSr+GKHh^*4r_?o^tM#M+fNE&qa=uUt5K7LGb=oqcw5h_aaIgFCE)8! zqR^e{7Mz+GADLv_??emHww~%?O&1otR!-u# z@CnFF12h#%(Y!XPH@5^kM1_T^4tKvE`Urw?D%@mKlQrIA1<+(oX?*44?7XYI2z&r38>?hP*2CqoB-`}ZWPswXGb##z` z=?U^1?YKP-*a+d=`%5@Cz2(1N3ZoTDl^xI6-`n$favG+2qn0N03TL81ng((Q>9wyu ze4q_&toQHF#eGA+2YADOX)tf8f&_BQ1|TH)?gWWGnI%$v{DTg`i$F{AITwg%DMmxE z3*Z)}HrRSJl+WDO9(?$61<^o{8^rwAxy;^w#xC>5magIL5avxS(QM(ud zzy8O#~aT$oHAJ<8ag8|my+ng4DBeVE5{9z!LTrvbdW!FmR~ zn)%)KoStp8i_~_$niVA`E&`zG(ksQ3ns}<1c+r?|y$7!b&(_hzB$14KYBTxdm7)F}-!bKBJ4{|#f+E?qZ{jNELI||9{eV5<3_{u;X zOEBjuyFQ-mx~Z4b$NC~8<9EHEy#nI-Ai4a0(FqQ+)Ly*4u0iRp?K^iaUrKyBT?WT{ z98Mh^^uCtFH2fI2nFmLG8Q-lOgWV>PogL&pY-(f#O{-Une=Gn!g_Z;}xF2!!Ix5E) z^4oP#F<0Wmn7@sR^6%^GQ`@3Ssn7){B3NYb7vj{ZgQ1PGNvcd|&(=-a^A7;%hoXVq z^3biJP8eq#{^ErfoHR*b7lrTxD9LVYVY*Rupm6i%RUp3H*s|)DzpUl?;=~K3yL(b5 zuqb~@Zrutiir@Jh_`-bW9J@`uq`8aFc>_>Z{SEM2HcZWsg}Q z3MFrgog}~2px*7{g-;hek?TQFrJqFB{8WXH_Lkyv0!3S`t6I_0j*Dk8mwXk-qtsPR ze#tQh>Y3laoi^uFNyCy^iTb60GZP3q83}5o?8)RME!@k^>+XUyN$31R?N{h{K5pU< zmat-x&Mxev#lT+Ql)U6dk5U9bl85dWRMfeb#GvOSM{_ah_rp)$ZltKefcUF4W{K9% zu`FY*C7XN+NqQe4(DH{@g0JNht^shI!0^}eqo)6CJ0UD7-`327-Secz_vPUxC8E24 z2UnfSxh=p&@Q!dysn|pIo3HsEJO|QBGK^~%b^Jk(aG1Cg%CI64Z{HG0NmZ#;1p|IQ z1s+fcYz(XdQ(eA7AoKCdrTGabDz&)wF?BT8#+-|^q6uQ>(Mc9IlCfo@UKh-G*ds^v zf?Lp1TKQD1{lu$q@3jX6Q)9|PVY0^zIv#G{<;aaHE=*rD3ga9e8Rb)cd*%KBy$<|J zJl)2N<$yY;^zMw2{>_t<=KRw5)1}`@UWTb`)U*0BtqrH5E=)#sSOh1eQ;mXpG90_`?^GBMEQ{qkV`B-(l`$5ID%q_cb*% zBte4|yu+R6CXtIg{3cV#9Uo4DG&l)@{8=fh3S>4eV(WdXyiVJGb1KwAzr(BlQmW=V z33B~rBj1KRbDp{4h`VgkkojFAd+cfTPM`1+DcQM95m6&qf7l zx4-z5UNgetJ!Nw$&&=bA$nCfN^sQ64Hp{8|zwm^dw(@XS>2pNX=y`pz(ju(iFumi- z0#iBUF9xm6^xlV^ot?m(qJg}_7k@}{cUdS&7s3JbfMgAO8z!F4oq@DNGp*GH4tz+O z?SBzs5%aDD{y_OMIR4{(dlUEeZC8%ANF00K^qU}{JxHhunpZUbGX)@r;~Qk}aMUlH zvq4oOc5%lHLHIGO^tOrJ!`$vI`F#6(XMi=-}iD8`FGPhS%XF~~=l#mONhKMU4)c#sdl zsS6A>FxFX(fH>Vq!={%W6$8G zpZfMKS?K9KV1^nAMk_~sPfSb{WY?r9Dk?%o_P}%VHC%>z#+S{QrxorqpCtP8H$G`c ziQF9}Yj=*>;8~4NGU}1Ggg{O;@vy7V=v^i3WxOp=x^dslu7$iV3Dg2xxw4t&DLbTz}xzVS6vuh z5G9R3{pkio7%?E>yZR@8OZE)@lZ#d=0a#;`;8n$4YMVv-E-@~fe*jWd#hc|^4p4ZW zV`Y_C?^IDwDJumG&;+Pg^8){~d!1tOV@4I_Q(mM}9?5sZJlnqZlD{(ZVojkNyK;y2 zzi*N@)a70Hx6_EW3|a@BU3IlWyB1BLFxr92V#^^d*8=bY$osAqe{@4__Ch0kxW=u^ ziYh6O-0q_J;CEP79zA{i)fQUZZkZOO*y~wmf5vdj^$Bqgx=%L~xXcc7k%;a6ix3)-e;ReinhAcBb<&^T!_MqhcPGH)X 
z&1c<8&wf8}FNH$Jd@Ym`(46$UAgZ2^7V7(61?Fh~F}w(wgu!ET2Yr-(+U=R2{tsL4 z|GytJ)P4H}q&E;DfCzpJu}0t^Tpuyd(+83WGKT;aLVXs35Y6h23XrxunvYj_ z24>i#GF7SSC)DSj2;{VtJdBhe?Q@s^= zcjY9(hy*n#*!aP`!3#Ipp(`-j4=7UL~c0G z)bowJ(#hF&Z1+IZoedYv0P_D3d3@kFfj<`+xRXa+S2a_JK=7FYEBFTJzk`0}#6J^?fZ#W+o)8FhT4eW+OuL{9`2KbpOeTOQ zPeBSH_8nfl4)N!pYKNZxQXfLq)s12ACFX2tC*jd^V0GbB$jS*cji0*2KENa@NKqk4 z)DT1EIq$|zz^px;v^rJsvrExux%_z3t&M_ncKCbu-+!^5I>vXJ>7d+XN1E-VGPizz z9`%TH;G0f4GW!PdiH+Axb{SI6*SF3Q{CHa%<4gXvh63ufH>LzxFxmr7*?k_a*WGI%_f_?U2gFV+dLUm(&?Q1*CYb-ctv%9nnq z*3f|o=MrXK@EDNq)-1Uv!U7aM10toEi|UAc{Ct`13MrH}-{O#~TLK5^zn_1l{>R51 zk+^}efJSu$WyOC%+63S|qI9>cjP={_CYpP)x4&<@K7IAsuVK?S{QAe?-nD^)0Es&2 zdC|RqOu>MU8JxuhwK+XHcc6p`%4&fjS$x2@z*=8lKk{vWMCTqmrPBMm`7e%p;J?2M zcY(?Da^RI}V^P2260g9kLQb3E$M6F`vgpzd+S*wYWdG@`rgW4)vgo$q6P3X+rPiL@ zW7Wa+xJ}@w|E~dFV+Ufv!+$b#mbOY>N>+&8nV_i~rsP+}4QsCnM%#Lg+FJ zg+i=T_2;zn@M*$hFvvSR(u6}^=p%`}SDP%sBMDQx)#H=8k_Xs-RpA&m)^)b&{Q%0@ zWHMVf6!rUSsJ#^v;40g~pq6^*woWZB2KDvbfuC?u{|p+nwtG9a;4Qq)r80x~ zBhdOzRC{F7kVB8G#DEYPk?_F5qzJSGP>%sLb{fS!7-|L4K~nc23&XmlSVXZD3==4^YimPAUi>hbqMBw*r1+6L6DMi_IG1) z?xCAo3^Td&A9kbrb;YuYH;YWB=}@yMdrd><)3F(jlQ!dxv>n7AZB=; z81O@ePS^Owx!KuT(1pO)YlM)hXKhUiWMzPK@b0fQ0bvzQhr;j!n-192CxBbRLHjs~ z)=%}bgC!u+NQZ(UJDO2;9Q?20OrQV~AR-P{xl0{QH-n4;8<-i$l?fngmUHLc&wOFL z1hOYx=LJ?YxQsnFo0%kLV+5Efkb!qYr2@Xz(-s6!NDBZClPFnPNAopcKu6d|0!-KY>lE)(QX*qDk_PC@x`^9D=I(=AUTkDLl3X_gmT z+3(sxU&p`haTqny#MI zUPz~&+&P`V_l}~n!*k`x55*$0d0O6GTDtryLtbk`K@}=q0hxaPbqv|58CVe@8N=eq zNlRnkbFVFCZlN;WtIk72;yH>f*V9t(qD$$yj| zz(4lL%Hm!e%nL)v9zl63#VI+~=>cklsuVKH~R|7fn8nAAXfWD#$&fALv zIl%zz7{aFj7sB-X_-nuiAqO@t{ACnvS`KiWVHO%<>mjNuVvp3+)Bw+d1S!4%yoHKq z3Pc1uU^=+oEzig-F0NyG`akEXg+6w9mfpW8=&y^i5(#T?pGYG64edqF-!oA$-qLNS z;+(&zI$3a+ywyMinrM;d(`iC~=vs5SE7B-0?G8rOb0r0Ya6Y}6LZ!1S#>_5xTYmz8)FE+3gJd3hhcBAFk*$x5y_EeEIYeUz&j8cH;am&bNIr7fz-VXRvx? 
z1uC?0GIUIKsp7nEnoQd`^4VW;pqCkX39P(%to&b;%ak3C)7PLkHKgtIbgF*~wF;L!k!;Bv^WNrs2{5FRN zu4u8&h*6qeb%9ZhkbAvDYd}is@9m9M`Dc+EDOu1=9cc29Q&4z=3zGimzn>1KTIJ>n z$v6hcu}DY(|De0Xav@L;Z4;oY)Ya4^wTW65oAsk{h=@iFOy*=@4TLHAu|COBlABx3+6{fo|@hAFW#1||Emde@|c z&RGy#gRSl*DHZrk+uW80Lm^sGM0R_ujjW2923p8NZ%vNhe1F~l$Na0vz_x*kt}@;0 zNf{K4Zi3jksq30m{Fr0G7tdCt##@g5n}GtpmQn`9k4z}N_R|di$Y`$JNW8jd>5Pn+W3ml+`XO6V)EyBTvFujI--$ z2Fuo4$ z;}+k$zwq+f-9E@3j^tK=cOwlQuqg@)Yr%H%Mw~SOO?Ct4gW;V4+YNv9>RfJSIDl-) zaCgH~|49$7!(dXzt?M~PBGwZ;TO-+K-(mrrBAK$E(~Kt;^SO&=erpjff35O@=|Ujt*824@jZ4G5Dx2?6e38phid% zhp7Ls=df6Dr5=R37(fI2qb>4bun_W}gm~ivax{Xa zL?gH3D=<0}pPHKb0(j33MM?g7e!VCcHtt312^}+9);T&VDpnn=n;|g15X5pRMPX<+ z!>~Faw?D}EmKy+9@f++}&2lFJ7(Qsf2y)nU=!;E(?87%F$LgdAIMYJyi=~E;Ale~ zz$6}cE%&{3TqD6@3JGGE2_O$q6-KiKz?eBXq;&y>SYNi*?Q+FPbYb8?251_on#7tc z4GpmZudwK*)PgKD6qu)|VWr~>0u?r@X zKkVFpJLQYb?clfR`r@?=P_}`}ju=u5VEZlf0b{Oac)09Nr!O!X^Z^S3{^(B%Rwu;r zh!?&X)v#Pa#uFuXut52d&W-Ri5Dhy1*Ymml86kyi2NK-=a6-Gtc4b5u2_=!Qvvr{o zD8gtYoG&2WFOYyB9Vh#5)f#H60|H+_qYT>yhCJ3F%aoxfWHf>RUctGj4~;swq9-u_ zje;a~PQAAAF*tMs{1rGksRt%?Unb9m#w4*I>smPG&HA1ZqvcgUhFEmoMD5I~|LQp} z?tfg}6=v0PgG?Gpqfkg69;St~4D}9yriyBMG|#l-?IoPKY9vO;Y(dC~H<57qzYZVq zIWA!dh6_lgj>yRM_0xmfAUvZ$#T~?)shJrcI^iL$aJcN5?7+zV!t{TeGJ-zl=FX0$ z4_X^|{2xzp&Xkdr@wAbW+F5w!MgEfcd6rpF*yTNg=4mu>%U6y$h)d%dk=Onn*)xvY zIvZC6NwJr7noymsP2(N3a@EetS^b>f-_MYl1>}v7G$c}0{nZza%#2vAG0h9plXJIc ztgqzvw8<+hLMLk)=pqpkWZFtqvx!j(!;9KEM z7J0XD2rL~=In|uvSa7bQdsA{E)KMbHHl9uKj!`|+&NmK@8>ft|d1P!3r_WCsi0%y> z7fL;rHQ0#7%i6?xPo4w!!!PYZ#`cRd6qE1wt=m7b&V`H1E`1a(QY6V2@CL52M)Y(h`&}Rq zIMWT{P&LIbuBY2(#)fcM4K>Q)tX^%nzxQ|0*tg_HCs;lyN$kNFf7C+tZ`SFg-6967 zJ^9)!ip{*Ux&x1@gAIk7!ilVNPI!XR%N1dsOL=PeCnQE;A*=3(`o8y=Ub30Ik2R;M zEStnKm+E>`*qBrHh4hE#PKVRZnJ@n5t$jO=f&iUTXmaXN@qDyiCv0tPks}7&;+(n8 z2I_WS6)I0FGN?Xu5TsV-%TJN zq?_$)pe1|3fQ)9weHt2y0<0&4l;2&eaKO?jCyqODLbb<5?#Z)zaNr`a68=eU*A*v&m4_d)A&D2Q=F_Sm2CvWvP{!dRD(H?PF8DSsU4O zF~ynhi~K|r#ELH%QeNOTR1nt(zUVhijFj%$yvLs(8bWhomx>gdQ?inE&1&u8uCYt) z6QSxIxCTcLb#?gKlv1u5KgN=UEMyqrb1=xtrM3QyqA4> zZP74TS;FIXmGNu3%)|xD$qI$H=d_Nm;;aYs5N&%`~olW9d z+mlu$m1A_Ro9FNtHt5hOwWF#ZK*xFhV?sL91tWYk0>%}*1$F#RA^}Z{dBwL$34E4# zqH>%%eEadJXW2R8Kd72&wIvA{jpRKpr^$8Hl-9C%V2`bwC-drIy6kLHx|+nPpy7A7 zNyncOyw%<29rhFYWPa1?;nHM!Z!GghS4>)=l?pLaxoXm+kzg($(l#Vwn2-|^3Gy@BR=(jeR2 z|D3;-Dpqx|2mCrrJLVU}ju!CT&kqL<**y}J@s163WOn`t)kP0=TqTY@YF@vn@{8T% zLNgTq@*6Dl8!;u0r{-AT)p7K=BY{SB{C!+QQ*`DpfiJr#7*NE8L5CHMHYdpmhb$BR z<}a+Frepa5{`?nu&;TF-=w25;^8E!onZ(5I1@udH!V}iH(5tFM3vzfI%xD{HB9~i~Zbb6rwUSK`MtV29R*9MTrYO01`Ax z0}rkXbXvAs|GfP}FiJ%|av0ueZl~f4c_av})O7ax03AbLl8Z}G$2UEl1Ad0j0bCA! z=p94;O*RlKW}i8+b@W~*0SJOVE*krZ^O7LONH$Nm!iORKP#B4CbXCzX?F32JHGo`} zUYV0dbTOEbojmCUY56qdija`jgU^P?V?WyY(;ZAdJQFPeH+J5hT3JEzqeUVYRjx=h z@EcU=#U*I9?np#N-boK?K5-<+HGoTU291#7F{fLLNg{`-vM`Q|58xgowgR-O7L0qg zf9$)vI5KG1M;_Yl*-x<)e)8Z*50Ckrkunw2^D0s(y2I#ukBT{uZS!l}RGk)uBBOiR zeFVKNEyk4GRNM_}H_k-0-FbZB_LE~e(!=E4F7ED_L6<6BpDaC()wypqhUIh5DqAb~ zXSdt61fxZ)8Bi6%3ewoDe&>c{h9DR_(zzh~_NGgI!jDNMkcgvssX6zG1szb_yQ9z{ z>l;MJmM9G)zepG*(G#Gli$we|%DH_&#OMndC`zCVNvIR}1;c9%v~MJVKL)r$y#daU z;jp^x#TC%rzXl0D)#vARAfY>RpaB=^y&wkx)kvvzCxFNu@Sie(%G^3{JdR_Nlj8&1 zad0pS{yiLGFdjL;@{cUkULaI2w~&TGGaxoQ0d)op&4SUn);ydPzj_$}pBn~s6=c3? 
z1#J#ez_4ZG=f< z?1yoh{)WL8Un}Eai}!qXw9dZE9UUFLxjfvfd6#WDO=557TNso!FQGWN+y~suUT{c& zN~Rs2PRWMeDa9Dx3FwUx5fM~^O46QQ0bjPvA=QNRH-K5r4+ERaVAQJM17x6bFa`MO zkFvRt`>G$Wc}MKW5a;G6`~p_;4WL?{IgYRo=m45?=1IuO1l1m_@=MzSR?u!ZW|J)2LeHWO!~4JRa?K|7N_sf zw*$F3C>PHH4cg+fGUX%SWtC1ibHjg2ti5M2qYf{Bt@bxH%>_Bepklol= z_!ux(P|WFBoQhDaH9KEg#X$e@@fpLM^dIg%*xnB(mh6d>JHF-37 zPN7DwXr_hzkFjW=telQT;>0Vww{#C3`tV?T9X)KL(N6pS?nhtqDI@V)5fN?U#>*To z_>5Oi{#Fy`Y$PGiPy$~3JOEvi?1VL0lO1runeDM zyw84aFx@i~Ow4d_VR?-jM}kbo@WF12fzi2qn^~Qot8pFSC5;{C%kW-&^*uIrM0JXv zqLP>1NV*VOHj2yVF&UaRpOg$TYK%BgaxI|tY*Z$v*2?+*YtS1@BiArnxY8l@Bi*ip zsc^>#hUXOF(3UpODcIz9v#5d_yc!Jj_jcC(l3NDABH@jss~Dg-jl4(KC>HdL8*M_J zFkH~la8NyK(oyRU6As`6sKuhrzgg-tUFrhgz2vyoves$& zF_r#>liep)vriZtN<^2=@tw8mSME4bnsz_mw5EdL>z7?VmG9oHw%H@m{%NB!Zk?75 zXUK1^IXK;BQM(%=<1O$e|6b0R!>A>RFuGza>NmgNldJhm>kvcO<`RZOQv>D?_hR8!`n<)4`Q0zII%Jce!IT= zww$W|(TJP7RG3-tkMKLc5@!2$JIUeKq|%L~0!j~FD#j+=DnGSPYNdFidWuWy_lb#Z z9$2io;-=c>@pb&*izd>op5?#h+ok#4-F^t`o<1#;_??OH!JC@KYR@YbLN7B&`}LM~ zyLoOL=fTyY*~h=Uci1MBSAMMzi-BGtzY3MzC~p2v*YP|)`F#b8Qv9Qw;ivIIKfMRm z<>tP<8Cc=bb(cg)%UucGq*kX^!m6r77|!METawgN?Dpow<1)yMf2_cIf9{;l)N#Kl zBIoSWH3tVL99M4SIp?3%XAG}>&G<`p)RSn5E~LtxfBerD%g{krY$P#-xO=gFE>FBv8=C7(z}-%{ z>LKOXzWXyudxh_xIjL%rT=x^hZN;$nuriM6HBU{BsJuCtuG$$Cif9|M$S!#z80{_n zvnLSdH=d-`(tPmhrp__a(X=`J?YVE~82u(2i3!uBFjgf}RA0oteCkZ45dQH%T<9^5 z_8FHm@6Vk%@gmSQX)^xoS;ECmZKm%#=<{S9s z=imBCKOVdDE-ICR0W7xnVMbG2DDDtGK`L{XQ?+@0eZ9`!r?zR|-`GpJGS|dt{Un!n zn_1bj=6i{l@)^#tGgN6lEKoWBLhF~}g@T2tE6PrR?o}dwHlKBqtDT8G8;RbUICcsM zTp6cQ(39r3c{THrdTE1i=wCjIQ)0M^@--aV>n_6u2NdrbJ6M^srBueW`8}Doln5q6 z{n_726-N+%UM9);ag%s*@}RNpRyLIkl6`wB%QeRLQXl+Y$1_8%?jhKg-=_T z2UHbUC8*XmtGzegTa0+r72-}zIEA-OxN^?S9BaQ!{=+uhcw>4vHb&;!H9`Cv15{%w zFHe17iq1RrtYM;$=!Hl{r0~tMd?|s%cQ4%{3N@g+J41NXP@dp z-h^z+&6SKQ^MsYLKB9WVuh*a@X4)UWXjfK27&o$OJG0BTWAIy~zR7w>XBBI_GhTKpWI9+fl4ZYwE;Y=jQ9R)ca1abOF| zdTv1UtWP1Pr4S0a*r*)ARlhT-qx+u$KSahOqUa(fI7P4W_N}T0g92>)Q^DB54YlBY zLM(q&W#2hZ^{mDFZ%hcda|Pu)M~`}A0?J(J$@{nJAAb7h*E&s<9REBqp!L80s1V)K z(P1)G>kH%XLR@~7`CtC=(Hs3A(on?03XhF77e}wm#f(h=H;@_|9D-lnfVwNS$N$y+ zPy)Cmvrzqfmpc4S3U)jKkgJ=(Tn`}Ag@Y*&e<5A1fOML-%>>+H-L6SQQ`Pm%6^53E3kp@)>(D)~zlz@@|@)R&S zlmuWJ$%Dbq6X~njUN$>{g)4c+i3_r5;%@E;7(O8B1r&CvkTW#+oq$%unEl{=@P+gR zJ^Tjr#c6K9GvB{Zg~L_tdX8qn@t4hZ&4XZ$7LuaDBPK=%dX5t7uU5;2oS(u9%XyJ; z1mxRiFI^fQQcr?rb_npeH|i-=Yyc-W+_q=tMK@X5**D-Lyoioo z=$ZgB?ExtIiyn5f0J-tZAN1CbkKVdjVt2U+x`Xw<>|4*5(%g7Y|9xi0X0eYGdMbL$ znc>Kz7HlS6hQq(81DAy5!i86;!2Hac1w#ZbSXcoe zbTz=Xv<+G>M$6#J9-){xjY9z004?pKotPV51sxeDW+4j$sm;Cpw zg&*nCHBb3=-INl2zv7n@|Z@v`t<2$sLh#16aUN-xbmWn`UA!B79WGWP= zPNAVdmV?c3vq51DE@Xr+o3B9e7<8Yz+N}5|?30lnW$<1hbiytAbb&<~>>VRu6=Aq5 z8Lh1OkfNmh{HkS(?g)_+AR%)LJMjG;))4(@cA!K&1>>zV zKg_`j4E$_D26bIRLCz)!XcB6Qfou!59oV%9f*!~1aNluM;1@IxpUy^`qR0htjz{NX zIE3O$C(iu>N18UnFo{NDk+Eu^n_mMID4>8)Sutz=iHf3P;l69kn>SzXJB~N-Rqe?_ z(6L%FJBpt149|Dvu3@feG>KTMb=@8}-3|Rny-vV?M z8c-=5?leg`fi`y-2uIhiW8;s(MmI&~8lX8)1grzDGO~V#;M_Jv4;Yd$(9glt@+~5) zctFXa9qPIQw2KUlJO*geF+-s~SQO)q0&%*1_w6k6c4ZU7C)cA%Qk^6O#H zsRpv?(*7}=zb9%KFRM4b;ojjDImq_85xUv50fNHg1d{Wd9a8POp4eb@75-fDYp_wed4BLCw%AO3mpU9gPKO z;JBV(G$NV7r7b4rgXyct^d}V}!3k_^0dN@{Ew~B@Bcuc=D_d}%AV=}Hk`^YWrs&3o z%ZXMV05NFApW@M!28a$qK%<;R-T2{Vnv_1*m>EA4lSi!8r{=qcg3(U%N&e77H34Y$ zeOlTEyi*TwF0DRJ;b@PCicBcLd5J-8nj&L1Sl|X-a*G(Cxo;V1@$Z1ja{?E)NWTsT zKG1UK)c5aK;k-fyOqP^ax?QPClj|UP$k46$bbh|O9yr-R>KAl=xY6YtV(*g8uGCduLxxNTT(pWewT089Eu3N~a7;@MH> ze9s&TOIAN166a+M5w(4cd-`SVg3;lGmsi=&8@IP-uh;m9&0;&`&Pa$liVM)ndK35Z zz76}L@cZf6=D5pc2X0q0@piA$LONpUvUdJDFreU^G=WVozE+L(3`lF@z|U(eyrJ{a zCaIqJN8Dw{l29LxKakYKt|Sf9f*IRrLeAYKR?=l~?0yf|E?c)E20W%VU=qO3E;-W3 
zoi5!O@J&gAcR(8gMDD(;1~0IclUHGZx)9F=lP=Hloe`y82Aum<4hMMG3zR-Q$j;;7=b(RmSbyKa`|z+QVX|GD z{?rO#31iHI9sYY`;lI@i_Ghbz81AbyQV5%KPo+FvQxTJ`Sv@?%`s~8`i_R&t<6&M4 zM6Sf!y?*)v0;~z0qA}I!Z?-4tubpAA-nCMSfYcC*KQsmJN>b*x^Ru0K? z95|$QL#oyYx~h_k=}HHR%bRu+7@(l1-HO;|QQ3QpG&EnZNX4t+e9_9+I|e*GP&alb%|3=_Nh>N6 zfaBAwCoTgMKYuuIx&e1KhXfk(18~G@9I_7t?j;2!Wj!R3mc244*LU5HD|iKW7>s8n z^!4=Kr#p2uVHfPA&I~vp9Wqi1KqM(Frj7hHH~AQtPcqKVpL)p|K?a2UgaLc@cYp|v z6ar-*>ui)cH{xT%YA=b_hPCb-&G^xoYNDkFt(X zyI8d#OyW66J;4K4Kag}0!<-~aLcsB+Mc|u)d+JDDM!q>O#o=gZh6bh6xqRa%{Np1J zrslnFT%(}T`N=@f$`|Lf?sAE+vHr@iugtHJu8b#wr`>r&_#7N&nD^wQm;GHp&zs<%v{R^vPi@j^pl{Ky4@CwF`XJU=RUHHaalkKO@Pz)s-tVLIk{SmfXE-L0%@5`! zthfk+u{)etPQbykEQaj{1WbSQ1#G5d+f1H7!oZsbQ3y!u0gxzzTiHt2-0G?d7?^?g z1j!1d;HJZc#<0{ny=UOo;>rj*!=dwBz#<@!Id#atD@!9kOHQzWPlrgr!~*AHCx`xJ zIeE^kWCF%Fq585+HAyy`JI3yv4?f;Ep0XqL$EG86w70>#XOd4fB$h=vs{MMgrko*( zxtX5RAzkLt*SZvXho>C+XM!)fFOc4dikP%$$BkHmdDHvW3{I2BMh8JR9LlQPpZ6eP zd%`6QLMv(wz;1pDW(T3hPA)D&kZM6@rHnjs4-QaL1}4tkngrF5%v}eDy-88DJi}8T zR0D(GYENqD#-Zu%pFcmL672=u9~?rBLY?AukcrLOo$-V)-U}>ds14xl@(OkfQIi8Y zVLQ{od>_GTabc(+xz}7L2Q(9FP|vP^IUBYHM&&=Nu}Bdk7udAMJlK=X>Y}(lnU)96zYJE?hC65lOT?oLT13<4h#-XhBX0<4DCm&%HJm7^S6?p@(p-8 zWh*74;7o=Ca(+a>z9WUpx&ZFsAd+Reb?X*{)=xuM={A1;L@u&VmvTyJgw0;{M)0EZ zwCh(5eWrd*f7K&J!*c+yUIxJOq_It$Tu3vd*ZZCVA9)EJy)Ib%5t z%>4mMu|ROVei<9vimFH0y+Cw>F)coDkgP!%QvoZTUQ+DK7tis?uD2zF8AhkpjEM8OpY{0sHL?h)DL!B`PLpa~*eoq1)80&T;jsrjF=CuIb*+2}0ej7xub+ZHq@>R!3I z(AoWt&eZ%O&EWHZkaL29Emje=+%|&I$utg*9Ll5m^&wVHwu?2vOL;H9LH$;rdsxJE{kj~kP?~79tSe3SweU_=9Ga2!J+Ox!FjpMk*UafHn znbCgPl0E!ML7u@9|KNpUMK`X~pWfTqdzWJCg}~wlqmwT^9le46oF*l475&=ndS<~9 z(`@%2x}#1`=Kc%<&<9+SP!t2r6@J*~{Xhy2&Mcp-;icnbxpWavk}KRNo+ z>G|hmieWd7&Y>-T_+J?OiHZxJ5pc)$2qh{=5@@0!`Aw$}PdC2$P)Vq_pKqh;vo^v@d~!FL>=v9#d23vc3jS@by@N0!S?Wo0%Vo)EAUg-xj+d$ItWxBbO- zA5j49A3EjZNsM}+B`%5SQL@gLN-O#U3K!8$b~ZM#O?18QTs>Nsq;Cs@CZ@PWp93BA z;~QsN39Qw|r|h-9E}`x}98{kDX2#&cE52S&QG&ZSGh-wxCuiBq3bRd66Zy=5pe9s{ z;MZB$4@Z3vq-hUMo}{D|v*YlP+lQeAmUB9AC9!ab(4?Vyv*JQKMq8o_p3x}m{>lG|fXNcK@) z?BqJzMys7Dt&fMsBsCko1E?3mV`EY!hkiuaw^st zPtpIaLOsYmNjG8s4}k=Z>B5^*9HEe@z@>ZZcO1#*bME?+ll=m{TDn#d8%PXoAA%)E zSe^ON)kz=2!p`b9vp7i}k#*r?meh~$2O>_Mky55Y<~L&shBIihdUvLqg3nTpjT8LX zy0^Bq#R}UJoCY|EXrR(?X@2Do$d(YCAEQeEt#^Z|_5W4gm4{QgwQo^6MWLJqqLb4h zGS9PQ$h;lGmN9B8^E^Z;B_%`bLc~U-ohizcsR)_2lX;%m8DmRkzGtcL4BzkiUDx;j zxBfA``?cQpUGG}!x$paV?z9ui;IvQ(3lZBP&jxrZfHiP|cpX4sw4Z?tU^|mm@S2)| zO0iweoZH<sZ5*PnBDZy zf5__LdT(joo;i;h1+i+Cqwx+c*r$*!kmJ}V4Vx#x%`QW=q-ZUl&#+g4hmyZ=T;8wq zgry+Q)`sLQVP93DU+u<&YXbV zGlX!|)YKtnFf%d#lgfX2Ht`kL{4uX{T#??@dGSZ;Og1jy4CRanhWw6F+^|A&JPxuO z(KQ>LnP~(#5^U1MKwYGE^ol__OL{^&CP=lzkt;CL{mLj&5D^`t(z;o){#-K zMJxP}dC05*9;3R`?MtKl_gR&gT<5WGmoG#tzsbq5!O5X?G6W1r5D%rsQaSV#j~stm z|KN0oP&l+bl4kA}N8QU`LV(1|6;c!kj&=08%bk`l~4n^YpI z;WpjYTG%|x>-TDFAo*d^DO2Rp;IN499*4mvF$QMk6pHgVOEdPr9{JP2o!_43B{3li zi-W_N@6cstxDU`GUY>)==6fsD`k(u@;k&RV_#PEDvtvAa_kS;afoH@+cc?| zY%*chpI~yk_tcMEZpy@swi~GN3%RuI$>$npi5rv7krRr(pRRKxPUYB-tzLvCZ-2a_ zedJWg4ce5GXmW)%dLVZEm(`;WBmcG^yc)fmiQCYn_}C+{zb$wJQ_SkCfi*) zyK8)6!q{=@!r!j^$4)px*uzW2VpAqps7U|9#p`)uQAQW=l4Vl<4l_=?nXQ~ldpW+0 z8D{uQ(^?x5(HDxjW7Vj+Vl{t+hT@M-JbFS(GB4>6h|$?sDj6Hq@ zCnjW!L+}2d|IE!u-{S1LY^SSe^nMWrLCYbbYmFQ6tQG@J0fd&&e;i6CPpDS1uvg=# z59&ENL@qkAp*3xXT{*Qj_|4+Jl^wezxczB5#K>Edfeovr)+ zMUETT_wGCDPiLxM9~zz~$#&+`$`EWTx+=Jjbm2`XT}u1r7I?jn#k%?b)NW+*+FtEc zd6+}+*yDiMnDV=lPi+hbESt^kFBX&Y7hlSJm1*Jmh%Zb^x>VMBU@AScme3tsb(@8y zK5y1~z3 zr1|Gf>+Cx^ty%lb!wl0de}g8w*R12)pm~aevj=J1#K%pE$H_^ns&S{16R|e&n^@Jjxt$F8uJYF6*W_PsVB#D)^YqnkjZ?DbBTJZ>oLh|cP?>!c0HzGHnZjh zDNoWVC)eBS{rL!5FcCOBzM#GR7%n z4%fUNA_KK2U2ekR9|a<@{-@%ooV=~ 
zB|X-=!Slv5cpZ9Oo(UZFIh0`R(`&dkCUCTs;+kf0T7ji??p;SqXhTVWvz6Yux^$`U zL#ZB;>8F3(k8Zz{(f7K_7v2R}Ppipmj&!$quk~M}zhXqQ*X!2uWQ1TC#$M;!Xq2)j zC5LUzu1*T(!C{le(6GhQ&n?a2jQ%3!gj$=bE#a#NbVDa`2mP-O1;ML=wQy+s&@-jm z*mnmbMh=&VS1QFyChj;_Blhc5Y7~7Aoy7D;Y^2TF!WSFT5H4Vnozu}_P(@onf;4JA zSo0y^_XEs#xybxjhwRYUHsamrAlA6B(O2ZrDmR%H(RcW6^VyR&30_z`D~mcQ3ZEhM z^#(t$U*aruMwvc4;NIV)wP)*#xskap6_wzxLeMmDf#g4wqP+TbLJ79FSGwPDBMmD6 z*Wyu|CtO`!k$DwRw6ImG=`E-7}xI5b%GJx=`I=6Ap0N_S3668msiJq`P$_f?w|!- z$r(#kf+c%lE#cY3N=d2E09g1v|#8|+E6C2 z0}o=#X+O#9ZNx7DnMNYIO$fMFGDg zzU@*DXZ3qQwi_>Kb?e<2x`^9c676RX3;)gTMm>j+9D#NdH(U5As72~9Hcx@>W=hDy z(_+i9>YKKHMO?QrUMXE=Wik(EoY`;s#;!CMdWwbF#FJOrxSkZQp6us$wD94TBlP;6 z&MWCD4qLL^NWvSA~34`y8<>E=xr?oBWV(SEX5ieIdyRH zHBBv{ENy*0E5-VvzSasNn{mNG#rN4m{Y*DaUs3}?50Mixr9ntBFznzRCIVC@NTaJX z1xhc;D%ZvQ&;fIc;AdY{Mz}OCpU7n*$YKdwVfGD54t5W(lPaE90-Ys~Dcd!<7WZUJ zm!@pJT(`S*(Ot48o147MJ;8fR9v8doTCTv@vD2H;d)^tX39C1JOuO#vAs%++L&#bH z`jDqw+lL{=5;a@WF4rcLpXS6xp4(B?-@W7Ni+`Zct1;5?$jx5%sglQQ)`L#jmC3jE zbxI~PdG&v`=~kf~XhP+yiVM$9o2H=a<~AS1l%U zu2?EMl(%o5yga%v5he;RL{&%l-5kp^Gzl|IYSFq*T7kEM(qS|sA{Id932>c2*{=5D zteRA)JiI{395ou&z~LZ2M`U}Io@Y7w5$ripny)cfn*i-B3YHd$=+74A{X?=4WLZJI z)n4}&ZK$dm0GXmOu*g&8=H`MqY_QS*kio~ovV;GeKcyaD-=RPBcGs4do3^z03msRS zJboBMAw54Y-=9#O96#Y*74YdXw2jWl&F817GyCp+)od%RVl^V;2BKf3ECtDkbp4%=l9N_=a|mapwt z6Q0kWt-dYMmKYY^C2pH6>8mq7-x@q(FBPl#th?< za0eOkGT#$om*>jyKC(HoPqLRTM?A3^3aF~PEwOU2ub3ZtVVooJfNVr?UvVSu4Lc@P z0iG#^;JXH7=rW&d_;CdtsMd*+^&o#e4J=g=0uEfDOzU;v{lR@U3i4Q>be!t>^UNUa zxLq-41%cpnZxJ4h(kS3&_eX}Y2&DXAbaeJ(NrXgx5SQEkk6v&z+F^Eu$mRyme=7aaWavAhV7A36eCzPf7B z&BNAs)1Zv(d~tCzE2yFlooWXd4Vk_{jWke#4`KXQ@j1mxL>40;)TNgl>@2lL*koe)1Y{Nj@O)0ZQHr5?f`Y09%^a<-qRDwh zDKIb)RkeWZq?$lFQd$In6#d>K`@mWEowh{Z2XJsjb)`curw-4~nopNAY;0_-Ksk^u zu=lGp0OFTqogoh&rNhdA+d1@-MA4Gfxc6 z>9B8~fPesOk&&&?ZU%Ny&z3?LYtVB^0WJ=uOk9>JU)@ce-@N=Cpup|+IIPQl++u6> zFxX-Z0#q=P;5&<)bV}wb=pdsd7OZQSVAl=YOBE358o>F$Dm4tijzq#_E>d~X?yCUN zjBV>FBrP$_GOYz>6-}3@sg`;kif%;l z8dreuLLGy#0jnsG_}u~FWGL`9kQIeEB5*aZ1cO^}nd~~G=B@&l`vh!f016oc^TUh4 zUqDzM3%z7wWPAr)9tOtR3W%b-LD+}}$Q2+hA0JOjG-Fpz!IJKVK8EORB%|Fpu5@@+ zvwQ58Rd-%JNI*>)-qhdLeedP-u=#ZMmk8BUKcd|~wV)q=JG5X)nn`C8mJdP-pObXK+T z+L)&g;0~Y^P%M^(j(+rbR^rL{FjwDv;MdUqi||LB9v>P~G&fJkG3G&@d(c7}FP}vl z@pk=fq{9j#xszgIZJoS7D?>a6KZ%M*??Gc5HPwfwhNDbR}LPtSFE56PI`naR(GgIPZfM|GbCqdKM+Q=gk&z37fOo9f9@6W*W3FSUFdJ~|fXdoo} zd*esn4s*=>geVgZ^hB~~_EK#hR+sSqCgViZ|KSc#4H_AS;m6}yFXi4C List[float]: + """ + Calculates the score based on a list of predictions and labels. + + Args: + predictions: A list of predictions that the agent system predicts + and returns as its final answer. + labels: A list of ground truth labels from the dataset. + + Returns: + A list of metrics, where each corresponds to the computed score for each prediction. + """ + pass + + +def load_dataset(file_path: str) -> List[Dict[str, Any]]: + """ + Loads in a dataset, with both input and targets, based on a file path. + Any preprocessing, such as adding few-shot examples, must be done in this function. + + Args: + file_path: A string representing the path of the dataset. + + Returns: + A list of dicts, where each dict has 'input' and 'targets' keys + corresponding to the input and ground truth labels, respectively. + + The 'input' should be a string containing the task instruction, + (optional) few-shot contexts, and the actual input data. + + The 'output' can be of any data type. Note that this will be used + in the `compute_metrics` function that benchmark uses. 
+ """ + pass diff --git a/python/packages/autogen-core/samples/adas/utils_drop.py b/python/packages/autogen-core/samples/adas/utils_drop.py index 617797f81156..dec0c2d0f853 100644 --- a/python/packages/autogen-core/samples/adas/utils_drop.py +++ b/python/packages/autogen-core/samples/adas/utils_drop.py @@ -49,8 +49,7 @@ def _normalize_answer(text: str) -> str: """Lower text and remove punctuation, articles and extra whitespace.""" parts = [ - _white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token))))) - for token in _tokenize(text) + _white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token))))) for token in _tokenize(text) ] parts = [part for part in parts if part.strip()] normalized = " ".join(parts).strip() @@ -72,9 +71,7 @@ def _normalize_number(text: str) -> str: return text -def _answer_to_bags( - answer: Union[str, List[str], Tuple[str, ...]] -) -> Tuple[List[str], List[Set[str]]]: +def _answer_to_bags(answer: Union[str, List[str], Tuple[str, ...]]) -> Tuple[List[str], List[Set[str]]]: if isinstance(answer, (list, tuple)): raw_spans = answer else: @@ -116,11 +113,7 @@ def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float: recall = 1.0 else: recall = intersection / float(len(gold_bag)) - f1 = ( - (2 * precision * recall) / (precision + recall) - if not (precision == 0.0 and recall == 0.0) - else 0.0 - ) * 100 + f1 = ((2 * precision * recall) / (precision + recall) if not (precision == 0.0 and recall == 0.0) else 0.0) * 100 return f1 @@ -139,7 +132,7 @@ def _match_numbers_if_present(gold_bag: Set[str], predicted_bag: Set[str]) -> bo def get_drop_metrics( - predicted: Union[str, List[str], Tuple[str, ...]], gold: Union[str, List[str], Tuple[str, ...]] + predicted: Union[str, List[str], Tuple[str, ...]], gold: Union[str, List[str], Tuple[str, ...]] ) -> Tuple[float, float]: """ Takes a predicted answer and a gold answer (that are both either a string or a list of @@ -174,18 +167,12 @@ def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str elif "date" in answer: return ( tuple( - [ - "{0} {1} {2}".format( - answer["date"]["day"], answer["date"]["month"], answer["date"]["year"] - ).strip() - ] + ["{0} {1} {2}".format(answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]).strip()] ), "date", ) else: - raise ValueError( - f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}" - ) + raise ValueError(f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}") def answer_json_to_string(answer_json): @@ -239,9 +226,7 @@ def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]: for q_idx, res in enumerate(predictions): try: correct_answers = labels[q_idx] - print( - f"extracted_answer {res}, correct_answers {correct_answers}" - ) + print(f"extracted_answer {res}, correct_answers {correct_answers}") em_score, f1_score = compute_drop_metrics(res, correct_answers) except Exception: acc_list.append(0) From 0fc794359cc62d1970ec9901124d5c1eb6c28037 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 5 Dec 2024 10:54:25 -0500 Subject: [PATCH 07/21] Add preliminary results --- .../autogen-core/samples/adas/README.md | 56 +++----- .../autogen-core/samples/adas/adas.py | 5 +- ...gpt3.5_base_agent_results_run_archive.json | 124 ++++++++++++++++++ ..._gpt4o_base_agent_results_run_archive.json | 51 +++++++ 4 files changed, 197 insertions(+), 39 deletions(-) create mode 100644 
python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json
 create mode 100644 python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json

diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/packages/autogen-core/samples/adas/README.md
index f76d09761808..fe4aa0761a33 100644
--- a/python/packages/autogen-core/samples/adas/README.md
+++ b/python/packages/autogen-core/samples/adas/README.md
@@ -97,7 +97,7 @@ python3 -m venv .venv
 source .venv/bin/activate
 
 # Clone the ADAS repo to be able to use the DROP dataset for the sample.
-# Not required if you do not plan on evaluating with DROP, and intend to run with your own dataset / benchmark.
+# Recommended for running the demo, though not required if you do not plan on evaluating with DROP and intend to run with your own dataset / benchmark.
 git clone https://github.com/ShengranHu/ADAS.git && cd ..
 
 # Install package at latest dev tag
@@ -178,14 +178,14 @@ o1-preview is also reported to be great at writing code, and we suggest you try
 This should be passed as a JSON string to the `meta_agent_model_config` flag.
 
 ```bash
---meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://andyye-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}'
+--meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}'
 ```
 
 #### Choose the LLM for the base agents used within the agent system
 The paper authors use GPT-3.5 (for cost purposes), but we recommend GPT-4o for better quality. This should be passed as a JSON string to the `base_agent_model_config` flag.
 ```bash
---base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://andyye-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06"}'
+--base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06"}'
 ```
 ### Run ADAS
 ```bash
@@ -215,14 +215,16 @@ python3 adas.py --n_generations 100 --max_workers 1
 ```
 ## Results for DROP benchmark
 ### Best Agent System that the Meta-Agent discovered
-See this [section](#all-agent-systems-that-the-meta-agent-discovered-for-drop) for the full list of discovered Agent Systems.
+See the files in the `adas/results` directory for the full list of discovered Agent Systems.
 #### Meta-Agent used o1-preview, and Base Agents used GPT3.5
 ```
-TODO
+TODO: Testing/optimizations/reruns actively in progress.
+See drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json for preliminary findings.
``` #### Meta-Agent used o1-preview, and Base Agents used GPT4.0 ``` -TODO +TODO: Testing/optimizations/reruns actively in progress. +See drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json for preliminary findings. ``` ### Performance with different LLMs @@ -265,8 +267,8 @@ The code for the Agent System compiles with no issue, but the systems hangs duri ``` INFO:autogen_core:Calling message handler for output_result with message type FinalAnswer published by coordinator_agent/default ERROR:autogen_core:Error processing publish message -Traceback (most recent call last): File "/home/andyye/autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 372, in _process_publish agent = await self._get_agent(agent_id) - File "/home/andyye/autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 620, in _get_agent raise LookupError(f"Agent with name {agent_id.type} not found.") +Traceback (most recent call last): File "/home//autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 372, in _process_publish agent = await self._get_agent(agent_id) + File "/home//autogen/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py", line 620, in _get_agent raise LookupError(f"Agent with name {agent_id.type} not found.") LookupError: Agent with name output_result not found. ``` The easiest solution is to terminate the program with `Ctrl + \` command, and then rerun the `adas.py` script. @@ -287,15 +289,15 @@ The reason for this is unknown. If you see something like this during execution of the Agent System code, it should be fine. ``` INFO:autogen_core:Calling message handler for reasoning_agent with message type Question published by Unknown -ERROR:asyncio:Task exception was never retrieved future: exception=RuntimeError('Event loop is closed')> -Traceback (most recent call last): File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpx/_client.py", line 2031, in aclose await self._transport.aclose() - File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpx/_transports/default.py", line 389, in aclose await self._pool.aclose() - File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 313, in aclose await self._close_connections(closing_connections) File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 305, in _close_connections await connection.aclose() - File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection.py", line 171, in aclose await self._connection.aclose() - File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/http11.py", line 265, in aclose await self._network_stream.aclose() - File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/httpcore/_backends/anyio.py", line 55, in aclose await self._stream.aclose() - File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/anyio/streams/tls.py", line 202, in aclose await self.transport_stream.aclose() - File "/home/andyye/autogen/python/.venv/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 1202, in aclose self._transport.close() +ERROR:asyncio:Task exception was never retrieved future: /autogen/python/.venv/lib/python3.10/site-packages/httpx/_client.py:2024> exception=RuntimeError('Event loop is 
closed')> +Traceback (most recent call last): File "/home//autogen/python/.venv/lib/python3.10/site-packages/httpx/_client.py", line 2031, in aclose await self._transport.aclose() + File "/home//autogen/python/.venv/lib/python3.10/site-packages/httpx/_transports/default.py", line 389, in aclose await self._pool.aclose() + File "/home//autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 313, in aclose await self._close_connections(closing_connections) File "/home//autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 305, in _close_connections await connection.aclose() + File "/home//autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/connection.py", line 171, in aclose await self._connection.aclose() + File "/home//autogen/python/.venv/lib/python3.10/site-packages/httpcore/_async/http11.py", line 265, in aclose await self._network_stream.aclose() + File "/home//autogen/python/.venv/lib/python3.10/site-packages/httpcore/_backends/anyio.py", line 55, in aclose await self._stream.aclose() + File "/home//autogen/python/.venv/lib/python3.10/site-packages/anyio/streams/tls.py", line 202, in aclose await self.transport_stream.aclose() + File "/home//autogen/python/.venv/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 1202, in aclose self._transport.close() File "/usr/lib/python3.10/asyncio/selector_events.py", line 706, in close self._loop.call_soon(self._call_connection_lost, None) File "/usr/lib/python3.10/asyncio/base_events.py", line 753, in call_soon self._check_closed() File "/usr/lib/python3.10/asyncio/base_events.py", line 515, in _check_closed raise RuntimeError('Event loop is closed') @@ -315,23 +317,3 @@ The reason for this is unknown. - Finish adding Quality-Diversity, Role_Assignment, and Take_A_Step_Back Agent Systems to the archive - Improve prompts to the meta-agent to reduce code errors - Add extra_create_args options such as `temperature`, `max_completion_tokens`, `top_p` in the model client `create()`. i.e. `extra_create_args={"temperature": 0.0}` - -## Appendix - -### All Agent Systems that the Meta-Agent discovered for DROP - -#### Meta-Agent used o1-preview, and Base Agents used GPT3.5 -``` -TODO -``` -#### Meta-Agent used o1-preview, and Base Agents used GPT4.0 - -Tree of Thought -``` - -``` - -Tree of Thought Imp -``` - -``` \ No newline at end of file diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py index 8308a3653935..9f2e10bf8b3e 100644 --- a/python/packages/autogen-core/samples/adas/adas.py +++ b/python/packages/autogen-core/samples/adas/adas.py @@ -1,7 +1,8 @@ """ -To run, type -`python packages/autogen-core/samples/common/adas/adas.py --data_filename=` +ADAS implementation in AutoGen. +This script uses a meta-agent to search for novel agent +systems. Please read the README.md for more information. 
""" import argparse diff --git a/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json b/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json new file mode 100644 index 000000000000..07ecaebda239 --- /dev/null +++ b/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json @@ -0,0 +1,124 @@ +[ + { + "thought": "By encouraging the LLM to think step by step rather than directly outputting an answer, chain-of-thought reasoning enables complex problem-solving through intermediate steps. This practice improves the model's ability to handle tasks that require deeper reasoning and provides insight into its decision-making process.", + "name": "Chain-of-Thought", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n import json\n from dataclasses import dataclass\n import sys\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, DefaultSubscription\n from autogen_core.components.models import (\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from typing import List\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n # Define message types as data classes\n @dataclass\n class ChainOfThoughtTask:\n task: str\n\n\n @dataclass\n class FinalResult:\n result: str\n\n\n # Define the Chain-of-Thought Agent\n class ChainOfThoughtAgent(RoutedAgent):\n def __init__(self, description: str,\n model_client: ChatCompletionClient,\n system_prompt: str,\n instruction: str,\n ) -> None:\n super().__init__(description)\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=system_prompt,\n )\n ]\n self._model_client = model_client\n self._instruction = instruction\n\n @message_handler\n async def handle_task(self, message: ChainOfThoughtTask, ctx: MessageContext) -> None:\n\n logging.info(f\"{self._description} received message: {message.task}\")\n user_prompt = message.task + \"\\n\" + self._instruction\n msgs = self._system_messages + [UserMessage(content=user_prompt, source=self.metadata[\"type\"])]\n model_result = await self._model_client.create(msgs)\n assert isinstance(model_result.content, str)\n\n await self.publish_message(\n message=FinalResult(model_result.content),\n topic_id=DefaultTopicId(),\n )\n\n\n # Define the main function to set up and run the agent system\n async def main():\n\n # Create a queue to collect final answer\n queue = asyncio.Queue[FinalResult]()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = 
SingleThreadedAgentRuntime()\n\n # Create the chain-of-thought agent\n agent_id = AgentId(\"COTAgent\", \"default\")\n cot_instruction = \"Please think step by step and then solve the task.\"\n await ChainOfThoughtAgent.register(\n runtime, \"COTAgent\", lambda: ChainOfThoughtAgent(\n description='Chain-of-Thought Agent',\n model_client=model_client,\n system_prompt=\"You are a helpful assistant. Directly answer the question. Keep it very concise.\",\n instruction=cot_instruction,\n )\n )\n # Create closure agent to collect final output result\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [DefaultSubscription()])\n\n # Start the runtime, and publish the first message\n runtime.start()\n initial_message = ChainOfThoughtTask(task=task)\n await runtime.send_message(initial_message, agent_id) # publish_message\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the first answer from the queue\n return (await queue.get()).result\n\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (25.1%, 28.4%), Median: 35.2%" + }, + { + "thought": "While an LLM can arrive at the correct answer, its reasoning may vary. By repeatedly asking the same question with high temperature settings, we can generate different reasoning paths. We then combine multiple answers from these Chain-of-Thought (CoT) agents to produce a more accurate final answer through ensembling.", + "name": "Self-Consistency with Chain-of-Thought", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n import json\n from dataclasses import dataclass\n import sys\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, DefaultSubscription\n from autogen_core.components.models import (\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from typing import List\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class WorkerTask:\n task: str\n previous_results: List[str]\n\n\n @dataclass\n class WorkerTaskResult:\n result: str\n\n\n @dataclass\n class UserTask:\n task: str\n\n\n @dataclass\n class FinalResult:\n result: str\n\n\n class WorkerAgent(RoutedAgent):\n def __init__(\n self,\n model_client: ChatCompletionClient,\n instruction: str,\n ) -> None:\n super().__init__(description=\"Worker Agent\")\n self._model_client = model_client\n self._instruction = instruction\n\n @message_handler\n async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n user_prompt = message.task + \"\\n\" + self._instruction\n\n if message.previous_results:\n # If previous results are provided, we need to synthesize them to create a single prompt.\n 
# system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n system_prompt = \"Given all the solutions, reason over them carefully and provide a final answer.\"\n system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. {r}\" for i, r in enumerate(message.previous_results)])\n model_result = await self._model_client.create(\n [SystemMessage(system_prompt), UserMessage(content=user_prompt, source=\"user\")]\n )\n else:\n # If no previous results are provided, we can simply pass the user query to the model.\n model_result = await self._model_client.create([UserMessage(content=user_prompt, source=\"user\")])\n assert isinstance(model_result.content, str)\n print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n return WorkerTaskResult(result=model_result.content)\n\n\n class OrchestratorAgent(RoutedAgent):\n def __init__(\n self,\n model_client: ChatCompletionClient,\n worker_agent_types: List[str],\n num_layers: int,\n ) -> None:\n super().__init__(description=\"Aggregator Agent\")\n self._model_client = model_client\n self._worker_agent_types = worker_agent_types\n self._num_layers = num_layers\n\n\n @message_handler\n async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n # Create task for the first layer.\n worker_task = WorkerTask(task=message.task, previous_results=[])\n # Iterate over layers.\n for i in range(self._num_layers):\n # Assign workers for this layer.\n worker_ids = [\n AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n for j, worker_type in enumerate(self._worker_agent_types)\n ]\n # Dispatch tasks to workers.\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n # Prepare task for the next layer.\n worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n # Perform final aggregation.\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n # system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. 
Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n system_prompt = \"Given all the above solutions, reason over them carefully and provide a final answer.\"\n system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. {r}\" for i, r in enumerate(worker_task.previous_results)])\n model_result = await self._model_client.create(\n [SystemMessage(system_prompt), UserMessage(content=message.task, source=\"user\")]\n )\n assert isinstance(model_result.content, str)\n return FinalResult(result=model_result.content)\n\n # Define the main function to set up and run the agent system\n async def main():\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Create the agents\n cot_instruction = \"Please think step by step and then solve the task.\"\n await WorkerAgent.register(\n runtime, \"worker\", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction)\n )\n await OrchestratorAgent.register(\n runtime,\n \"orchestrator\",\n lambda: OrchestratorAgent(\n model_client=model_client, worker_agent_types=[\"worker\"] * 5, num_layers=1\n ),\n )\n\n # Start the runtime, and publish the first message\n runtime.start()\n result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n\n # Return the result\n return result.result\n\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (11.9%, 13.5%), Median: 17.1%" + }, + { + "thought": "To enhance its performance, an LLM can iteratively improve its answer based on feedback. By reflecting on its previous attempts and incorporating feedback, the model can refine its reasoning and provide a more accurate solution.", + "name": "Self-Refine (Reflexion)", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n import re\n import sys\n import uuid\n from dataclasses import dataclass\n from typing import Dict, List, Union\n from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class WritingTask:\n task: str\n\n\n @dataclass\n class WritingResult:\n task: str\n answer: str\n review: str\n\n\n @dataclass\n class ReviewTask:\n session_id: str\n writing_task: str\n answer_scratchpad: str\n answer: str\n\n\n @dataclass\n class ReviewResult:\n review: str\n session_id: str\n approved: bool\n\n\n 
@default_subscription\n class WorkerAgent(RoutedAgent):\n \"An agent that performs writing tasks.\"\n\n def __init__(self,\n model_client: ChatCompletionClient,\n instruction: str,\n ) -> None:\n super().__init__(\"A helpful assistant\")\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=\"\"\"You are a helpful assistant. Work with the critic to improve your answer.\n Make sure to directly answer the question. Keep it very concise.\n Respond using the following format:\n\n Thoughts: \n Answer: \n \"\"\",\n )\n ]\n self._model_client = model_client\n self._session_memory: Dict[str, List[WritingTask | ReviewTask | ReviewResult]] = {}\n self._instruction = instruction\n\n @message_handler\n async def handle_writing_task(self, message: WritingTask, ctx: MessageContext) -> None:\n # Store the messages in a temporary memory for this request only.\n session_id = str(uuid.uuid4())\n self._session_memory.setdefault(session_id, []).append(message)\n # Generate a response using the chat completion API.\n response = await self._model_client.create(\n self._system_messages + [UserMessage(content=message.task + self._instruction, source=self.metadata[\"type\"])],\n cancellation_token=ctx.cancellation_token,\n )\n assert isinstance(response.content, str)\n # Extract the answer from the response.\n answer = self._extract_answer(response.content)\n if answer is None:\n raise ValueError(\"Answer not found.\")\n # Create a review task.\n review_task = ReviewTask(\n session_id=session_id,\n writing_task=message.task,\n answer_scratchpad=response.content,\n answer=answer,\n )\n # Store the review task in the session memory.\n self._session_memory[session_id].append(review_task)\n # Publish a review task.\n await self.publish_message(review_task, topic_id=TopicId(\"default\", self.id.key))\n\n @message_handler\n async def handle_review_result(self, message: ReviewResult, ctx: MessageContext) -> None:\n # Store the review result in the session memory.\n self._session_memory[message.session_id].append(message)\n # Obtain the request from previous messages.\n review_request = next(\n m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, ReviewTask)\n )\n assert review_request is not None\n # Check if the is approved.\n if message.approved:\n # Publish the writing result.\n await self.publish_message(\n WritingResult(\n answer=review_request.answer,\n task=review_request.writing_task,\n review=message.review,\n ),\n topic_id=TopicId(\"result\", self.id.key),\n )\n print(\"Writing Result:\")\n print(\"-\" * 80)\n print(f\"Task:\\n{review_request.writing_task}\")\n print(\"-\" * 80)\n print(f\"Answer:\\n{review_request.answer}\")\n print(\"-\" * 80)\n print(f\"Review:\\n{message.review}\")\n print(\"-\" * 80)\n else:\n # Create a list of LLM messages to send to the model.\n messages: List[LLMMessage] = [*self._system_messages]\n for m in self._session_memory[message.session_id]:\n if isinstance(m, ReviewResult):\n messages.append(UserMessage(content=m.review, source=\"Reviewer\"))\n elif isinstance(m, ReviewTask):\n messages.append(AssistantMessage(content=m.answer_scratchpad, source=\"Worker\"))\n elif isinstance(m, WritingTask):\n messages.append(UserMessage(content=m.task, source=\"User\"))\n else:\n raise ValueError(f\"Unexpected message type: {m}\")\n # Generate a revision using the chat completion API.\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n # Extract the answer 
from the response.\n answer = self._extract_answer(response.content)\n if answer is None:\n raise ValueError(\"Answer not found.\")\n # Create a new review task.\n review_task = ReviewTask(\n session_id=message.session_id,\n writing_task=review_request.writing_task,\n answer_scratchpad=response.content,\n answer=answer,\n )\n # Store the review task in the session memory.\n self._session_memory[message.session_id].append(review_task)\n # Publish a new review task.\n await self.publish_message(review_task, topic_id=TopicId(\"default\", self.id.key))\n\n\n def _extract_answer(self, text: str) -> Union[str, None]:\n pattern = \"(?<=Answer: ).*\"\n # Search for the pattern in the markdown text\n match = re.search(pattern, text, re.DOTALL)\n # Extract the language and code block if a match is found\n if match:\n return match.group(0)\n return None\n\n @default_subscription\n class ReviewerAgent(RoutedAgent):\n \"\"\"An agent that critiques tasks.\"\"\"\n\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"A critic agent.\")\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=\"\"\"You are a critic. Review answers and criticize on where it might be wrong.\n Respond using the following JSON format:\n {\n \"correctness\": \"\",\n \"approval\": \"\",\n \"suggested_changes\": \"\"\n }\n \"\"\",\n )\n ]\n self._session_memory: Dict[str, List[ReviewTask | ReviewResult]] = {}\n self._model_client = model_client\n\n @message_handler\n async def handle_review_task(self, message: ReviewTask, ctx: MessageContext) -> None:\n # Format the prompt for the review.\n # Gather the previous feedback if available.\n previous_feedback = \"\"\n if message.session_id in self._session_memory:\n previous_review = next(\n (m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, ReviewResult)),\n None,\n )\n if previous_review is not None:\n previous_feedback = previous_review.review\n # Store the messages in a temporary memory for this request only.\n self._session_memory.setdefault(message.session_id, []).append(message)\n prompt = f\"\"\"The problem statement is: {message.writing_task}\n The answer is:\n ```\n {message.answer}\n ```\n\n Previous feedback:\n {previous_feedback}\n\n Please review the answer. If previous feedback was provided, see if it was addressed.\n \"\"\"\n # Generate a response using the chat completion API.\n response = await self._model_client.create(\n self._system_messages + [UserMessage(content=prompt, source=self.metadata[\"type\"])],\n cancellation_token=ctx.cancellation_token,\n json_output=True,\n )\n assert isinstance(response.content, str)\n # TODO: use structured generation library e.g. 
guidance to ensure the response is in the expected format.\n # Parse the response JSON.\n review = json.loads(response.content)\n # Construct the review text.\n review_text = \"Review:\\n\" + \"\\n\".join([f\"{k}: {v}\" for k, v in review.items()])\n approved = review[\"approval\"].lower().strip() == \"approve\"\n result = ReviewResult(\n review=review_text,\n session_id=message.session_id,\n approved=approved,\n )\n # Store the review result in the session memory.\n self._session_memory[message.session_id].append(result)\n # Publish the review result.\n await self.publish_message(result, topic_id=TopicId(\"default\", self.id.key))\n\n\n # Define the main function to set up and run the agent system\n async def main():\n # Create a queue to collect final answer\n queue = asyncio.Queue[WritingResult]()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Create agents\n await ReviewerAgent.register(\n runtime, \"ReviewerAgent\", lambda: ReviewerAgent(model_client=model_client)\n )\n cot_instruction = \"Please think step by step and then solve the task.\"\n await WorkerAgent.register(\n runtime, \"WorkerAgent\", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction)\n )\n # Create closure agent to collect final output result\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n # Start the runtime, and publish the first message\n runtime.start()\n await runtime.publish_message(\n message=WritingTask(task=task),\n topic_id=DefaultTopicId(),\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the first answer from the queue\n return (await queue.get()).answer\n \n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (17.7%, 21.4%), Median: 29.3%" + }, + { + "thought": "By letting different LLMs debate with each other, we can leverage their diverse perspectives to find better solutions for tasks.", + "name": "LLM Debate", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n import re\n import sys\n import uuid\n from dataclasses import dataclass\n from typing import Dict, List, Union\n from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n 
model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n \n @dataclass\n class Question:\n content: str\n\n\n @dataclass\n class Answer:\n content: str\n\n\n @dataclass\n class SolverRequest:\n content: str\n question: str\n\n\n @dataclass\n class IntermediateSolverResponse:\n content: str\n question: str\n answer: str\n round: int\n\n\n @dataclass\n class FinalSolverResponse:\n answer: str\n\n @default_subscription\n class Solver(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neighbors: int, max_round: int) -> None:\n super().__init__(\"A debator.\")\n self._topic_type = topic_type\n self._model_client = model_client\n self._num_neighbors = num_neighbors\n self._history: List[LLMMessage] = []\n self._buffer: Dict[int, List[IntermediateSolverResponse]] = {}\n self._system_messages = [\n SystemMessage(\n (\n \"You are a helpful assistant with expertise in reasoning. \"\n \"Your task is to assist in solving a reasoning problem by providing \"\n \"a clear and detailed solution. Limit your output within 100 words, \"\n \"and your final answer should be a single string.\"\n )\n )\n ]\n self._round = 0\n self._max_round = max_round\n\n @message_handler\n async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None:\n # Add the question to the memory.\n self._history.append(UserMessage(content=message.content, source=\"user\"))\n # Make an inference using the model.\n model_result = await self._model_client.create(self._system_messages + self._history)\n assert isinstance(model_result.content, str)\n # Add the response to the memory.\n self._history.append(AssistantMessage(content=model_result.content, source=self.metadata[\"type\"]))\n print(f\"{'-'*80}\\nSolver {self.id} round {self._round}:\\n{model_result.content}\")\n # Increment the counter.\n self._round += 1\n if self._round == self._max_round:\n # If the counter reaches the maximum round, publishes a final response.\n await self.publish_message(FinalSolverResponse(answer=model_result.content), topic_id=DefaultTopicId())\n else:\n # Publish intermediate response to the topic associated with this solver.\n print(\"publish IntermediateSolverResponse\")\n await self.publish_message(\n IntermediateSolverResponse(\n content=model_result.content,\n question=message.question,\n answer=model_result.content,\n round=self._round,\n ),\n topic_id=DefaultTopicId(type=self._topic_type),\n )\n\n @message_handler\n async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None:\n # Add neighbor's response to the buffer.\n self._buffer.setdefault(message.round, []).append(message)\n # Check if all neighbors have responded.\n if len(self._buffer[message.round]) == self._num_neighbors:\n print(\n f\"{'-'*80}\\nSolver {self.id} round {message.round}:\\nReceived all responses from {self._num_neighbors} neighbors.\"\n )\n # Prepare the prompt for the next question.\n prompt = \"These are the solutions to the problem from other agents:\\n\"\n for resp in self._buffer[message.round]:\n prompt += f\"One agent solution: {resp.content}\\n\"\n prompt += (\n \"Using the solutions from other agents as additional information, \"\n \"can you provide your answer to the problem? \"\n f\"The original problem is {message.question}. 
\"\n \"Your final answer should be a single string.\"\n )\n # Send the question to the agent itself to solve.\n await self.send_message(SolverRequest(content=prompt, question=message.question), self.id)\n # Clear the buffer.\n self._buffer.pop(message.round)\n\n\n @default_subscription\n class Aggregator(RoutedAgent):\n def __init__(self, num_solvers: int) -> None:\n super().__init__(\"Aggregator\")\n self._num_solvers = num_solvers\n self._buffer: List[FinalSolverResponse] = []\n\n @message_handler\n async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n print(f\"{'-'*80}\\nAggregator {self.id} received question:\\n{message.content}\")\n prompt = (\n f\"Can you solve the following problem?\\n{message.content}\\n\"\n \"Explain your reasoning. Your final answer should be a single string.\"\n )\n print(f\"{'-'*80}\\nAggregator {self.id} publishes initial solver request.\")\n await self.publish_message(SolverRequest(content=prompt, question=message.content), topic_id=DefaultTopicId())\n\n @message_handler\n async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None:\n self._buffer.append(message)\n if len(self._buffer) == self._num_solvers:\n print(f\"{'-'*80}\\nAggregator {self.id} received all final answers from {self._num_solvers} solvers.\")\n # Find the majority answer.\n answers = [resp.answer for resp in self._buffer]\n majority_answer = max(set(answers), key=answers.count)\n # Publish the aggregated response.\n await self.publish_message(Answer(content=majority_answer), topic_id=TopicId(\"result\", self.id.key))\n # Clear the responses.\n self._buffer.clear()\n print(f\"{'-'*80}\\nAggregator {self.id} publishes final answer:\\n{majority_answer}\")\n\n\n # Define the main function to set up and run the agent system\n async def main():\n queue = asyncio.Queue[Answer]()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n await Solver.register(\n runtime,\n \"SolverA\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverA\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverB\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverB\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverC\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverC\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverD\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverD\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Aggregator.register(runtime, \"Aggregator\", lambda: Aggregator(num_solvers=4))\n\n # Subscriptions for topic published to by SolverA.\n await runtime.add_subscription(TypeSubscription(\"SolverA\", \"SolverD\"))\n await runtime.add_subscription(TypeSubscription(\"SolverA\", \"SolverB\"))\n\n # Subscriptions for topic published to by SolverB.\n await runtime.add_subscription(TypeSubscription(\"SolverB\", \"SolverA\"))\n await runtime.add_subscription(TypeSubscription(\"SolverB\", \"SolverC\"))\n\n # Subscriptions for topic published to by SolverC.\n await runtime.add_subscription(TypeSubscription(\"SolverC\", \"SolverB\"))\n await runtime.add_subscription(TypeSubscription(\"SolverC\", \"SolverD\"))\n\n # Subscriptions for topic published to by SolverD.\n await 
runtime.add_subscription(TypeSubscription(\"SolverD\", \"SolverC\"))\n await runtime.add_subscription(TypeSubscription(\"SolverD\", \"SolverA\"))\n\n # All solvers and the aggregator subscribe to the default topic.\n\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n await runtime.publish_message(Question(content=task), DefaultTopicId())\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the answer from the queue\n res = (await queue.get()).content\n print(f\"res {res}\")\n return res\n\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (48.7%, 53.4%), Median: 62.5%" + }, + { + "thought": "**Insights:**\nTo improve the agent's ability to handle discrete reasoning tasks like DROP, integrating step-by-step reasoning with tool usage is beneficial. The ReAct framework combines reasoning and action, allowing the agent to think critically and perform necessary computations or information retrieval when required.\n\n**Overall Idea:**\nDevelop a ReAct agent that utilizes chain-of-thought reasoning and can interact with tools to perform actions such as calculations. This agent will iteratively generate thoughts and actions. When an action is needed (e.g., a calculation), it communicates with a ToolAgent to perform the task. This approach allows the agent to handle complex reasoning steps and computations effectively, leading to more accurate answers.\n\n**Implementation:**\n- **ReActAgent**: Handles reasoning and decision-making. It processes the user task, generates thoughts, decides when to perform actions, and formulates the final answer.\n - Initializes with a system prompt guiding it to use 'Thought:', 'Action:', 'Observation:', and 'Answer:' statements.\n - Uses a loop to iteratively generate responses and check for actions or the final answer.\n - Communicates with the ToolAgent when an action is needed.\n - Publishes the final answer to the correct topic.\n- **ToolAgent**: Executes actions requested by the ReActAgent, such as calculations.\n - Contains a set of available tools (e.g., a calculator function).\n - Processes action requests and returns observations back to the ReActAgent.\n - Ensures safe evaluation of expressions.\n- **ClosureAgent**: Collects the final answer from the ReActAgent and returns it as the output.\n- **Main Function**: Sets up the agents, starts the runtime, publishes the initial user task, and retrieves the final answer from the queue.\n- Ensure proper use of topics and subscriptions to avoid conflicts and adhere to the correct implementation patterns.", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import re\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId\n from autogen_core.components import (\n RoutedAgent,\n default_subscription,\n message_handler,\n ClosureAgent,\n DefaultTopicId,\n TypeSubscription,\n )\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n import ast\n\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), 
\"https://cognitiveservices.azure.com/.default\"\n )\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class UserTask:\n task: str\n\n @dataclass\n class Action:\n action: str\n input: str\n\n @dataclass\n class Observation:\n content: str\n\n @dataclass\n class FinalAnswer:\n answer: str\n\n @default_subscription\n class ReActAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"ReAct Agent\")\n self._model_client = model_client\n self._messages: List[LLMMessage] = [\n SystemMessage(\n \"You are a helpful assistant that uses reasoning and actions to solve tasks. Use 'Thought:', 'Action:', 'Observation:', and 'Answer.' to structure your reasoning. Finish with 'Answer.'\"\n )\n ]\n self._max_steps = 5\n\n @message_handler\n async def handle_user_task(self, message: UserTask, ctx: MessageContext) -> None:\n if message.task:\n self._messages.append(UserMessage(content=message.task, source=\"user\"))\n for _ in range(self._max_steps):\n response = await self._model_client.create(\n self._messages, cancellation_token=ctx.cancellation_token\n )\n assert isinstance(response.content, str)\n self._messages.append(\n AssistantMessage(content=response.content, source=self.metadata[\"type\"])\n )\n\n # Check for 'Answer:'\n if \"Answer:\" in response.content:\n # Extract the final answer\n answer = response.content.split(\"Answer:\")[-1].strip()\n await self.publish_message(\n FinalAnswer(answer=answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n return\n # Check for 'Action:'\n action_match = re.search(r\"Action:\\s*(.*)\", response.content)\n if action_match:\n action_content = action_match.group(1).strip()\n # Extract action name and input\n action_name_match = re.match(r\"\\[(.*?)\\]\\s*Input:\\s*(.*)\", action_content)\n if action_name_match:\n action_name = action_name_match.group(1).strip()\n action_input = action_name_match.group(2).strip()\n # Send action to ToolAgent\n await self.send_message(\n Action(action=action_name, input=action_input), AgentId(\"tool_agent\", self.id.key)\n )\n return # Wait for observation\n # If max steps reached without answer, publish failure\n await self.publish_message(\n FinalAnswer(answer=\"Failed to find an answer within max steps.\"),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n\n @message_handler\n async def handle_observation(self, message: Observation, ctx: MessageContext) -> None:\n # Add the observation to the conversation\n self._messages.append(\n AssistantMessage(content=f\"Observation: {message.content}\", source=self.metadata[\"type\"])\n )\n # Continue the reasoning loop\n await self.handle_user_task(UserTask(task=\"\"), ctx)\n\n @default_subscription\n class ToolAgent(RoutedAgent):\n def __init__(self) -> None:\n super().__init__(\"Tool Agent\")\n # Define available tools\n self._tools = {\"Calculator\": self.calculate}\n\n async def calculate(self, expression: str) -> str:\n try:\n # Safely evaluate the expression\n result = str(ast.literal_eval(expression))\n return result\n except Exception as e:\n return f\"Error: {e}\"\n\n @message_handler\n async def handle_action(self, message: Action, ctx: MessageContext) -> 
None:\n action = message.action\n input_str = message.input\n if action in self._tools:\n result = await self._tools[action](input_str)\n else:\n result = f\"Unknown action: {action}\"\n # Send back the observation to the ReActAgent\n await self.send_message(\n Observation(content=result), ctx.sender\n )\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(\n _runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext\n ) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n await ToolAgent.register(runtime, \"tool_agent\", lambda: ToolAgent())\n await ReActAgent.register(\n runtime,\n \"react_agent\",\n lambda: ReActAgent(\n model_client=model_client\n ),\n )\n\n # ClosureAgent subscribes to the result topic\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic],\n )\n\n runtime.start()\n await runtime.publish_message(UserTask(task=task), topic_id=DefaultTopicId())\n\n await runtime.stop_when_idle()\n\n return (await queue.get()).answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (23.6%, 27.2%), Median: 34.5%", + "generation": 4 + }, + { + "thought": "**Insights:**\n\nTo improve performance on the DROP benchmark, we can design an agent that actively retrieves relevant information from the passage in an iterative manner. By refining its understanding of what information is needed, the agent can perform more precise reasoning.\n\n**Overall Idea:**\n\nCreate an 'Active Retrieval Agent' that interacts with a 'Retriever Agent'. The agent processes the question, identifies what information is needed, requests specific information from the retriever, and uses the retrieved information to generate an answer. This iterative process continues until the agent is confident in its answer. This approach can help the agent focus on the most relevant parts of the passage, improving comprehension and reasoning.\n\n**Implementation:**\n\n1. Define a `Question` message containing the passage and question.\n2. Implement an `ActiveRetrievalAgent` that handles the `Question` message.\n3. The agent identifies needed information and sends `RetrievalRequest` messages to a `RetrieverAgent`.\n4. The `RetrieverAgent` processes the requests and returns `RetrievalResponse` messages with relevant information extracted from the passage.\n5. The `ActiveRetrievalAgent` uses the retrieved information to update its understanding and decides whether to make further retrieval requests or generate the final answer.\n6. The agent publishes the final answer to a result topic that a `ClosureAgent` subscribes to.\n7. 
Ensure proper subscriptions and message handling to avoid implementation mistakes.", + "name": "Active Retrieval Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import MessageContext, AgentId, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription, ClosureAgent, DefaultTopicId\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components.models import (\n SystemMessage,\n UserMessage,\n ChatCompletionClient,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class Question:\n passage: str\n question: str\n\n @dataclass\n class RetrievalRequest:\n query: str\n\n @dataclass\n class RetrievalResponse:\n information: str\n\n @dataclass\n class AnswerMessage:\n answer: str\n\n @default_subscription\n class RetrieverAgent(RoutedAgent):\n def __init__(self, passage: str) -> None:\n super().__init__(\"Retriever Agent\")\n self._passage = passage\n\n @message_handler\n async def handle_retrieval_request(self, message: RetrievalRequest, ctx: MessageContext) -> None:\n # Simple retrieval implementation: return sentences containing the query words\n relevant_info = '\\n'.join([sent.strip() for sent in self._passage.split('.') if any(word.lower() in sent.lower() for word in message.query.split())])\n await self.publish_message(RetrievalResponse(information=relevant_info), topic_id=TopicId(\"retrieval_response\", ctx.sender.key))\n\n @default_subscription\n class ActiveRetrievalAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"Active Retrieval Agent\")\n self._model_client = model_client\n self._retrieved_info = \"\"\n self._question = \"\"\n\n @message_handler\n async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n self._question = message.question\n # Initial retrieval request\n retrieval_request = RetrievalRequest(query=message.question)\n await self.publish_message(retrieval_request, topic_id=TopicId(\"retrieval_request\", self.id.key))\n\n @message_handler\n async def handle_retrieval_response(self, message: RetrievalResponse, ctx: MessageContext) -> None:\n self._retrieved_info += message.information + '\\n'\n # Decide whether to retrieve more or generate answer\n system_message = SystemMessage(\"You are a helpful assistant that answers questions based on the retrieved information.\")\n prompt = f\"Question: {self._question}\\nRetrieved Information: {self._retrieved_info}\\nDo you have enough information to answer the question? 
If not, specify what additional information you need.\"\n messages = [system_message, UserMessage(content=prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n if \"Yes\" in response.content or \"I have enough\" in response.content:\n # Generate final answer\n prompt = f\"Based on the retrieved information, answer the question: {self._question}\"\n messages = [system_message, UserMessage(content=prompt, source=\"user\")]\n final_response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(final_response.content, str)\n await self.publish_message(AnswerMessage(answer=final_response.content.strip()), topic_id=TopicId(\"result\", \"output_result\"))\n else:\n # Extract what information is needed and make another retrieval request\n needed_info = response.content.strip()\n retrieval_request = RetrievalRequest(query=needed_info)\n await self.publish_message(retrieval_request, topic_id=TopicId(\"retrieval_request\", self.id.key))\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: AnswerMessage, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from task\n passage_and_question = task\n # Assume the task is a string containing 'Passage:\\n...\\nQuestion:\\n...'\n parts = passage_and_question.strip().split('Question:')\n passage = parts[0].replace('Passage:', '').strip()\n question = parts[1].strip()\n\n await RetrieverAgent.register(runtime, \"retriever_agent\", lambda: RetrieverAgent(passage=passage))\n await ActiveRetrievalAgent.register(runtime, \"active_retrieval_agent\", lambda: ActiveRetrievalAgent(model_client=model_client))\n\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n # Subscriptions\n await runtime.add_subscription(TypeSubscription(\"retrieval_request\", \"retriever_agent\"))\n await runtime.add_subscription(TypeSubscription(\"retrieval_response\", \"active_retrieval_agent\"))\n\n runtime.start()\n await runtime.publish_message(Question(passage=passage, question=question), topic_id=DefaultTopicId())\n await runtime.stop_when_idle()\n return (await queue.get()).answer\n\n return asyncio.run(main())\n", + "fitness": "95% Bootstrap Confidence Interval: (0.0%, 0.1%), Median: 0.3%", + "generation": 7 + }, + { + "thought": "**Insights:**\nThe DROP benchmark requires complex reasoning and careful comprehension, which can benefit from systematically exploring multiple reasoning paths.\nImplementing a Tree-of-Thought agent that effectively generates, evaluates, and selects reasoning paths can enhance performance.\n\n**Overall Idea:**\nDevelop a Tree-of-Thought Agent that uses beam search to explore multiple reasoning paths.\nAt each step, the agent will generate several possible continuations (thoughts) using the LLM, evaluate them using the LLM to score their relevance and correctness, and select the top candidates to expand further.\nThis process continues until a maximum depth is reached or a satisfactory answer is found.\nFinally, the agent extracts the final answer from the best reasoning path.\nThe agent starts by receiving the task and ends by returning the final answer.\n\n**Implementation:**\n- Create a 
`TreeOfThoughtAgent` that manages the tree search process.\n- Use the LLM to generate multiple continuations at each node, based on the beam width.\n- Implement an `evaluate_state` method that uses the LLM to score each reasoning path.\n- Ensure that the topic IDs and subscriptions align correctly to enable proper message passing.\n- Use consistent topic management throughout the implementation.", + "name": "Tree-of-Thought Agent", + "code": "def forward(self, task, model_client_kwargs) -> str:\n import asyncio\n import json\n import logging\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentRuntime, AgentId, MessageContext, TopicId\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, ClosureAgent, TypeSubscription\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from typing import Any, List\n from dataclasses import dataclass, field\n import heapq\n\n # Set up logging\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(\"tree_of_thought_agent\")\n\n # Create the token provider for Azure OpenAI\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass(order=True)\n class PrioritizedItem:\n priority: float\n item: Any = field(compare=False)\n\n @dataclass\n class Task:\n content: str\n\n @dataclass\n class Answer:\n content: str\n\n @default_subscription\n class TreeOfThoughtAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, beam_width: int, max_depth: int):\n super().__init__(\"Tree of Thought Agent\")\n self._model_client = model_client\n self._beam_width = beam_width\n self._max_depth = max_depth\n self._system_message = SystemMessage(\n \"You are an expert reasoning assistant. 
You should think step by step to solve the problem.\"\n )\n\n @message_handler\n async def handle_task(self, message: Task, ctx: MessageContext) -> None:\n logger.info(f\"Received task: {message.content}\")\n initial_state = \"\"\n frontier = [PrioritizedItem(priority=0, item=(initial_state, 0))]\n completed_paths = []\n\n while frontier:\n next_frontier = []\n # Explore the most promising nodes\n for _ in range(min(len(frontier), self._beam_width)):\n node = heapq.heappop(frontier)\n state, depth = node.item\n logger.info(f\"Expanding node at depth {depth} with state:\\n{state}\")\n if depth >= self._max_depth:\n completed_paths.append((state, -node.priority))\n continue\n # Expand the node by generating continuations\n continuations = await self.expand_state(message.content, state)\n for cont in continuations:\n new_state = state + cont[\"text\"] + \"\\n\"\n score = await self.evaluate_state(message.content, new_state)\n heapq.heappush(next_frontier, PrioritizedItem(priority=-score, item=(new_state, depth + 1)))\n if not next_frontier:\n break\n frontier = next_frontier\n # Select the best completed path\n if completed_paths:\n best_path = max(completed_paths, key=lambda x: x[1])[0]\n else:\n best_node = max(frontier, key=lambda x: x.priority)\n best_path = best_node.item[0]\n logger.info(f\"Best reasoning path:\\n{best_path}\")\n # Extract the final answer from the best reasoning path\n final_answer = await self.extract_answer(message.content, best_path)\n # Publish the final answer\n await self.publish_message(Answer(content=final_answer), topic_id=TopicId(\"result\", self.id.key))\n\n async def expand_state(self, task_content: str, state: str) -> List[dict]:\n # Use the model to generate multiple continuations\n prompt = [\n self._system_message,\n UserMessage(content=task_content, source=\"user\"),\n AssistantMessage(content=state, source=\"assistant\"),\n UserMessage(content=\"What are the possible next steps? 
Provide several possible continuations.\", source=\"user\")\n ]\n try:\n model_result = await self._model_client.create(prompt)\n response = model_result.content.strip()\n # Assume the model returns continuations separated by newline\n continuations = [\n {\"text\": cont.strip()} for cont in response.split(\"\\n\") if cont.strip()\n ]\n logger.info(f\"Generated {len(continuations)} continuations\")\n return continuations[:self._beam_width]\n except Exception as e:\n logger.error(f\"Error during state expansion: {e}\")\n return []\n\n async def evaluate_state(self, task_content: str, state: str) -> float:\n # Use the model to evaluate the state\n prompt = [\n self._system_message,\n UserMessage(content=task_content, source=\"user\"),\n AssistantMessage(content=state, source=\"assistant\"),\n UserMessage(content=\"Please rate the above reasoning on a scale from 1 to 10, where 10 is excellent.\", source=\"user\")\n ]\n try:\n model_result = await self._model_client.create(prompt)\n score_text = model_result.content.strip()\n score = float(score_text)\n logger.info(f\"Evaluated state with score: {score}\")\n return score\n except Exception as e:\n logger.error(f\"Error during state evaluation: {e}\")\n # Return a neutral score in case of error\n return 5.0\n\n async def extract_answer(self, task_content: str, reasoning_path: str) -> str:\n # Ask the model to produce the final answer based on the reasoning path\n prompt = [\n self._system_message,\n UserMessage(content=task_content, source=\"user\"),\n AssistantMessage(content=reasoning_path, source=\"assistant\"),\n UserMessage(content=\"Based on the above reasoning, what is the final answer?\", source=\"user\")\n ]\n try:\n model_result = await self._model_client.create(prompt)\n final_answer = model_result.content.strip()\n logger.info(f\"Extracted final answer: {final_answer}\")\n return final_answer\n except Exception as e:\n logger.error(f\"Error during answer extraction: {e}\")\n return \"Unable to generate an answer.\"\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register the TreeOfThoughtAgent\n await TreeOfThoughtAgent.register(runtime, \"tree_of_thought_agent\", lambda: TreeOfThoughtAgent(\n model_client=model_client,\n beam_width=3,\n max_depth=3\n ))\n\n # Register the ClosureAgent to collect the final answer\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n runtime.start()\n\n # Publish the initial task\n await runtime.publish_message(Task(content=task), topic_id=DefaultTopicId())\n\n # Wait until the runtime is idle\n await runtime.stop_when_idle()\n\n # Retrieve and return the final answer\n answer = (await queue.get()).content\n logger.info(f\"Final Answer: {answer}\")\n return answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (7.8%, 9.2%), Median: 11.9%", + "generation": 8 + }, + { + "thought": "**Insights:**\n\nThe DROP benchmark requires complex discrete reasoning over passages, which often involves handling numerical values, dates, and entities. By extracting structured information from the passage, we can enable more precise reasoning. 
Incorporating a knowledge extraction step can improve the agent's ability to handle complex questions.\n\n**Overall Idea:**\n\nI propose a \"Knowledge Extraction Agent\" architecture that consists of a \"ParserAgent\" and a \"ReasoningAgent\". The \"ParserAgent\" extracts structured information from the passage, such as entities, numbers, and their relationships. The \"ReasoningAgent\" then uses this structured data along with the question to perform detailed reasoning and generate the final answer.\n\n**Implementation:**\n\n- Implement a `ParserAgent` that, upon receiving a passage, uses the model to extract structured data and returns it in JSON format.\n- Implement a `ReasoningAgent` that takes the structured data and the question, and performs step-by-step reasoning to arrive at the answer.\n- Use a `CoordinatorAgent` to manage the flow: it sends the passage to the `ParserAgent`, then sends the structured data and question to the `ReasoningAgent`, and finally publishes the answer.\n- Use a `ClosureAgent` to collect the final answer and return it.\n- Ensure proper agent registration and subscriptions, and follow best practices for message passing and agent communication.", + "name": "Knowledge Extraction Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n from dataclasses import dataclass\n from typing import Any, Dict\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_core.base import AgentId, MessageContext, AgentRuntime, TopicId\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components import RoutedAgent, message_handler, ClosureAgent, TypeSubscription\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n\n # Create the model client\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class Passage:\n content: str\n\n @dataclass\n class StructuredData:\n data: Dict[str, Any]\n\n @dataclass\n class Question:\n content: str\n\n @dataclass\n class ReasoningInput:\n structured_data: Dict[str, Any]\n question: str\n\n @dataclass\n class Answer:\n content: str\n\n class ParserAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Parser Agent\")\n self._model_client = model_client\n self._system_message = SystemMessage(\"You are an assistant that extracts structured data from a passage. 
Extract entities, numbers, dates, and relationships, and present them in JSON format.\")\n\n @message_handler\n async def handle_passage(self, message: Passage, ctx: MessageContext) -> StructuredData:\n prompt = f\"Extract structured data from the following passage:\\n{message.content}\\nProvide the data in JSON format.\"\n messages = [self._system_message, UserMessage(content=prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n try:\n data = json.loads(response.content)\n except json.JSONDecodeError:\n data = {}\n return StructuredData(data=data)\n\n class ReasoningAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Reasoning Agent\")\n self._model_client = model_client\n self._system_message = SystemMessage(\"You are a reasoning assistant that uses structured data to answer questions. Think step by step and provide the final answer.\")\n\n @message_handler\n async def handle_reasoning(self, message: ReasoningInput, ctx: MessageContext) -> Answer:\n structured_data_str = json.dumps(message.structured_data)\n prompt = f\"Answer the following question using the structured data:\\nStructured Data: {structured_data_str}\\nQuestion: {message.question}\"\n messages = [self._system_message, UserMessage(content=prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n return Answer(content=response.content)\n\n class CoordinatorAgent(RoutedAgent):\n def __init__(self):\n super().__init__(\"Coordinator Agent\")\n self.parser_agent_id = AgentId(\"parser_agent\", self.id.key)\n self.reasoning_agent_id = AgentId(\"reasoning_agent\", self.id.key)\n\n @message_handler\n async def handle_task(self, message: Passage, ctx: MessageContext) -> None:\n # Send passage to ParserAgent\n structured_data = await self.send_message(message, self.parser_agent_id)\n # Send structured data and question to ReasoningAgent\n question = Question(content=task)\n reasoning_input = ReasoningInput(structured_data=structured_data.data, question=question.content)\n answer = await self.send_message(reasoning_input, self.reasoning_agent_id)\n # Publish final answer\n await self.publish_message(\n Answer(content=answer.content),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await ParserAgent.register(runtime, \"parser_agent\", lambda: ParserAgent(model_client=model_client))\n await ReasoningAgent.register(runtime, \"reasoning_agent\", lambda: ReasoningAgent(model_client=model_client))\n await CoordinatorAgent.register(runtime, \"coordinator_agent\", lambda: CoordinatorAgent())\n\n # ClosureAgent\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n\n # Start the process by sending the passage to the CoordinatorAgent\n passage = Passage(content=task)\n await runtime.send_message(passage, AgentId(\"coordinator_agent\", \"default\"))\n\n # Wait for completion\n await runtime.stop_when_idle()\n\n # Return the answer\n final_answer = (await queue.get()).content\n return final_answer\n\n return asyncio.run(main())", + "fitness": 
"95% Bootstrap Confidence Interval: (8.2%, 9.5%), Median: 12.7%", + "generation": 11 + }, + { + "thought": "**Insights:**\nIn complex reasoning tasks like DROP, breaking down a complicated question into simpler sub-questions can make reasoning more manageable and accurate. By decomposing the main question into smaller, independent sub-questions, an agent can focus on specific pieces of information, leading to more precise answers.\n\n**Overall Idea:**\nDevelop a 'Question Decomposition Agent' system that consists of a 'DecomposerAgent', which splits the main question into sub-questions; a 'SolverAgent', which answers each sub-question; and a 'ComposerAgent', which synthesizes the sub-answers into the final answer. This approach allows the agent to handle complex reasoning in a structured way by tackling one piece at a time.\n\n**Implementation:**\n- **DecomposerAgent**: Receives the main question and decomposes it into sub-questions.\n - Sends a 'DecompositionInfo' message to the ComposerAgent with the total number of sub-questions.\n - Publishes 'SubQuestion' messages to be answered.\n- **SolverAgent**: Listens for 'SubQuestion' messages and provides answers to each.\n - Sends back 'SubAnswer' messages containing the answers to sub-questions.\n- **ComposerAgent**: Collects all 'SubAnswer' messages and waits until it has received all before combining them into a final answer.\n - Publishes the 'FinalAnswer' message with the composed answer to the correct topic.\n- **ClosureAgent**: Subscribes to the 'FinalAnswer' topic and returns the final answer.\n- Adjust topics and subscriptions to ensure messages are routed correctly, and agent IDs match between publishers and subscribers.\n- Handle the main() function correctly, and avoid any implementation mistakes outlined in the instructions.", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId\n from autogen_core.components import (\n RoutedAgent,\n default_subscription,\n message_handler,\n ClosureAgent,\n DefaultTopicId,\n TypeSubscription,\n )\n from autogen_core.components.models import (\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n LLMMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class MainQuestion:\n question: str\n passage: str\n\n @dataclass\n class SubQuestion:\n sub_question: str\n passage: str\n sub_question_id: int\n\n @dataclass\n class SubAnswer:\n sub_question_id: int\n answer: str\n\n @dataclass\n class FinalAnswer:\n answer: str\n\n @dataclass\n class DecompositionInfo:\n total_sub_questions: int\n\n @default_subscription\n class DecomposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"Decomposer Agent\")\n self._model_client = model_client\n\n 
@message_handler\n async def handle_main_question(self, message: MainQuestion, ctx: MessageContext) -> None:\n system_prompt = \"You are an assistant that decomposes complex questions into simpler sub-questions.\"\n prompt = f\"Given the main question and the passage, decompose the main question into a list of simpler, answerable sub-questions.\\n\\nPassage:\\n{message.passage}\\n\\nMain Question:\\n{message.question}\\n\\nProvide the sub-questions as a numbered list.\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n sub_questions_text = response.content.strip()\n # Parse the sub-questions\n sub_questions = []\n lines = sub_questions_text.splitlines()\n for line in lines:\n if line.strip():\n # Remove numbering if present\n sub_question = line.strip().lstrip(\"0123456789. \").strip()\n sub_questions.append(sub_question)\n # Publish sub-questions\n for idx, sub_q in enumerate(sub_questions):\n sub_question_msg = SubQuestion(\n sub_question=sub_q,\n passage=message.passage,\n sub_question_id=idx,\n )\n await self.publish_message(\n sub_question_msg,\n topic_id=TopicId(\"SubQuestion\", self.id.key)\n )\n # Send the total number of sub-questions to ComposerAgent\n decomp_info_msg = DecompositionInfo(total_sub_questions=len(sub_questions))\n await self.publish_message(\n decomp_info_msg,\n topic_id=TopicId(\"DecompositionInfo\", \"composer_agent\")\n )\n\n @default_subscription\n class SolverAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"Solver Agent\")\n self._model_client = model_client\n\n @message_handler\n async def handle_sub_question(self, message: SubQuestion, ctx: MessageContext) -> None:\n system_prompt = \"You are an assistant that answers questions based on the given passage. 
Provide concise answers.\"\n prompt = f\"Passage:\\n{message.passage}\\n\\nQuestion:\\n{message.sub_question}\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n answer = response.content.strip()\n sub_answer_msg = SubAnswer(\n sub_question_id=message.sub_question_id,\n answer=answer,\n )\n await self.publish_message(\n sub_answer_msg,\n topic_id=TopicId(\"SubAnswer\", \"composer_agent\")\n )\n\n @default_subscription\n class ComposerAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"Composer Agent\")\n self._model_client = model_client\n self._sub_answers = {}\n self._total_sub_questions = None\n\n @message_handler\n async def handle_decomposition_info(self, message: DecompositionInfo, ctx: MessageContext) -> None:\n self._total_sub_questions = message.total_sub_questions\n\n @message_handler\n async def handle_sub_answer(self, message: SubAnswer, ctx: MessageContext) -> None:\n self._sub_answers[message.sub_question_id] = message.answer\n if self._total_sub_questions is not None and len(self._sub_answers) >= self._total_sub_questions:\n # Compose the final answer\n # Sort sub-answers by their ids\n sorted_answers = [self._sub_answers[k] for k in sorted(self._sub_answers.keys())]\n sub_answers_text = \"\\n\".join(sorted_answers)\n system_prompt = \"You are an assistant that composes a final answer based on the answers to sub-questions.\"\n prompt = f\"Given the following answers to sub-questions, compose a final, concise answer to the main question.\\n\\nSub-Answers:\\n{sub_answers_text}\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n final_answer = response.content.strip()\n await self.publish_message(\n FinalAnswer(answer=final_answer),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(\n _runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext\n ) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n await DecomposerAgent.register(\n runtime,\n \"decomposer_agent\",\n lambda: DecomposerAgent(model_client=model_client)\n )\n\n await SolverAgent.register(\n runtime,\n \"solver_agent\",\n lambda: SolverAgent(model_client=model_client)\n )\n\n await ComposerAgent.register(\n runtime,\n \"composer_agent\",\n lambda: ComposerAgent(model_client=model_client)\n )\n\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n # Subscriptions\n await runtime.add_subscription(TypeSubscription(\"SubQuestion\", \"solver_agent\"))\n await runtime.add_subscription(TypeSubscription(\"SubAnswer\", \"composer_agent\"))\n await runtime.add_subscription(TypeSubscription(\"DecompositionInfo\", \"composer_agent\"))\n\n runtime.start()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n main_question_msg = MainQuestion(question=question, passage=passage)\n await runtime.publish_message(main_question_msg, topic_id=DefaultTopicId())\n\n await runtime.stop_when_idle()\n\n return (await 
queue.get()).answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (1.0%, 1.4%), Median: 2.2%", + "generation": 14 + }, + { + "thought": "**Insights:**\n\nThe Program-Aided Language Model (PAL) Agent introduces code generation and execution into the reasoning process, allowing for precise computations that can enhance performance on the DROP benchmark. By leveraging code execution, the agent can handle complex reasoning tasks more effectively.\n\n**Implementation:**\n\n- **PALAgent**:\n - Receives the passage and question in a `UserTask` message.\n - Generates Python code to solve the problem.\n - Sends a `CodeExecutionRequest` to a `CodeExecutionAgent`.\n- **CodeExecutionAgent**:\n - Manages the lifecycle of the code executor properly.\n - Receives `CodeExecutionRequest` messages containing code to execute.\n - Executes the code in a sandboxed environment and returns the result in a `CodeExecutionResult` message.\n- **PALAgent** (continued):\n - Receives the `CodeExecutionResult` and formulates the final answer.\n - Publishes the final answer in an `Answer` message to the result topic.\n- **ClosureAgent**:\n - Subscribes to the result topic and returns the final answer.\n- Properly handle initialization and shutdown of the code executor.\n- Ensure correct routing of messages between agents via topics and subscriptions.\n", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, AgentInstantiationContext, TopicId\n from autogen_core.components import RoutedAgent, message_handler, ClosureAgent, TypeSubscription\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage, AssistantMessage, LLMMessage\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n from autogen_core.components.tools import PythonCodeExecutionTool\n\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class UserTask:\n passage: str\n question: str\n\n @dataclass\n class CodeExecutionRequest:\n code: str\n\n @dataclass\n class CodeExecutionResult:\n output: str\n\n @dataclass\n class Answer:\n answer: str\n\n class CodeExecutionAgent(RoutedAgent):\n def __init__(self) -> None:\n super().__init__(\"Code Execution Agent\")\n self._code_executor = DockerCommandLineCodeExecutor()\n\n async def on_agent_started(self, ctx: AgentInstantiationContext) -> None:\n await self._code_executor.start()\n\n async def on_agent_stopped(self) -> None:\n await self._code_executor.stop()\n\n @message_handler\n async def handle_code_execution_request(self, message: CodeExecutionRequest, ctx: MessageContext) -> None:\n # Execute the code\n python_tool = PythonCodeExecutionTool(self._code_executor)\n try:\n result = await 
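# The decomposition entry above fans sub-questions out to a solver and joins
# the answers by count. Its numbered-list parsing is isolated below with
# canned model output (an assumption) so it runs standalone; note that
# lstrip over a digit set also eats digits that genuinely begin a question.
fake_decomposition = (
    "1. How many field goals were kicked in the first half?\n"
    "2. How many field goals were kicked in the second half?"
)

def parse_sub_questions(text: str) -> list[str]:
    subs = []
    for line in text.splitlines():
        if line.strip():
            subs.append(line.strip().lstrip("0123456789. ").strip())
    return subs

print(parse_sub_questions(fake_decomposition))
# ['How many field goals were kicked in the first half?',
#  'How many field goals were kicked in the second half?']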
python_tool.run_json({\"code\": message.code}, ctx.cancellation_token)\n output = python_tool.return_value_as_string(result)\n except Exception as e:\n output = f\"Error during code execution: {e}\"\n # Send back the result\n await self.publish_message(\n CodeExecutionResult(output=output),\n topic_id=TopicId(\"code_result\", ctx.sender.key)\n )\n\n class PALAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"PAL Agent\")\n self._model_client = model_client\n self._conversation: List[LLMMessage] = [\n SystemMessage(\n \"You are a helpful assistant that writes Python code to solve problems. Read the following passage and question, then generate Python code that solves the problem. The code should output the final answer. Do not include any explanations, only provide the code.\"\n )\n ]\n\n @message_handler\n async def handle_user_task(self, message: UserTask, ctx: MessageContext) -> None:\n user_input = f\"Passage:\\n{message.passage}\\n\\nQuestion:\\n{message.question}\\n\\nWrite Python code that solves the problem and outputs the answer.\"\n self._conversation.append(UserMessage(content=user_input, source=\"user\"))\n # Generate code\n response = await self._model_client.create(\n self._conversation, cancellation_token=ctx.cancellation_token\n )\n code = response.content.strip()\n self._conversation.append(AssistantMessage(content=code, source=self.metadata[\"type\"]))\n # Send code for execution\n await self.publish_message(\n CodeExecutionRequest(code=code),\n topic_id=TopicId(\"code_request\", self.id.key)\n )\n\n @message_handler\n async def handle_code_execution_result(self, message: CodeExecutionResult, ctx: MessageContext) -> None:\n # Append the code execution result\n self._conversation.append(\n UserMessage(content=f\"Output:\\n{message.output}\", source=\"code_execution\")\n )\n # Generate the final answer\n response = await self._model_client.create(\n self._conversation + [UserMessage(content=\"Based on the code execution output, provide the final answer to the question.\", source=\"user\")],\n cancellation_token=ctx.cancellation_token\n )\n final_answer = response.content.strip()\n # Publish the final answer\n await self.publish_message(\n Answer(answer=final_answer),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(\n _runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext\n ) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n # Register agents with factory functions\n await CodeExecutionAgent.register(runtime, \"code_execution_agent\", lambda: CodeExecutionAgent())\n await PALAgent.register(runtime, \"pal_agent\", lambda: PALAgent(model_client=model_client))\n\n # Add Subscriptions\n await runtime.add_subscription(TypeSubscription(\"user_task\", \"pal_agent\"))\n await runtime.add_subscription(TypeSubscription(\"code_request\", \"code_execution_agent\"))\n await runtime.add_subscription(TypeSubscription(\"code_result\", \"pal_agent\"))\n\n # ClosureAgent subscribes to the result topic\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n runtime.start()\n await 
runtime.publish_message(\n UserTask(passage=passage, question=question),\n topic_id=TopicId(\"user_task\", \"pal_agent\")\n )\n\n await runtime.stop_when_idle()\n\n # Return the final answer\n return (await queue.get()).answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (0.0%, 0.0%), Median: 0.2%", + "generation": 38 + }, + { + "thought": "**Insights:**\nTo improve performance on the DROP benchmark, employing critical thinking and self-questioning can help the agent identify and correct errors in its reasoning. By challenging its own assumptions and conclusions through Socratic questioning, the agent can refine its answer and increase accuracy.\n\n**Overall Idea:**\nImplement a 'Socratic Agent' that generates an initial answer, then critically evaluates it by generating questions that challenge the answer. The agent attempts to answer these self-generated questions, and if inconsistencies or errors are found, it revises the initial answer. This iterative process allows the agent to refine its reasoning and produce a more robust final answer.\n\n**Implementation:**\n- **SocraticAgent**:\n - Receives the passage and question.\n - Generates an initial answer using chain-of-thought reasoning.\n - Generates critical questions challenging its own answer.\n - Attempts to answer these questions.\n - If inconsistencies are found, revises the initial answer.\n - Publishes the final answer to the result topic.\n- **ClosureAgent**:\n - Subscribes to the result topic and collects the final answer.\n- Ensure proper agent registration, subscriptions, and message passing.\n- Avoid implementation mistakes outlined in the prompt, specifically:\n - Use `DefaultTopicId()` when publishing messages to agents decorated with `@default_subscription`.\n - Ensure the agent publishes the final answer to the correct topic.", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from autogen_core.base import AgentRuntime, AgentId, MessageContext, TopicId\n from autogen_core.components import (\n RoutedAgent,\n message_handler,\n default_subscription,\n ClosureAgent,\n TypeSubscription,\n DefaultTopicId,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n azure_deployment=model_client_kwargs[\"model\"],\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class PassageQuestion:\n passage: str\n question: str\n\n @dataclass\n class Answer:\n answer: str\n\n @default_subscription\n class SocraticAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"Socratic Agent\")\n self._model_client = model_client\n\n @message_handler\n async def handle_task(self, message: PassageQuestion, ctx: MessageContext) -> None:\n passage = message.passage\n question = 
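# The PAL entry above reduces to "generate code, execute it, read the output
# back". A bare-bones executor sketch follows; plain exec() is for
# illustration only -- the sample rightly routes real runs through
# DockerCommandLineCodeExecutor for sandboxing.
import contextlib
import io

def run_generated_code(code: str) -> str:
    buf = io.StringIO()
    try:
        with contextlib.redirect_stdout(buf):
            exec(code, {})  # fresh globals, but NOT a sandbox
    except Exception as e:
        return f"Error during code execution: {e}"
    return buf.getvalue().strip()

print(run_generated_code("print(7 * 6)"))  # -> 42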
message.question\n # Step 1: Generate initial answer using chain-of-thought reasoning\n initial_prompt = f\"\"\"You are a helpful assistant. Read the following passage and question, then provide a detailed answer using chain-of-thought reasoning.\\n\\nPassage:\\n{passage}\\n\\nQuestion:\\n{question}\\n\\nAnswer:\"\"\"\n response = await self._model_client.create(\n [UserMessage(content=initial_prompt, source=\"user\")],\n cancellation_token=ctx.cancellation_token,\n )\n initial_answer = response.content.strip()\n # Step 2: Generate critical questions challenging the answer\n critique_prompt = f\"\"\"You are a critical thinker. Analyze the following answer to the question and generate three critical questions that challenge the assumptions or conclusions of the answer.\\n\\nPassage:\\n{passage}\\n\\nQuestion:\\n{question}\\n\\nAnswer:\\n{initial_answer}\\n\\nPlease provide the critical questions.\"\"\"\n response = await self._model_client.create(\n [UserMessage(content=critique_prompt, source=\"user\")],\n cancellation_token=ctx.cancellation_token,\n )\n critical_questions = response.content.strip()\n # Step 3: Attempt to answer the critical questions\n answer_questions_prompt = f\"\"\"Using the passage, answer the following critical questions to verify the correctness of the initial answer.\\n\\nCritical Questions:\\n{critical_questions}\\n\\nAnswers:\"\"\"\n response = await self._model_client.create(\n [UserMessage(content=answer_questions_prompt, source=\"user\")],\n cancellation_token=ctx.cancellation_token,\n )\n answers_to_critical_questions = response.content.strip()\n # Step 4: Evaluate if inconsistencies exist\n evaluation_prompt = f\"\"\"Based on the answers to the critical questions, determine if there are any inconsistencies or errors in the initial answer. If so, provide a revised answer. 
If not, confirm that the initial answer is correct.\\n\\nInitial Answer:\\n{initial_answer}\\n\\nAnswers to Critical Questions:\\n{answers_to_critical_questions}\\n\\nFinal Answer:\"\"\"\n response = await self._model_client.create(\n [UserMessage(content=evaluation_prompt, source=\"user\")],\n cancellation_token=ctx.cancellation_token,\n )\n final_answer = response.content.strip()\n # Step 5: Publish the final answer\n await self.publish_message(\n Answer(answer=final_answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n await SocraticAgent.register(\n runtime,\n \"socratic_agent\",\n lambda: SocraticAgent(model_client=model_client),\n )\n\n # ClosureAgent subscribes to the result topic\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic],\n )\n\n runtime.start()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n await runtime.publish_message(\n PassageQuestion(passage=passage, question=question),\n topic_id=DefaultTopicId(),\n )\n\n await runtime.stop_when_idle()\n\n return (await queue.get()).answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (0.1%, 0.2%), Median: 0.4%", + "generation": 42 + }, + { + "thought": "**Insights:**\n\nBy allowing the agent to generate and answer its own sub-questions, we can encourage deeper reasoning and better handling of complex tasks. The 'Self-Ask' prompting strategy enables the agent to break down the main question into manageable parts, leading to a more accurate final answer.\n\n**Overall Idea:**\n\nDevelop a 'Self-Questioning Agent' that, upon receiving the passage and main question, enters an iterative loop where it generates a relevant sub-question, answers it, and uses this information to generate the next sub-question. This process continues until the agent determines it has enough information to answer the main question. 
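# The Socratic entry above is four sequential prompts: draft, critique,
# check, revise. The same chain without the agent runtime; `complete` is a
# hypothetical model call (an assumption).
def complete(prompt: str) -> str:
    raise NotImplementedError("replace with a real model client call")

def socratic_answer(passage: str, question: str) -> str:
    draft = complete(
        f"Answer with chain-of-thought reasoning.\nPassage:\n{passage}\nQuestion:\n{question}"
    )
    critiques = complete(
        f"Generate three critical questions challenging this answer:\n{draft}"
    )
    checks = complete(
        f"Using the passage, answer these critical questions:\n{critiques}\nPassage:\n{passage}"
    )
    return complete(
        "If the checks reveal inconsistencies, revise the answer; otherwise confirm it.\n"
        f"Initial answer:\n{draft}\nChecks:\n{checks}"
    )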
This approach leverages the agent's ability to self-reflect and decompose the problem.\n\n**Implementation:**\n\n- **SelfQuestioningAgent**:\n - Receives a `PassageQuestion` message containing the passage and main question.\n - Initializes a reasoning loop with a maximum number of iterations to prevent infinite loops.\n - In each iteration:\n - Generates a sub-question based on the passage, main question, and previous sub-questions and answers.\n - Answers the sub-question using the passage and the reasoning history.\n - Adds the sub-question and its answer to the reasoning history.\n - Checks if it can now answer the main question.\n - If it determines that it can answer the main question, generates the final answer and publishes it to the result topic.\n- **ClosureAgent**:\n - Subscribes to the result topic to collect the final answer.\n\n- Ensure proper handling of message passing, iterations, and termination conditions.", + "name": "Self-Questioning Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId\n from autogen_core.components import RoutedAgent, message_handler, ClosureAgent, TypeSubscription, type_subscription\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n # Create the model client using the model_client_kwargs\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class PassageQuestion:\n passage: str\n question: str\n\n @dataclass\n class Answer:\n content: str\n\n @type_subscription(topic_type=\"task\")\n class SelfQuestioningAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, max_iterations: int = 5) -> None:\n super().__init__(\"Self-Questioning Agent\")\n self._model_client = model_client\n self._max_iterations = max_iterations\n\n @message_handler\n async def handle_task(self, message: PassageQuestion, ctx: MessageContext) -> None:\n passage = message.passage\n main_question = message.question\n reasoning_history = []\n iterations = 0\n while iterations < self._max_iterations:\n # Generate a sub-question\n system_prompt = \"You are an assistant that breaks down complex questions into sub-questions and answers them.\"\n history_str = \"\\n\".join([f\"Sub-Question: {q}\\nAnswer: {a}\" for q, a in reasoning_history])\n prompt = f\"Passage:\\n{passage}\\n\\nMain Question:\\n{main_question}\\n\\n{history_str}\\n\\nBased on the above, generate a relevant sub-question that helps answer the main question. 
If you have enough information to answer the main question, reply only with \\\"FINAL ANSWER\\\".\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n sub_question = response.content.strip()\n if \"FINAL ANSWER\" in sub_question.upper():\n # Generate final answer\n answer_prompt = f\"Passage:\\n{passage}\\n\\nMain Question:\\n{main_question}\\n\\n{history_str}\\n\\nProvide a concise and accurate answer to the main question based on the above information.\"\n messages = [SystemMessage(system_prompt), UserMessage(answer_prompt, source=\"user\")]\n final_response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n final_answer = final_response.content.strip()\n await self.publish_message(\n Answer(content=final_answer),\n topic_id=TopicId(\"result\", self.id.type)\n )\n return\n # Answer the sub-question\n answer_prompt = f\"Passage:\\n{passage}\\n\\nSub-Question:\\n{sub_question}\\n\\nProvide a concise and accurate answer to the sub-question based on the passage.\"\n messages = [SystemMessage(system_prompt), UserMessage(answer_prompt, source=\"user\")]\n sub_answer_response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n sub_answer = sub_answer_response.content.strip()\n reasoning_history.append((sub_question, sub_answer))\n iterations +=1\n # If max iterations reached without final answer\n await self.publish_message(\n Answer(content=\"Failed to find an answer within max iterations.\"),\n topic_id=TopicId(\"result\", self.id.type)\n )\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(\n _runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext\n ) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register the SelfQuestioningAgent\n await SelfQuestioningAgent.register(\n runtime,\n \"self_questioning_agent\",\n lambda: SelfQuestioningAgent(model_client=model_client)\n )\n\n # ClosureAgent subscribes to the result topic\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic],\n )\n\n runtime.start()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n # Publish the PassageQuestion message to the 'task' topic\n await runtime.publish_message(\n PassageQuestion(passage=passage, question=question),\n topic_id=TopicId(\"task\", \"default\"),\n )\n\n await runtime.stop_when_idle()\n\n # Return the final answer\n return (await queue.get()).content\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (0.2%, 0.3%), Median: 1.1%", + "generation": 49 + }, + { + "thought": "**Insights:**\n\nThe DROP benchmark involves complex numerical reasoning and calculations based on information in the passage. 
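# The self-ask loop above, with its FINAL ANSWER sentinel and iteration cap,
# as plain control flow. `complete` is a hypothetical model call (an
# assumption); the fallback string matches the agent's give-up message.
def complete(prompt: str) -> str:
    raise NotImplementedError("replace with a real model client call")

def self_ask(passage: str, question: str, max_iterations: int = 5) -> str:
    history: list[tuple[str, str]] = []
    for _ in range(max_iterations):
        trace = "\n".join(f"Sub-Question: {q}\nAnswer: {a}" for q, a in history)
        sub_q = complete(
            f"Passage:\n{passage}\nMain Question:\n{question}\n{trace}\n"
            'Generate the next helpful sub-question, or reply "FINAL ANSWER" if done.'
        ).strip()
        if "FINAL ANSWER" in sub_q.upper():
            return complete(
                f"Passage:\n{passage}\nMain Question:\n{question}\n{trace}\n"
                "Answer the main question concisely."
            )
        history.append((sub_q, complete(f"Passage:\n{passage}\nSub-Question:\n{sub_q}")))
    return "Failed to find an answer within max iterations."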
By explicitly extracting relevant numerical data and forming mathematical equations, the agent can perform precise computations to solve the problems.\n\n**Overall Idea:**\n\nDevelop an **Equations Extraction Agent** system that consists of an `EquationExtractorAgent`, which parses the passage and question to extract relevant numerical data and formulate equations; a `SolverAgent`, which solves these equations to find the answer; and a `CoordinatorAgent` to manage the workflow. This approach allows the agent to handle complex numerical reasoning tasks more effectively, leading to improved performance on the DROP benchmark.\n\n**Implementation:**\n\n1. **Define Message Types**: `PassageQuestion`, `Equations`, `Solution`, and `AnswerMessage`.\n2. **Implement `EquationExtractorAgent`**:\n - Receives `PassageQuestion` messages.\n - Uses the LLM to extract numerical data and formulate equations.\n - Publishes an `Equations` message to the `SolverAgent`.\n3. **Implement `SolverAgent`**:\n - Receives `Equations` messages.\n - Solves the equations and provides the solution.\n - Publishes a `Solution` message to the `CoordinatorAgent`.\n4. **Implement `CoordinatorAgent`**:\n - Coordinates the process by sending the `PassageQuestion` to `EquationExtractorAgent` and collecting the `Solution`.\n - Forms the final answer and publishes an `AnswerMessage` to the result topic.\n5. **Implement `ClosureAgent`**:\n - Subscribes to the result topic to collect the final answer.\n6. **Set Up Subscriptions**:\n - Use `TypeSubscription` to subscribe agents to the relevant topics.\n7. **Main Function**:\n - Extracts passage and question from the task.\n - Registers agents and subscriptions.\n - Starts the runtime and initiates the process.\n - Waits for the final answer and returns it.\n8. 
**Avoid Common Mistakes**:\n - Ensure message passing, subscriptions, and agent registrations are correctly handled.\n - Use the model client correctly with `model_client_kwargs`.\n - Make sure the `ClosureAgent` subscribes to the correct topic.", + "name": "Equations Extraction Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n from dataclasses import dataclass\n from typing import Any, Dict\n from autogen_core.base import AgentId, MessageContext, AgentRuntime, TopicId\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components import RoutedAgent, message_handler, ClosureAgent, TypeSubscription\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n # Create the model client\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class PassageQuestion:\n passage: str\n question: str\n\n @dataclass\n class Equations:\n equations: str\n\n @dataclass\n class Solution:\n answer: str\n\n @dataclass\n class AnswerMessage:\n answer: str\n\n class EquationExtractorAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Equation Extractor Agent\")\n self._model_client = model_client\n\n @message_handler\n async def handle_passage_question(self, message: PassageQuestion, ctx: MessageContext) -> None:\n # Use LLM to extract equations from the passage and question\n system_prompt = \"You are an assistant that extracts relevant numerical data and formulates mathematical equations to solve the question.\"\n prompt = f\"Passage:\\n{message.passage}\\n\\nQuestion:\\n{message.question}\\n\\nExtract the relevant numerical data and formulate the equations needed to solve the question. Provide the equations in Python-executable format. 
Assume all variables are defined.\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n equations = response.content.strip()\n # Send the equations to SolverAgent\n equations_message = Equations(equations=equations)\n await self.publish_message(equations_message, topic_id=TopicId(\"equations\", \"solver_agent\"))\n\n class SolverAgent(RoutedAgent):\n def __init__(self):\n super().__init__(\"Solver Agent\")\n\n @message_handler\n async def handle_equations(self, message: Equations, ctx: MessageContext) -> None:\n # Solve the equations\n equations = message.equations\n try:\n local_vars = {}\n exec(equations, {}, local_vars)\n # Assume the final answer is stored in a variable named 'answer'\n answer = str(local_vars.get('answer', ''))\n except Exception as e:\n answer = f\"Error solving equations: {e}\"\n # Send the solution to CoordinatorAgent\n solution_message = Solution(answer=answer)\n await self.publish_message(solution_message, topic_id=TopicId(\"solution\", \"coordinator_agent\"))\n\n class CoordinatorAgent(RoutedAgent):\n def __init__(self):\n super().__init__(\"Coordinator Agent\")\n self._solution_received = False\n\n @message_handler\n async def handle_passage_question(self, message: PassageQuestion, ctx: MessageContext) -> None:\n # Forward the passage and question to EquationExtractorAgent\n await self.publish_message(message, topic_id=TopicId(\"passage_question\", \"equation_extractor_agent\"))\n\n @message_handler\n async def handle_solution(self, message: Solution, ctx: MessageContext) -> None:\n # Publish the final answer\n if not self._solution_received:\n self._solution_received = True\n final_answer = message.answer\n await self.publish_message(AnswerMessage(answer=final_answer), topic_id=TopicId(\"result\", \"output_result\"))\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: AnswerMessage, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n if len(parts) < 2:\n raise ValueError(\"Invalid task format. 
Expected 'Passage:\\n...\\nQuestion:\\n...'\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n # Register agents\n await EquationExtractorAgent.register(runtime, \"equation_extractor_agent\", lambda: EquationExtractorAgent(model_client=model_client))\n await SolverAgent.register(runtime, \"solver_agent\", lambda: SolverAgent())\n await CoordinatorAgent.register(runtime, \"coordinator_agent\", lambda: CoordinatorAgent())\n\n # ClosureAgent subscribes to the result topic\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n # Subscriptions\n await runtime.add_subscription(TypeSubscription(\"passage_question\", \"equation_extractor_agent\"))\n await runtime.add_subscription(TypeSubscription(\"equations\", \"solver_agent\"))\n await runtime.add_subscription(TypeSubscription(\"solution\", \"coordinator_agent\"))\n\n runtime.start()\n\n # Send the PassageQuestion message to the CoordinatorAgent\n passage_question = PassageQuestion(passage=passage, question=question)\n await runtime.send_message(passage_question, AgentId(\"coordinator_agent\", \"default\"))\n\n await runtime.stop_when_idle()\n\n # Return the final answer\n final_answer = (await queue.get()).answer\n return final_answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (0.0%, 0.0%), Median: 0.7%", + "generation": 51 + }, + { + "thought": "**Insights:**\n\nAnalogical reasoning allows the agent to solve complex problems by relating them to similar known scenarios. By finding an analogy, the agent can transfer insights from a familiar situation to the current problem, potentially leading to more accurate and creative solutions.\n\n**Overall Idea:**\n\nDevelop an 'Analogical Reasoning Agent' that, upon receiving the passage and question, identifies a similar or analogous situation (which could be from general knowledge or the passage itself). The agent then uses this analogy to reason through the problem and generate an answer. 
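# The SolverAgent above exec()s model-written equations and reads a variable
# named `answer`. The same convention in isolation; exec of untrusted text is
# shown purely for illustration and should be sandboxed in real use.
def solve_equations(equations: str) -> str:
    local_vars: dict = {}
    try:
        exec(equations, {}, local_vars)  # untrusted input: demo only
        return str(local_vars.get("answer", ""))
    except Exception as e:
        return f"Error solving equations: {e}"

print(solve_equations("touchdowns = 3\nfield_goals = 2\nanswer = 7 * touchdowns + 3 * field_goals"))
# -> 27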
This approach taps into the LLM's capability to draw parallels and apply insights from one context to another.\n\n**Implementation:**\n\n- **AnalogicalReasoningAgent**:\n - Receives the `PassageQuestion` containing the passage and question.\n - Uses the LLM to find an analogous situation related to the question.\n - Applies the analogy to the current problem to reason out the answer.\n - Publishes the final answer to the result topic.\n- **ClosureAgent**:\n - Subscribes to the result topic and collects the final answer.\n- Ensure correct message passing, proper subscriptions, and that the `ClosureAgent` subscribes to the correct topic.\n- Avoid implementation mistakes, and follow the best practices for agent registration and message handling.", + "name": "Analogical Reasoning Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from autogen_core.base import AgentId, MessageContext, AgentRuntime, TopicId\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components import RoutedAgent, message_handler, default_subscription, ClosureAgent, TypeSubscription, DefaultTopicId\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class PassageQuestion:\n passage: str\n question: str\n\n @dataclass\n class Answer:\n content: str\n\n @default_subscription\n class AnalogicalReasoningAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Analogical Reasoning Agent\")\n self._model_client = model_client\n\n @message_handler\n async def handle_passage_question(self, message: PassageQuestion, ctx: MessageContext) -> None:\n system_prompt = (\"You are an assistant that answers questions by finding analogous situations. \"\n \"Read the passage and question, identify a relevant analogy, and use it to answer the question.\")\n prompt = f\"Passage:\\n{message.passage}\\n\\nQuestion:\\n{message.question}\"\n messages = [SystemMessage(system_prompt), UserMessage(content=prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n answer = response.content.strip()\n # Publish the final answer\n await self.publish_message(Answer(content=answer), topic_id=TopicId(\"result\", self.id.type))\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n if len(parts) < 2:\n raise ValueError(\"Invalid task format. 
Expected 'Passage:\\n...\\nQuestion:\\n...'\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n await AnalogicalReasoningAgent.register(runtime, \"analogical_reasoning_agent\", lambda: AnalogicalReasoningAgent(model_client=model_client))\n\n # ClosureAgent\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n\n # Send PassageQuestion to AnalogicalReasoningAgent\n passage_question = PassageQuestion(passage=passage, question=question)\n await runtime.publish_message(passage_question, topic_id=DefaultTopicId())\n\n await runtime.stop_when_idle()\n\n # Return the final answer\n final_answer = (await queue.get()).content\n return final_answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (0.6%, 0.9%), Median: 1.5%", + "generation": 53 + }, + { + "thought": "**Insights:**\n\nComplex passages and questions in the DROP benchmark can overwhelm the agent's reasoning capabilities. By abstracting the passage and question, the agent can focus on the main ideas before dealing with intricate details. An 'Iterative Abstraction-Refinement Agent' first generates an abstract summary of the passage and question to understand the core components. It then iteratively refines its understanding by incorporating more details, leading to a more precise and accurate answer.\n\n**Overall Idea:**\n\nDevelop an agent that performs iterative abstraction and refinement. The agent begins by creating high-level summaries of the passage and question. With this abstract understanding, it attempts to answer the question. If the initial answer lacks sufficient detail or accuracy, the agent refines the summaries by adding more specifics from the passage. This process repeats until the agent produces a satisfactory answer. 
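# Every archive entry re-implements the same 'Passage:' / 'Question:' split
# on the DROP task string. Factored out below, with the ValueError guard the
# later entries add; purely a convenience sketch, not part of the archive.
def split_task(task: str) -> tuple[str, str]:
    parts = task.strip().split("Question:")
    if len(parts) < 2:
        raise ValueError("Invalid task format. Expected 'Passage:\\n...\\nQuestion:\\n...'")
    passage = parts[0].replace("Passage:", "").strip()
    question = parts[1].strip()
    return passage, question

p, q = split_task("Passage:\nThe Bears scored 24 points.\nQuestion:\nHow many points were scored?")
assert p == "The Bears scored 24 points." and q == "How many points were scored?"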
This approach helps the agent manage complex information by focusing on essential elements first and gradually incorporating necessary details.\n\n**Implementation:**\n\n- **Define Message Types**: `PassageQuestion`, `AbstractSummary`, `RefinedSummary`, `AnswerMessage`.\n- **Implement `AbstractionRefinementAgent`**:\n - Receives `PassageQuestion` messages containing the passage and question.\n - Generates an abstract summary of the passage.\n - Attempts to answer the question based on the abstract summary.\n - If the answer is inadequate, the agent refines the summary by adding more details from the passage.\n - Repeats the process until a satisfactory answer is produced or a maximum number of iterations is reached.\n - Publishes the final answer to the result topic.\n- **Implement `ClosureAgent`**:\n - Subscribes to the result topic to collect the final answer.\n- **Ensure Proper Message Passing and Agent Registration**:\n - Use appropriate topics and subscriptions for message routing.\n - Follow best practices for agent registration and message handling.\n- **Avoid Common Implementation Mistakes**:\n - Ensure correct function signatures and return types.\n - Properly handle asynchronous operations and message publishing.\n - Use `DefaultTopicId` and `default_subscription` where appropriate.", + "name": "Iterative Abstraction-Refinement Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import AgentId, MessageContext, AgentRuntime, TopicId\n from autogen_core.components import (\n RoutedAgent,\n default_subscription,\n message_handler,\n ClosureAgent,\n DefaultTopicId,\n TypeSubscription,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components.models import (\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n AssistantMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n # Create the model client using the model_client_kwargs\n model_client = AzureOpenAIChatCompletionClient(\n azure_deployment=model_client_kwargs[\"model\"],\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class PassageQuestion:\n passage: str\n question: str\n\n @dataclass\n class AnswerMessage:\n answer: str\n\n @default_subscription\n class AbstractionRefinementAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, max_iterations: int = 3):\n super().__init__(\"Abstraction Refinement Agent\")\n self._model_client = model_client\n self._max_iterations = max_iterations\n\n @message_handler\n async def handle_passage_question(self, message: PassageQuestion, ctx: MessageContext) -> None:\n passage = message.passage\n question = message.question\n iterations = 0\n abstract_summary = \"\"\n answer = \"\"\n while iterations < self._max_iterations:\n # Generate abstract summary\n if iterations == 0:\n system_prompt = \"You are an assistant that reads a passage and generates an abstract summary focusing on the main ideas. 
Keep it concise.\"\n prompt = f\"Passage:\\n{passage}\\n\\nAbstract Summary:\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n abstract_summary = response.content.strip()\n else:\n # Refine the summary by adding more details\n system_prompt = \"You are an assistant that refines an abstract summary by adding relevant details to better answer the question.\"\n prompt = f\"Passage:\\n{passage}\\n\\nCurrent Summary:\\n{abstract_summary}\\n\\nQuestion:\\n{question}\\n\\nRefined Summary:\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n abstract_summary = response.content.strip()\n # Attempt to answer the question\n system_prompt = \"You are an assistant that answers questions based on a summary of a passage.\"\n prompt = f\"Summary:\\n{abstract_summary}\\n\\nQuestion:\\n{question}\\n\\nAnswer:\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n answer_response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n answer = answer_response.content.strip()\n # Check if the answer is satisfactory\n verification_prompt = f\"Based on the summary and the answer, is the answer sufficient and correct? Reply YES if it is, or NO if it needs more details.\"\n messages = [SystemMessage(\"You are an assistant that verifies the correctness of an answer based on a summary.\"), UserMessage(verification_prompt, source=\"user\")]\n verification_response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n verification = verification_response.content.strip().lower()\n if \"yes\" in verification:\n # Publish the final answer\n await self.publish_message(\n AnswerMessage(answer=answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n return\n else:\n # Increment iterations and refine further\n iterations += 1\n # If maximum iterations reached, publish the latest answer\n await self.publish_message(\n AnswerMessage(answer=answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n\n async def main():\n queue = asyncio.Queue[AnswerMessage]()\n\n async def output_result(\n _runtime: AgentRuntime, id: AgentId, message: AnswerMessage, ctx: MessageContext\n ) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await AbstractionRefinementAgent.register(\n runtime,\n \"abstraction_refinement_agent\",\n lambda: AbstractionRefinementAgent(model_client=model_client),\n )\n\n # ClosureAgent subscribes to the result topic\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic],\n )\n\n runtime.start()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n if len(parts) < 2:\n raise ValueError(\"Invalid task format. 
Expected 'Passage:\\n...\\nQuestion:\\n...'\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n passage_question = PassageQuestion(passage=passage, question=question)\n await runtime.publish_message(passage_question, topic_id=DefaultTopicId())\n\n await runtime.stop_when_idle()\n\n # Return the final answer\n final_answer = (await queue.get()).answer\n return final_answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (0.1%, 0.2%), Median: 0.6%", + "generation": 59 + }, + { + "thought": "**Insights:**\n\nTo improve performance on the DROP benchmark, utilizing hypothetical reasoning can help the agent explore different possible answers and reason more deeply about the question. By generating and evaluating multiple hypotheses, the agent can consider various interpretations and select the most plausible one based on the passage.\n\n**Overall Idea:**\n\nDevelop a 'Hypothetical Reasoning Agent' system consisting of a `HypothesisGeneratorAgent` that generates possible hypotheses, an `EvaluatorAgent` that evaluates these hypotheses against the passage, and a `CoordinatorAgent` that manages the workflow and selects the best hypothesis to form the final answer. This approach encourages the agent to think critically and explore different reasoning paths, potentially improving accuracy on complex tasks.\n\n**Implementation:**\n\n1. **Define Message Types**: `PassageQuestion`, `HypothesesGenerated`, `Hypothesis`, `Evaluation`, `AnswerMessage`.\n2. **Implement `HypothesisGeneratorAgent`**:\n - Receives `PassageQuestion` messages.\n - Uses the LLM to generate multiple hypotheses answering the question.\n - Sends a `HypothesesGenerated` message to the `CoordinatorAgent`.\n3. **Implement `EvaluatorAgent`**:\n - Receives `Hypothesis` messages containing the hypothesis and passage.\n - Evaluates each hypothesis against the passage, assigning a plausibility score.\n - Sends back `Evaluation` messages to the `CoordinatorAgent`.\n4. **Implement `CoordinatorAgent`**:\n - Receives `HypothesesGenerated` and sends `Hypothesis` messages to `EvaluatorAgent`.\n - Collects all `Evaluation` messages.\n - Selects the hypothesis with the highest score.\n - Generates the final answer based on this hypothesis.\n - Publishes the `AnswerMessage` to the result topic.\n5. **Implement `ClosureAgent`**:\n - Subscribes to the result topic to collect the final answer.\n6. **Set Up Subscriptions**:\n - Use `TypeSubscription` to route messages appropriately.\n7. **Main Function**:\n - Extracts passage and question from the task.\n - Registers agents and subscriptions.\n - Starts the runtime and initiates the process.\n - Waits for the final answer and returns it.\n8. 
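# The abstraction-refinement entry above as plain control flow: summarize,
# answer, verify, refine, capped by max_iterations. `complete` is a
# hypothetical model call (an assumption); the YES/NO check mirrors the
# agent's verification step.
def complete(prompt: str) -> str:
    raise NotImplementedError("replace with a real model client call")

def abstract_refine(passage: str, question: str, max_iterations: int = 3) -> str:
    summary = complete(f"Summarize the main ideas of this passage concisely:\n{passage}")
    answer = ""
    for _ in range(max_iterations):
        answer = complete(f"Summary:\n{summary}\nQuestion:\n{question}\nAnswer:")
        verdict = complete(
            f"Is this answer sufficient and correct? Reply YES or NO.\nAnswer:\n{answer}"
        )
        if "yes" in verdict.lower():
            break
        summary = complete(
            "Refine the summary with details relevant to the question.\n"
            f"Passage:\n{passage}\nCurrent summary:\n{summary}\nQuestion:\n{question}"
        )
    return answer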
**Ensure Correct Implementation**:\n - Avoid common mistakes in agent registration, message passing, and topic subscriptions.\n - Properly handle agent IDs and topics.\n - Make sure the final answer is published to a topic that the `ClosureAgent` subscribes to.", + "name": "Hypothetical Reasoning Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentId, MessageContext, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, message_handler, type_subscription, ClosureAgent, TypeSubscription\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n # Create the model client using the model_client_kwargs\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class PassageQuestion:\n passage: str\n question: str\n\n @dataclass\n class HypothesesGenerated:\n hypotheses: List[str]\n passage: str\n question: str\n\n @dataclass\n class Hypothesis:\n hypothesis: str\n passage: str\n\n @dataclass\n class Evaluation:\n hypothesis: str\n score: float\n\n @dataclass\n class AnswerMessage:\n answer: str\n\n @type_subscription(topic_type=\"passage_question\")\n class HypothesisGeneratorAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Hypothesis Generator Agent\")\n self._model_client = model_client\n\n @message_handler\n async def handle_passage_question(self, message: PassageQuestion, ctx: MessageContext) -> None:\n system_prompt = \"You are an assistant that generates multiple plausible hypotheses to answer the question based on the passage.\"\n prompt = f\"\"\"\nPassage:\n{message.passage}\n\nQuestion:\n{message.question}\n\nGenerate a list of possible hypotheses that could answer the question.\n\"\"\"\n messages = [SystemMessage(system_prompt), UserMessage(content=prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n hypotheses_text = response.content.strip()\n # Parse hypotheses from the response\n hypotheses = []\n lines = hypotheses_text.splitlines()\n for line in lines:\n if line.strip():\n # Remove numbering if present\n hypothesis = line.strip().lstrip(\"0123456789. 
\").strip()\n hypotheses.append(hypothesis)\n # Send HypothesesGenerated message to CoordinatorAgent\n await self.publish_message(\n HypothesesGenerated(hypotheses=hypotheses, passage=message.passage, question=message.question),\n topic_id=TopicId(\"hypotheses_generated\", \"default\"),\n )\n\n @type_subscription(topic_type=\"hypothesis\")\n class EvaluatorAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Evaluator Agent\")\n self._model_client = model_client\n\n @message_handler\n async def handle_hypothesis(self, message: Hypothesis, ctx: MessageContext) -> None:\n system_prompt = \"You are an assistant that evaluates hypotheses based on the given passage. Assign a score between 0 and 1 indicating the plausibility of the hypothesis, where 1 is highly plausible and 0 is implausible.\"\n prompt = f\"\"\"\nPassage:\n{message.passage}\n\nHypothesis:\n{message.hypothesis}\n\nEvaluate the above hypothesis and assign a plausibility score between 0 and 1.\nProvide only the score.\n\"\"\"\n messages = [SystemMessage(system_prompt), UserMessage(content=prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n score_text = response.content.strip()\n try:\n score = float(score_text)\n except ValueError:\n score = 0.0\n await self.publish_message(\n Evaluation(hypothesis=message.hypothesis, score=score),\n topic_id=TopicId(\"evaluation\", \"default\"),\n )\n\n @type_subscription(topic_type=\"hypotheses_generated\")\n @type_subscription(topic_type=\"evaluation\")\n class CoordinatorAgent(RoutedAgent):\n def __init__(self):\n super().__init__(\"Coordinator Agent\")\n self.evaluations = []\n self.expected_evaluations = None\n\n @message_handler\n async def handle_hypotheses_generated(self, message: HypothesesGenerated, ctx: MessageContext) -> None:\n self.evaluations = []\n self.expected_evaluations = len(message.hypotheses)\n for hypo in message.hypotheses:\n await self.publish_message(\n Hypothesis(hypothesis=hypo, passage=message.passage),\n topic_id=TopicId(\"hypothesis\", \"default\"),\n )\n\n @message_handler\n async def handle_evaluation(self, message: Evaluation, ctx: MessageContext) -> None:\n self.evaluations.append(message)\n if self.expected_evaluations is not None and len(self.evaluations) >= self.expected_evaluations:\n # Select the hypothesis with the highest score\n best_eval = max(self.evaluations, key=lambda x: x.score)\n # Generate final answer\n final_answer = best_eval.hypothesis\n await self.publish_message(\n AnswerMessage(answer=final_answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n\n @message_handler\n async def handle_passage_question(self, message: PassageQuestion, ctx: MessageContext) -> None:\n # Forward the message to HypothesisGeneratorAgent\n await self.publish_message(message, topic_id=TopicId(\"passage_question\", \"default\"))\n\n async def main():\n queue = asyncio.Queue[AnswerMessage]()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: AnswerMessage, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n if len(parts) < 2:\n raise ValueError(\"Invalid task format. 
Expected 'Passage:\\n...\\nQuestion:\\n...'\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n # Register agents with agent key 'default'\n await HypothesisGeneratorAgent.register(runtime, \"hypothesis_generator_agent\", lambda: HypothesisGeneratorAgent(model_client=model_client))\n await EvaluatorAgent.register(runtime, \"evaluator_agent\", lambda: EvaluatorAgent(model_client=model_client))\n await CoordinatorAgent.register(runtime, \"coordinator_agent\", lambda: CoordinatorAgent())\n\n # Register ClosureAgent\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic],\n )\n\n runtime.start()\n\n # Send PassageQuestion to CoordinatorAgent\n passage_question = PassageQuestion(passage=passage, question=question)\n await runtime.publish_message(passage_question, topic_id=TopicId(\"passage_question\", \"default\"))\n\n await runtime.stop_when_idle()\n\n # Return the final answer\n final_answer = (await queue.get()).answer\n return final_answer\n\n return asyncio.run(main())",
+    "fitness": "95% Bootstrap Confidence Interval: (0.6%, 1.0%), Median: 2.0%",
+    "generation": 65
+  },
+  {
+    "thought": "**Insights:**\n\nCombining multiple reasoning strategies can enhance the agent's performance on complex tasks like the DROP benchmark. By generating diverse reasoning paths using different prompting techniques and then aggregating their outputs, the agent can leverage the strengths of each method.\n\n**Overall Idea:**\n\nDevelop an 'Ensemble Reasoning Agent' system where multiple solver agents employ various prompting strategies (e.g., Chain-of-Thought, ReAct, Self-Consistency) to generate answers. A Coordinator agent collects these answers and aggregates them, possibly using voting or confidence measures, to produce a final answer. This approach harnesses the diversity of reasoning strategies to improve accuracy.\n\n**Implementation:**\n\n1. **Define Message Types**: `TaskMessage`, `AnswerMessage`.\n2. **Implement Multiple Solver Agents**:\n - Each solver uses a different reasoning strategy.\n - Receives `TaskMessage` and returns `AnswerMessage`.\n3. **Implement a `CoordinatorAgent`**:\n - Sends the `TaskMessage` to all solvers and awaits their responses.\n - Aggregates the answers to produce a final answer.\n - Publishes the `AnswerMessage` to the result topic.\n4. **Implement `ClosureAgent`**:\n - Subscribes to the result topic to collect the final answer.\n5. **Set Up Subscriptions**:\n - Ensure proper message routing between agents.\n6. **Main Function**:\n - Registers agents and starts the runtime.\n - Initiates the process by sending the `TaskMessage` to the `CoordinatorAgent`.\n - Waits for the final answer.\n7. **Ensure Correct Implementation:**\n - Avoid mistakes in message passing and agent registration.\n - Ensure the final answer is published to the topic that `ClosureAgent` subscribes to.",
+    "name": "Ensemble Reasoning Agent",
+    "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import AgentId, MessageContext, AgentRuntime, TopicId\n from autogen_core.components import RoutedAgent, message_handler, ClosureAgent, TypeSubscription\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n # Create the model client\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class TaskMessage:\n content: str\n\n @dataclass\n class AnswerMessage:\n answer: str\n solver_type: str\n\n class SolverAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, reasoning_style: str):\n super().__init__(description=f\"Solver Agent using {reasoning_style}\")\n self._model_client = model_client\n self._reasoning_style = reasoning_style\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> AnswerMessage:\n system_prompt = \"You are a helpful assistant.\"\n if self._reasoning_style == \"chain_of_thought\":\n instruction = \"Please think step by step to solve the problem.\"\n elif self._reasoning_style == \"react\":\n instruction = \"Use reasoning and actions to solve the problem. 
Think step by step.\"\n elif self._reasoning_style == \"self_consistency\":\n instruction = \"Provide your answer, ensuring consistency.\"\n else:\n instruction = \"Provide a concise answer.\"\n prompt = f\"{message.content}\\n{instruction}\"\n messages = [SystemMessage(system_prompt), UserMessage(prompt, source=\"user\")]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n answer = response.content.strip()\n return AnswerMessage(answer=answer, solver_type=self._reasoning_style)\n\n class CoordinatorAgent(RoutedAgent):\n def __init__(self, num_solvers: int):\n super().__init__(description=\"Coordinator Agent\")\n self._num_solvers = num_solvers\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n # Send task to all solvers and collect answers\n solver_types = [\"chain_of_thought\", \"react\", \"self_consistency\"]\n tasks = []\n for solver_type in solver_types:\n solver_agent_id = AgentId(solver_type, self.id.key)\n tasks.append(self.send_message(message, solver_agent_id))\n responses = await asyncio.gather(*tasks)\n # Aggregate answers\n answers = [response.answer for response in responses]\n # For simplicity, select the most common answer\n final_answer = max(set(answers), key=answers.count)\n # Publish final answer\n await self.publish_message(\n AnswerMessage(answer=final_answer, solver_type=\"final\"),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n\n async def main():\n queue = asyncio.Queue[AnswerMessage]()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: AnswerMessage, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n num_solvers = 3\n solver_types = [\"chain_of_thought\", \"react\", \"self_consistency\"]\n\n # Register SolverAgents\n for solver_type in solver_types:\n await SolverAgent.register(\n runtime,\n solver_type,\n lambda solver_type=solver_type: SolverAgent(\n model_client=model_client,\n reasoning_style=solver_type\n )\n )\n\n # Register CoordinatorAgent\n await CoordinatorAgent.register(\n runtime,\n \"coordinator_agent\",\n lambda: CoordinatorAgent(num_solvers=num_solvers)\n )\n\n # Register ClosureAgent\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n runtime.start()\n\n # Send TaskMessage to CoordinatorAgent\n await runtime.send_message(TaskMessage(content=task), AgentId(\"coordinator_agent\", \"default\"))\n\n await runtime.stop_when_idle()\n\n # Get final answer\n final_answer = (await queue.get()).answer\n return final_answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (7.3%, 8.9%), Median: 12.7%", + "generation": 68 + }, + { + "thought": "**Insights:**\n\nBy incorporating step-by-step verification into the reasoning process, we can enhance the agent's ability to provide accurate answers grounded in the passage. While similar to previous agents, this approach uniquely emphasizes verifying each reasoning step directly against the provided text.\n\n**Overall Idea:**\n\nDevelop a 'Fact-Checked Chain-of-Thought Agent' that generates reasoning steps and verifies each against the passage. 
The agent ensures that all information used is supported by the text, reducing hallucinations and improving accuracy.\n\n**Implementation:**\n\n- **FactCheckedCoTAgent**:\n - Receives the passage and question in a `PassageQuestion` message.\n - Generates initial reasoning steps.\n - Verifies each reasoning step against the passage, correcting any unsupported steps.\n - Compiles the verified reasoning and provides the final answer.\n - Publishes the final answer to the result topic using `TopicId(\"result\", \"output_result\")`.\n- **ClosureAgent**:\n - Subscribes to the result topic `TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")` and collects the final answer.\n- Adjusted the function signature and ensured double quotes are used consistently.", + "name": "Fact-Checked Chain-of-Thought Agent", + "code": "def forward(self, task, model_client_kwargs) -> str:\n import asyncio\n import re\n from dataclasses import dataclass\n from typing import List\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId\n from autogen_core.components import (\n RoutedAgent,\n default_subscription,\n message_handler,\n ClosureAgent,\n TypeSubscription,\n DefaultTopicId,\n )\n from autogen_core.components.models import (\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n LLMMessage,\n AssistantMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class PassageQuestion:\n passage: str\n question: str\n\n @dataclass\n class Answer:\n answer: str\n\n @default_subscription\n class FactCheckedCoTAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Fact-Checked CoT Agent\")\n self._model_client = model_client\n\n @message_handler\n async def handle_passage_question(self, message: PassageQuestion, ctx: MessageContext) -> None:\n passage = message.passage\n question = message.question\n # Step 1: Generate initial reasoning using chain-of-thought\n initial_prompt = (\n \"You are a helpful assistant. Read the following passage and question, then \"\n \"provide a detailed, step-by-step reasoning (number each step) to arrive at the answer. \"\n \"Finish with the final answer.\\n\\n\"\n f\"Passage:\\n{passage}\\n\\nQuestion:\\n{question}\\n\\nAnswer:\"\n )\n response = await self._model_client.create(\n [UserMessage(content=initial_prompt, source=\"user\")],\n cancellation_token=ctx.cancellation_token,\n )\n reasoning = response.content.strip()\n # Step 2: Split reasoning into individual steps\n steps = self._split_reasoning(reasoning)\n # Step 3: Verify each step against the passage\n verified_steps = []\n for step in steps:\n verification_prompt = (\n \"You are to verify whether the following reasoning step is supported by the passage. \"\n \"If it is correct and supported, reply 'Yes' and briefly cite evidence from the passage. 
\"\n \"If it is incorrect or unsupported, reply 'No' and provide the corrected reasoning step based on the passage.\\n\\n\"\n f\"Passage:\\n{passage}\\n\\nReasoning Step:\\n{step}\\n\"\n )\n verification_response = await self._model_client.create(\n [UserMessage(content=verification_prompt, source=\"user\")],\n cancellation_token=ctx.cancellation_token,\n )\n verification = verification_response.content.strip()\n # Process verification\n if verification.lower().startswith(\"yes\"):\n verified_steps.append(step)\n elif verification.lower().startswith(\"no\"):\n # Extract corrected step\n corrected_step = self._extract_corrected_step(verification)\n verified_steps.append(corrected_step)\n else:\n # Handle unexpected format\n corrected_step = step # Keep original step if unable to verify\n verified_steps.append(corrected_step)\n # Step 4: Compile verified reasoning and generate final answer\n final_reasoning = \"\\n\".join(verified_steps)\n final_answer_prompt = (\n \"Based on the verified reasoning steps below, provide the final answer to the question.\\n\\n\"\n f\"Verified Reasoning Steps:\\n{final_reasoning}\\n\\nFinal Answer:\"\n )\n final_response = await self._model_client.create(\n [UserMessage(content=final_answer_prompt, source=\"user\")],\n cancellation_token=ctx.cancellation_token,\n )\n final_answer = final_response.content.strip()\n # Step 5: Publish the final answer\n await self.publish_message(\n Answer(answer=final_answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n\n def _split_reasoning(self, reasoning_text: str) -> List[str]:\n # Split reasoning into steps based on numbering\n steps = []\n lines = reasoning_text.strip().split(\"\\n\")\n current_step = \"\"\n for line in lines:\n if line.strip() == \"\":\n continue\n if re.match(r\"^\\d+[).\\]]\", line.strip()):\n if current_step != \"\":\n steps.append(current_step.strip())\n current_step = line.strip()\n else:\n current_step += \" \" + line.strip()\n if current_step != \"\":\n steps.append(current_step.strip())\n return steps\n\n def _extract_corrected_step(self, verification_text: str) -> str:\n # Try to extract the corrected step from the verification response\n match = re.search(r\"Corrected reasoning step:\\s*(.*)\", verification_text, re.DOTALL)\n if match:\n corrected_step = match.group(1).strip()\n else:\n corrected_step = verification_text.strip()\n return corrected_step\n\n async def main():\n queue = asyncio.Queue()\n\n async def output_result(\n _runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext\n ) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n await FactCheckedCoTAgent.register(\n runtime,\n \"fact_checked_cot_agent\",\n lambda: FactCheckedCoTAgent(model_client=model_client),\n )\n\n # ClosureAgent subscribes to the result topic\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic],\n )\n\n runtime.start()\n\n # Extract passage and question from task\n parts = task.strip().split(\"Question:\")\n if len(parts) < 2:\n raise ValueError(\"Invalid task format. 
Expected 'Passage:\\n...\\nQuestion:\\n...'\")\n passage = parts[0].replace(\"Passage:\", \"\").strip()\n question = parts[1].strip()\n\n await runtime.publish_message(\n PassageQuestion(passage=passage, question=question),\n topic_id=DefaultTopicId(),\n )\n\n await runtime.stop_when_idle()\n\n return (await queue.get()).answer\n\n return asyncio.run(main())",
+    "fitness": "95% Bootstrap Confidence Interval: (0.0%, 0.0%), Median: 0.2%",
+    "generation": 70
+  }
+]
\ No newline at end of file
diff --git a/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json b/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json
new file mode 100644
index 000000000000..ad22d4f14adf
--- /dev/null
+++ b/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json
@@ -0,0 +1,51 @@
+[
+  {
+    "thought": "By encouraging the LLM to think step by step rather than directly outputting an answer, chain-of-thought reasoning enables complex problem-solving through intermediate steps. This practice improves the model's ability to handle tasks that require deeper reasoning and provides insight into its decision-making process.",
+    "name": "Chain-of-Thought",
+    "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n import json\n from dataclasses import dataclass\n import sys\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, DefaultSubscription\n from autogen_core.components.models import (\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from typing import List\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n # Define message types as data classes\n @dataclass\n class ChainOfThoughtTask:\n task: str\n\n\n @dataclass\n class FinalResult:\n result: str\n\n\n # Define the Chain-of-Thought Agent\n class ChainOfThoughtAgent(RoutedAgent):\n def __init__(self, description: str,\n model_client: ChatCompletionClient,\n system_prompt: str,\n instruction: str,\n ) -> None:\n super().__init__(description)\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=system_prompt,\n )\n ]\n self._model_client = model_client\n self._instruction = instruction\n\n @message_handler\n async def handle_task(self, message: ChainOfThoughtTask, ctx: MessageContext) -> None:\n\n logging.info(f\"{self._description} received message: {message.task}\")\n user_prompt = message.task + \"\\n\" + self._instruction\n msgs = self._system_messages + [UserMessage(content=user_prompt, source=self.metadata[\"type\"])]\n model_result = await self._model_client.create(msgs)\n assert isinstance(model_result.content, str)\n\n await self.publish_message(\n message=FinalResult(model_result.content),\n topic_id=DefaultTopicId(),\n )\n\n\n # Define the main function to set up and run the agent system\n async def main():\n\n # Create a queue to collect final answer\n queue = asyncio.Queue[FinalResult]()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Create the chain-of-thought agent\n agent_id = AgentId(\"COTAgent\", \"default\")\n cot_instruction = \"Please think step by step and then solve the task.\"\n await ChainOfThoughtAgent.register(\n runtime, \"COTAgent\", lambda: ChainOfThoughtAgent(\n description='Chain-of-Thought Agent',\n model_client=model_client,\n system_prompt=\"You are a helpful assistant. Directly answer the question. Keep it very concise.\",\n instruction=cot_instruction,\n )\n )\n # Create closure agent to collect final output result\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [DefaultSubscription()])\n\n # Start the runtime and send the first message to the agent\n runtime.start()\n initial_message = ChainOfThoughtTask(task=task)\n await runtime.send_message(initial_message, agent_id)\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the first answer from the queue\n return (await queue.get()).result\n\n return asyncio.run(main())\n",
+    "generation": "initial",
+    "fitness": "95% Bootstrap Confidence Interval: (8.4%, 9.6%), Median: 12.4%"
+  },
+  {
+    "thought": "While an LLM can arrive at the correct answer, its reasoning may vary. By repeatedly asking the same question with high temperature settings, we can generate different reasoning paths. 
We then combine multiple answers from these Chain-of-Thought (CoT) agents to produce a more accurate final answer through ensembling.", + "name": "Self-Consistency with Chain-of-Thought", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n import json\n from dataclasses import dataclass\n import sys\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, DefaultSubscription\n from autogen_core.components.models import (\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from typing import List\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class WorkerTask:\n task: str\n previous_results: List[str]\n\n\n @dataclass\n class WorkerTaskResult:\n result: str\n\n\n @dataclass\n class UserTask:\n task: str\n\n\n @dataclass\n class FinalResult:\n result: str\n\n\n class WorkerAgent(RoutedAgent):\n def __init__(\n self,\n model_client: ChatCompletionClient,\n instruction: str,\n ) -> None:\n super().__init__(description=\"Worker Agent\")\n self._model_client = model_client\n self._instruction = instruction\n\n @message_handler\n async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n user_prompt = message.task + \"\\n\" + self._instruction\n\n if message.previous_results:\n # If previous results are provided, we need to synthesize them to create a single prompt.\n # system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n system_prompt = \"Given all the solutions, reason over them carefully and provide a final answer.\"\n system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(message.previous_results)])\n model_result = await self._model_client.create(\n [SystemMessage(system_prompt), UserMessage(content=user_prompt, source=\"user\")]\n )\n else:\n # If no previous results are provided, we can simply pass the user query to the model.\n model_result = await self._model_client.create([UserMessage(content=user_prompt, source=\"user\")])\n assert isinstance(model_result.content, str)\n print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n return WorkerTaskResult(result=model_result.content)\n\n\n class OrchestratorAgent(RoutedAgent):\n def __init__(\n self,\n model_client: ChatCompletionClient,\n worker_agent_types: List[str],\n num_layers: int,\n ) -> None:\n super().__init__(description=\"Aggregator Agent\")\n self._model_client = model_client\n self._worker_agent_types = worker_agent_types\n self._num_layers = num_layers\n\n\n @message_handler\n async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n # Create task for the first layer.\n worker_task = WorkerTask(task=message.task, previous_results=[])\n # Iterate over layers.\n for i in range(self._num_layers):\n # Assign workers for this layer.\n worker_ids = [\n AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n for j, worker_type in enumerate(self._worker_agent_types)\n ]\n # Dispatch tasks to workers.\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n # Prepare task for the next layer.\n worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n # Perform final aggregation.\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n # system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n system_prompt = \"Given all the above solutions, reason over them carefully and provide a final answer.\"\n system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(worker_task.previous_results)])\n model_result = await self._model_client.create(\n [SystemMessage(system_prompt), UserMessage(content=message.task, source=\"user\")]\n )\n assert isinstance(model_result.content, str)\n return FinalResult(result=model_result.content)\n\n # Define the main function to set up and run the agent system\n async def main():\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Create the agents\n cot_instruction = \"Please think step by step and then solve the task.\"\n await WorkerAgent.register(\n runtime, \"worker\", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction)\n )\n await OrchestratorAgent.register(\n runtime,\n \"orchestrator\",\n lambda: OrchestratorAgent(\n model_client=model_client, worker_agent_types=[\"worker\"] * 5, num_layers=1\n ),\n )\n\n # Start the runtime, and publish the first message\n runtime.start()\n result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n\n # Return the result\n return result.result\n\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (9.3%, 11.1%), Median: 15.7%" + }, + { + "thought": "To enhance its performance, an LLM can iteratively improve its answer based on feedback. By reflecting on its previous attempts and incorporating feedback, the model can refine its reasoning and provide a more accurate solution.", + "name": "Self-Refine (Reflexion)", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n import re\n import sys\n import uuid\n from dataclasses import dataclass\n from typing import Dict, List, Union\n from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class WritingTask:\n task: str\n\n\n @dataclass\n class WritingResult:\n task: str\n answer: str\n review: str\n\n\n @dataclass\n class ReviewTask:\n session_id: str\n writing_task: str\n answer_scratchpad: str\n answer: str\n\n\n @dataclass\n class ReviewResult:\n review: str\n session_id: str\n approved: bool\n\n\n @default_subscription\n class WorkerAgent(RoutedAgent):\n \"An agent that performs writing tasks.\"\n\n def __init__(self,\n model_client: ChatCompletionClient,\n instruction: str,\n ) -> None:\n super().__init__(\"A helpful assistant\")\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=\"\"\"You are a 
helpful assistant. Work with the critic to improve your answer.\n Make sure to directly answer the question. Keep it very concise.\n Respond using the following format:\n\n Thoughts: \n Answer: \n \"\"\",\n )\n ]\n self._model_client = model_client\n self._session_memory: Dict[str, List[WritingTask | ReviewTask | ReviewResult]] = {}\n self._instruction = instruction\n\n @message_handler\n async def handle_writing_task(self, message: WritingTask, ctx: MessageContext) -> None:\n # Store the messages in a temporary memory for this request only.\n session_id = str(uuid.uuid4())\n self._session_memory.setdefault(session_id, []).append(message)\n # Generate a response using the chat completion API.\n response = await self._model_client.create(\n self._system_messages + [UserMessage(content=message.task + self._instruction, source=self.metadata[\"type\"])],\n cancellation_token=ctx.cancellation_token,\n )\n assert isinstance(response.content, str)\n # Extract the answer from the response.\n answer = self._extract_answer(response.content)\n # Create a review task.\n review_task = ReviewTask(\n session_id=session_id,\n writing_task=message.task,\n answer_scratchpad=response.content,\n answer=answer,\n )\n # Store the review task in the session memory.\n self._session_memory[session_id].append(review_task)\n # Publish a review task.\n await self.publish_message(review_task, topic_id=TopicId(\"default\", self.id.key))\n\n @message_handler\n async def handle_review_result(self, message: ReviewResult, ctx: MessageContext) -> None:\n # Store the review result in the session memory.\n self._session_memory[message.session_id].append(message)\n # Obtain the request from previous messages.\n review_request = next(\n m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, ReviewTask)\n )\n assert review_request is not None\n # Check if the answer is approved.\n if message.approved:\n # Publish the writing result.\n await self.publish_message(\n WritingResult(\n answer=review_request.answer,\n task=review_request.writing_task,\n review=message.review,\n ),\n topic_id=TopicId(\"result\", self.id.key),\n )\n print(\"Writing Result:\")\n print(\"-\" * 80)\n print(f\"Task:\\n{review_request.writing_task}\")\n print(\"-\" * 80)\n print(f\"Answer:\\n{review_request.answer}\")\n print(\"-\" * 80)\n print(f\"Review:\\n{message.review}\")\n print(\"-\" * 80)\n else:\n # Create a list of LLM messages to send to the model.\n messages: List[LLMMessage] = [*self._system_messages]\n for m in self._session_memory[message.session_id]:\n if isinstance(m, ReviewResult):\n messages.append(UserMessage(content=m.review, source=\"Reviewer\"))\n elif isinstance(m, ReviewTask):\n messages.append(AssistantMessage(content=m.answer_scratchpad, source=\"Worker\"))\n elif isinstance(m, WritingTask):\n messages.append(UserMessage(content=m.task, source=\"User\"))\n else:\n raise ValueError(f\"Unexpected message type: {m}\")\n # Generate a revision using the chat completion API.\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n # Extract the answer from the response.\n answer = self._extract_answer(response.content)\n # Create a new review task.\n review_task = ReviewTask(\n session_id=message.session_id,\n writing_task=review_request.writing_task,\n answer_scratchpad=response.content,\n answer=answer,\n )\n # Store the review task in the session memory.\n self._session_memory[message.session_id].append(review_task)\n # Publish a new review task.\n await self.publish_message(review_task, topic_id=TopicId(\"default\", self.id.key))\n\n\n def _extract_answer(self, text: str) -> Union[str, None]:\n pattern = \"(?<=Answer: ).*\"\n # Search for the pattern in the response text\n match = re.search(pattern, text, re.DOTALL)\n # Return the extracted answer if a match is found\n if match:\n return match.group(0)\n return None\n\n @default_subscription\n class ReviewerAgent(RoutedAgent):\n \"\"\"An agent that critiques tasks.\"\"\"\n\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"A critic agent.\")\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=\"\"\"You are a critic. Review answers and criticize on where it might be wrong.\n Respond using the following JSON format:\n {\n \"correctness\": \"\",\n \"approval\": \"\",\n \"suggested_changes\": \"\"\n }\n \"\"\",\n )\n ]\n self._session_memory: Dict[str, List[ReviewTask | ReviewResult]] = {}\n self._model_client = model_client\n\n @message_handler\n async def handle_review_task(self, message: ReviewTask, ctx: MessageContext) -> None:\n # Format the prompt for the review.\n # Gather the previous feedback if available.\n previous_feedback = \"\"\n if message.session_id in self._session_memory:\n previous_review = next(\n (m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, ReviewResult)),\n None,\n )\n if previous_review is not None:\n previous_feedback = previous_review.review\n # Store the messages in a temporary memory for this request only.\n self._session_memory.setdefault(message.session_id, []).append(message)\n prompt = f\"\"\"The problem statement is: {message.writing_task}\n The answer is:\n ```\n {message.answer}\n ```\n\n Previous feedback:\n {previous_feedback}\n\n Please review the answer. If previous feedback was provided, see if it was addressed.\n \"\"\"\n # Generate a response using the chat completion API.\n response = await self._model_client.create(\n self._system_messages + [UserMessage(content=prompt, source=self.metadata[\"type\"])],\n cancellation_token=ctx.cancellation_token,\n json_output=True,\n )\n assert isinstance(response.content, str)\n # TODO: use structured generation library e.g. 
guidance to ensure the response is in the expected format.\n # Parse the response JSON.\n review = json.loads(response.content)\n # Construct the review text.\n review_text = \"Review:\\n\" + \"\\n\".join([f\"{k}: {v}\" for k, v in review.items()])\n approved = review[\"approval\"].lower().strip() == \"approve\"\n result = ReviewResult(\n review=review_text,\n session_id=message.session_id,\n approved=approved,\n )\n # Store the review result in the session memory.\n self._session_memory[message.session_id].append(result)\n # Publish the review result.\n await self.publish_message(result, topic_id=TopicId(\"default\", self.id.key))\n\n\n # Define the main function to set up and run the agent system\n async def main():\n # Create a queue to collect final answer\n queue = asyncio.Queue[WritingResult]()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Create agents\n await ReviewerAgent.register(\n runtime, \"ReviewerAgent\", lambda: ReviewerAgent(model_client=model_client)\n )\n cot_instruction = \"Please think step by step and then solve the task.\"\n await WorkerAgent.register(\n runtime, \"WorkerAgent\", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction)\n )\n # Create closure agent to collect final output result\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n # Start the runtime, and publish the first message\n runtime.start()\n await runtime.publish_message(\n message=WritingTask(task=task),\n topic_id=DefaultTopicId(),\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the first answer from the queue\n print(f\"queue {queue}\")\n return (await queue.get()).answer\n \n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (34.7%, 38.3%), Median: 46.0%" + }, + { + "thought": "By letting different LLMs debate with each other, we can leverage their diverse perspectives to find better solutions for tasks.", + "name": "LLM Debate", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n import re\n import sys\n import uuid\n from dataclasses import dataclass\n from typing import Dict, List, Union\n from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime\n from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n 
azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n \n @dataclass\n class Question:\n content: str\n\n\n @dataclass\n class Answer:\n content: str\n\n\n @dataclass\n class SolverRequest:\n content: str\n question: str\n\n\n @dataclass\n class IntermediateSolverResponse:\n content: str\n question: str\n answer: str\n round: int\n\n\n @dataclass\n class FinalSolverResponse:\n answer: str\n\n @default_subscription\n class Solver(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neighbors: int, max_round: int) -> None:\n super().__init__(\"A debator.\")\n self._topic_type = topic_type\n self._model_client = model_client\n self._num_neighbors = num_neighbors\n self._history: List[LLMMessage] = []\n self._buffer: Dict[int, List[IntermediateSolverResponse]] = {}\n self._system_messages = [\n SystemMessage(\n (\n \"You are a helpful assistant with expertise in reasoning. \"\n \"Your task is to assist in solving a reasoning problem by providing \"\n \"a clear and detailed solution. Limit your output within 100 words, \"\n \"and your final answer should be a single string.\"\n )\n )\n ]\n self._round = 0\n self._max_round = max_round\n\n @message_handler\n async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None:\n # Add the question to the memory.\n self._history.append(UserMessage(content=message.content, source=\"user\"))\n # Make an inference using the model.\n model_result = await self._model_client.create(self._system_messages + self._history)\n assert isinstance(model_result.content, str)\n # Add the response to the memory.\n self._history.append(AssistantMessage(content=model_result.content, source=self.metadata[\"type\"]))\n print(f\"{'-'*80}\\nSolver {self.id} round {self._round}:\\n{model_result.content}\")\n # Increment the counter.\n self._round += 1\n if self._round == self._max_round:\n # If the counter reaches the maximum round, publishes a final response.\n await self.publish_message(FinalSolverResponse(answer=model_result.content), topic_id=DefaultTopicId())\n else:\n # Publish intermediate response to the topic associated with this solver.\n print(\"publish IntermediateSolverResponse\")\n await self.publish_message(\n IntermediateSolverResponse(\n content=model_result.content,\n question=message.question,\n answer=model_result.content,\n round=self._round,\n ),\n topic_id=DefaultTopicId(type=self._topic_type),\n )\n\n @message_handler\n async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None:\n # Add neighbor's response to the buffer.\n self._buffer.setdefault(message.round, []).append(message)\n # Check if all neighbors have responded.\n if len(self._buffer[message.round]) == self._num_neighbors:\n print(\n f\"{'-'*80}\\nSolver {self.id} round {message.round}:\\nReceived all responses from {self._num_neighbors} neighbors.\"\n )\n # Prepare the prompt for the next question.\n prompt = \"These are the solutions to the problem from other agents:\\n\"\n for resp in self._buffer[message.round]:\n prompt += f\"One agent solution: {resp.content}\\n\"\n prompt += (\n \"Using the solutions from other agents as additional information, \"\n \"can you provide your answer to the problem? \"\n f\"The original problem is {message.question}. 
\"\n \"Your final answer should be a single string.\"\n )\n # Send the question to the agent itself to solve.\n await self.send_message(SolverRequest(content=prompt, question=message.question), self.id)\n # Clear the buffer.\n self._buffer.pop(message.round)\n\n\n @default_subscription\n class Aggregator(RoutedAgent):\n def __init__(self, num_solvers: int) -> None:\n super().__init__(\"Aggregator\")\n self._num_solvers = num_solvers\n self._buffer: List[FinalSolverResponse] = []\n\n @message_handler\n async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n print(f\"{'-'*80}\\nAggregator {self.id} received question:\\n{message.content}\")\n prompt = (\n f\"Can you solve the following problem?\\n{message.content}\\n\"\n \"Explain your reasoning. Your final answer should be a single string.\"\n )\n print(f\"{'-'*80}\\nAggregator {self.id} publishes initial solver request.\")\n await self.publish_message(SolverRequest(content=prompt, question=message.content), topic_id=DefaultTopicId())\n\n @message_handler\n async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None:\n self._buffer.append(message)\n if len(self._buffer) == self._num_solvers:\n print(f\"{'-'*80}\\nAggregator {self.id} received all final answers from {self._num_solvers} solvers.\")\n # Find the majority answer.\n answers = [resp.answer for resp in self._buffer]\n majority_answer = max(set(answers), key=answers.count)\n # Publish the aggregated response.\n await self.publish_message(Answer(content=majority_answer), topic_id=TopicId(\"result\", self.id.key))\n # Clear the responses.\n self._buffer.clear()\n print(f\"{'-'*80}\\nAggregator {self.id} publishes final answer:\\n{majority_answer}\")\n\n\n # Define the main function to set up and run the agent system\n async def main():\n queue = asyncio.Queue[Answer]()\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n await Solver.register(\n runtime,\n \"SolverA\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverA\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverB\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverB\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverC\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverC\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverD\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverD\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Aggregator.register(runtime, \"Aggregator\", lambda: Aggregator(num_solvers=4))\n\n # Subscriptions for topic published to by SolverA.\n await runtime.add_subscription(TypeSubscription(\"SolverA\", \"SolverD\"))\n await runtime.add_subscription(TypeSubscription(\"SolverA\", \"SolverB\"))\n\n # Subscriptions for topic published to by SolverB.\n await runtime.add_subscription(TypeSubscription(\"SolverB\", \"SolverA\"))\n await runtime.add_subscription(TypeSubscription(\"SolverB\", \"SolverC\"))\n\n # Subscriptions for topic published to by SolverC.\n await runtime.add_subscription(TypeSubscription(\"SolverC\", \"SolverB\"))\n await runtime.add_subscription(TypeSubscription(\"SolverC\", \"SolverD\"))\n\n # Subscriptions for topic published to by SolverD.\n await 
runtime.add_subscription(TypeSubscription(\"SolverD\", \"SolverC\"))\n await runtime.add_subscription(TypeSubscription(\"SolverD\", \"SolverA\"))\n\n # All solvers and the aggregator subscribe to the default topic.\n\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n await runtime.publish_message(Question(content=task), DefaultTopicId())\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the answer from the queue\n res = (await queue.get()).content\n print(f\"res {res}\")\n return res\n\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (77.2%, 80.6%), Median: 87.3%" + }, + { + "thought": "By using a tree search strategy, the model can explore multiple branches of thoughts, where at any step of the problem, multiple independent thoughts are generated and evaluated to find the most useful ones.", + "name": "Tree of Thought", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n from dataclasses import dataclass\n from typing import List, Dict, Any\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId\n from autogen_core.components import default_subscription, RoutedAgent, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId\n from autogen_core.components.models import (\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n AssistantMessage,\n LLMMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_core.application.logging import TRACE_LOGGER_NAME \n\n # Configure logging as per documentation \n logging.basicConfig(level=logging.WARNING) \n logger = logging.getLogger(TRACE_LOGGER_NAME) \n logger.setLevel(logging.INFO)\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n \n @dataclass \n class Message: \n content: str \n \n @dataclass \n class FinalAnswer: \n answer: str \n\n @default_subscription\n class TreeOfThoughtsAgent(RoutedAgent): \n def __init__(self, model_client: ChatCompletionClient, max_depth: int = 3, beam_width: int = 3): \n super().__init__(\"TreeOfThoughtsAgent\") \n self._model_client = model_client \n self._max_depth = max_depth \n self._beam_width = beam_width \n self._system_messages = [ \n SystemMessage( \n content=\"You are a helpful assistant who reasons step-by-step to solve complex problems.\") \n ] \n \n async def generate_thoughts(self, prompt: List[LLMMessage], num_thoughts: int, cancellation_token) -> List[str]: \n # Generate multiple thoughts using the model \n thoughts = [] \n # Create multiple async tasks to generate thoughts in parallel \n tasks = [] \n for _ in range(num_thoughts): \n tasks.append(self._model_client.create( \n prompt, \n extra_create_args={\"temperature\": 1.0},\n 
cancellation_token=cancellation_token, \n )) \n responses = await asyncio.gather(*tasks) \n for response in responses: \n thoughts.append(response.content.strip()) \n return thoughts \n \n async def evaluate_thoughts(self, thoughts: List[str], ctx: MessageContext) -> List[str]: \n # Batch evaluation of thoughts \n eval_prompt = [ \n SystemMessage(content=\"You are an assistant that evaluates reasoning steps for solving a problem.\"), \n UserMessage( \n content=f\"Evaluate the following thoughts for their usefulness in solving the problem. Rank them from most useful to least useful and provide the rankings.\\n\\nThoughts:\\n\" + \"\\n\".join( \n [f\"{i+1}. {t}\" for i, t in enumerate(thoughts)]), \n source=\"user\" \n ) \n ] \n eval_response = await self._model_client.create( \n eval_prompt, \n cancellation_token=ctx.cancellation_token, \n ) \n # Parse the response to extract rankings \n rankings_text = eval_response.content.strip() \n # For simplicity, assume the model outputs the rankings as a list of numbers \n rankings = [] \n for line in rankings_text.split('\\n'): \n line = line.strip() \n if line and line[0].isdigit(): \n rankings.append(int(line[0]) - 1) # Subtract 1 to get index \n # Select top-k thoughts \n best_thoughts = [thoughts[i] for i in rankings[:self._beam_width]] \n return best_thoughts \n \n @message_handler \n async def handle_message(self, message: Message, ctx: MessageContext) -> None: \n logger.info(f\"Received task: {message.content}\") \n initial_prompt = self._system_messages + [UserMessage(content=message.content, source=\"user\")] \n tree = [[]] # Initialize the tree with an empty path \n for depth in range(self._max_depth): \n new_branches = [] \n logger.info(f\"Depth {depth+1}\") \n for path in tree: \n # Build the prompt up to this point \n prompt = initial_prompt.copy() \n for thought in path: \n prompt.append(AssistantMessage(content=thought, source=\"assistant\")) \n # Generate thoughts \n thoughts = await self.generate_thoughts(prompt, self._beam_width, ctx.cancellation_token) \n logger.info(f\"Generated thoughts: {thoughts}\") \n # Evaluate thoughts \n best_thoughts = await self.evaluate_thoughts(thoughts, ctx) \n logger.info(f\"Best thoughts: {best_thoughts}\") \n # Expand tree with best thoughts \n for thought in best_thoughts: \n new_path = path + [thought] \n new_branches.append(new_path) \n # Update tree with new branches \n if not new_branches: \n logger.info(\"No more branches to expand.\") \n break # No more thoughts to expand \n tree = new_branches \n # After reaching max depth, select the best path \n # For simplicity, select the first path \n best_path = tree[0] \n final_answer = best_path[-1] \n logger.info(f\"Final answer: {final_answer}\") \n # Publish the final answer \n await self.publish_message( \n FinalAnswer(answer=final_answer), \n topic_id=TopicId(type=\"result\", source=self.id.key) \n ) \n \n # Main function \n async def main(): \n # Create a queue to collect the final answer \n queue = asyncio.Queue[FinalAnswer]() \n \n async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None: \n await queue.put(message) \n \n # Initialize runtime \n runtime = SingleThreadedAgentRuntime() \n \n # Register TreeOfThoughtsAgent \n await TreeOfThoughtsAgent.register( \n runtime, \n \"TreeOfThoughtsAgent\", \n lambda: TreeOfThoughtsAgent(model_client) \n ) \n \n # Register ClosureAgent with agent key matching self.id.key (default is \"default\") \n result_topic = 
TypeSubscription(topic_type=\"result\", agent_type=\"output_result\") \n await ClosureAgent.register( \n runtime, \n \"output_result\", \n output_result, \n subscriptions=lambda: [result_topic] \n ) \n \n # Start the runtime \n runtime.start() \n \n # Publish initial message to TreeOfThoughtsAgent\n await runtime.publish_message( \n Message(content=task), \n topic_id=DefaultTopicId() \n ) \n \n # Wait until idle \n await runtime.stop_when_idle() \n \n # Return the final answer \n final_message = await queue.get() \n return final_message.answer\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (32.7%, 37.2%), Median: 46.2%" + }, + { + "thought": "**Insights:**\nIntegrating tool use into the agent's reasoning process through the ReAct framework can enhance performance on tasks requiring complex computations. By interleaving reasoning steps with actions, the agent can utilize external tools like a calculator to perform tasks beyond the capabilities of the language model alone.\n\n**Overall Idea:**\nImplement a `ReActAgent` that uses chain-of-thought reasoning interleaved with tool use. The agent decides when to call tools (e.g., a calculator) during its reasoning process. A `ToolAgent` will execute the requested tool actions. The `ReActAgent` will communicate with the `ToolAgent` via direct messages. The final answer will be published to a 'result' topic, and a `ClosureAgent` will collect the final answer.\n\n**Implementation:**\n- Use the `@default_subscription` decorator for the `ReActAgent` to subscribe to the default topic.\n- Modify the `ReActAgent` to use `tool_agent_caller_loop` for tool interactions.\n- Adjust the `ClosureAgent` to subscribe to `TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")`.\n- Ensure that the agents communicate properly and that the message handling follows the framework's guidelines.", + "name": "ReAct (Reasoning and Acting)", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n from dataclasses import dataclass\n from typing import List\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId\n from autogen_core.components import RoutedAgent, message_handler, ClosureAgent, default_subscription, TypeSubscription, DefaultTopicId\n from autogen_core.components.models import (\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n AssistantMessage,\n LLMMessage,\n )\n from autogen_core.components.tool_agent import ToolAgent, tool_agent_caller_loop\n from autogen_core.components.tools import FunctionTool\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n # Set up Azure OpenAI credentials\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create the AzureOpenAI model client\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"function_calling\": True,\n \"vision\": True,\n \"json_output\": True,\n },\n )\n\n # Message class\n @dataclass\n class Message:\n content: str\n\n # Calculator tool\n async def calculator(expression: str) -> str:\n try:\n result = eval(expression)\n 
return str(result)\n except Exception as e:\n return \"Error: \" + str(e)\n\n calculator_tool = FunctionTool(\n func=calculator,\n description=\"A calculator that can evaluate mathematical expressions.\",\n )\n\n # ReActAgent\n @default_subscription\n class ReActAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, tool_agent_id: AgentId, tools: List[FunctionTool]):\n super().__init__(\"ReAct Agent\")\n self._model_client = model_client\n self._tool_agent_id = tool_agent_id\n self._tools = tools\n self._system_messages = [\n SystemMessage(\n content=(\n \"You are an intelligent agent that can reason and act. \"\n \"Use tools to perform actions when necessary. \"\n \"You should use the following format:\\n\\n\"\n \"Thought: what you think\\n\"\n \"Action: the action to perform, must be one of [calculator]\\n\"\n \"Action Input: the input to the action\\n\"\n \"Observation: the result of the action\\n\"\n \"... (this Thought/Action/Action Input/Observation can repeat N times)\\n\"\n \"Thought: I now know the final answer\\n\"\n \"Answer: the final answer to the original question\"\n )\n )\n ]\n self._conversation: List[LLMMessage] = []\n\n @message_handler\n async def handle_message(self, message: Message, ctx: MessageContext) -> None:\n user_message = UserMessage(content=message.content, source=\"user\")\n self._conversation.append(user_message)\n\n # Use tool_agent_caller_loop to manage tool calls\n messages = await tool_agent_caller_loop(\n self,\n tool_agent_id=self._tool_agent_id,\n model_client=self._model_client,\n input_messages=self._conversation,\n tool_schema=[tool.schema for tool in self._tools],\n cancellation_token=ctx.cancellation_token,\n )\n\n # Extract final answer from the last assistant message\n final_answer = messages[-1].content\n\n # Publish final answer to 'result' topic with topic_source 'output_result'\n await self.publish_message(\n Message(content=final_answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n\n # Main function\n async def main():\n # Create a queue to collect the final answer\n queue = asyncio.Queue[Message]()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: Message, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Register ToolAgent\n tools = [calculator_tool]\n await ToolAgent.register(\n runtime,\n \"ToolAgent\",\n lambda: ToolAgent(\"Tool Agent\", tools),\n )\n\n # Register ReActAgent\n tool_agent_id = AgentId(\"ToolAgent\", \"default\")\n await ReActAgent.register(\n runtime,\n \"ReActAgent\",\n lambda: ReActAgent(model_client, tool_agent_id, tools),\n )\n\n # Register ClosureAgent with corrected subscription\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n # Start the runtime\n runtime.start()\n\n # Publish initial message to ReActAgent\n await runtime.publish_message(\n Message(content=task),\n topic_id=DefaultTopicId(),\n )\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer\n final_message = await queue.get()\n return final_message.content\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (10.7%, 12.2%), Median: 15.9%", + "generation": 4 + }, + { + "thought": "We will improve the 'Tree of Thought' agent by enhancing the thought evaluation process to make it more robust and by refining the 
final answer selection mechanism to choose the best path based on cumulative evaluation scores. Specifically, we'll modify the `evaluate_thoughts` method to have the model output JSON-formatted evaluations for each thought, ensuring reliable parsing. We'll also keep track of cumulative scores for each path and select the one with the highest total score.", + "name": "Improved Tree of Thought", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n from dataclasses import dataclass\n from typing import List, Dict, Any\n from autogen_core.application import SingleThreadedAgentRuntime\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId\n from autogen_core.components import default_subscription, RoutedAgent, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId\n from autogen_core.components.models import (\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n AssistantMessage,\n LLMMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_core.application.logging import TRACE_LOGGER_NAME\n\n # Configure logging as per documentation\n logging.basicConfig(level=logging.WARNING)\n logger = logging.getLogger(TRACE_LOGGER_NAME)\n logger.setLevel(logging.INFO)\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class Message:\n content: str\n\n @dataclass\n class FinalAnswer:\n answer: str\n\n @default_subscription\n class TreeOfThoughtsAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, max_depth: int = 3, beam_width: int = 3):\n super().__init__(\"TreeOfThoughtsAgent\")\n self._model_client = model_client\n self._max_depth = max_depth\n self._beam_width = beam_width\n self._system_messages = [\n SystemMessage(\n content=\"You are a helpful assistant who reasons step-by-step to solve complex problems.\")\n ]\n\n async def generate_thoughts(self, prompt: List[LLMMessage], num_thoughts: int, cancellation_token) -> List[str]:\n # Generate multiple thoughts using the model\n thoughts = []\n # Create multiple async tasks to generate thoughts in parallel\n tasks = []\n for _ in range(num_thoughts):\n tasks.append(self._model_client.create(\n prompt,\n extra_create_args={\"temperature\": 1.0},\n cancellation_token=cancellation_token,\n ))\n responses = await asyncio.gather(*tasks)\n for response in responses:\n thoughts.append(response.content.strip())\n return thoughts\n\n async def evaluate_thoughts(self, thoughts: List[str], ctx: MessageContext) -> List[Dict[str, Any]]:\n # Evaluate thoughts with the model outputting JSON-formatted scores\n eval_prompt = [\n SystemMessage(content=\"You are an assistant that evaluates reasoning steps for solving a problem.\"),\n UserMessage(\n content=(\n \"Evaluate the following thoughts for their usefulness in solving the problem. 
\"\n \"Provide a JSON array of objects with 'thought' and 'score' (from 1 to 10).\\n\\nThoughts:\\n\" + \"\\n\".join(\n [f\"- {t}\" for t in thoughts])\n ),\n source=\"user\"\n )\n ]\n eval_response = await self._model_client.create(\n eval_prompt,\n cancellation_token=ctx.cancellation_token,\n )\n # Parse the JSON response\n import json\n try:\n evaluations = json.loads(eval_response.content.strip())\n # Each evaluation should be a dict with 'thought' and 'score'\n return evaluations\n except json.JSONDecodeError:\n # If parsing fails, assign default scores\n return [{\"thought\": t, \"score\": 5} for t in thoughts]\n\n @message_handler\n async def handle_message(self, message: Message, ctx: MessageContext) -> None:\n logger.info(f\"Received task: {message.content}\")\n initial_prompt = self._system_messages + [UserMessage(content=message.content, source=\"user\")]\n tree = [[{\"thought\": \"\", \"score\": 0, \"cumulative_score\": 0}]] # Initialize the tree with an empty path\n for depth in range(self._max_depth):\n new_branches = []\n logger.info(f\"Depth {depth+1}\")\n for path in tree:\n # Build the prompt up to this point\n prompt = initial_prompt.copy()\n for node in path:\n if node[\"thought\"]:\n prompt.append(AssistantMessage(content=node[\"thought\"], source=\"assistant\"))\n # Generate thoughts\n thoughts = await self.generate_thoughts(prompt, self._beam_width, ctx.cancellation_token)\n logger.info(f\"Generated thoughts: {thoughts}\")\n # Evaluate thoughts\n evaluations = await self.evaluate_thoughts(thoughts, ctx)\n logger.info(f\"Evaluations: {evaluations}\")\n # Expand tree with evaluated thoughts\n for eval in evaluations:\n new_path = path + [{\n \"thought\": eval[\"thought\"],\n \"score\": eval[\"score\"],\n \"cumulative_score\": path[-1][\"cumulative_score\"] + eval[\"score\"]\n }]\n new_branches.append(new_path)\n # Select top-k paths based on cumulative_score\n if not new_branches:\n logger.info(\"No more branches to expand.\")\n break # No more thoughts to expand\n # Sort paths by cumulative score\n new_branches.sort(key=lambda p: p[-1][\"cumulative_score\"], reverse=True)\n tree = new_branches[:self._beam_width]\n # After reaching max depth, select the best path\n best_path = tree[0]\n final_answer = best_path[-1][\"thought\"]\n logger.info(f\"Final answer: {final_answer}\")\n # Publish the final answer to topic_id=TopicId(type=\"result\", source=\"default\")\n await self.publish_message(\n FinalAnswer(answer=final_answer),\n topic_id=TopicId(type=\"result\", source=\"default\")\n )\n\n # Main function\n async def main():\n # Create a queue to collect the final answer\n queue = asyncio.Queue()\n\n async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Register TreeOfThoughtsAgent\n await TreeOfThoughtsAgent.register(\n runtime,\n \"TreeOfThoughtsAgent\",\n lambda: TreeOfThoughtsAgent(model_client)\n )\n\n # Register ClosureAgent\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n # Start the runtime\n runtime.start()\n\n # Publish initial message to TreeOfThoughtsAgent\n await runtime.publish_message(\n Message(content=task),\n topic_id=DefaultTopicId()\n )\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer\n 
final_message = await queue.get()\n return final_message.answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (35.0%, 39.6%), Median: 48.6%", + "generation": 6 + } +] \ No newline at end of file From e4868500eb38b348ed11b12052e3aae728b5cbf6 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 5 Dec 2024 16:04:08 -0500 Subject: [PATCH 08/21] Update README --- .../autogen-core/samples/adas/README.md | 195 ++++++++++++++++++ 1 file changed, 195 insertions(+) diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/packages/autogen-core/samples/adas/README.md index fe4aa0761a33..82739fb52623 100644 --- a/python/packages/autogen-core/samples/adas/README.md +++ b/python/packages/autogen-core/samples/adas/README.md @@ -1,5 +1,200 @@ # User Guide for using ADAS in AutoGen +TLDR: +This is a feature to use a meta-agent to generate new agent systems. For example, this agent system was discovered and written entirely by an agent using o1-preview: +``` +def forward(self, task, model_client_kwargs): + import asyncio + import logging + from dataclasses import dataclass + from typing import List, Dict, Any + from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId + from autogen_core.components import default_subscription, RoutedAgent, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId + from autogen_core.components.models import ( + ChatCompletionClient, + SystemMessage, + UserMessage, + AssistantMessage, + LLMMessage, + ) + from autogen_ext.models import AzureOpenAIChatCompletionClient + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + from autogen_core.application.logging import TRACE_LOGGER_NAME + + # Configure logging as per documentation + logging.basicConfig(level=logging.WARNING) + logger = logging.getLogger(TRACE_LOGGER_NAME) + logger.setLevel(logging.INFO) + token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") + + # Create an AzureOpenAI model client. 
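+    # NOTE: this client authenticates to Azure AD via DefaultAzureCredential (an
+    # `az login` session or managed identity is assumed) rather than an API key, and
+    # model_capabilities declares what the deployed model is expected to support.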
+ model_client = AzureOpenAIChatCompletionClient( + model=model_client_kwargs["model"], + api_version=model_client_kwargs["api_version"], + azure_endpoint=model_client_kwargs["azure_endpoint"], + azure_ad_token_provider=token_provider, + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, + ) + + @dataclass + class Message: + content: str + + @dataclass + class FinalAnswer: + answer: str + + @default_subscription + class TreeOfThoughtsAgent(RoutedAgent): + def __init__(self, model_client: ChatCompletionClient, max_depth: int = 3, beam_width: int = 3): + super().__init__("TreeOfThoughtsAgent") + self._model_client = model_client + self._max_depth = max_depth + self._beam_width = beam_width + self._system_messages = [ + SystemMessage( + content="You are a helpful assistant who reasons step-by-step to solve complex problems.") + ] + + async def generate_thoughts(self, prompt: List[LLMMessage], num_thoughts: int, cancellation_token) -> List[str]: + # Generate multiple thoughts using the model + thoughts = [] + # Create multiple async tasks to generate thoughts in parallel + tasks = [] + for _ in range(num_thoughts): + tasks.append(self._model_client.create( + prompt, + extra_create_args={"temperature": 1.0}, + cancellation_token=cancellation_token, + )) + responses = await asyncio.gather(*tasks) + for response in responses: + thoughts.append(response.content.strip()) + return thoughts + + async def evaluate_thoughts(self, thoughts: List[str], ctx: MessageContext) -> List[Dict[str, Any]]: + # Evaluate thoughts with the model outputting JSON-formatted scores + eval_prompt = [ + SystemMessage(content="You are an assistant that evaluates reasoning steps for solving a problem."), + UserMessage( + content=( + "Evaluate the following thoughts for their usefulness in solving the problem. 
" + "Provide a JSON array of objects with 'thought' and 'score' (from 1 to 10).\n\nThoughts:\n" + "\n".join( + [f"- {t}" for t in thoughts]) + ), + source="user" + ) + ] + eval_response = await self._model_client.create( + eval_prompt, + cancellation_token=ctx.cancellation_token, + ) + # Parse the JSON response + import json + try: + evaluations = json.loads(eval_response.content.strip()) + # Each evaluation should be a dict with 'thought' and 'score' + return evaluations + except json.JSONDecodeError: + # If parsing fails, assign default scores + return [{"thought": t, "score": 5} for t in thoughts] + + @message_handler + async def handle_message(self, message: Message, ctx: MessageContext) -> None: + logger.info(f"Received task: {message.content}") + initial_prompt = self._system_messages + [UserMessage(content=message.content, source="user")] + tree = [[{"thought": "", "score": 0, "cumulative_score": 0}]] # Initialize the tree with an empty path + for depth in range(self._max_depth): + new_branches = [] + logger.info(f"Depth {depth+1}") + for path in tree: + # Build the prompt up to this point + prompt = initial_prompt.copy() + for node in path: + if node["thought"]: + prompt.append(AssistantMessage(content=node["thought"], source="assistant")) + # Generate thoughts + thoughts = await self.generate_thoughts(prompt, self._beam_width, ctx.cancellation_token) + logger.info(f"Generated thoughts: {thoughts}") + # Evaluate thoughts + evaluations = await self.evaluate_thoughts(thoughts, ctx) + logger.info(f"Evaluations: {evaluations}") + # Expand tree with evaluated thoughts + for eval in evaluations: + new_path = path + [{ + "thought": eval["thought"], + "score": eval["score"], + "cumulative_score": path[-1]["cumulative_score"] + eval["score"] + }] + new_branches.append(new_path) + # Select top-k paths based on cumulative_score + if not new_branches: + logger.info("No more branches to expand.") + break # No more thoughts to expand + # Sort paths by cumulative score + new_branches.sort(key=lambda p: p[-1]["cumulative_score"], reverse=True) + tree = new_branches[:self._beam_width] + # After reaching max depth, select the best path + best_path = tree[0] + final_answer = best_path[-1]["thought"] + logger.info(f"Final answer: {final_answer}") + # Publish the final answer to topic_id=TopicId(type="result", source="default") + await self.publish_message( + FinalAnswer(answer=final_answer), + topic_id=TopicId(type="result", source="default") + ) + + # Main function + async def main(): + # Create a queue to collect the final answer + queue = asyncio.Queue() + + async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None: + await queue.put(message) + + # Initialize runtime + runtime = SingleThreadedAgentRuntime() + + # Register TreeOfThoughtsAgent + await TreeOfThoughtsAgent.register( + runtime, + "TreeOfThoughtsAgent", + lambda: TreeOfThoughtsAgent(model_client) + ) + + # Register ClosureAgent + result_topic = TypeSubscription(topic_type="result", agent_type="output_result") + await ClosureAgent.register( + runtime, + "output_result", + output_result, + subscriptions=lambda: [result_topic] + ) + + # Start the runtime + runtime.start() + + # Publish initial message to TreeOfThoughtsAgent + await runtime.publish_message( + Message(content=task), + topic_id=DefaultTopicId() + ) + + # Wait until idle + await runtime.stop_when_idle() + + # Return the final answer + final_message = await queue.get() + return final_message.answer + + return 
asyncio.run(main()) +``` + ## Motivation The Automated Design of Agentic Systems (ADAS) [paper](https://arxiv.org/pdf/2408.08435) introduces a way to automatically create powerful agentic system designs. This is motivated by the observation that in the field of machine learning, hand-designed solutions are often replaced by learned solutions over time. From 0618a50080f625469e693ac7965e01b63132ead6 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 6 Dec 2024 14:12:30 -0500 Subject: [PATCH 09/21] Flatten APIs due to rebase --- .../autogen-core/samples/adas/README.md | 7 +- .../autogen-core/samples/adas/adas.py | 21 +++-- .../autogen-core/samples/adas/adas_prompt.py | 77 ++++++++----------- 3 files changed, 47 insertions(+), 58 deletions(-) diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/packages/autogen-core/samples/adas/README.md index 82739fb52623..f272628073cc 100644 --- a/python/packages/autogen-core/samples/adas/README.md +++ b/python/packages/autogen-core/samples/adas/README.md @@ -8,9 +8,8 @@ def forward(self, task, model_client_kwargs): import logging from dataclasses import dataclass from typing import List, Dict, Any - from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core import SingleThreadedAgentRuntime, default_subscription, RoutedAgent, message_handler, ClosureAgent, ClosureContext, TypeSubscription, DefaultTopicId from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId - from autogen_core.components import default_subscription, RoutedAgent, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId from autogen_core.components.models import ( ChatCompletionClient, SystemMessage, @@ -154,7 +153,7 @@ def forward(self, task, model_client_kwargs): # Create a queue to collect the final answer queue = asyncio.Queue() - async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None: await queue.put(message) # Initialize runtime @@ -169,7 +168,7 @@ def forward(self, task, model_client_kwargs): # Register ClosureAgent result_topic = TypeSubscription(topic_type="result", agent_type="output_result") - await ClosureAgent.register( + await ClosureAgent.register_closure( runtime, "output_result", output_result, diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py index 9f2e10bf8b3e..e00edb81b69e 100644 --- a/python/packages/autogen-core/samples/adas/adas.py +++ b/python/packages/autogen-core/samples/adas/adas.py @@ -21,9 +21,8 @@ import numpy as np from adas_prompt import get_init_archive, get_prompt, get_reflexion_prompt -from autogen_core.application import SingleThreadedAgentRuntime +from autogen_core import DefaultTopicId, RoutedAgent, SingleThreadedAgentRuntime, default_subscription, message_handler from autogen_core.base import MessageContext -from autogen_core.components import DefaultTopicId, RoutedAgent, default_subscription, message_handler from autogen_core.components.models import ( AssistantMessage, ChatCompletionClient, @@ -33,6 +32,7 @@ ) from autogen_ext.models import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from pydantic import BaseModel from tqdm import tqdm from utils import bootstrap_confidence_interval @@ -49,8 +49,7 @@ class ADASTask: task: str -@dataclass -class LLMMessageList: +class LLMMessageList(BaseModel): 
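+    # A Pydantic model (rather than a dataclass) so the List[LLMMessage] payload is
+    # validated and serialized when constructed as LLMMessageList(llm_message_list=...).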
llm_message_list: List[LLMMessage] @@ -158,7 +157,7 @@ def __init__(self, model_client: ChatCompletionClient, system_prompt: str, args, self._session_memory: Dict[str, List[ADASTask]] = {} self._system_messages: List[LLMMessage] = [ - # SystemMessage is not allowed in o1-preview API. + # SystemMessage is not allowed in o1-preview API. TODO: Accomodate o1 model # SystemMessage( AssistantMessage( content=system_prompt, @@ -230,7 +229,7 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None msg_list = [UserMessage(content=prompt, source=self.metadata["type"])] try: - response = await self.send_message(LLMMessageList(msg_list), self.id) + response = await self.send_message(LLMMessageList(llm_message_list=msg_list), self.id) next_solution = response.json_content ( reflexion_prompt_1, @@ -245,7 +244,7 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None AssistantMessage(content=str(next_solution), source=self.metadata["type"]), UserMessage(content=reflexion_prompt_1, source=self.metadata["type"]), ] - response = await self.send_message(LLMMessageList(new_messages), self.id) + response = await self.send_message(LLMMessageList(llm_message_list=new_messages), self.id) next_solution = response.json_content print(f"--After reflexion_prompt_1 {response}") @@ -254,7 +253,7 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None AssistantMessage(content=str(next_solution), source=self.metadata["type"]), UserMessage(content=reflexion_prompt_2, source=self.metadata["type"]), ] - response = await self.send_message(LLMMessageList(new_messages), self.id) + response = await self.send_message(LLMMessageList(llm_message_list=new_messages), self.id) next_solution = response.json_content print(f"--After reflexion_prompt_2 {next_solution}") @@ -263,7 +262,7 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None AssistantMessage(content=str(next_solution), source=self.metadata["type"]), UserMessage(content=reflexion_prompt_3, source=self.metadata["type"]), ] - response = await self.send_message(LLMMessageList(new_messages), self.id) + response = await self.send_message(LLMMessageList(llm_message_list=new_messages), self.id) next_solution = response.json_content print(f"--After reflexion_prompt_3 {next_solution}") @@ -272,7 +271,7 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None AssistantMessage(content=str(next_solution), source=self.metadata["type"]), UserMessage(content=reflexion_prompt_4, source=self.metadata["type"]), ] - response = await self.send_message(LLMMessageList(new_messages), self.id) + response = await self.send_message(LLMMessageList(llm_message_list=new_messages), self.id) next_solution = response.json_content print(f"--After reflexion_prompt_4 {next_solution}") except Exception as e: @@ -301,7 +300,7 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None ), ] try: - response = await self.send_message(LLMMessageList(new_messages), self.id) + response = await self.send_message(LLMMessageList(llm_message_list=new_messages), self.id) next_solution = response.json_content except Exception as e: print("During LLM generate new solution:") diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py index c3ef5ab9f1a4..82cedb10ce2f 100644 --- a/python/packages/autogen-core/samples/adas/adas_prompt.py +++ 
b/python/packages/autogen-core/samples/adas/adas_prompt.py @@ -64,9 +64,8 @@ def get_autogen_documentation(): import json from dataclasses import dataclass import sys - from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, ClosureContext, DefaultSubscription from autogen_core.base import AgentId, AgentRuntime, MessageContext - from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, DefaultSubscription from autogen_core.components.models import ( ChatCompletionClient, LLMMessage, @@ -139,7 +138,7 @@ async def main(): # Create a queue to collect final answer queue = asyncio.Queue[FinalResult]() - async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResult, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: FinalResult, ctx: MessageContext) -> None: await queue.put(message) # Initialize the agent runtime @@ -157,7 +156,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResul ) ) # Create closure agent to collect final output result - await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [DefaultSubscription()]) + await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [DefaultSubscription()]) # Start the runtime, and publish the first message runtime.start() @@ -183,9 +182,8 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResul import json from dataclasses import dataclass import sys - from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, ClosureContext, DefaultSubscription from autogen_core.base import AgentId, AgentRuntime, MessageContext - from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, DefaultSubscription from autogen_core.components.models import ( ChatCompletionClient, LLMMessage, @@ -252,7 +250,7 @@ async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerT system_prompt = "Given all the solutions, reason over them carefully and provide a final answer." system_prompt += "\\n" + "\\n\\n".join([f"{i+1}. {r}" for i, r in enumerate(message.previous_results)]) model_result = await self._model_client.create( - [SystemMessage(system_prompt), UserMessage(content=user_prompt, source="user")] + [SystemMessage(content=system_prompt), UserMessage(content=user_prompt, source="user")] ) else: # If no previous results are provided, we can simply pass the user query to the model. @@ -295,11 +293,10 @@ async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResu worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results]) # Perform final aggregation. print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation") - # system_prompt = "You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. 
Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:" system_prompt = "Given all the above solutions, reason over them carefully and provide a final answer." system_prompt += "\\n" + "\\n\\n".join([f"{i+1}. {r}" for i, r in enumerate(worker_task.previous_results)]) model_result = await self._model_client.create( - [SystemMessage(system_prompt), UserMessage(content=message.task, source="user")] + [SystemMessage(content=system_prompt), UserMessage(content=message.task, source="user")] ) assert isinstance(model_result.content, str) return FinalResult(result=model_result.content) @@ -347,7 +344,7 @@ async def main(): from dataclasses import dataclass from typing import Dict, List, Union from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime - from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription + from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, TypeSubscription, DefaultSubscription, ClosureAgent, ClosureContext, message_handler, default_subscription from autogen_core.components.models import ( AssistantMessage, ChatCompletionClient, @@ -355,8 +352,6 @@ async def main(): SystemMessage, UserMessage, ) - from autogen_core.application import SingleThreadedAgentRuntime - from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent from autogen_ext.models import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider @@ -592,7 +587,7 @@ async def handle_review_task(self, message: ReviewTask, ctx: MessageContext) -> async def main(): # Create a queue to collect final answer queue = asyncio.Queue[WritingResult]() - async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingResult, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: WritingResult, ctx: MessageContext) -> None: await queue.put(message) # Initialize the agent runtime @@ -608,7 +603,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes ) # Create closure agent to collect final output result result_topic = TypeSubscription(topic_type="result", agent_type="output_result") - await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) # Start the runtime, and publish the first message runtime.start() @@ -641,7 +636,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes from dataclasses import dataclass from typing import Dict, List, Union from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime - from autogen_core.components import RoutedAgent, default_subscription, message_handler, TypeSubscription + from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, default_subscription, message_handler, TypeSubscription, ClosureAgent, ClosureContext, DefaultTopicId from autogen_core.components.models import ( AssistantMessage, ChatCompletionClient, @@ -649,8 +644,6 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes SystemMessage, UserMessage, ) - from autogen_core.application import SingleThreadedAgentRuntime - from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler, ClosureAgent from autogen_ext.models 
import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider @@ -707,7 +700,7 @@ def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neig self._history: List[LLMMessage] = [] self._buffer: Dict[int, List[IntermediateSolverResponse]] = {} self._system_messages = [ - SystemMessage( + SystemMessage(content= ( "You are a helpful assistant with expertise in reasoning. " "Your task is to assist in solving a reasoning problem by providing " @@ -807,7 +800,7 @@ async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: # Define the main function to set up and run the agent system async def main(): queue = asyncio.Queue[Answer]() - async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageContext) -> None: await queue.put(message) runtime = SingleThreadedAgentRuntime() @@ -872,7 +865,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct # All solvers and the aggregator subscribe to the default topic. result_topic = TypeSubscription(topic_type="result", agent_type="output_result") - await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) runtime.start() await runtime.publish_message(Question(content=task), DefaultTopicId()) @@ -895,9 +888,8 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct import logging from dataclasses import dataclass from typing import List, Dict, Any - from autogen_core.application import SingleThreadedAgentRuntime + from autogen_core import SingleThreadedAgentRuntime, default_subscription, RoutedAgent, message_handler, ClosureAgent, ClosureContext, TypeSubscription, DefaultTopicId from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId - from autogen_core.components import default_subscription, RoutedAgent, message_handler, ClosureAgent, TypeSubscription, DefaultTopicId from autogen_core.components.models import ( ChatCompletionClient, SystemMessage, @@ -1034,7 +1026,7 @@ async def main(): # Create a queue to collect the final answer queue = asyncio.Queue[FinalAnswer]() - async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswer, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None: await queue.put(message) # Initialize runtime @@ -1049,7 +1041,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswe # Register ClosureAgent with agent key matching self.id.key (default is "default") result_topic = TypeSubscription(topic_type="result", agent_type="output_result") - await ClosureAgent.register( + await ClosureAgent.register_closure( runtime, "output_result", output_result, @@ -1075,7 +1067,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswe """, } -# TODO(yeandy): Take a Step Back currently not used as a seed in the archive. Refactor using the AutoGen API +# TODO: Take a Step Back currently not used as a seed in the archive. Refactor using the AutoGen API Take_a_step_back = { "thought": "Let LLM first think about the principles involved in solving this task which could be helpful. 
By understanding the underlying principles, the model can better reason through the problem and provide a more accurate solution.", "name": "Step-back Abstraction", @@ -1099,7 +1091,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswe """, } -# TODO(yeandy): QD currently not used as a seed in the archive. Refactor using the AutoGen API +# TODO: QD currently not used as a seed in the archive. Refactor using the AutoGen API QD = { "thought": "Similar to Quality-Diversity methods, let LLM generate multiple diverse interesting solutions could help. By encouraging the model to explore different reasoning paths, we can increase the chances of finding the best solution.", "name": "Quality-Diversity", @@ -1139,7 +1131,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalAnswe """, } -# TODO(yeandy): Role Assignment currently not used as a seed in the archive. Refactor using the AutoGen API +# TODO: Role Assignment currently not used as a seed in the archive. Refactor using the AutoGen API Role_Assignment = { "thought": "Similar to Auto-GPT and expert prompting, we can use dynamic control flow in the design to let the agent decide what expert we should use.", "name": "Dynamic Assignment of Roles", @@ -1244,7 +1236,7 @@ async def handle_writing_task(self, message: WritingTask, ctx: MessageContext) - async def main(): # Create a queue to collect final answer queue = asyncio.Queue[FinalResult]() - async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResult, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: FinalResult, ctx: MessageContext) -> None: await queue.put(message) runtime = SingleThreadedAgentRuntime() @@ -1256,7 +1248,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: FinalResul runtime, "WorkerAgent", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction) ) # Create closure agent to collect final output result - await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [DefaultSubscription()]) + await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [DefaultSubscription()]) runtime.start() await runtime.publish_message( @@ -1371,11 +1363,10 @@ async def handle_task(self, message: UserTask, ctx: MessageContext) -> None: worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results]) # Perform final aggregation. print(f"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation") - # system_prompt = "You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:" system_prompt = "Given all the above solutions, reason over them carefully and provide a final answer." system_prompt += "\\n" + "\\n\\n".join([f"{i+1}. 
{r}" for i, r in enumerate(worker_task.previous_results)]) model_result = await self._model_client.create( - [SystemMessage(system_prompt), UserMessage(content=message.task, source="user")] + [SystemMessage(content=system_prompt), UserMessage(content=message.task, source="user")] ) assert isinstance(model_result.content, str) return FinalResult(result=model_result.content) @@ -1414,36 +1405,36 @@ class Message: Publishing should be called with `self.publish_message()`. 7. This is WRONG: ``` -await ClosureAgent.register(runtime, "final_collection", collect_final_result, subscriptions=[TypeSubscription("consensus_result", "consensus_agent")]) +await ClosureAgent.register_closure(runtime, "final_collection", collect_final_result, subscriptions=[TypeSubscription("consensus_result", "consensus_agent")]) ``` The argument passed to `subscriptions` should not be a list. It should be a lambda function to a list. For example: ``` -await ClosureAgent.register(runtime, "final_collection", collect_final_result, subscriptions=lambda: [TypeSubscription("consensus_result", "consensus_agent")]) +await ClosureAgent.register_closure(runtime, "final_collection", collect_final_result, subscriptions=lambda: [TypeSubscription("consensus_result", "consensus_agent")]) ``` 8. This is WRONG: ``` async def main(): queue = asyncio.Queue[FinalDecision]() - async def output_result(_runtime, _id, message, _ctx): + async def output_result(_agent, message, _ctx): await queue.put(message) # Closure Agent for collecting results result_topic = TopicId("result", "output_result") await runtime.add_subscription(TypeSubscription("result", "output_result")) - await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) ``` The function `output_result` that the `ClosureAgent` must follow the following signature: ``` async def main(): - async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageContext) -> None: await queue.put(message) # Closure Agent for collecting results result_topic = TopicId("result", "output_result") await runtime.add_subscription(TypeSubscription("result", "output_result")) - await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) ``` where the type of `message` can be whatever dataclass is used by the agent publishing the final message. In this case, it is the `Answer` dataclass. -Additionally, the ClosureAgent MUST subscribe to a `result_topic` called `TopicId("result", "output_result")` using this line `await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])`. And the agent that publishes the final answer MUST publish to the `topic_id=TopicId("result", self.id.type)`. +Additionally, the ClosureAgent MUST subscribe to a `result_topic` called `TopicId("result", "output_result")` using this line `await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])`. And the agent that publishes the final answer MUST publish to the `topic_id=TopicId("result", self.id.type)`. 9. 
This is WRONG: ``` await runtime.publish_message(Task(content='What is the highest mountain in the world?'), topic_id=TypeSubscription("initial_task", "worker_agent").topic_id()) @@ -1572,7 +1563,7 @@ async def main(): async def main(): # Create a queue to collect final answer queue = asyncio.Queue[WritingResult]() - async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingResult, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: WritingResult, ctx: MessageContext) -> None: await queue.put(message) # Initialize the agent runtime @@ -1582,7 +1573,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: WritingRes # Create closure agent to collect final output result result_topic = TypeSubscription(topic_type="result", agent_type="output_result") - await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) # Start the runtime, and publish the first message runtime.start() @@ -1614,7 +1605,7 @@ async def handle_response(self, message: SolverResponse, ctx: MessageContext) -> async def main(): queue = asyncio.Queue[Answer]() - async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ctx: MessageContext) -> None: + async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageContext) -> None: await queue.put(message) runtime = SingleThreadedAgentRuntime() @@ -1625,7 +1616,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct await Coordinator.register(runtime, "coordinator", lambda: Coordinator(num_solvers=3)) result_topic = TypeSubscription(topic_type="result", agent_type="output_result") - await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) + await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic]) runtime.start() await runtime.publish_message(Question(content=task, reasoning_type='general'), DefaultTopicId()) @@ -1635,7 +1626,7 @@ async def output_result(_runtime: AgentRuntime, id: AgentId, message: Answer, ct return (await queue.get()).content ``` The `Coordinator` agent is publishing the final message. It publishes to the topic_id object `TopicId('result', self.id.type)`, where the `type` is `result`, and the `source` is `self.id.type`. This matches the result topic `TypeSubscription(topic_type="result", agent_type="output_result")`, which the `ClosureAgent` subscribes to. Importantly, the `topic_type="result"` matches the topic type "result" used in `publish_message` by the Coordinator agent. -In other words, the ClosureAgent MUST subscribe to a `result_topic` called `TopicId("result", "output_result")` using this line `await ClosureAgent.register(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])`. And the agent that publishes the final answer MUST publish to the `topic_id=TopicId("result", self.id.type)`. +In other words, the ClosureAgent MUST subscribe to a `result_topic` called `TopicId("result", "output_result")` using this line `await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])`. And the agent that publishes the final answer MUST publish to the `topic_id=TopicId("result", self.id.type)`. 
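+
+A minimal sketch of this matching publish/subscribe pair (assuming the final message is an `Answer` dataclass):
+```
+# Inside the agent that produces the final answer:
+await self.publish_message(Answer(content=final_answer), topic_id=TopicId("result", self.id.type))
+
+# In main(), the collecting side that the ClosureAgent wraps:
+result_topic = TypeSubscription(topic_type="result", agent_type="output_result")
+await ClosureAgent.register_closure(runtime, "output_result", output_result, subscriptions=lambda: [result_topic])
+```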
## Documentation [DOCUMENTATION] From f5736eb297b164a2cba917ee68e3e921bd80a07f Mon Sep 17 00:00:00 2001 From: root Date: Mon, 9 Dec 2024 11:40:00 -0500 Subject: [PATCH 10/21] Update instructions --- python/packages/autogen-core/samples/adas/README.md | 8 +++++--- python/packages/autogen-core/samples/adas/adas_prompt.py | 1 - .../packages/autogen-core/samples/adas/requirements.txt | 4 ++++ 3 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 python/packages/autogen-core/samples/adas/requirements.txt diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/packages/autogen-core/samples/adas/README.md index f272628073cc..1073730882ae 100644 --- a/python/packages/autogen-core/samples/adas/README.md +++ b/python/packages/autogen-core/samples/adas/README.md @@ -294,12 +294,14 @@ source .venv/bin/activate # Recommended to perform a demo, though not required if you do not plan on evaluating with DROP, and intend to run with your own dataset / benchmark. git clone https://github.com/ShengranHu/ADAS.git && cd .. -# Install package at latest dev tag -pip install 'autogen-core==0.4.0.dev7' - # Clone the AutoGen package, and switch to branch with the adas script. git clone -b yeandy_adas https://github.com/yeandy/autogen.git cd autogen/python + +# Install autogen-core and autogen-ext in editable mode +pip install -e packages/autogen-core +pip install -e packages/autogen-ext +pip install -r packages/autogen-core/samples/adas/requirements.txt ``` ### Agent System code definitions diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py index 82cedb10ce2f..0d1f8d638d4c 100644 --- a/python/packages/autogen-core/samples/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/adas/adas_prompt.py @@ -35,7 +35,6 @@ def print_repo_contents(repo, path="", indent=""): return documentation -# TODO: pip install pygithub def get_autogen_documentation(): repo_name = "microsoft/autogen" directory_name = "python/packages/autogen-core/docs/src/user-guide/core-user-guide" diff --git a/python/packages/autogen-core/samples/adas/requirements.txt b/python/packages/autogen-core/samples/adas/requirements.txt new file mode 100644 index 000000000000..8542771ca293 --- /dev/null +++ b/python/packages/autogen-core/samples/adas/requirements.txt @@ -0,0 +1,4 @@ +azure-identity +numpy +pygithub +scipy \ No newline at end of file From 08e0d18ee851f3af13c09af026176454127c28aa Mon Sep 17 00:00:00 2001 From: root Date: Mon, 9 Dec 2024 12:19:23 -0500 Subject: [PATCH 11/21] Lint --- .../autogen-core/samples/adas/adas_prompt.py | 325 +++++++++--------- ...o_base_agent_results_run_archive_run2.json | 90 +++++ .../autogen-core/samples/adas/utils_drop.py | 2 +- 3 files changed, 254 insertions(+), 163 deletions(-) create mode 100644 python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py index 0d1f8d638d4c..edf5123bbc21 100644 --- a/python/packages/autogen-core/samples/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/adas/adas_prompt.py @@ -617,7 +617,7 @@ async def output_result(_agent: ClosureContext, message: WritingResult, ctx: Mes # Return the first answer from the queue print(f"queue {queue}") return (await queue.get()).answer - + return asyncio.run(main()) ''', } @@ -660,7 +660,7 @@ async def output_result(_agent: 
ClosureContext, message: WritingResult, ctx: Mes "json_output": True, }, ) - + @dataclass class Question: content: str @@ -898,11 +898,11 @@ async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageCon ) from autogen_ext.models import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider - from autogen_core.application.logging import TRACE_LOGGER_NAME + from autogen_core.application.logging import TRACE_LOGGER_NAME - # Configure logging as per documentation - logging.basicConfig(level=logging.WARNING) - logger = logging.getLogger(TRACE_LOGGER_NAME) + # Configure logging as per documentation + logging.basicConfig(level=logging.WARNING) + logger = logging.getLogger(TRACE_LOGGER_NAME) logger.setLevel(logging.INFO) token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") @@ -918,149 +918,149 @@ async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageCon "json_output": True, }, ) - - @dataclass - class Message: - content: str - - @dataclass - class FinalAnswer: - answer: str + + @dataclass + class Message: + content: str + + @dataclass + class FinalAnswer: + answer: str @default_subscription - class TreeOfThoughtsAgent(RoutedAgent): - def __init__(self, model_client: ChatCompletionClient, max_depth: int = 3, beam_width: int = 3): - super().__init__("TreeOfThoughtsAgent") - self._model_client = model_client - self._max_depth = max_depth - self._beam_width = beam_width - self._system_messages = [ - SystemMessage( - content="You are a helpful assistant who reasons step-by-step to solve complex problems.") - ] - - async def generate_thoughts(self, prompt: List[LLMMessage], num_thoughts: int, cancellation_token) -> List[str]: - # Generate multiple thoughts using the model - thoughts = [] - # Create multiple async tasks to generate thoughts in parallel - tasks = [] - for _ in range(num_thoughts): - tasks.append(self._model_client.create( - prompt, + class TreeOfThoughtsAgent(RoutedAgent): + def __init__(self, model_client: ChatCompletionClient, max_depth: int = 3, beam_width: int = 3): + super().__init__("TreeOfThoughtsAgent") + self._model_client = model_client + self._max_depth = max_depth + self._beam_width = beam_width + self._system_messages = [ + SystemMessage( + content="You are a helpful assistant who reasons step-by-step to solve complex problems.") + ] + + async def generate_thoughts(self, prompt: List[LLMMessage], num_thoughts: int, cancellation_token) -> List[str]: + # Generate multiple thoughts using the model + thoughts = [] + # Create multiple async tasks to generate thoughts in parallel + tasks = [] + for _ in range(num_thoughts): + tasks.append(self._model_client.create( + prompt, extra_create_args={"temperature": 1.0}, - cancellation_token=cancellation_token, - )) - responses = await asyncio.gather(*tasks) - for response in responses: - thoughts.append(response.content.strip()) - return thoughts - - async def evaluate_thoughts(self, thoughts: List[str], ctx: MessageContext) -> List[str]: - # Batch evaluation of thoughts - eval_prompt = [ - SystemMessage(content="You are an assistant that evaluates reasoning steps for solving a problem."), - UserMessage( - content=f"Evaluate the following thoughts for their usefulness in solving the problem. Rank them from most useful to least useful and provide the rankings.\\n\\nThoughts:\\n" + "\\n".join( - [f"{i+1}. 
{t}" for i, t in enumerate(thoughts)]), - source="user" - ) - ] - eval_response = await self._model_client.create( - eval_prompt, - cancellation_token=ctx.cancellation_token, - ) - # Parse the response to extract rankings - rankings_text = eval_response.content.strip() - # For simplicity, assume the model outputs the rankings as a list of numbers - rankings = [] - for line in rankings_text.split('\\n'): - line = line.strip() - if line and line[0].isdigit(): - rankings.append(int(line[0]) - 1) # Subtract 1 to get index - # Select top-k thoughts - best_thoughts = [thoughts[i] for i in rankings[:self._beam_width]] - return best_thoughts - - @message_handler - async def handle_message(self, message: Message, ctx: MessageContext) -> None: - logger.info(f"Received task: {message.content}") - initial_prompt = self._system_messages + [UserMessage(content=message.content, source="user")] - tree = [[]] # Initialize the tree with an empty path - for depth in range(self._max_depth): - new_branches = [] - logger.info(f"Depth {depth+1}") - for path in tree: - # Build the prompt up to this point - prompt = initial_prompt.copy() - for thought in path: - prompt.append(AssistantMessage(content=thought, source="assistant")) - # Generate thoughts - thoughts = await self.generate_thoughts(prompt, self._beam_width, ctx.cancellation_token) - logger.info(f"Generated thoughts: {thoughts}") - # Evaluate thoughts - best_thoughts = await self.evaluate_thoughts(thoughts, ctx) - logger.info(f"Best thoughts: {best_thoughts}") - # Expand tree with best thoughts - for thought in best_thoughts: - new_path = path + [thought] - new_branches.append(new_path) - # Update tree with new branches - if not new_branches: - logger.info("No more branches to expand.") - break # No more thoughts to expand - tree = new_branches - # After reaching max depth, select the best path - # For simplicity, select the first path - best_path = tree[0] - final_answer = best_path[-1] - logger.info(f"Final answer: {final_answer}") - # Publish the final answer - await self.publish_message( - FinalAnswer(answer=final_answer), - topic_id=TopicId(type="result", source=self.id.key) - ) - - # Main function - async def main(): - # Create a queue to collect the final answer - queue = asyncio.Queue[FinalAnswer]() - - async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None: - await queue.put(message) - - # Initialize runtime - runtime = SingleThreadedAgentRuntime() - - # Register TreeOfThoughtsAgent - await TreeOfThoughtsAgent.register( - runtime, - "TreeOfThoughtsAgent", - lambda: TreeOfThoughtsAgent(model_client) - ) - - # Register ClosureAgent with agent key matching self.id.key (default is "default") - result_topic = TypeSubscription(topic_type="result", agent_type="output_result") - await ClosureAgent.register_closure( - runtime, - "output_result", - output_result, - subscriptions=lambda: [result_topic] - ) - - # Start the runtime - runtime.start() - + cancellation_token=cancellation_token, + )) + responses = await asyncio.gather(*tasks) + for response in responses: + thoughts.append(response.content.strip()) + return thoughts + + async def evaluate_thoughts(self, thoughts: List[str], ctx: MessageContext) -> List[str]: + # Batch evaluation of thoughts + eval_prompt = [ + SystemMessage(content="You are an assistant that evaluates reasoning steps for solving a problem."), + UserMessage( + content=f"Evaluate the following thoughts for their usefulness in solving the problem. 
Rank them from most useful to least useful and provide the rankings.\\n\\nThoughts:\\n" + "\\n".join( + [f"{i+1}. {t}" for i, t in enumerate(thoughts)]), + source="user" + ) + ] + eval_response = await self._model_client.create( + eval_prompt, + cancellation_token=ctx.cancellation_token, + ) + # Parse the response to extract rankings + rankings_text = eval_response.content.strip() + # For simplicity, assume the model outputs the rankings as a list of numbers + rankings = [] + for line in rankings_text.split('\\n'): + line = line.strip() + if line and line[0].isdigit(): + rankings.append(int(line[0]) - 1) # Subtract 1 to get index + # Select top-k thoughts + best_thoughts = [thoughts[i] for i in rankings[:self._beam_width]] + return best_thoughts + + @message_handler + async def handle_message(self, message: Message, ctx: MessageContext) -> None: + logger.info(f"Received task: {message.content}") + initial_prompt = self._system_messages + [UserMessage(content=message.content, source="user")] + tree = [[]] # Initialize the tree with an empty path + for depth in range(self._max_depth): + new_branches = [] + logger.info(f"Depth {depth+1}") + for path in tree: + # Build the prompt up to this point + prompt = initial_prompt.copy() + for thought in path: + prompt.append(AssistantMessage(content=thought, source="assistant")) + # Generate thoughts + thoughts = await self.generate_thoughts(prompt, self._beam_width, ctx.cancellation_token) + logger.info(f"Generated thoughts: {thoughts}") + # Evaluate thoughts + best_thoughts = await self.evaluate_thoughts(thoughts, ctx) + logger.info(f"Best thoughts: {best_thoughts}") + # Expand tree with best thoughts + for thought in best_thoughts: + new_path = path + [thought] + new_branches.append(new_path) + # Update tree with new branches + if not new_branches: + logger.info("No more branches to expand.") + break # No more thoughts to expand + tree = new_branches + # After reaching max depth, select the best path + # For simplicity, select the first path + best_path = tree[0] + final_answer = best_path[-1] + logger.info(f"Final answer: {final_answer}") + # Publish the final answer + await self.publish_message( + FinalAnswer(answer=final_answer), + topic_id=TopicId(type="result", source=self.id.key) + ) + + # Main function + async def main(): + # Create a queue to collect the final answer + queue = asyncio.Queue[FinalAnswer]() + + async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None: + await queue.put(message) + + # Initialize runtime + runtime = SingleThreadedAgentRuntime() + + # Register TreeOfThoughtsAgent + await TreeOfThoughtsAgent.register( + runtime, + "TreeOfThoughtsAgent", + lambda: TreeOfThoughtsAgent(model_client) + ) + + # Register ClosureAgent with agent key matching self.id.key (default is "default") + result_topic = TypeSubscription(topic_type="result", agent_type="output_result") + await ClosureAgent.register_closure( + runtime, + "output_result", + output_result, + subscriptions=lambda: [result_topic] + ) + + # Start the runtime + runtime.start() + # Publish initial message to TreeOfThoughtsAgent - await runtime.publish_message( - Message(content=task), - topic_id=DefaultTopicId() - ) - - # Wait until idle - await runtime.stop_when_idle() - - # Return the final answer - final_message = await queue.get() + await runtime.publish_message( + Message(content=task), + topic_id=DefaultTopicId() + ) + + # Wait until idle + await runtime.stop_when_idle() + + # Return the final answer + final_message = await 
queue.get() return final_message.answer return asyncio.run(main()) """, @@ -1073,14 +1073,14 @@ async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: Messa "code": """def forward(self, taskInfo): # Instruction for understanding the principles involved in the task principle_instruction = "What are the physics, chemistry or biology principles and concepts involved in solving this task? First think step by step. Then list all involved principles and explain them." - + # Instruction for solving the task based on the principles cot_instruction = "Given the question and the involved principle behind the question, think step by step and then solve the task." - + # Instantiate LLM agents principle_agent = LLMAgentBase(['thinking', 'principle'], 'Principle Agent') cot_agent = LLMAgentBase(['thinking', 'answer'], 'Chain-of-Thought Agent') - + # Get the principles involved in the task thinking, principle = principle_agent([taskInfo], principle_instruction) @@ -1105,7 +1105,7 @@ async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: Messa # Instruction for final decision-making based on collected reasoning and answers final_decision_instruction = "Given all the above solutions, reason over them carefully and provide a final answer." final_decision_agent = LLMAgentBase(['thinking', 'answer'], 'Final Decision Agent', temperature=0.1) - + N_max = 3 # Maximum number of attempts # Initial attempt @@ -1172,7 +1172,7 @@ async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: Messa Passage: Non-nationals make up more than half of the population of Bahrain, with immigrants making up about 55% of the overall population. Of those, the vast majority come from South and Southeast Asia: according to various media reports and government statistics dated between 2005-2009 roughly 290,000 Indians, 125,000 Bangladeshis, 45,000 Pakistanis, 45,000 Filipinos, and 8,000 Indonesians.\nQuestion: What two nationalities had the same number of people living in Bahrain between 2005-2009? Answer [Not Given]: -Pakistanis and Filipinos +Pakistanis and Filipinos # The utility code: @@ -1188,11 +1188,11 @@ class AgentArchitecture: def forward(self, task, model_client_kwargs) -> str: \""" Placeholder method for processing task information. - + Args: - task (Info): Task information. - model_client_kwargs (Dict): Information for the AzureOpenAIChatCompletionClient - + Returns: - Answer (str): Your FINAL Answer. Return a string of answers. \""" @@ -1207,14 +1207,14 @@ def forward(self, task, model_client_kwargs) -> str: # Output Instruction and Example: The first key should be ("thought"), and it should capture your thought process for designing the next function. In the "thought" section, first reason about what should be the next interesting agent to try, then describe your reasoning and the overall concept behind the agent design, and finally detail the implementation steps. Make sure to talk about the agent(s) that are supposted to start and end the system. -The second key ("name") corresponds to the name of your next agent architecture. +The second key ("name") corresponds to the name of your next agent architecture. The last key ("code") corresponds to the exact “forward()” function in Python code that you would like to try. You must write a COMPLETE CODE in "code": Your code will be part of the entire project, so please implement complete, reliable, reusable code snippets. 
Here is an example of the output format for the next agent architecture: [EXAMPLE]
You must use the exact function interface used above. You need to specify the instruction, input information, and the required output fields for various LLM agents to do their specific part of the architecture.
Also, it could be helpful to set the LLM’s role to further control the LLM’s response. DO NOT FORGET the `task` input to LLM if you think it is needed, otherwise LLM will not know about the task.
@@ -1370,7 +1370,7 @@ async def handle_task(self, message: UserTask, ctx: MessageContext) -> None:
    assert isinstance(model_result.content, str)
    return FinalResult(result=model_result.content)
```
Directly returning a message dataclass `FinalResult` requires setting the return type of the `handle_task` function to return `FinalResult`. Example: `async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:`.
5. This is WRONG:
```
# Main orchestration
async def main():
    await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent())
@@ -1458,7 +1458,7 @@ async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageCon
11. This is WRONG:
```
class OrchestratorAgent(RoutedAgent): pass

async def main():
    await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent())
@@ -1483,7 +1483,7 @@ async def main():
@type_subscription(topic_type="orchestrator_type")
class OrchestratorAgent(RoutedAgent): pass

async def main():
    await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent())
@@ -1497,7 +1497,7 @@ async def main():
12. This is WRONG:
```
class OrchestratorAgent(RoutedAgent): pass

async def main():
    await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent())
@@ -1512,7 +1512,7 @@ async def main():
@default_subscription
class OrchestratorAgent(RoutedAgent): pass

async def main():
    await OrchestratorAgent.register(runtime, "orchestrator", lambda: OrchestratorAgent())
@@ -1584,7 +1584,7 @@ async def output_result(_agent: ClosureContext, message: WritingResult, ctx: Mes
    # Return the first answer from the queue
    print(f"queue {queue}")
    return (await queue.get()).answer

    return asyncio.run(main())
```
This is the format for the `main` function. Make sure that when creating a `ClosureAgent`, you have created `queue` from which you can call `return (await queue.get()).answer` at the very end of the `main` function. The datatype of the Queue should be the final message that the agent system publishes to indicate that the system is terminating.
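To make that queue/terminal-message pairing concrete, here is a minimal sketch under the same conventions; `FinalAnswer` stands in for whatever terminal dataclass the generated system publishes:
```
import asyncio
from dataclasses import dataclass

@dataclass
class FinalAnswer:
    answer: str

# Parameterize the queue with the terminal message type, so the closure
# can only enqueue that type and `(await queue.get()).answer` is
# well-typed at the very end of `main`.
queue = asyncio.Queue[FinalAnswer]()

async def output_result(_agent, message: FinalAnswer, ctx) -> None:
    await queue.put(message)
```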
@@ -1702,7 +1702,8 @@ def get_init_archive(): ] # TODO: Take_a_step_back, QD, Role_Assignment -def get_prompt(current_archive, adaptive=False): +# from typing import tuple +def get_prompt(current_archive, adaptive=False) -> tuple[str, str]: archive_str = ",\n".join([json.dumps(sol) for sol in current_archive]) archive_str = f"[{archive_str}]" prompt = base.replace("[ARCHIVE]", archive_str) @@ -1711,7 +1712,7 @@ def get_prompt(current_archive, adaptive=False): return system_prompt, prompt -def get_reflexion_prompt(prev_example): +def get_reflexion_prompt(prev_example) -> tuple[str, str, str, str]: prev_example_str = "Here is the previous agent you tried:\n" + json.dumps(prev_example) + "\n\n" r1 = ( Reflexion_prompt_1.replace("[EXAMPLE]", prev_example_str) diff --git a/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json b/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json new file mode 100644 index 000000000000..cb68e49e28c4 --- /dev/null +++ b/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json @@ -0,0 +1,90 @@ +[ + { + "thought": "By encouraging the LLM to think step by step rather than directly outputting an answer, chain-of-thought reasoning enables complex problem-solving through intermediate steps. This practice improves the model's ability to handle tasks that require deeper reasoning and provides insight into its decision-making process.", + "name": "Chain-of-Thought", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n import json\n from dataclasses import dataclass\n import sys\n from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, ClosureContext, DefaultSubscription\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components.models import (\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from typing import List\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n # Define message types as data classes\n @dataclass\n class ChainOfThoughtTask:\n task: str\n\n\n @dataclass\n class FinalResult:\n result: str\n\n\n # Define the Chain-of-Thought Agent\n class ChainOfThoughtAgent(RoutedAgent):\n def __init__(self, description: str,\n model_client: ChatCompletionClient,\n system_prompt: str,\n instruction: str,\n ) -> None:\n super().__init__(description)\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=system_prompt,\n )\n ]\n self._model_client = model_client\n self._instruction = instruction\n\n @message_handler\n async def handle_task(self, message: ChainOfThoughtTask, ctx: MessageContext) -> None:\n\n logging.info(f\"{self._description} received message: {message.task}\")\n user_prompt = 
message.task + \"\\n\" + self._instruction\n msgs = self._system_messages + [UserMessage(content=user_prompt, source=self.metadata[\"type\"])]\n model_result = await self._model_client.create(msgs)\n assert isinstance(model_result.content, str)\n\n await self.publish_message(\n message=FinalResult(model_result.content),\n topic_id=DefaultTopicId(),\n )\n\n\n # Define the main function to set up and run the agent system\n async def main():\n\n # Create a queue to collect final answer\n queue = asyncio.Queue[FinalResult]()\n async def output_result(_agent: ClosureContext, message: FinalResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Create the chain-of-thought agent\n agent_id = AgentId(\"COTAgent\", \"default\")\n cot_instruction = \"Please think step by step and then solve the task.\"\n await ChainOfThoughtAgent.register(\n runtime, \"COTAgent\", lambda: ChainOfThoughtAgent(\n description='Chain-of-Thought Agent',\n model_client=model_client,\n system_prompt=\"You are a helpful assistant. Directly answer the question. Keep it very concise.\",\n instruction=cot_instruction,\n )\n )\n # Create closure agent to collect final output result\n await ClosureAgent.register_closure(runtime, \"output_result\", output_result, subscriptions=lambda: [DefaultSubscription()])\n\n # Start the runtime, and publish the first message\n runtime.start()\n initial_message = ChainOfThoughtTask(task=task)\n await runtime.send_message(initial_message, agent_id) # publish_message\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the first answer from the queue\n return (await queue.get()).result\n\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (0.0%, 7.8%), Median: 26.8%" + }, + { + "thought": "While an LLM can arrive at the correct answer, its reasoning may vary. By repeatedly asking the same question with high temperature settings, we can generate different reasoning paths. 
We then combine multiple answers from these Chain-of-Thought (CoT) agents to produce a more accurate final answer through ensembling.", + "name": "Self-Consistency with Chain-of-Thought", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n import json\n from dataclasses import dataclass\n import sys\n from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, ClosureContext, DefaultSubscription\n from autogen_core.base import AgentId, AgentRuntime, MessageContext\n from autogen_core.components.models import (\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from typing import List\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class WorkerTask:\n task: str\n previous_results: List[str]\n\n\n @dataclass\n class WorkerTaskResult:\n result: str\n\n\n @dataclass\n class UserTask:\n task: str\n\n\n @dataclass\n class FinalResult:\n result: str\n\n\n class WorkerAgent(RoutedAgent):\n def __init__(\n self,\n model_client: ChatCompletionClient,\n instruction: str,\n ) -> None:\n super().__init__(description=\"Worker Agent\")\n self._model_client = model_client\n self._instruction = instruction\n\n @message_handler\n async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n user_prompt = message.task + \"\\n\" + self._instruction\n\n if message.previous_results:\n # If previous results are provided, we need to synthesize them to create a single prompt.\n # system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n system_prompt = \"Given all the solutions, reason over them carefully and provide a final answer.\"\n system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(message.previous_results)])\n model_result = await self._model_client.create(\n [SystemMessage(content=system_prompt), UserMessage(content=user_prompt, source=\"user\")]\n )\n else:\n # If no previous results are provided, we can simply pass the user query to the model.\n model_result = await self._model_client.create([UserMessage(content=user_prompt, source=\"user\")])\n assert isinstance(model_result.content, str)\n print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n return WorkerTaskResult(result=model_result.content)\n\n\n class OrchestratorAgent(RoutedAgent):\n def __init__(\n self,\n model_client: ChatCompletionClient,\n worker_agent_types: List[str],\n num_layers: int,\n ) -> None:\n super().__init__(description=\"Aggregator Agent\")\n self._model_client = model_client\n self._worker_agent_types = worker_agent_types\n self._num_layers = num_layers\n\n\n @message_handler\n async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n # Create task for the first layer.\n worker_task = WorkerTask(task=message.task, previous_results=[])\n # Iterate over layers.\n for i in range(self._num_layers):\n # Assign workers for this layer.\n worker_ids = [\n AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n for j, worker_type in enumerate(self._worker_agent_types)\n ]\n # Dispatch tasks to workers.\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n # Prepare task for the next layer.\n worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n # Perform final aggregation.\n print(\"6666\")\n print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n print(\"1234\")\n system_prompt = \"Given all the above solutions, reason over them carefully and provide a final answer.\"\n print(\"aaasdf\")\n system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(worker_task.previous_results)])\n print(\"aasdf\")\n model_result = await self._model_client.create(\n [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n )\n print(\"asdf\")\n assert isinstance(model_result.content, str)\n return FinalResult(result=model_result.content)\n # await self.publish_message(\n # message=FinalResult(result=model_result.content),\n # topic_id=DefaultTopicId(),\n # )\n \n # Define the main function to set up and run the agent system\n async def main():\n # Create a queue to collect final answer\n queue = asyncio.Queue[FinalResult]()\n async def output_result(_agent: ClosureContext, message: FinalResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Create the agents\n cot_instruction = \"Please think step by step and then solve the task.\"\n await WorkerAgent.register(\n runtime, \"worker\", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction)\n )\n await OrchestratorAgent.register(\n runtime,\n \"orchestrator\",\n lambda: OrchestratorAgent(\n model_client=model_client, worker_agent_types=[\"worker\"] * 5, num_layers=1\n ),\n )\n # Create closure agent to collect final output result\n await ClosureAgent.register_closure(runtime, \"output_result\", output_result, subscriptions=lambda: [DefaultSubscription()])\n\n # Start the runtime, and publish the first message\n runtime.start()\n result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n\n # Return the result\n print(\"fdsa\")\n return result.result\n\n \n\n\n \n # await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n # await runtime.stop_when_idle()\n\n # # Return the first answer from the queue\n # print(\"final\")\n # return (await queue.get()).result\n\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (3.0%, 9.3%), Median: 29.4%" + }, + { + "thought": "To enhance its performance, an LLM can iteratively improve its answer based on feedback. 
By reflecting on its previous attempts and incorporating feedback, the model can refine its reasoning and provide a more accurate solution.", + "name": "Self-Refine (Reflexion)", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n import re\n import sys\n import uuid\n from dataclasses import dataclass\n from typing import Dict, List, Union\n from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime\n from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, TypeSubscription, DefaultSubscription, ClosureAgent, ClosureContext, message_handler, default_subscription\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class WritingTask:\n task: str\n\n\n @dataclass\n class WritingResult:\n task: str\n answer: str\n review: str\n\n\n @dataclass\n class ReviewTask:\n session_id: str\n writing_task: str\n answer_scratchpad: str\n answer: str\n\n\n @dataclass\n class ReviewResult:\n review: str\n session_id: str\n approved: bool\n\n\n @default_subscription\n class WorkerAgent(RoutedAgent):\n \"An agent that performs writing tasks.\"\n\n def __init__(self,\n model_client: ChatCompletionClient,\n instruction: str,\n ) -> None:\n super().__init__(\"A helpful assistant\")\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=\"\"\"You are a helpful assistant. Work with the critic to improve your answer.\n Make sure to directly answer the question. 
Keep it very concise.\n Respond using the following format:\n\n Thoughts: \n Answer: \n \"\"\",\n )\n ]\n self._model_client = model_client\n self._session_memory: Dict[str, List[WritingTask | ReviewTask | ReviewResult]] = {}\n self._instruction = instruction\n\n @message_handler\n async def handle_writing_task(self, message: WritingTask, ctx: MessageContext) -> None:\n # Store the messages in a temporary memory for this request only.\n session_id = str(uuid.uuid4())\n self._session_memory.setdefault(session_id, []).append(message)\n # Generate a response using the chat completion API.\n response = await self._model_client.create(\n self._system_messages + [UserMessage(content=message.task + self._instruction, source=self.metadata[\"type\"])],\n cancellation_token=ctx.cancellation_token,\n )\n assert isinstance(response.content, str)\n # Extract the answer from the response.\n answer = self._extract_answer(response.content)\n # Create a review task.\n review_task = ReviewTask(\n session_id=session_id,\n writing_task=message.task,\n answer_scratchpad=response.content,\n answer=answer,\n )\n # Store the review task in the session memory.\n self._session_memory[session_id].append(review_task)\n # Publish a review task.\n await self.publish_message(review_task, topic_id=TopicId(\"default\", self.id.key))\n\n @message_handler\n async def handle_review_result(self, message: ReviewResult, ctx: MessageContext) -> None:\n # Store the review result in the session memory.\n self._session_memory[message.session_id].append(message)\n # Obtain the request from previous messages.\n review_request = next(\n m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, ReviewTask)\n )\n assert review_request is not None\n # Check if the is approved.\n if message.approved:\n # Publish the writing result.\n await self.publish_message(\n WritingResult(\n answer=review_request.answer,\n task=review_request.writing_task,\n review=message.review,\n ),\n topic_id=TopicId(\"result\", self.id.key),\n )\n print(\"Writing Result:\")\n print(\"-\" * 80)\n print(f\"Task:\\n{review_request.writing_task}\")\n print(\"-\" * 80)\n print(f\"Answer:\\n{review_request.answer}\")\n print(\"-\" * 80)\n print(f\"Review:\\n{message.review}\")\n print(\"-\" * 80)\n else:\n # Create a list of LLM messages to send to the model.\n messages: List[LLMMessage] = [*self._system_messages]\n for m in self._session_memory[message.session_id]:\n if isinstance(m, ReviewResult):\n messages.append(UserMessage(content=m.review, source=\"Reviewer\"))\n elif isinstance(m, ReviewTask):\n messages.append(AssistantMessage(content=m.answer_scratchpad, source=\"Worker\"))\n elif isinstance(m, WritingTask):\n messages.append(UserMessage(content=m.task, source=\"User\"))\n else:\n raise ValueError(f\"Unexpected message type: {m}\")\n # Generate a revision using the chat completion API.\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n # Extract the answer from the response.\n answer = self._extract_answer(response.content)\n # Create a new review task.\n review_task = ReviewTask(\n session_id=message.session_id,\n writing_task=review_request.writing_task,\n answer_scratchpad=response.content,\n answer=answer,\n )\n # Store the review task in the session memory.\n self._session_memory[message.session_id].append(review_task)\n # Publish a new review task.\n await self.publish_message(review_task, topic_id=TopicId(\"default\", self.id.key))\n\n\n 
def _extract_answer(self, text: str) -> Union[str, None]:\n pattern = \"(?<=Answer: ).*\"\n # Search for the pattern in the markdown text\n match = re.search(pattern, text, re.DOTALL)\n # Extract the language and code block if a match is found\n if match:\n return match.group(0)\n return None\n\n @default_subscription\n class ReviewerAgent(RoutedAgent):\n \"\"\"An agent that critiques tasks.\"\"\"\n\n def __init__(self, model_client: ChatCompletionClient) -> None:\n super().__init__(\"A critic agent.\")\n self._system_messages: List[LLMMessage] = [\n SystemMessage(\n content=\"\"\"You are a critic. Review answers and criticize on where it might be wrong.\n Respond using the following JSON format:\n {\n \"correctness\": \"\",\n \"approval\": \"\",\n \"suggested_changes\": \"\"\n }\n \"\"\",\n )\n ]\n self._session_memory: Dict[str, List[ReviewTask | ReviewResult]] = {}\n self._model_client = model_client\n\n @message_handler\n async def handle_review_task(self, message: ReviewTask, ctx: MessageContext) -> None:\n # Format the prompt for the review.\n # Gather the previous feedback if available.\n previous_feedback = \"\"\n if message.session_id in self._session_memory:\n previous_review = next(\n (m for m in reversed(self._session_memory[message.session_id]) if isinstance(m, ReviewResult)),\n None,\n )\n if previous_review is not None:\n previous_feedback = previous_review.review\n # Store the messages in a temporary memory for this request only.\n self._session_memory.setdefault(message.session_id, []).append(message)\n prompt = f\"\"\"The problem statement is: {message.writing_task}\n The answer is:\n ```\n {message.answer}\n ```\n\n Previous feedback:\n {previous_feedback}\n\n Please review the answer. If previous feedback was provided, see if it was addressed.\n \"\"\"\n # Generate a response using the chat completion API.\n response = await self._model_client.create(\n self._system_messages + [UserMessage(content=prompt, source=self.metadata[\"type\"])],\n cancellation_token=ctx.cancellation_token,\n json_output=True,\n )\n assert isinstance(response.content, str)\n # TODO: use structured generation library e.g. 
guidance to ensure the response is in the expected format.\n # Parse the response JSON.\n review = json.loads(response.content)\n # Construct the review text.\n review_text = \"Review:\\n\" + \"\\n\".join([f\"{k}: {v}\" for k, v in review.items()])\n approved = review[\"approval\"].lower().strip() == \"approve\"\n result = ReviewResult(\n review=review_text,\n session_id=message.session_id,\n approved=approved,\n )\n # Store the review result in the session memory.\n self._session_memory[message.session_id].append(result)\n # Publish the review result.\n await self.publish_message(result, topic_id=TopicId(\"default\", self.id.key))\n\n\n # Define the main function to set up and run the agent system\n async def main():\n # Create a queue to collect final answer\n queue = asyncio.Queue[WritingResult]()\n async def output_result(_agent: ClosureContext, message: WritingResult, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Create agents\n await ReviewerAgent.register(\n runtime, \"ReviewerAgent\", lambda: ReviewerAgent(model_client=model_client)\n )\n cot_instruction = \"Please think step by step and then solve the task.\"\n await WorkerAgent.register(\n runtime, \"WorkerAgent\", lambda: WorkerAgent(model_client=model_client, instruction=cot_instruction)\n )\n # Create closure agent to collect final output result\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n # Start the runtime, and publish the first message\n runtime.start()\n await runtime.publish_message(\n message=WritingTask(task=task),\n topic_id=DefaultTopicId(),\n )\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the first answer from the queue\n print(f\"queue {queue}\")\n return (await queue.get()).answer\n \n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (0.0%, 20.0%), Median: 55.5%" + }, + { + "thought": "By letting different LLMs debate with each other, we can leverage their diverse perspectives to find better solutions for tasks.", + "name": "LLM Debate", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import json\n import logging\n import re\n import sys\n import uuid\n from dataclasses import dataclass\n from typing import Dict, List, Union\n from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime\n from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, default_subscription, message_handler, TypeSubscription, ClosureAgent, ClosureContext, DefaultTopicId\n from autogen_core.components.models import (\n AssistantMessage,\n ChatCompletionClient,\n LLMMessage,\n SystemMessage,\n UserMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": 
True,\n },\n )\n \n @dataclass\n class Question:\n content: str\n\n\n @dataclass\n class Answer:\n content: str\n\n\n @dataclass\n class SolverRequest:\n content: str\n question: str\n\n\n @dataclass\n class IntermediateSolverResponse:\n content: str\n question: str\n answer: str\n round: int\n\n\n @dataclass\n class FinalSolverResponse:\n answer: str\n\n @default_subscription\n class Solver(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neighbors: int, max_round: int) -> None:\n super().__init__(\"A debator.\")\n self._topic_type = topic_type\n self._model_client = model_client\n self._num_neighbors = num_neighbors\n self._history: List[LLMMessage] = []\n self._buffer: Dict[int, List[IntermediateSolverResponse]] = {}\n self._system_messages = [\n SystemMessage(content=\n (\n \"You are a helpful assistant with expertise in reasoning. \"\n \"Your task is to assist in solving a reasoning problem by providing \"\n \"a clear and detailed solution. Limit your output within 100 words, \"\n \"and your final answer should be a single string.\"\n )\n )\n ]\n self._round = 0\n self._max_round = max_round\n\n @message_handler\n async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None:\n # Add the question to the memory.\n self._history.append(UserMessage(content=message.content, source=\"user\"))\n # Make an inference using the model.\n model_result = await self._model_client.create(self._system_messages + self._history)\n assert isinstance(model_result.content, str)\n # Add the response to the memory.\n self._history.append(AssistantMessage(content=model_result.content, source=self.metadata[\"type\"]))\n print(f\"{'-'*80}\\nSolver {self.id} round {self._round}:\\n{model_result.content}\")\n # Increment the counter.\n self._round += 1\n if self._round == self._max_round:\n # If the counter reaches the maximum round, publishes a final response.\n await self.publish_message(FinalSolverResponse(answer=model_result.content), topic_id=DefaultTopicId())\n else:\n # Publish intermediate response to the topic associated with this solver.\n print(\"publish IntermediateSolverResponse\")\n await self.publish_message(\n IntermediateSolverResponse(\n content=model_result.content,\n question=message.question,\n answer=model_result.content,\n round=self._round,\n ),\n topic_id=DefaultTopicId(type=self._topic_type),\n )\n\n @message_handler\n async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None:\n # Add neighbor's response to the buffer.\n self._buffer.setdefault(message.round, []).append(message)\n # Check if all neighbors have responded.\n if len(self._buffer[message.round]) == self._num_neighbors:\n print(\n f\"{'-'*80}\\nSolver {self.id} round {message.round}:\\nReceived all responses from {self._num_neighbors} neighbors.\"\n )\n # Prepare the prompt for the next question.\n prompt = \"These are the solutions to the problem from other agents:\\n\"\n for resp in self._buffer[message.round]:\n prompt += f\"One agent solution: {resp.content}\\n\"\n prompt += (\n \"Using the solutions from other agents as additional information, \"\n \"can you provide your answer to the problem? \"\n f\"The original problem is {message.question}. 
\"\n \"Your final answer should be a single string.\"\n )\n # Send the question to the agent itself to solve.\n await self.send_message(SolverRequest(content=prompt, question=message.question), self.id)\n # Clear the buffer.\n self._buffer.pop(message.round)\n\n\n @default_subscription\n class Aggregator(RoutedAgent):\n def __init__(self, num_solvers: int) -> None:\n super().__init__(\"Aggregator\")\n self._num_solvers = num_solvers\n self._buffer: List[FinalSolverResponse] = []\n\n @message_handler\n async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n print(f\"{'-'*80}\\nAggregator {self.id} received question:\\n{message.content}\")\n prompt = (\n f\"Can you solve the following problem?\\n{message.content}\\n\"\n \"Explain your reasoning. Your final answer should be a single string.\"\n )\n print(f\"{'-'*80}\\nAggregator {self.id} publishes initial solver request.\")\n await self.publish_message(SolverRequest(content=prompt, question=message.content), topic_id=DefaultTopicId())\n\n @message_handler\n async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None:\n self._buffer.append(message)\n if len(self._buffer) == self._num_solvers:\n print(f\"{'-'*80}\\nAggregator {self.id} received all final answers from {self._num_solvers} solvers.\")\n # Find the majority answer.\n answers = [resp.answer for resp in self._buffer]\n majority_answer = max(set(answers), key=answers.count)\n # Publish the aggregated response.\n await self.publish_message(Answer(content=majority_answer), topic_id=TopicId(\"result\", self.id.key))\n # Clear the responses.\n self._buffer.clear()\n print(f\"{'-'*80}\\nAggregator {self.id} publishes final answer:\\n{majority_answer}\")\n\n\n # Define the main function to set up and run the agent system\n async def main():\n queue = asyncio.Queue[Answer]()\n async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n await Solver.register(\n runtime,\n \"SolverA\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverA\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverB\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverB\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverC\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverC\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Solver.register(\n runtime,\n \"SolverD\",\n lambda: Solver(\n model_client=model_client,\n topic_type=\"SolverD\",\n num_neighbors=2,\n max_round=3,\n ),\n )\n await Aggregator.register(runtime, \"Aggregator\", lambda: Aggregator(num_solvers=4))\n\n # Subscriptions for topic published to by SolverA.\n await runtime.add_subscription(TypeSubscription(\"SolverA\", \"SolverD\"))\n await runtime.add_subscription(TypeSubscription(\"SolverA\", \"SolverB\"))\n\n # Subscriptions for topic published to by SolverB.\n await runtime.add_subscription(TypeSubscription(\"SolverB\", \"SolverA\"))\n await runtime.add_subscription(TypeSubscription(\"SolverB\", \"SolverC\"))\n\n # Subscriptions for topic published to by SolverC.\n await runtime.add_subscription(TypeSubscription(\"SolverC\", \"SolverB\"))\n await runtime.add_subscription(TypeSubscription(\"SolverC\", \"SolverD\"))\n\n # Subscriptions for topic published to by SolverD.\n await runtime.add_subscription(TypeSubscription(\"SolverD\", 
\"SolverC\"))\n await runtime.add_subscription(TypeSubscription(\"SolverD\", \"SolverA\"))\n\n # All solvers and the aggregator subscribe to the default topic.\n\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n await runtime.publish_message(Question(content=task), DefaultTopicId())\n\n # Keep processing messages until idle.\n await runtime.stop_when_idle()\n\n # Return the answer from the queue\n return (await queue.get()).content\n\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (16.7%, 33.3%), Median: 76.7%" + }, + { + "thought": "By using a tree search strategy, the model can explore multiple branches of thoughts, where at any step of the problem, multiple independent thoughts are generated and evaluated to find the most useful ones.", + "name": "Tree of Thought", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import logging\n from dataclasses import dataclass\n from typing import List, Dict, Any\n from autogen_core import SingleThreadedAgentRuntime, default_subscription, RoutedAgent, message_handler, ClosureAgent, ClosureContext, TypeSubscription, DefaultTopicId\n from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId\n from autogen_core.components.models import (\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n AssistantMessage,\n LLMMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n from autogen_core.application.logging import TRACE_LOGGER_NAME \n\n # Configure logging as per documentation \n logging.basicConfig(level=logging.WARNING) \n logger = logging.getLogger(TRACE_LOGGER_NAME) \n logger.setLevel(logging.INFO)\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs['model'],\n api_version=model_client_kwargs['api_version'],\n azure_endpoint=model_client_kwargs['azure_endpoint'],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n \n @dataclass \n class Message: \n content: str \n \n @dataclass \n class FinalAnswer: \n answer: str \n\n @default_subscription\n class TreeOfThoughtsAgent(RoutedAgent): \n def __init__(self, model_client: ChatCompletionClient, max_depth: int = 3, beam_width: int = 3): \n super().__init__(\"TreeOfThoughtsAgent\") \n self._model_client = model_client \n self._max_depth = max_depth \n self._beam_width = beam_width \n self._system_messages = [ \n SystemMessage( \n content=\"You are a helpful assistant who reasons step-by-step to solve complex problems.\") \n ] \n \n async def generate_thoughts(self, prompt: List[LLMMessage], num_thoughts: int, cancellation_token) -> List[str]: \n # Generate multiple thoughts using the model \n thoughts = [] \n # Create multiple async tasks to generate thoughts in parallel \n tasks = [] \n for _ in range(num_thoughts): \n tasks.append(self._model_client.create( \n prompt, \n extra_create_args={\"temperature\": 1.0},\n cancellation_token=cancellation_token, \n )) \n responses = await asyncio.gather(*tasks) \n for response in responses: \n 
thoughts.append(response.content.strip()) \n return thoughts \n \n async def evaluate_thoughts(self, thoughts: List[str], ctx: MessageContext) -> List[str]: \n # Batch evaluation of thoughts \n eval_prompt = [ \n SystemMessage(content=\"You are an assistant that evaluates reasoning steps for solving a problem.\"), \n UserMessage( \n content=f\"Evaluate the following thoughts for their usefulness in solving the problem. Rank them from most useful to least useful and provide the rankings.\\n\\nThoughts:\\n\" + \"\\n\".join( \n [f\"{i+1}. {t}\" for i, t in enumerate(thoughts)]), \n source=\"user\" \n ) \n ] \n eval_response = await self._model_client.create( \n eval_prompt, \n cancellation_token=ctx.cancellation_token, \n ) \n # Parse the response to extract rankings \n rankings_text = eval_response.content.strip() \n # For simplicity, assume the model outputs the rankings as a list of numbers \n rankings = [] \n for line in rankings_text.split('\\n'): \n line = line.strip() \n if line and line[0].isdigit(): \n rankings.append(int(line[0]) - 1) # Subtract 1 to get index \n # Select top-k thoughts \n best_thoughts = [thoughts[i] for i in rankings[:self._beam_width]] \n return best_thoughts \n \n @message_handler \n async def handle_message(self, message: Message, ctx: MessageContext) -> None: \n logger.info(f\"Received task: {message.content}\") \n initial_prompt = self._system_messages + [UserMessage(content=message.content, source=\"user\")] \n tree = [[]] # Initialize the tree with an empty path \n for depth in range(self._max_depth): \n new_branches = [] \n logger.info(f\"Depth {depth+1}\") \n for path in tree: \n # Build the prompt up to this point \n prompt = initial_prompt.copy() \n for thought in path: \n prompt.append(AssistantMessage(content=thought, source=\"assistant\")) \n # Generate thoughts \n thoughts = await self.generate_thoughts(prompt, self._beam_width, ctx.cancellation_token) \n logger.info(f\"Generated thoughts: {thoughts}\") \n # Evaluate thoughts \n best_thoughts = await self.evaluate_thoughts(thoughts, ctx) \n logger.info(f\"Best thoughts: {best_thoughts}\") \n # Expand tree with best thoughts \n for thought in best_thoughts: \n new_path = path + [thought] \n new_branches.append(new_path) \n # Update tree with new branches \n if not new_branches: \n logger.info(\"No more branches to expand.\") \n break # No more thoughts to expand \n tree = new_branches \n # After reaching max depth, select the best path \n # For simplicity, select the first path \n best_path = tree[0] \n final_answer = best_path[-1] \n logger.info(f\"Final answer: {final_answer}\") \n # Publish the final answer \n await self.publish_message( \n FinalAnswer(answer=final_answer), \n topic_id=TopicId(type=\"result\", source=self.id.key) \n ) \n \n # Main function \n async def main(): \n # Create a queue to collect the final answer \n queue = asyncio.Queue[FinalAnswer]() \n \n async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None: \n await queue.put(message) \n \n # Initialize runtime \n runtime = SingleThreadedAgentRuntime() \n \n # Register TreeOfThoughtsAgent \n await TreeOfThoughtsAgent.register( \n runtime, \n \"TreeOfThoughtsAgent\", \n lambda: TreeOfThoughtsAgent(model_client) \n ) \n \n # Register ClosureAgent with agent key matching self.id.key (default is \"default\") \n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\") \n await ClosureAgent.register_closure( \n runtime, \n \"output_result\", \n output_result, \n 
subscriptions=lambda: [result_topic] \n ) \n \n # Start the runtime \n runtime.start() \n \n # Publish initial message to TreeOfThoughtsAgent\n await runtime.publish_message( \n Message(content=task), \n topic_id=DefaultTopicId() \n ) \n \n # Wait until idle \n await runtime.stop_when_idle() \n \n # Return the final answer \n final_message = await queue.get() \n return final_message.answer\n return asyncio.run(main())\n", + "generation": "initial", + "fitness": "95% Bootstrap Confidence Interval: (0.0%, 6.4%), Median: 37.8%" + }, + { + "thought": "**Insights:**\nTo improve performance on DROP tasks requiring complex reasoning and calculations, allowing the LLM to generate and execute code can enhance accuracy. This aligns with the Program-Aided Language Models (PAL) method.\n**Overall Idea:**\nDesign an agent that instructs the LLM to generate Python code for the task. The code is then executed safely, and the output is used as the final answer. This leverages Python's computational abilities for precise calculations.\n**Implementation:**\n- Implement 'PALAgent' that prompts the LLM to generate Python code for the task.\n- Extract code from the LLM's response.\n- Use 'CodeExecutionAgent' to execute the code safely.\n- Capture the execution output and return it as the final answer.\n- Handle potential errors during code execution.\n- Ensure code execution is secure and isolated.", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import re\n from dataclasses import dataclass\n from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, message_handler, ClosureAgent, ClosureContext, TypeSubscription\n from autogen_core.base import AgentId, MessageContext, TopicId\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_core.code_executor import CodeBlock\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True\n }\n )\n\n @dataclass\n class Message:\n content: str\n\n @dataclass\n class FinalAnswer:\n answer: str\n\n @dataclass\n class ExecuteCode:\n code: str\n\n @dataclass\n class ExecutionResult:\n output: str\n\n class PALAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, code_agent_type: str):\n super().__init__(\"PAL Agent\")\n self._model_client = model_client\n self._system_messages = [\n SystemMessage(content=\"You are an expert reasoning agent. Solve the following task by generating Python code to compute the answer. Enclose your code in markdown code blocks. 
After execution, provide the final answer.\")\n ]\n self._code_agent_id = AgentId(code_agent_type, self.id.key)\n\n @message_handler\n async def handle_message(self, message: Message, ctx: MessageContext) -> None:\n user_message = UserMessage(content=message.content, source=\"user\")\n messages = self._system_messages + [user_message]\n response = await self._model_client.create(messages, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n # Extract code from the response\n code_blocks = re.findall(r'```(?:python)?\\n(.*?)```', response.content, re.DOTALL)\n if code_blocks:\n code = code_blocks[0]\n # Send code to CodeExecutionAgent for execution\n execution_result = await self.send_message(ExecuteCode(code=code), self._code_agent_id)\n final_answer = execution_result.output.strip()\n # Publish final answer\n await self.publish_message(\n FinalAnswer(answer=final_answer),\n topic_id=TopicId(\"result\", self.id.key)\n )\n else:\n # No code found, return the LLM's response as is\n await self.publish_message(\n FinalAnswer(answer=response.content),\n topic_id=TopicId(\"result\", self.id.key)\n )\n\n class CodeExecutionAgent(RoutedAgent):\n def __init__(self) -> None:\n super().__init__(\"Code Execution Agent\")\n self._code_executor = LocalCommandLineCodeExecutor()\n\n async def start_code_executor(self):\n # No initialization needed for LocalCommandLineCodeExecutor\n pass\n\n @message_handler\n async def handle_execute_code(self, message: ExecuteCode, ctx: MessageContext) -> ExecutionResult:\n code = message.code\n result = await self._code_executor.execute_code_blocks(\n code_blocks=[CodeBlock(language=\"python\", code=code)],\n cancellation_token=ctx.cancellation_token,\n )\n output = result.output\n return ExecutionResult(output=output)\n\n async def create_code_execution_agent():\n agent = CodeExecutionAgent()\n await agent.start_code_executor()\n return agent\n\n async def main():\n queue = asyncio.Queue[FinalAnswer]()\n\n async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await PALAgent.register(runtime, \"pal_agent\", lambda: PALAgent(model_client, code_agent_type=\"code_execution_agent\"))\n await CodeExecutionAgent.register(runtime, \"code_execution_agent\", create_code_execution_agent)\n\n # Register ClosureAgent\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(runtime, \"output_result\", output_result, subscriptions=lambda: [result_topic])\n\n runtime.start()\n # Send initial message directly to PALAgent\n pal_agent_id = AgentId(\"pal_agent\", \"default\")\n await runtime.send_message(Message(content=task), pal_agent_id)\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer\n final_message = await queue.get()\n return final_message.answer\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (0.0%, 0.0%), Median: 0.4%", + "generation": 2 + }, + { + "thought": "**Insights:**\nComplex reasoning tasks can be effectively solved by decomposing them into sequential sub-questions. The \"Least-to-Most\" prompting strategy enables the model to tackle simpler questions first and build up to solving the original complex question.\n\n**Overall Idea:**\nDesign an agent that employs the \"Least-to-Most\" approach. 
The agent first prompts the LLM to decompose the main question into a sequence of sub-questions. It then iteratively answers each sub-question, using previous answers as context. Finally, it synthesizes these sub-answers to formulate the final answer.\n\n**Implementation:**\n- Implement a \"LeastToMostAgent\" that:\n - Prompts the LLM to decompose the main question.\n - Iteratively answers each sub-question, updating the context.\n - Synthesizes the final answer based on all sub-answers.\n- Ensure correct message passing and topic subscriptions.\n- Publish the final answer to the topic that the \"ClosureAgent\" is subscribed to.", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, ClosureAgent, ClosureContext, message_handler, TypeSubscription\n from autogen_core.base import MessageContext, AgentId, TopicId\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True\n }\n )\n\n @dataclass\n class TaskMessage:\n content: str\n\n @dataclass\n class FinalAnswer:\n content: str\n\n class LeastToMostAgent(RoutedAgent):\n def __init__(self):\n super().__init__(\"Least-To-Most Agent\")\n self._model_client = model_client\n self._system_prompt = \"You are a helpful assistant who can decompose complex questions into simpler sub-questions and answer them sequentially to solve the original problem.\"\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n # Step 1: Decompose the task into sub-questions\n decomposition_prompt = f\"Decompose the following question into a sequence of simpler sub-questions that can help answer the original question:\\n\\nQuestion: {message.content}\\n\\nProvide the sub-questions as a numbered list.\"\n msgs = [SystemMessage(content=self._system_prompt), UserMessage(content=decomposition_prompt, source=\"user\")]\n decomposition_response = await self._model_client.create(msgs, cancellation_token=ctx.cancellation_token)\n assert isinstance(decomposition_response.content, str)\n # Extract the sub-questions\n sub_questions = []\n for line in decomposition_response.content.strip().split('\\n'):\n if line.strip():\n # Remove numbering if any\n question = line.strip().lstrip('1234567890. 
').strip()\n sub_questions.append(question)\n # Initialize list for sub-answers\n sub_answers = []\n # Step 2: Iteratively answer each sub-question\n for i, sub_question in enumerate(sub_questions):\n sub_context = '\\n'.join([f\"Q: {q}\\nA: {a}\" for q, a in zip(sub_questions[:i], sub_answers)])\n sub_question_prompt = f\"{sub_context}\\n\\nNow answer the following question:\\n\\n{sub_question}\"\n msgs = [SystemMessage(content=self._system_prompt), UserMessage(content=sub_question_prompt, source=\"user\")]\n answer_response = await self._model_client.create(msgs, cancellation_token=ctx.cancellation_token)\n assert isinstance(answer_response.content, str)\n sub_answers.append(answer_response.content.strip())\n # Step 3: Synthesize the final answer\n sub_qa_pairs = '\\n'.join([f\"Q{i+1}: {q}\\nA{i+1}: {a}\" for i, (q, a) in enumerate(zip(sub_questions, sub_answers))])\n synthesis_prompt = f\"Original question: {message.content}\\n\\nSub-questions and answers:\\n{sub_qa_pairs}\\n\\nBased on the above, provide a final, comprehensive answer to the original question.\"\n msgs = [SystemMessage(content=self._system_prompt), UserMessage(content=synthesis_prompt, source=\"user\")]\n final_response = await self._model_client.create(msgs, cancellation_token=ctx.cancellation_token)\n assert isinstance(final_response.content, str)\n # Publish the final answer\n await self.publish_message(\n FinalAnswer(content=final_response.content.strip()),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n\n async def main():\n # Create a queue to collect the final answer\n queue = asyncio.Queue[FinalAnswer]()\n\n async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await LeastToMostAgent.register(runtime, \"least_to_most_agent\", lambda: LeastToMostAgent())\n\n # Register closure agent to collect the final answer\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n # Start the runtime\n runtime.start()\n # Send the initial message directly to the agent\n least_to_most_agent_id = AgentId(\"least_to_most_agent\", \"default\")\n await runtime.send_message(\n TaskMessage(content=task),\n least_to_most_agent_id\n )\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer from the queue\n final_message = await queue.get()\n return final_message.content\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (3.0%, 3.4%), Median: 4.3%", + "generation": 6 + }, + { + "thought": "By implementing the ReAct prompting strategy, we enable the agent to interleave reasoning and actions, allowing it to interact with the passage or other external sources during problem-solving. 
This approach is novel compared to the existing methods in the archive and can enhance performance on complex tasks that require dynamic information retrieval and processing.\n\n**Implementation:**\n- Update the system prompt to clearly specify the expected output format, including Thought, Action, Observation, and Answer.\n- Implement robust parsing of the assistant's responses to handle multiple steps and loops.\n- Improve the 'perform_action' method to handle different actions and queries more effectively.\n- Ensure that the conversation history is maintained correctly, and observations are integrated properly.\n- The 'ReActAgent' will publish the final answer to the 'result' topic, which the 'ClosureAgent' subscribes to.", + "name": "ReAct Prompting", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import re\n from dataclasses import dataclass\n from typing import List\n from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, ClosureAgent, ClosureContext, message_handler, TypeSubscription\n from autogen_core.base import MessageContext, AgentId, TopicId\n from autogen_core.components.models import SystemMessage, UserMessage, AssistantMessage, ChatCompletionClient, LLMMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True\n }\n )\n\n @dataclass\n class TaskMessage:\n content: str\n\n @dataclass\n class FinalAnswer:\n content: str\n\n class ReActAgent(RoutedAgent):\n def __init__(self, passage: str, model_client: ChatCompletionClient):\n super().__init__(\"ReAct Agent\")\n self.passage = passage\n self.model_client = model_client\n self.system_prompt = (\n \"You are an AI assistant that uses reasoning and actions to answer the question based on the provided passage. \"\n \"At each step, follow this format:\\n\"\n \"Thought: you should think about the problem\\n\"\n \"Action: the action you want to take, choices are [Search], format \\\"Action: []\\\"\\n\"\n \"Observation: the result of the action\\n\"\n \"... 
(this Thought/Action/Observation can repeat N times)\\n\"\n \"Thought: I now have enough information to answer the question\\n\"\n \"Answer: the final answer to the original question\\n\"\n \"\\n\"\n \"Begin.\"\n )\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n conversation: List[LLMMessage] = []\n conversation.append(SystemMessage(content=self.system_prompt))\n # Include the passage in the user's message\n user_content = f\"Passage:\\n{self.passage}\\n\\nQuestion:\\n{message.content}\"\n conversation.append(UserMessage(content=user_content, source=\"user\"))\n\n while True:\n response = await self.model_client.create(\n conversation,\n cancellation_token=ctx.cancellation_token\n )\n assert isinstance(response.content, str)\n # Append assistant's message\n assistant_message = response.content.strip()\n conversation.append(AssistantMessage(content=assistant_message, source=\"assistant\"))\n\n # Parse the assistant's message\n actions = self.parse_actions(assistant_message)\n if actions:\n for action in actions:\n # Perform action\n observation = self.perform_action(action)\n # Append observation to conversation\n conversation.append(AssistantMessage(content=f\"Observation: {observation}\", source=\"assistant\"))\n elif \"Answer:\" in assistant_message:\n final_answer = assistant_message.split(\"Answer:\")[1].strip()\n await self.publish_message(\n FinalAnswer(content=final_answer),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n break\n else:\n # No action or answer, continue the loop\n continue\n\n def parse_actions(self, assistant_message: str) -> List[str]:\n # Find all occurrences of Action: ...\n actions = re.findall(r\"Action:\\s*(.*)\", assistant_message)\n return actions\n\n def perform_action(self, action: str) -> str:\n # For simplicity, define \"Search[]\" action\n if action.startswith(\"Search\"):\n # Extract the query\n query_match = re.match(r\"Search\\[(.*)\\]\", action)\n if query_match:\n query = query_match.group(1)\n # Search the passage for the query\n if query.lower() in self.passage.lower():\n return f\"Found information about '{query}' in the passage.\"\n else:\n return f\"No information found about '{query}' in the passage.\"\n else:\n return \"Invalid action format.\"\n else:\n return \"Unknown action.\"\n\n async def main():\n # Create a queue to collect the final answer\n queue = asyncio.Queue()\n\n async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from the task\n task_content = str(task)\n\n passage_match = re.search(r\"Passage:\\s*(.*?)\\nQuestion:\", task_content, re.DOTALL)\n question_match = re.search(r\"Question:\\s*(.*)\", task_content, re.DOTALL)\n\n passage = passage_match.group(1).strip() if passage_match else \"\"\n question = question_match.group(1).strip() if question_match else \"\"\n\n # Register the ReActAgent\n await ReActAgent.register(runtime, \"react_agent\", lambda: ReActAgent(passage, model_client))\n\n # Register closure agent to collect the final answer\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n # Start the runtime\n runtime.start()\n\n # Send the initial message to the agent\n react_agent_id = 
AgentId(\"react_agent\", \"default\")\n await runtime.send_message(\n TaskMessage(content=question),\n react_agent_id\n )\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer from the queue\n final_message = await queue.get()\n return final_message.content\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (18.1%, 20.3%), Median: 24.9%", + "generation": 9 + }, + { + "thought": "**Insights:**\nComplex tasks can often be solved more effectively by drawing parallels with previously solved problems. This analogical reasoning allows for the application of insights and strategies from analogous cases to the current task.\n\n**Overall Idea:**\nDesign an 'Analogical Reasoning Agent' system where a 'RetrievalAgent' retrieves similar problems and their solutions from a knowledge base. A 'ReasoningAgent' then uses this information, along with the original task, to construct a well-informed answer.\n\n**Implementation:**\n- Implement a 'RetrievalAgent' that fetches analogous problems and their solutions based on the input task.\n- 'RetrievalAgent' sends this information to the 'ReasoningAgent'.\n- 'ReasoningAgent' uses the retrieved analogies and the original task to generate the final answer.\n- 'ReasoningAgent' publishes the final answer to the 'result' topic.\n- A 'ClosureAgent' subscribed to the 'result' topic collects the final answer.", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n from dataclasses import dataclass\n from typing import List\n from autogen_core import (\n SingleThreadedAgentRuntime,\n RoutedAgent,\n ClosureAgent,\n ClosureContext,\n message_handler,\n TypeSubscription\n )\n from autogen_core.base import MessageContext, AgentId, TopicId\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n # Initialize the token provider and model client\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\"vision\": True, \"function_calling\": True, \"json_output\": True}\n )\n\n # Define message types\n @dataclass\n class TaskMessage:\n content: str\n\n @dataclass\n class Analogies:\n analogies: List[str]\n\n @dataclass\n class FinalAnswer:\n content: str\n\n # Implement the RetrievalAgent\n class RetrievalAgent(RoutedAgent):\n def __init__(self):\n super().__init__(\"Retrieval Agent\")\n self._model_client = model_client\n self._system_prompt = (\n \"You are an assistant that retrieves analogies for a given problem from a knowledge base.\"\n )\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n prompt = (\n f\"Problem: {message.content}\\n\\n\"\n \"Retrieve similar problems and their solutions that could help solve this problem. 
\"\n \"Present them as a list of analogies.\"\n )\n msgs = [\n SystemMessage(content=self._system_prompt),\n UserMessage(content=prompt, source=\"user\")\n ]\n response = await self._model_client.create(\n msgs, cancellation_token=ctx.cancellation_token\n )\n assert isinstance(response.content, str)\n analogies = []\n for line in response.content.strip().split(\"\\n\"):\n if line.strip():\n analogy = line.strip().lstrip(\"1234567890. \").strip()\n analogies.append(analogy)\n reasoning_agent_id = AgentId(\"reasoning_agent\", self.id.key)\n await self.send_message(\n Analogies(analogies=analogies), reasoning_agent_id\n )\n\n # Implement the ReasoningAgent\n class ReasoningAgent(RoutedAgent):\n def __init__(self):\n super().__init__(\"Reasoning Agent\")\n self._model_client = model_client\n self._system_prompt = (\n \"You are an assistant that uses analogies to solve complex problems.\"\n )\n self._task_content = \"\"\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n self._task_content = message.content\n retrieval_agent_id = AgentId(\"retrieval_agent\", self.id.key)\n await self.send_message(message, retrieval_agent_id)\n\n @message_handler\n async def handle_analogies(self, message: Analogies, ctx: MessageContext) -> None:\n analogies_text = \"\\n\".join([\n f\"Analogy {i+1}: {a}\" for i, a in enumerate(message.analogies)\n ])\n prompt = (\n f\"Problem: {self._task_content}\\n\\n\"\n f\"Analogies:\\n{analogies_text}\\n\\n\"\n \"Using the analogies above, provide a detailed solution to the problem.\"\n )\n msgs = [\n SystemMessage(content=self._system_prompt),\n UserMessage(content=prompt, source=\"user\")\n ]\n response = await self._model_client.create(\n msgs, cancellation_token=ctx.cancellation_token\n )\n assert isinstance(response.content, str)\n await self.publish_message(\n FinalAnswer(content=response.content.strip()),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n\n # Main function to run the agent system\n async def main():\n queue = asyncio.Queue()\n\n # ClosureAgent to collect the final answer\n async def output_result(\n _agent: ClosureContext, message: FinalAnswer, ctx: MessageContext\n ) -> None:\n await queue.put(message)\n\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await RetrievalAgent.register(runtime, \"retrieval_agent\", lambda: RetrievalAgent())\n await ReasoningAgent.register(runtime, \"reasoning_agent\", lambda: ReasoningAgent())\n\n # Register ClosureAgent to collect the final answer\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n # Start the runtime and send the initial message\n runtime.start()\n reasoning_agent_id = AgentId(\"reasoning_agent\", \"default\")\n await runtime.send_message(\n TaskMessage(content=task), reasoning_agent_id\n )\n\n # Wait until the system is idle and collect the final answer\n await runtime.stop_when_idle()\n final_message = await queue.get()\n return final_message.content\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (2.2%, 2.4%), Median: 3.0%", + "generation": 10 + }, + { + "thought": "**Insights:**\nImplementing an explicit fact-checking step can enhance the reliability of the generated answers, ensuring they are grounded in the provided passage. 
This approach is novel compared to existing methods in the archive.\n\n**Overall Idea:**\nDesign an agent system where an \"AnswerGenerationAgent\" generates an initial answer, and a \"FactCheckingAgent\" verifies each assertion against the passage. If any assertions are unsupported, feedback is provided to refine the answer. This iterative process continues until the answer is fully supported or a maximum number of iterations is reached.\n\n**Implementation:**\n- Implement \"AnswerGenerationAgent\" that generates and refines the answer based on feedback.\n- Implement \"FactCheckingAgent\" that verifies each assertion and provides feedback.\n- Ensure robust communication between agents.\n- Publish the final verified answer to the \"result\" topic, which the \"ClosureAgent\" subscribes to.", + "name": "Fact-Checking Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import re\n from dataclasses import dataclass\n from typing import List\n from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, ClosureAgent, ClosureContext, message_handler, TypeSubscription\n from autogen_core.base import MessageContext, AgentId, TopicId\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True\n }\n )\n\n @dataclass\n class TaskMessage:\n passage: str\n question: str\n\n @dataclass\n class FactCheckRequest:\n passage: str\n answer: str\n\n @dataclass\n class FactCheckResponse:\n feedback: str\n is_verified: bool\n\n @dataclass\n class FinalAnswer:\n content: str\n\n class AnswerGenerationAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, fact_checker_agent_type: str):\n super().__init__(\"Answer Generation Agent\")\n self.model_client = model_client\n self.fact_checker_agent_id = AgentId(fact_checker_agent_type, self.id.key)\n self.max_iterations = 3\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n iteration = 0\n passage = message.passage\n question = message.question\n answer = \"\"\n while iteration < self.max_iterations:\n # Generate answer\n prompt = f\"Passage:\\n{passage}\\n\\nQuestion:\\n{question}\\n\\nAnswer the question based solely on the passage. 
Be concise and ensure all statements are directly supported by the passage.\"\n msgs = [SystemMessage(content=\"You are a helpful assistant.\"), UserMessage(content=prompt, source=\"user\")]\n response = await self.model_client.create(msgs, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n answer = response.content.strip()\n # Send to FactCheckingAgent\n fact_check_request = FactCheckRequest(passage=passage, answer=answer)\n fact_check_response = await self.send_message(fact_check_request, self.fact_checker_agent_id)\n if fact_check_response.is_verified:\n # Publish final answer\n await self.publish_message(FinalAnswer(content=answer), topic_id=TopicId(\"result\", \"output_result\"))\n return\n else:\n # Revise answer based on feedback\n feedback = fact_check_response.feedback\n revision_prompt = f\"Passage:\\n{passage}\\n\\nQuestion:\\n{question}\\n\\nPrevious Answer:\\n{answer}\\n\\nFeedback:\\n{feedback}\\n\\nBased on the feedback, please provide a new answer that is fully supported by the passage.\"\n msgs = [SystemMessage(content=\"You are a helpful assistant.\"), UserMessage(content=revision_prompt, source=\"user\")]\n response = await self.model_client.create(msgs, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n answer = response.content.strip()\n iteration += 1\n # After max iterations, publish the last answer\n await self.publish_message(FinalAnswer(content=answer), topic_id=TopicId(\"result\", \"output_result\"))\n\n class FactCheckingAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Fact Checking Agent\")\n self.model_client = model_client\n\n @message_handler\n async def handle_fact_check_request(self, message: FactCheckRequest, ctx: MessageContext) -> FactCheckResponse:\n passage = message.passage\n answer = message.answer\n # Fact-check the answer\n prompt = f\"Passage:\\n{passage}\\n\\nAnswer:\\n{answer}\\n\\nFor each statement in the answer, determine if it is directly supported by the passage. List any unsupported statements and explain why. 
If all statements are supported, simply state 'All statements are supported.'\"\n msgs = [SystemMessage(content=\"You are a fact-checking assistant.\"), UserMessage(content=prompt, source=\"user\")]\n response = await self.model_client.create(msgs, cancellation_token=ctx.cancellation_token)\n assert isinstance(response.content, str)\n verification = response.content.strip()\n if \"All statements are supported.\" in verification:\n return FactCheckResponse(feedback=verification, is_verified=True)\n else:\n return FactCheckResponse(feedback=verification, is_verified=False)\n\n async def main():\n # Create a queue to collect the final answer\n queue = asyncio.Queue[FinalAnswer]()\n\n async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from the task\n task_content = str(task)\n\n passage_match = re.search(r\"Passage:\\s*(.*?)\\nQuestion:\", task_content, re.DOTALL)\n question_match = re.search(r\"Question:\\s*(.*)\", task_content, re.DOTALL)\n\n passage = passage_match.group(1).strip() if passage_match else \"\"\n question = question_match.group(1).strip() if question_match else \"\"\n\n # Register agents\n await AnswerGenerationAgent.register(runtime, \"answer_generation_agent\", lambda: AnswerGenerationAgent(model_client, fact_checker_agent_type=\"fact_checking_agent\"))\n await FactCheckingAgent.register(runtime, \"fact_checking_agent\", lambda: FactCheckingAgent(model_client))\n\n # Register ClosureAgent to collect the final answer\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n # Start the runtime\n runtime.start()\n\n # Send the initial task message to AnswerGenerationAgent\n answer_generation_agent_id = AgentId(\"answer_generation_agent\", \"default\")\n await runtime.send_message(\n TaskMessage(passage=passage, question=question),\n answer_generation_agent_id\n )\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer from the queue\n final_message = await queue.get()\n return final_message.content\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (6.9%, 8.0%), Median: 10.7%", + "generation": 11 + }, + { + "thought": "**Insights:**\nEncouraging the agent to challenge its own answer by seeking potential refutations can enhance the accuracy and robustness of the final solution. This method promotes critical thinking and self-evaluation, helping to identify and correct errors that might have been overlooked.\n\n**Overall Idea:**\nDesign an agent that first generates an initial answer with detailed reasoning. Then, a 'RefutationAgent' attempts to find flaws, counterarguments, or errors in the reasoning and answer. Based on this feedback, the 'AnswerGenerationAgent' refines the answer to address the identified issues. 
This iterative process continues until the answer withstands the refutation attempts or a maximum number of iterations is reached.\n\n**Implementation:**\n- Implement an 'AnswerGenerationAgent' that generates an initial answer and reasoning.\n- Implement a 'RefutationAgent' that critiques the answer by finding potential errors or weaknesses.\n- The 'AnswerGenerationAgent' uses the feedback to refine the answer.\n- The process repeats for a set number of iterations.\n- The final, refined answer is published to the 'result' topic, which the 'ClosureAgent' subscribes to.", + "name": "Conjecture and Refutation Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import re\n from dataclasses import dataclass\n from typing import List\n from autogen_core import (\n SingleThreadedAgentRuntime,\n RoutedAgent,\n ClosureAgent,\n ClosureContext,\n message_handler,\n TypeSubscription,\n )\n from autogen_core.base import MessageContext, AgentId, TopicId\n from autogen_core.components.models import (\n ChatCompletionClient,\n SystemMessage,\n UserMessage,\n )\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True,\n },\n )\n\n @dataclass\n class TaskMessage:\n content: str\n\n @dataclass\n class AnswerAndReasoning:\n answer: str\n reasoning: str\n\n @dataclass\n class RefutationRequest:\n answer: str\n reasoning: str\n iteration: int\n\n @dataclass\n class RefutationResponse:\n feedback: str\n needs_revision: bool\n\n @dataclass\n class FinalAnswer:\n content: str\n\n class AnswerGenerationAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient, refutation_agent_type: str):\n super().__init__(\"Answer Generation Agent\")\n self.model_client = model_client\n self.refutation_agent_id = AgentId(refutation_agent_type, self.id.key)\n self.max_iterations = 2\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n iteration = 0\n question = message.content\n while iteration < self.max_iterations:\n # Generate answer with reasoning\n prompt = (\n \"You are an expert assistant. 
Please provide a detailed reasoning to solve the following question, and then provide a concise final answer.\\n\\n\"\n f\"Question:\\n{question}\\n\\n\"\n \"Present your reasoning and final answer in the following format:\\n\"\n \"Reasoning:\\n<your reasoning>\\n\\n\"\n \"Answer:\\n<your final answer>\"\n )\n msgs = [\n SystemMessage(content=\"You are a helpful assistant.\"),\n UserMessage(content=prompt, source=\"user\"),\n ]\n response = await self.model_client.create(\n msgs, cancellation_token=ctx.cancellation_token\n )\n assert isinstance(response.content, str)\n # Extract reasoning and answer\n reasoning_match = re.search(\n r\"Reasoning:\\s*(.+?)\\n\\nAnswer:\", response.content, re.DOTALL\n )\n answer_match = re.search(r\"Answer:\\s*(.+)\", response.content, re.DOTALL)\n if reasoning_match and answer_match:\n reasoning = reasoning_match.group(1).strip()\n answer = answer_match.group(1).strip()\n else:\n # If parsing fails, consider the whole response as answer\n reasoning = \"\"\n answer = response.content.strip()\n # Send to RefutationAgent for critique\n refutation_request = RefutationRequest(\n answer=answer, reasoning=reasoning, iteration=iteration\n )\n refutation_response = await self.send_message(\n refutation_request, self.refutation_agent_id\n )\n if not refutation_response.needs_revision:\n # Publish final answer\n await self.publish_message(\n FinalAnswer(content=answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n return\n else:\n # Refine answer based on feedback\n feedback = refutation_response.feedback\n revision_prompt = (\n f\"Your previous reasoning and answer were critiqued as follows:\\n{feedback}\\n\\n\"\n \"Please revise your reasoning and answer to address the critique.\"\n )\n msgs = [\n SystemMessage(content=\"You are a helpful assistant.\"),\n UserMessage(content=revision_prompt, source=\"user\"),\n ]\n response = await self.model_client.create(\n msgs, cancellation_token=ctx.cancellation_token\n )\n assert isinstance(response.content, str)\n # Update the answer and reasoning\n reasoning_match = re.search(\n r\"Reasoning:\\s*(.+?)\\n\\nAnswer:\", response.content, re.DOTALL\n )\n answer_match = re.search(\n r\"Answer:\\s*(.+)\", response.content, re.DOTALL\n )\n if reasoning_match and answer_match:\n reasoning = reasoning_match.group(1).strip()\n answer = answer_match.group(1).strip()\n else:\n reasoning = \"\"\n answer = response.content.strip()\n iteration += 1\n # After max iterations, publish the last answer\n await self.publish_message(\n FinalAnswer(content=answer),\n topic_id=TopicId(\"result\", \"output_result\"),\n )\n\n class RefutationAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Refutation Agent\")\n self.model_client = model_client\n\n @message_handler\n async def handle_refutation_request(\n self, message: RefutationRequest, ctx: MessageContext\n ) -> RefutationResponse:\n # Attempt to find flaws or counterarguments\n prompt = (\n \"As a Refutation Agent, your task is to critically evaluate the given reasoning and answer, and attempt to find any flaws, errors, or weaknesses. If you find significant issues, provide a detailed critique. 
If the reasoning and answer are solid, state that they are acceptable.\\n\\n\"\n f\"Reasoning:\\n{message.reasoning}\\n\\n\"\n f\"Answer:\\n{message.answer}\\n\\n\"\n \"Provide your critique in the following format:\\n\"\n \"Critique:\\n<your critique>\\n\\n\"\n \"Needs Revision:\\n<Yes or No>\"\n )\n msgs = [\n SystemMessage(content=\"You are a logical and critical assistant.\"),\n UserMessage(content=prompt, source=\"user\"),\n ]\n response = await self.model_client.create(\n msgs, cancellation_token=ctx.cancellation_token\n )\n assert isinstance(response.content, str)\n # Parse the response\n needs_revision_match = re.search(\n r\"Needs Revision:\\s*(Yes|No)\", response.content, re.IGNORECASE\n )\n if needs_revision_match:\n needs_revision = needs_revision_match.group(1).strip().lower() == \"yes\"\n else:\n needs_revision = True # Assume needs revision if parsing fails\n critique_match = re.search(\n r\"Critique:\\s*(.+?)\\n\\nNeeds Revision:\", response.content, re.DOTALL\n )\n if critique_match:\n feedback = critique_match.group(1).strip()\n else:\n feedback = response.content.strip()\n return RefutationResponse(\n feedback=feedback, needs_revision=needs_revision\n )\n\n async def main():\n # Create a queue to collect the final answer\n queue = asyncio.Queue[FinalAnswer]()\n\n async def output_result(\n _agent: ClosureContext, message: FinalAnswer, ctx: MessageContext\n ) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Register agents\n await AnswerGenerationAgent.register(\n runtime,\n \"answer_generation_agent\",\n lambda: AnswerGenerationAgent(\n model_client, refutation_agent_type=\"refutation_agent\"\n ),\n )\n await RefutationAgent.register(\n runtime, \"refutation_agent\", lambda: RefutationAgent(model_client)\n )\n\n # Register ClosureAgent to collect the final answer\n result_topic = TypeSubscription(\n topic_type=\"result\", agent_type=\"output_result\"\n )\n await ClosureAgent.register_closure(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic],\n )\n\n # Start the runtime\n runtime.start()\n\n # Send the initial task message to AnswerGenerationAgent\n answer_generation_agent_id = AgentId(\"answer_generation_agent\", \"default\")\n await runtime.send_message(\n TaskMessage(content=task), answer_generation_agent_id\n )\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer from the queue\n final_message = await queue.get()\n return final_message.content\n\n return asyncio.run(main())", "fitness": "95% Bootstrap Confidence Interval: (56.4%, 60.9%), Median: 69.4%", "generation": 12 }, { "thought": "**Insights:**\nCombining ReAct prompting with tool usage allows the agent to perform precise computations and interact with external tools during reasoning. 
By correcting the implementation mistakes and enhancing error handling and security measures, we can improve the agent's performance on complex tasks requiring calculations.\n\n**Implementation:**\n- Correct the regular expression in `parse_actions` to properly extract actions and inputs.\n- Remove the unnecessary `start_code_executor` method from `CodeExecutionAgent`.\n- Add error handling in `CodeExecutionAgent` to manage code execution failures gracefully.\n- Execute code within a Docker container to ensure a secure and sandboxed environment.\n- Improve the extraction of `passage` and `question` from `task` for robustness.\n- Ensure observations are correctly added to the conversation history.\n- Update `CodeExecutionAgent` to override `on_started` and `on_stopped` methods to manage the code executor lifecycle.\n- Adjust agent registration to use lambda functions directly, removing unnecessary functions.\n", "name": "Tool-Augmented ReAct Agent", "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import re\n from dataclasses import dataclass\n from typing import List\n from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, ClosureAgent, ClosureContext, message_handler, TypeSubscription\n from autogen_core.base import MessageContext, AgentId, TopicId\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage, AssistantMessage, LLMMessage\n from autogen_core.code_executor import CodeBlock\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client.\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True\n }\n )\n\n @dataclass\n class TaskMessage:\n content: str\n\n @dataclass\n class FinalAnswer:\n content: str\n\n @dataclass\n class ExecuteCode:\n code: str\n\n @dataclass\n class ExecutionResult:\n output: str\n\n class ToolAugmentedReActAgent(RoutedAgent):\n def __init__(self, passage: str, model_client: ChatCompletionClient, code_agent_type: str):\n super().__init__(\"Tool-Augmented ReAct Agent\")\n self.passage = passage\n self.model_client = model_client\n self.code_agent_id = AgentId(code_agent_type, self.id.key)\n self.system_prompt = (\n \"You are an AI assistant that uses reasoning and actions to answer the question based on the provided passage. \"\n \"At each step, follow this format:\\n\"\n \"Thought: you should think about the problem\\n\"\n \"Action: the action you want to take, choices are [Search, Calculate], format 'Action: ActionName[input]'. For calculations, you can write Python code.\\n\"\n \"Observation: the result of the action\\n\"\n \"... 
(this Thought/Action/Observation can repeat N times)\\n\"\n \"Thought: I now have enough information to answer the question\\n\"\n \"Answer: the final answer to the original question\\n\"\n \"\\n\"\n \"Begin.\"\n )\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n conversation: List[LLMMessage] = []\n conversation.append(SystemMessage(content=self.system_prompt))\n # Include the passage in the user's message\n user_content = f\"Passage:\\n{self.passage}\\n\\nQuestion:\\n{message.content}\"\n conversation.append(UserMessage(content=user_content, source=\"user\"))\n\n while True:\n response = await self.model_client.create(\n conversation,\n cancellation_token=ctx.cancellation_token\n )\n assert isinstance(response.content, str)\n # Append assistant's message\n assistant_message = response.content.strip()\n conversation.append(AssistantMessage(content=assistant_message, source=\"assistant\"))\n\n # Parse the assistant's message\n actions = self.parse_actions(assistant_message)\n if actions:\n for action_type, action_input in actions:\n if action_type == \"Search\":\n observation = self.perform_search(action_input)\n elif action_type == \"Calculate\":\n # Send code to CodeExecutionAgent for execution\n execution_result = await self.send_message(\n ExecuteCode(code=action_input), self.code_agent_id\n )\n observation = execution_result.output.strip()\n else:\n observation = \"Unknown action.\"\n # Append observation to conversation\n conversation.append(AssistantMessage(content=f\"Observation: {observation}\", source=\"assistant\"))\n elif \"Answer:\" in assistant_message:\n final_answer = assistant_message.split(\"Answer:\", 1)[1].strip()\n await self.publish_message(\n FinalAnswer(content=final_answer),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n break\n else:\n # No action or answer, continue the loop\n continue\n\n def parse_actions(self, assistant_message: str):\n # Find all occurrences of Action: ...\n action_pattern = r\"Action:\\s*(\\w+)\\[(.*?)\\]\"\n actions = re.findall(action_pattern, assistant_message, re.DOTALL)\n return actions\n\n def perform_search(self, query: str) -> str:\n # Search the passage for the query\n if query.lower() in self.passage.lower():\n return f\"Found information about '{query}' in the passage.\"\n else:\n return f\"No information found about '{query}' in the passage.\"\n\n class CodeExecutionAgent(RoutedAgent):\n def __init__(self):\n super().__init__(\"Code Execution Agent\")\n self.code_executor = DockerCommandLineCodeExecutor()\n\n async def on_started(self) -> None:\n await self.code_executor.start()\n\n async def on_stopped(self) -> None:\n await self.code_executor.stop()\n\n @message_handler\n async def handle_execute_code(self, message: ExecuteCode, ctx: MessageContext) -> ExecutionResult:\n code = message.code\n try:\n result = await self.code_executor.execute_code_blocks(\n code_blocks=[CodeBlock(language=\"python\", code=code)],\n cancellation_token=ctx.cancellation_token,\n )\n output = result.output\n except Exception as e:\n output = f\"Error during code execution: {str(e)}\"\n return ExecutionResult(output=output)\n\n async def main():\n # Create a queue to collect the final answer\n queue = asyncio.Queue()\n\n async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from the task\n task_content = 
str(task)\n\n # Improved passage and question extraction\n try:\n passage_start = task_content.index('Passage:') + len('Passage:')\n question_start = task_content.index('Question:')\n passage = task_content[passage_start:question_start].strip()\n question = task_content[question_start + len('Question:'):].strip()\n except ValueError:\n passage = \"\"\n question = task_content.strip()\n\n # Register agents\n await ToolAugmentedReActAgent.register(\n runtime,\n \"react_agent\",\n lambda: ToolAugmentedReActAgent(passage, model_client, code_agent_type=\"code_execution_agent\")\n )\n await CodeExecutionAgent.register(runtime, \"code_execution_agent\", lambda: CodeExecutionAgent())\n\n # Register closure agent to collect the final answer\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n # Start the runtime\n runtime.start()\n\n # Send the initial message to the agent\n react_agent_id = AgentId(\"react_agent\", \"default\")\n await runtime.send_message(\n TaskMessage(content=question),\n react_agent_id\n )\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer from the queue\n final_message = await queue.get()\n return final_message.content\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (20.6%, 22.8%), Median: 27.8%", + "generation": 17 + }, + { + "thought": "To improve performance on tasks requiring detailed comprehension and reasoning, we can design an agent that constructs a knowledge graph from the passage and uses it to answer the question. By extracting key entities and relationships, the agent can leverage a structured representation of the information, enhancing its ability to reason over the content.", + "name": "Knowledge Graph Augmented Reasoning Agent", + "code": "def forward(self, task, model_client_kwargs):\n import asyncio\n import re\n from dataclasses import dataclass\n from typing import List\n from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, ClosureAgent, ClosureContext, message_handler, TypeSubscription\n from autogen_core.base import MessageContext, AgentId, TopicId\n from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n from autogen_ext.models import AzureOpenAIChatCompletionClient\n from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n\n # Initialize token provider for Azure OpenAI\n token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n\n # Create an AzureOpenAI model client\n model_client = AzureOpenAIChatCompletionClient(\n model=model_client_kwargs[\"model\"],\n api_version=model_client_kwargs[\"api_version\"],\n azure_endpoint=model_client_kwargs[\"azure_endpoint\"],\n azure_ad_token_provider=token_provider,\n model_capabilities={\n \"vision\": True,\n \"function_calling\": True,\n \"json_output\": True\n }\n )\n\n @dataclass\n class TaskMessage:\n passage: str\n question: str\n\n @dataclass\n class FinalAnswer:\n content: str\n\n class KnowledgeGraphAgent(RoutedAgent):\n def __init__(self, model_client: ChatCompletionClient):\n super().__init__(\"Knowledge Graph Agent\")\n self.model_client = model_client\n\n @message_handler\n async def handle_task(self, message: TaskMessage, ctx: MessageContext) -> None:\n passage = message.passage\n question = message.question\n\n # Step 1: Extract 
knowledge graph\n system_prompt_extract = (\n \"You are an AI assistant that extracts key entities and relationships from a passage to build a knowledge graph. \"\n \"Extract the information as a list of triples in the format (subject, relation, object).\"\n )\n user_prompt_extract = f\"Passage:\\n{passage}\\n\\nExtract the knowledge graph as a list of triples.\"\n messages_extract = [\n SystemMessage(content=system_prompt_extract),\n UserMessage(content=user_prompt_extract, source=\"user\")\n ]\n response_extract = await self.model_client.create(\n messages_extract,\n cancellation_token=ctx.cancellation_token\n )\n knowledge_graph_text = response_extract.content.strip()\n\n # Step 2: Use knowledge graph to answer question\n system_prompt_answer = (\n \"You are an AI assistant that answers questions based on the provided knowledge graph. \"\n \"Use the knowledge graph to reason and provide a detailed answer.\"\n )\n user_prompt_answer = f\"Knowledge Graph:\\n{knowledge_graph_text}\\n\\nQuestion:\\n{question}\"\n messages_answer = [\n SystemMessage(content=system_prompt_answer),\n UserMessage(content=user_prompt_answer, source=\"user\")\n ]\n response_answer = await self.model_client.create(\n messages_answer,\n cancellation_token=ctx.cancellation_token\n )\n final_answer = response_answer.content.strip()\n\n # Publish final answer\n await self.publish_message(\n FinalAnswer(content=final_answer),\n topic_id=TopicId(\"result\", \"output_result\")\n )\n\n async def main():\n # Create a queue to collect the final answer\n queue = asyncio.Queue()\n\n async def output_result(_agent: ClosureContext, message: FinalAnswer, ctx: MessageContext) -> None:\n await queue.put(message)\n\n # Initialize the agent runtime\n runtime = SingleThreadedAgentRuntime()\n\n # Extract passage and question from the task\n task_content = str(task)\n try:\n passage_match = re.search(r\"Passage:\\s*(.*?)\\nQuestion:\", task_content, re.DOTALL)\n question_match = re.search(r\"Question:\\s*(.*)\", task_content, re.DOTALL)\n passage = passage_match.group(1).strip() if passage_match else \"\"\n question = question_match.group(1).strip() if question_match else task_content.strip()\n except Exception:\n passage = \"\"\n question = task_content.strip()\n\n # Register the agent\n await KnowledgeGraphAgent.register(\n runtime,\n \"knowledge_graph_agent\",\n lambda: KnowledgeGraphAgent(model_client)\n )\n\n # Register closure agent to collect the final answer\n result_topic = TypeSubscription(topic_type=\"result\", agent_type=\"output_result\")\n await ClosureAgent.register_closure(\n runtime,\n \"output_result\",\n output_result,\n subscriptions=lambda: [result_topic]\n )\n\n # Start the runtime\n runtime.start()\n\n # Send the initial task message to the agent\n agent_id = AgentId(\"knowledge_graph_agent\", \"default\")\n await runtime.send_message(\n TaskMessage(passage=passage, question=question),\n agent_id\n )\n\n # Wait until idle\n await runtime.stop_when_idle()\n\n # Return the final answer from the queue\n final_message = await queue.get()\n return final_message.content\n\n return asyncio.run(main())", + "fitness": "95% Bootstrap Confidence Interval: (5.3%, 6.1%), Median: 7.9%", + "generation": 19 + } +] \ No newline at end of file diff --git a/python/packages/autogen-core/samples/adas/utils_drop.py b/python/packages/autogen-core/samples/adas/utils_drop.py index dec0c2d0f853..a92fdaca830e 100644 --- a/python/packages/autogen-core/samples/adas/utils_drop.py +++ b/python/packages/autogen-core/samples/adas/utils_drop.py 
@@ -98,7 +98,7 @@ def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
     row_ind, col_ind = linear_sum_assignment(-scores)
 
     max_scores = np.zeros([max(len(gold), len(predicted))])
-    for row, column in zip(row_ind, col_ind):
+    for row, column in zip(row_ind, col_ind, strict=False):
         max_scores[row] = max(max_scores[row], scores[row, column])
     return max_scores
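The `strict=False` added in the hunk above opts into the explicit zip strictness introduced by PEP 618 (Python 3.10+): it keeps the historical behavior of silently truncating to the shorter iterable, and mainly satisfies lint rules such as flake8-bugbear's B905, which flags bare `zip()` calls. Since `linear_sum_assignment` returns two equal-length index arrays, the two settings behave identically here. A minimal sketch of the difference, with made-up index lists standing in for the assignment output:

    # Stand-ins for linear_sum_assignment output; lengths differ on purpose.
    row_ind = [0, 1, 2]
    col_ind = [0, 1]

    # strict=False truncates to the shorter iterable (the pre-3.10 default):
    assert list(zip(row_ind, col_ind, strict=False)) == [(0, 0), (1, 1)]

    # strict=True raises instead of silently dropping the unmatched element:
    try:
        list(zip(row_ind, col_ind, strict=True))
    except ValueError:
        print("length mismatch caught")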
From 0ad96be141cdb1da89e5571a9a41978b53b3a0cf Mon Sep 17 00:00:00 2001
From: Andy Ye
Date: Mon, 9 Dec 2024 13:07:07 -0500
Subject: [PATCH 12/21] Update

---
 .../docs/azure_ai_studio_edit_deployment.png  | Bin 128648 -> 131 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/python/packages/autogen-core/samples/adas/docs/azure_ai_studio_edit_deployment.png b/python/packages/autogen-core/samples/adas/docs/azure_ai_studio_edit_deployment.png
index 00fec68594793153597da7b77a9ded7aa54d293b..813b59f2abb445095b5ba552590231e3b834c2bc 100644
GIT binary patch
[base85-encoded image data omitted: azure_ai_studio_edit_deployment.png, 128648 -> 131 bytes]
z2jh>-qW#6%;*$=fTxUoP>LQ(4S`{Q{D~;{YJ(4(gBYN3-_TE09Y`GY4q>CGy4yrPRJ?q}k<^{sc zVY@F+9Q(R?`G!3)d8j(;Lu`Ia=pbqk;hTE~aEPu0L(6Jswt-|Pg^9U;&CWMe7DEHc zli;uhr?y*OaCkBIUiC^o&p#rUg2Mf=*$F+t@?fKw3fzms_Q%6@Av!<(WTo1!&iriG zDpZzQ1^W&zdm>h9&MV=hdG!}vZ-=np5}Z00LA8nI{*!D-U#b4i$hL*1*Dgo3-D}Am zZT^R{O|`>EUxPV{meLQ|i>;dQZM%v91V_v*?F2FfG0Hd^B5SyIlz4Nc~j>*+=o<1k8jNja-e)X`9mDU9o;^Aa!`5qvVrh$J-Z7(Kc-1sRj#^9={; z-Ce`gue)Hq4Hcv_9+K_R3Cygfz_brcbbTmSr*AHN+m3RHx$3^&{nZlLzlSN>r5T!7 zj+7i^Gxrv%Kbz&a!6gL=cw#^OYQu9#S9h2=l!P6{z=%+9@>N3tM}hiNjg7$^%1<#e z9dSA9(^bd;hN@V#+H2c5Wpu~08S})P`YgPe!7BpfK|a`zr4UfCDZ`iAf($NR0XcyO zXcVp}4*++*J8v=3t46S$w;3?{+)1*s8p8AKaP3}qr68!?KkKSA7FM1Uh85AjN#MMRJ z3($lD2lVf@^YObQDnX^i9D3UG1P*v_j*A<1xK!y50Z-tcXZEo8BhI1S`I+R6%TyGl37 zbY-DK2_g^d*DT8|IUd&|f*`crh@*buO|w6yjpH{8b!_c)*}$j9+E)BYU!N8AN=B=H z;`y?m?Xs_+A)EeBT?cJm5`zwIdPj!%ydw9NiKol*zF)$czr4X-^g(qX{@d*+Q#De&bJM4fW)HmLt#S-;_p%#|0X#Oa%2c;66ss{dKX$8vS36F(z&@n z{ncNeo$9ijkI)FY20AZLI}OxrX->FzEi~Q6)`$nYlcV8J`Ihs|C6~WEkxcK435eso z^P1(#fw?PD2r1Ieu zr7l5kl(a3-q2o(vQ}!TyPM(eyF}`P1f3U~yPZ#?IvomhMj9B(j!`h8x?-nVLw!BB$ z*Q?b`2+u!8i90lJ(gddKfGKOQV2WqQGUxy#lHcSjvcd1)w5MAs)nwQOj;xpxM-`2+ z+97ba>G`+mmU)&zL@qZOc3PIwR&FS~?UR#cKn`c}k@Ff+^^j+Le-24Hfw+Lxv*HPs z#-~U6h7TNsb{S3t>jl*J8Zf>k&Z&{eVWnx~mkXUSXxiOMbLSxzJI*6~a779HOB|JK zJQwNRr*-|yVQL4Ni$*v35M|+xh??~f=rc2IU)uIJ`*CRG{b^jKaROXt(yk&ls@B_K z&eXq|0GD{SD|rU}mT9@@v3R2qoTIS5s?O8CL@`Kb7>Jj$~7lQQH$&=Au$Qj3x znpk9n>EC&Zt;F7H=>vY~S5>GdqiugDKYUoc>ZkD)?!F7|ACnZ+`<4t7Oe@|jgXIYI zdz0^|Q?5s}i1{38x5z!8FzwQ(3YY}$HOwG>vuC|a?~2o?$9oa-$Y5`j{!qO{b;%FQ z{E_Zn5C&_L3x2{*88)2mS`fRureUv@<%jUH7j>WmZ^!x1> zO-mJyW&Md~@$6&WxgY!lWYX~ea?<8iORdwrRN8)lN6PLYO?Bt?yk$10Q&-dWg~L1x z&zd3-Vb=~P+Yie|TWEQ!(;A8V@OqPz&b|gwlg$*dzcrH0j$&bZ3zaE**_~?_Ih7?O?QJ;(Q+xBCWYRjyM>~h>*LPqO-6bC!vSjBVg`A;I>E=Ny~W< zBwx+}*uHk~IkYJP`@W7WukCDOq>3hi3(i}@WK5qv22xKPa<37ZjCv|jL@{S`5Wc2* z(GuhW8Cw6oV2o1>xy2Qf7sA-Qlgi3;F6i)8$u{Wdtzva1x*%J=wa2aA%8~bSC0i$3&5A zr^9L4=jI>C7{%!|w~8U3Q>7TTyc-!ObXzfw@cl3jr0W_tT!jsQ22F!xmsTUAL~u8} zbuM6kqQC|B%q*O@2db;gdh1Wu2y7ZsjWpv3L^-9}+5X%d+d`x~l)j;KMREe!)Nrnt zF_9dKu!9ziEBcoq8kijqROsEKq+ZZciu+y*w67+Kn2s-BtqcW~ z+yMuaOn_p!j#5Qi&jb<*T_ZPoS=REc{xR$2LrUC6tUMsSW*+xAIJD>NQQy!(7_V1;0-m|Ha49SMmXOd_sxc+P^-ZCtl#+pgc5Rpy-=dx zj()SH($spkJ2SUQ5xP55wNf#m-BxLNV%stuo@S-Bb3PIx-y}_Hp>d&Op;1nJeS3oU za&NpZ85|t|Yw{s1?yk@K>meLop6ZJH_U(7irqsiC`NU54Cvob!MK$kYD@?A+2;(mH zly}~9HOEWthB>RmWUSn|>z!Hd!oe9wsAG6lEQXfWKt0q3Eu0XjdD37gsBy* z^}%LNDuSlpgbsvUtJXmT2gzhSpJcCYUvOu-0)wSdkk#wPMboAQ85~CxM#@zQ{i$~h z?IRZS;NF@jilBNhSZS$ob2$K!0F|!ZzI(@NGD!8(dY+6Da#NyyJZ)Z-E|biA(R6<; zbjtCG@^(YAdcW~{u-ew(rPVBP1pm>4#*1Z)B%4;W?!E1zCaX*BdKcEKvt4lFpInod zXDBf%!h*-~L*3QmNrQ-`AfcNlzEr~Cv}_i~EhV_-o6BQc81Wtt2z3HoKA80e|K>0P zQoe(6chM=Nzu$1)v2b_BwN~^lmg(Z44SOu<>kAeAgT$MN509DG}`j& zvE%vNBge!+X41deb5v%BDeuC33P)JYwSBkx51I4ZiAxI!gISm1StE#!j{c5_9z`eJ z3VYQ`gb>C3?LLIn6rqU`5h!5lkp#b90pFYN$N_ah&{QdmLQWgE}fDvpbw49f|yklws4-6R)x+iP8XlGX9 ze;30bA}^0S?{O?$f7n4fSz}M{br_T5afPL3unx@dILO;UMHl!Tgl zmla?A{{O1d3G9ZEZ){4Purlh%`AS0NtsR~FWcOeUW2WZt>_yjnsMyK- z=kAT`yvF^PN#L&z&efm3M6*K$oR{m#9yjlq<`O3K2ZlV{Iw(7q{MR6r3QbOq_G7V9 z@W{BIU&z_mun^2gr|{SZPH4Nsmi73khZ&!cu)PgiNGgItK()q>)_5R=Bb$`n2y;O2 zngNj3qkF(D!NI{)&!2y^U+=FO)7D+-2=h3bGOj-ve%G785%QTvvh+h29w;8v(Hel! 
z)!&~jn=1Iyayoi-^#iCcBro&Ne z(@g;E%|Vo2aeVvrQGjy3q@-Xq7gWu6f1!Q1F_!np^ZLwiXR?Hh*CCijGTdeKcwOi| zUXsGq-#EoJ5awW4&A4uUi-pOmi*23zx8mYRq+C`{xwvBI>YStE;(qN+m5~6l22$jS zpkT`EtR|<`?1p^NyS`tq)$6|s7sN1{UgU67XA6GP;kX?Q2@Y0r6i^@6U(tDNzm@N| z(L_eKpA^-3AOC7A!u8@(L>eO0&!@Yz&Xa$!V9MzM2~0F5UrL#GyqNGK#FrBOy|jvM zZoGGQccXbK5%}~n2XoE~C1zuPWo~-d*c98YY*8=-hlh6z6fHKigI$WQ@C23?t2Z-PssNaA|!sgGYQF$;TD^voQ2UOT;|1O_- zaNczAIFr+I8V3&#!KwvEuqwsX4WzSTZaoFJ_J3BiUAhb4z zGEw#P^m^l1+myuMCyp+zuki^8#M=TNn$Fd7z=3_M-@s6k_?*F`x?22Dd2E-Ffb?Xm z6mxKRY22T*^z$Qrb^`N3p7O(+n^4Fraq-pe!S5+TFX*`R+uOn(6lf(=mK}Y(>v>Wu z=o(z7&G)vf3)OWoSt>~;*h)S`t1R*8!g4~ct!cRs>~6%tt!Z6dUq4R0zX=KsevrUn zZj{y8(E-S3@36|_($3e{mp|LtmNz*`2r{3vgAXPJSV!G}Do6hyD3E z&GH8-y6M~kZYM12Xw`R(H%ISUTU&cyy%>#WH?9V>>T+|~c?naC;{_U>__F+ZOwjjX zk2xM-V!otzSFLxk{d~)**NSA%ii?X|gAPcfNV!7wRuxHzzI<^c7nlDe(G7D)SFiuO zT%%VdT6_`5`kMP=9Qpm$w!}{v-oC*~&%b)!`0C7D9@lXUFI`!=(R$B@(Q2`=@ga1=!epi`73Qx-4H$9v5Omuj~Q+pMa}410)Zb^$MX zoXeUD-=YN?>O(mRm2R?Ce$i?tgNIM=P3xm=;V z-@tXSsoBg<+*oK50{5`68}-75HMXi6Qp-^@FC%K5Lj}y##W#=D44) z@BQV^fq@91FV9z3S6M73S%{dmdw;yoi{r7Uvu?c1u5l^R^oao-EVfw^9nV)YU{auK zX=xeFRSW|5)MEpR`qOa@sJ@{2L?QX@)#(a&+Zs&R0nPW6j*iW0wwl^c;1dwfH;?}e zn~@UQ8{c++yKv8H50wIY?cdqiu^q1u&{I)0KVt{h8cc}}4-XTw>LZB-V(N8b~F532aw@T@~mA0${QUBGlE@1?;cZ zObZJO1`!bvhmAoWEOK6#Rjp8XM3l&+q|l}&q4RlnX4s6Vf``e=%R@8Te}L&9$(E*} zqVf)+a0>=vCoUxV4G^G3O?SLGGRgWNEY_j%lIcB=Ng6no&syQ%myo z>({w+BI2Mip*H#Z`~Pq!WTYnCvYY#G%|>Z$*zm39#q)))Dw-tLn^;v72Rz9#WLT&& zIU^T*kURd79NF`@>{1P&;;QDHNMSTD7KFuNtW)}sl#>O#;qGdpiOq8QrF|us&R``G zvvewIYC@y=de?ejPRs(y2-9gm*(Csp9UT0c@EhQyV5K;)cr1V!0r$L?k;w)u4h4Tw zQ!BiBU(1A@2q`~qDO61At}9phs%7P9rfe*J&3j^|&hn$?*VkqZ`}Qg`CHed+J-gMSg`4kHtYi~nG=W_%2r z4)H?quS{ty8J^@@qxn`cg(sR?6u-X*S5OEA1ARv}pVHH#*3I=_9=|Vo;TNo#@Xl3V zXWaDN3l+^q=FRr7s>R1#EgXfu=KKM%I$l%HXkbIIqx~`H4eni?EHMBJ<2PMlzOuh> z%|uQ8HLi+u`#*bbuiH#&y-EA9d&7p?hMVkhwEXXGxe!jTJ)x%$ddy)a2MW0$ix&p{0)0+ufGStWD*qQ#d+v zXr#XQXPKFr{;HDWcHSdvx;YA&Si=W$5Z4x_X+X(&!lTG0QH^IXAxsl@(E6xTt?s_u z7tQWHuk9JX`FScdfI0lQ$8`=VCN}RhP`w2ScJp%#ph1|MZuDiC9uYp4vpF9sRJ?J? 
z&{r1C`s?!WC$mTSb~KOLU+LW*ZQAj%(y(Ft75k~ysonxUplG;1E=s&m{Yvon^1OcU zJcWS11*Nj{X~b3u{PgJr#-G7T$gzxRfTDgk6)f2L{cb;v(VzXk5Bz7~25{S7`S=Hb z%1mafS>bFcvlGi+6`G;7k;X^;r9F-%sD8mMc|R$xyfpYoCI=ru<9Md-dgLeNZ-&6n8Tr_@)VyTF0#4qJ zsO{FUvMS^OTqrJ{r3!c%n)OcE5CPrO-5+-*t(K>zH=Lqd7m8*7<5$soDJnr}<>9=$ zj(lFHoYkhyLu2gxC#Jh<`W@j}2ITfj2=i8@FMOGgSz|dMXW7+khFH(YRNL}27-wzW z>9^oyjInfzRAsCNgjmyZ5zt|Cp>U}+1|Q(#+4}L}O+diHWgUOP~-cDSJqyy;ettSD!RLjLYVmy)<}x>;EIqD`dTMJPDo z_p)6x;@8;EF}9^_)j)Bw)y(q=Tkd_)O2}!5^`*UG>0q^@acc?wa4A=V`)RiGz85Ch zVA0}zgB$(`d!QF0ik-a_CaZwK#wbqzsMV{1ZBks5LK)Z;ucE*A+!#zhg#JAG5)~z8 zW785fusWC1J0)m3RL;43Gw|7CF`p-ofMa8-maYPbM9)B;B7#$2;+agrOgr!TEOcZGVT=5)|LUgl8aPAuO@)?J}tx3buR>2y0c ziMO9jTi-=6r?IE(9;d*1-jTo`7$i$JT^dD4FvS zL2P|JztKQSA24!YR(*uP8S&KA6onyJy)cgn9W%2ZlJTEn5)!r?r+%}LgNm?lOKK&q zkGLe!Icgnt$}*$|Ih8LcW^KHVE^|hsSlRxIEqCIDEc-{D#5Xs_k{|DnFiEh4@})(I z$&FI>j^17I`&DB{usNslNEVs}wD?aW?mJ=75nS+`DsmdWOq-Z<9uXL&$#dXntFDwL zovyx1&|VQ^>FEwT7x*dW?ATHv?iw#-b0WT!hyPbuZ*u-reX3&4d6{dUOt{^r+r)|D z@u6z*oG^ zdpn=^rGYbdcz8(0?=qM~3uE8dA;Q76mR13-v9*|Bbq5pGknknin^#D~Pc}R@`$lQD z?VQ;mX3&(if`>WWt1*THJnxj}sA^GFIu&jojekoHB5i%Wfh#&OEyL^ZePD9ik7T-% zQ-D;7{#cG8zVPEVGAn1c;JGup(T1brh4is-{`xU`o$RR5QKVc)d7()1B5!CImfla; z#gWIWlmcx&`s!TRJBovA>0>n`7PnKo_I+?(BWsT(UXAIQw_8>O&~@Ke`vN$^<%c{pDW$Dq!CaAD)E++<(In}-P%`8{ALn`X~9YVoErD6r4- zJHLO&m??d8sxz7LzBnAZyUfv7G5=%?oa^+^pb*>0WQK|toB|q>$~3zwp+(spy;1wK zl3BCgUS=!c!cU;+#2^=maD1(WF^(m_!T8!IRWa)I%?*Q=2Nqq@lifseGP*C}?T)s; z_*$XZUoK4g)-5iNG`xP0Na#W-hJ&*V^kRc7_vs>)CXtta(L?A>C0nTSI?pW(W-9Uh z&rD2_NWHX--z?Nbm4#)R%47c$Y>qYcaWFx6K|aUA_5AzhWdMt7#bUvF^jEX z7v#C2WeS;kvM$xOfASoZzuAeOk`D)Oqee`W#%yF&isE_D@h|RmNfQ&C%-Wjis0kyx z0h|o;HXllv5{m(~(jS-jbLkW%dw!Nz6Hj?ZTh1l>b$e`abaT6QwLBUQ^Cf0}B_wt6 zignAb7Xue_tdglJAtXM!{>BLYC3bGdp+Qalq}M0uDJ+H-pPR}|WbUqeFDKtrA;v=c zuSH!CpNuWeGVk0#$re)B zEPAw*n>N|dHK+DZj2SB^9{>DS7Kn;xx5e<>v?+2WU*bauP6tt?Ae!a-)DlKB9L+vU z9@2@Q`h{7g-y?NJNw~d?j)kILcX3N*Zunp}e#7Tzx-0)_EAZIy?xCkTkww=*f723I z(`N9P=Iic0Uu29>E9Am)IAF3bjte~D;i-AebH1tLzRLk3Zk@nI7C7qupx)s2O-c&3@Q|zkpP91&&on zx!14$ekedHz9_)}-f{4+G3Y0>XVEX3d?7~zgd_RR3$8K<@bK9~hq9||*CDBD)wkS- z&y9Z}h!&w`Fygp*Qdyd}F8QhT$_TVyX$i64Z}5C45}tutIbI~Uoi#DcPp0(M7Z`WJ zA^4i%>?&t5lzMd=Ilbk?;JTOEdfvQ{O7t13Ku5~}k*+j_>%mAV?$mZN%PNTDWd zvt)iM_~ss&&r6*18D^kAGOnr5O_XasoRy^lJ_eSBjxiCFF&)N8h^ffu)Xpo*8ZwmT zRC%bWVrg8%`lF*Tqk8i{Ud{h|6X#}mq;EKiIl zB-ikzxEteXA@_U`<{ORD0v3H6mVoFL-G)P^y^XOHPD`sruFNak&0EVbI3%^1VTN)d zxF@)lCL>NA zfGm_!C##{06MOWPy5xYOyXn@B`r&!9wP1HzWW)O#v@wG3q6T_9vSo5n@sB|5B6{ zXy5lb=c6GWsdKgl%CH~}xIV#}V+3-~M7$xavg;bk^hm|)$;Yqzc^yA7Oa%3MlpguO z2~qp>KS6E@`bDJo6w_L~^+2Nc6N3x&KBY}M*3p3duQ5sLb29p4pDmnaP<+z~dD$##Y8xIek}qWNP!i{DzT^SE9~I95n$mf5D{@ zothIStCaLyn*T)`cQBtY5=$YKwviUfRrxG#Vn8jvnL}KnHUP;?{bN34Df5OGHVGhH z3Uldm)yfv14%XNkXJm#1KF`d|3}eMK8_(}zteR~FKViPgp7I=QFVw1g%yq!DS3pYv zxv^C)($>mOYyAd_{Ry(NvQn7(`1nxFl643cO6sHk zmeoRocw-Y$prjtasQOn}58>~*m`JG2y-4hTw*J;6ohr8TBR)M`_vl!&+3Ajh0`TQP zc+upQ0@*ne-ioxfDKXoJU|g2TMo;&Tb4VaE)&ZLiFbThUdh&{yP)@hT+Vl^1%0HB- z@>k0CC2-V06MTpGkB*KmF4t2-EJ#)2%gbLzH~_)_Iq%I^v}a_3Zh_G>#&7y;@o?ba zMJnPw3xXA$carpfeH-K(teA;N>^mOD?i`p<5kPRDLoun#}mQAIHP z77I`0XO^SH) zT~A3hib$l9Vy~US*w5En;aj9R&1OU2Y9+<5=Iy_@ZLRV#Trq z`f3W!TV?cwxWDj2rmg>`cXM%AZZhO6orvC#Fb|fT_`V1fQ~|-KB>AEuEz<79>r$AR z{2E?ozu1eT?mUITB?Hn8uS5^+(4GDg!I()U^pYX;;x4;M@QFC?uMJWRLFZ@3aPzM9 zhhYtir;(pegYTvFJpEeZ#~5+UIZ*4G`skid*#B58)?IcVGq9Mr`sw`r4T}s`Rx%yN zuPecbHmnC$#I56!Nq}Ca22^F~>*T=WT*m6Q;ti zJ96r4T*!kk_Sau9#6`w-VJ|=i&O3BaYP=zNXUGQr`C8o&g{F1)8KancbNm3E0evm} zX*h4cI)#h7p8a=>%#C^A00LmUgERrDX<^RZH(0zFup%K*C`(;*&h5O_1#_j~#2J`v z7IkWig~oK@g+iTRw!rfdKfz<5egmZ8cyWJK}dZ4Fxwwk 
zDQEi&)}cC)$;o|3Ydv8%eyG@`Zl_z+$1nO}NrXKz0u&h#=mg0m-$!G$TQ5RZIwPW@ zqd$0Pmu2gY#(-o7JS^?;$Vs{bj^{b!h?*T z)Q`?5;BsIn2rY#U%oC_|2+Wm9aq2WSMepnZ>-df+Mifg@#HO30Z$O8MhPCd+`Kfu} zzoI!KN^38;s><;fkzHL~Z$R>x%lARmf)^hN2?>bK2V#=wuT}wBb9;U69&JzgdZ-~~ zV`Bqv;UqF4q1(Ku!N{W!s3;JjWv^xYU~J6per6F)DTEb(NF$TNA8*u`Kr#~jl!qst z@P)F===NJglc`c#&#Mz~xW+^NlR>f3e6=W$^aE3C=#g#1qvaC?(lb7>hTm9(S)KPZ z`;(rVn2wbizKd`JlP&&NU0T`^Bu9M=<5_urUvuclH&lF0bU*aF%{54w#rI~?qn?b} z*e1^N1=Xk<`>~H^8xge35m^J$@M)y2aWl;=J}c`bh%u1~;>ap~Wv&dDCoW5>7%G+T zu%4~%Q62V4CzYW@B|2hW*KDb_Cg9rl$Ypdti4*>uxbtbH$D`MK$4gJjnK4w6Tz%S)8h5hj_w%;##urV63!?747wUP8 zH}ASm4*LfZ^Gd7Mhs*5@YNh_N*yEWRbw5U8y?{;y3ueDtF_BOPn_Thk{H+Q(oElM0 zJB)yyv}*e6wZ7QPxSj+dcSR5&FTk30eb@Fb7B#3%uUz@N1tUT49QnbprViupxMZOR z;gF~ZJo0Cqj|k|GK-kq@?bz7*(+hhJq(5=X?`htmW7|jn#50?C|ND3*lZJ4*Re`Tc z(Z}FMY?vseRyftF^cIZ`7PkNL>;1b0RM-^a=5B!>>ZxAyzAVsaOJcAaGgYrbX`7y4 zr4P9P`ICX9r?6X>-@nr0Q#KJ!2V4%z`FTGcyLCC~xH_?x*XV0l`=L~&?08l6y)Tn% zJV)a6)1_$7*zb7oS=;Y!s$hYHoZ0C2xS(qWGPObU(p9oPg{6i(lpU?8KX zF0QSvf=m(*l|cNQ!i@w;8X(*1whVf>!}CwSyC`+vViu)NWq zmiqi(0ct@-;opfCKu8|h|5qQZB>zIC5Ud+Em~cFN{JtsfWEnb{q~O$4N|0M$1Mxp7 zwIGk@)l2Y+(O#=Drim;Z$Xk&A0XP~;Yn9E|a8LS5^<{)`BmJia!S>L`L zUTqh_vKalaC<89#GcaU@_J<%BsN;1R0Fs@grXX>wHbTN=~Xwxq_*vczfw&CXQ25wd$@SrF{+I_)bLC(wL?sA>N z@KaP|q&N5iAdNVNfiLkIs1_0qGXgNw7ZrCvTfs7;9v=K4*L|`(6APjuhX8jq3%RmX;EMZHK^YHNembl;L5y32jeBU{uau- zxo28(VgO15|qu7 z{4|&c8OuFBSisVE5pDf&#W0AOUHAbz%y`s9c?H$Fmut zlk++(0))jHxM%5fLTCN%dVd`dH4H7$m&kQIvv3!)aJ%OWbEmH7mVa!IJa19z6w!4I~s4g)$>)kP^;rZ1{Ix?QiWAT1Dl%W-J#gwP0Foui;$VKpTwz6$1J_00>rtG`H*8SCvp# zG9j-d0OeS5;8kmI;{c8-Y+8Y!DA8*}0gj~vh)V$%8U_$p>xos&Q)aBNrwt1i^h66R-!stk@F}-x*F5L4*?(6@{VNy88MU z*@&}&nEy4F(Ta!;a1?8xN1!CScXP8aEOj022kf{QV3CzL?W)7b002&{%dx6ey}Z1X z@<3tt)=dYu-5-@Mhx)@KBLa7HQgONfUQ(ppU;=D$7~=r6%7^>ATY#JDrGZ;CzXf4j zBe1@`iCht>URNKVhy@-1M3frW9S|tgYx$rT^s*^gpx_@85>gy6KC+T7E-zUD)&Usk z0{}(|!&HILFWVy;K#IVIECHuY|Bi2XE;M?Q3Ajar??jfQYv!zsfk2dq2YtXz>@)JP z0{tNh35bp+083J$UeJ+R<1S`PO+&Lel7p>LX|c3hwUBMl8Sbz-jLejN2S5%6C8{tD z0y=9jS*(|@S@Q*4jt{W|6RKA=GgWFB^4SSc08C)yE59HLY0s>ODNIedba>>tIjhik zSjHbXiB_{+0ZP4!3diHUzLw}*TT2{8a>C?+wBHy9951EK(H zHbeGqJV%xi{7w`=IA8#aVdly^K-9+o`w8?VDv-5rfeZq0%cJQZPo6&Y2OyCMfLRS% zPjL?ikh2ni-vgO75>|cxnrhfEs&fac_&3=Ve>81(Uv@eg2ULC>BP#pxLd-{}1dO*egNO{yz+; zOWLPTx%)n@t^N8;0~E|;lIE)qCD2Whr{6R_deWu%=s|Dt)CAQQ_Jg37Xb)M|zxuR= zGxGSfr|!{p!uLt`>reNkLlqRzxRVK;+ztRT)kM+Dg52~xH2w;>%QgBRjzs(qGP&xNa6XsS&ys-6V|m`eT#ui{uE$2&9*3`Dn02iBYd^ft z=<)le&C(J8S^ZG#E{1oUlJG0KJ$DkLaC=Q*%`o!lkl#o~+qW;t7*DlGutnGg|HTt% zufo*ig$|!C?HRKy%jVu2AjUQQL6Gndh9pY~B$|E{D;K<{O2g@!Tu)iQY;C$M8lb;x zBo_5a>a%7s{#x_UPcu4-Q}D7XtnJLhUIrIlKGRk3iO>3e__F?VFhTDVygYT!KZ~fO z1|FoVzzAm^JJee(lmJJyl1Z4g8jtbYxCG4f>IZ_sGjaCkdJOVV53Dl%ZYjYopN(sL z;+Yz%F)v+#9z2!EqfBzRcP_?O4aP4KHPq43J<1}hRTR@YpW+3sm935iSnwuck1>dM zghE>uoaw3`w@0Ke{?c}%bf(|{59tRmfxk<`dB z4}t;N5dFlwyBtcolQI?6>Lr#-B%CQ@ix#^N*~JG1v{_{RL1rxu6nFFz3&TmR9Pm42 z7yE4U_uK((PH73q=A9ZOp69{|6CL*CDJh>E+uku~Iuq9+GDxH1oy##;GU~e1hr4Rb z#o(jYJ|*CC^1tRJ!+#IG{7?{X++k-Qf>(7JU2C~!_reO(=f7|sg;+YUlPu=cS3_RF zI7&er=A<KG(um#3$lt=p>=q@{F=2+ z&+crGrAu2Y*%Al$T*ozB!iL+BMosv5gY*HXg8kIl89T$x6aS5E8Z@fZHqv=rr)4&% zKt*KRX~st@63lQih1jN_rPi8ScsrBx@hv+)31m1w(Pe|a$v^p!MaO#GWk_~FtU(=X zX+5Bu!sy@6DzBR7bKA+$>uG?`BmBCDSie@B$VunHH_KFF{VQ>Uf;DYPwP^m-$6*;C zH%BUMa3n`Oy2~vA;1o2FwQGe(e(-(o?8!h!DVJg~S8eje)CjTrxrxMrz{iWAD?IPy z(>w+A>%B*%CQugiEIov9w45UD&ORz%LFP|F#f-#Yh4Q763 ztTWKmWT!Jj79JR8v^NrkUfETai5|0mP2dC-{*XEpN-^GWAkoBwxKGiyQD;rt?aA7c zwR(vpNm!ZUO?~`)yOQv?MEcyFj*d!jRPu^>H_}-Y za9iy*_Qu{yPJ4_D<<>Ojc2tYmjp7saER0AvBUNQXVJ*YujLL8s)oabp9zZS%B=S2G;7`^&az;vvKtL3#{#cYDYe}2h9rVT(=(H 
za-t#pHC4l_)4mAEBH?fu)_d>ZE-kLixZqGLt*KMt^lQsGq_+zM;V_8KoBiC)F`1Bv z5*C&Xwf5^$KTp&ZS>nV9S4AM<$USkt!-Fb9Eq^l^0N{zJS)c!v9QyuE7Tn9)fVgZc zl5jb7hfOKt;{`|M0Tr?ot|kiBkWew?`xlpV+#&hYbC>MgCy2r<>4n!9oSr1Alt)}S zr8BnPmZZn&cfwah;n#9jg{JZjT6GTDmKW{ygNGB!9sj(psTR*V`Ng*zGA9Us;zb0T z_iOzX6rz*crb*iYTIhq9Q4RV1YD9rzj;LC@S3oBHbN|sFa9G zmx_Q$cY`3E(%o<)-QG0cwQxW0^S)o5`Ocg(XAU!a&uli_{A0y+{c16OYqUOkkzO;& z+lvchLC@`UA#(Ne){jWSVlIuMYb=Cn0@RzE+P#CyJ-W4dbiprlZ?N6lH=s@a(eob{ zm3QNeqO|UqzEap~@A(8Fp6~)6rLdi)ql&5g`S)B3K6wa>SrT1ddUI_4Yl%QOBOA+; zdKcL+UK{!k)2VcGB}x6;Mk)vE=P_luFR!MzJef^q?0sH$;KvT* zcvxOr8&N-DvjD!ngA5m=9_M(DvP4!@DW3nS^G1@n0y8LMc$7LTgf?lOC ztn`?332$IH@jb)gBEJ*YU#c+{Xe*2bBu;0-E|9`zXxZR9`X7@*TRFHuBHL*C-G}Jg z_v}R~#>{*u`NOBR!)q3rh4F%uj3cphIn82=9vD?g zZ40WG6%7$97YRD1+^UNWjqJ@CbAct%uv3%6+ro7DRxX32 zu+Y0YoY8Lq@*`8{FXn%y-zqYbsJiX}Ua@7sKtSYN=C)%EeX`A29Oyb`xILyCf@{{H z>1c`(F0OL5FhCP%vSAzm_R*WyLqefzL{2Z8)%3YY@2b)WT;Xd$>(aSB>LJ$njCj^XrXP|MP-K8T>50MRv*jeaX;BtpT3$ zJ}OEMbkkcuY4>q{Kmi-T35d%2?=~+eQ>{AWc_|GvoshZ$p-ht^w4B%1!958bHDsi&8 zoDQ&kigtMn04tP#U)Cz1TNA_{n@0f>VCDHXN^os`acJOo%eIM<*k}4l>Pu6RKhJcB z_ZnwYTzo{TJ;p$<{9=9G7KzSLqoPW_0C(lOJ(CFNPRnJt`uA6BHczIRlOEOyJXi5Z zZ|Ji>Pv-n&xu=43ueQ4{zj2(ra^(v9LHG{kQ!2g(uyq#nniD`@wY*8L)1PBp!*>pZ zYH*cWw+<0bS|`FA54&}6l?_lCkK>!e&d?0q;S9j@qWhb1wVwhEA14Qc%^@4m2u@DU zroO|!jjOEq89+9qp;(!y?MPBeyi5L%kwzV!3AFUb0F2}`+PVS~Wk7|v9G7*v)3vot z*(;DO23$5VOATmGD|AM!E~vlFOI9S!i)OAtH?27H;v_;9tqp8W)%>RNJUhCxR6Y$& zx`BK1yE&FK(5JGk{Rn{j0Kh)-1F8|e`UB2a01g@Gsggfd96Q84vs6it3bP7=qTQY5xfcDB~EXFRb0@>xBHi4)wt03lI}uG z*ngR*S#KVmy=UYdO(XPgK42ndG1Wi+CeG`hN6fcQzy9fnI+A~c#Xv&j2so7^o9odZq^IGr7?!(p=Ourk#^!(DnK9EpdfB0>n}|Ny} zz1tT#-cXR}!Zg8h3U0tty$o(-#&+CRzzQ%>>eJw9GmrT0YBpF91nAfMkPyR`!-kRr zULU*D>JtDU&7jo*Ew^;0Z}DeVi+_LW!>PXjdR@Bwf57$sMfLrk8pucepmm;g93LMK zTWQm6^>5;?s;$mkzR>yvEk5v7=z7CUtm5ii2d~OX9RDMA6&D3 zlW-Cof5b-bF><}h&6*y1`>JaBCzx`3eYU3TQWB}b4w;mL9=U5Z)n|FFcud~*_bqJi zCOW<^a|k|uZ$LUiRi&pYoLQ#1@ZI&phe%KV>Mo69#;-lBzK=-Wi*NfVu&s| zYX|{gFy%8K&cEB|`gq=k>0O!6&{=(_KS^zOtUSLaUdyJx6tZb_or_(_i`%c~-H}ng zA6eI@GZ}5qucS8_buWoaJL&TfEdqdvw| zzSVkkS?WZOS2K&b1unT>%ExA%8)SGDS&VaMZD64PoLV^xN84k*m})Ce3*M|@^QrE$ z94aQq&-70l3Vf(;vB)00acB~NC;0u2*pUp|3n0+sQd#*EL9a=>%B7^bGAjoagtw2v zHPRd&OJx>M9BgYq#2KxE?jv1nSB&CmaX6)&Iny$Bs)QbR+Em--{zIA+Mm+NI#8r7i z!xi$4_Jc~n>@V-hlJjIDTgQ{eGUZ&xRWYI@eqC;yv@c&t;WkrlXyBeaNxnaBagv+- zZ&7@zX-UoaH-@kPk_z|5j~NeT zhBX%us8#TDSNzo&E7PG*`Cb6)be3r!XM$X~IP5QBJZ4m&pqc{D@lA44-s|_GbilPa z=scH-e1=hGV1lLvS<(pr-eM1NiOKJX$&x1R0S%kvTk3ki#*mX)3V^pA?2^%Vl@CJ8 zG&BgJfyDvsBN0G?EvuFYGTs?Ecpn;SbU5V;zayH=8V_xbPzOAL3c_@_B%IcL^9X^q zRiXKiKw9yl03vPx$zvpuW$jF-P5BP~4&7mN&PV7wFATUWv<+n!}o z%QLNqDrXavhwy+;XB)Nt`8M)Mp(jl0H?jYu(We0 z@Y`UBz@}XiL<=TjAim5e5pnm>5LsF@bm18wqCxavH~Xw2@)m>L=L?Vt^Mi#Sko^nN z0{y)S$(`i{!G{8nA@Z3?0XT@5iN;ViEW-pUDE0L9oq*O31L6h736cyCI*hoXX9c!V zD`3W!!QsDFez4aXwg~@b13TAbx4H#)`#$grtMJrUpva2;^-B>bNNMo>0ev;*O|CTx zJF?LJ=0uc`(>YI2lj!N`6<{JvK*A=73P3RDT?3aNYyngoB0KZhhz*2Uf=DS)O7c4{ zUjoJ*+416mJkBxhcm>xKI$&Ph9M&=g=EqS`S^fkij)>d#a=OQ!9}U3VPNnb|Ta04e z5KGkz?966Q`F=IL9)2(~0rEWo1NIsihQWXp4n<~1jpSF25Z?(#=1hA$ zA<&B8Kvw`B zhDi)2%W6V0a0;#DN&bg57jArFB9Oxh04|~;aKsIJ5rBBkh;Dp*{A~DPDD{P3;I*~2 zLD_fY7w|GLd1M|x4w3p*VE|4_AoJR@jTA1Yy6m=hbNncex?jsW;c0`$31x7EU8-UOsC4EIl9 z7XYg#bETubI79jm3nRD7#?05_?#i4vl+TW;qL3q%>Lk0 zh`gS3#y~Y4Xs-IFXVC;h0VGs6pNau%0>iCRFZ12|_u`I@1+~tdX_^CZG%$368l;cl zeMsqAsj9A~^dKzfAl)?lEak4DLA{ckE$4kBBEWk%ME*|NT@(@kAqt2N24a*-|2XOZ zao}?zQGx=vtU(D-0wlmG?>Th+T&$mQE`DgMKLDpCj<%xZ9C-G zmqudYcfu&S4BtWY05~bOpq_`t(zneU(JLd65QMzD?N{Eve;>xKi;MUw2!+z;#Gr7H{{)A?in5YRbyzH%;ue4}95J=NNNvhFMuues^HvL;hzf7vhfTq{KD 
zh3aNxCNVJ;2{GCJpKd`PVfeNBJ$u4Dc`6>ktiVm7=b1f`Ypw?Rx`RStiZpgU=UewS zTuAHxCpF$_&>I|TKY(ch_0zvp2an7_*)y3pU_Jq=0Yk7cV&-M@fS8Bn_=onvwlb^& zhAnAXzUt}z{zT5}zi$D;3%L*f;2E*0XYBg|J8XSfRlD1cyK+cagJ z=GYT@PU zA}L-f&D~jNU6j_Z9O!O&ms`dKA1 zF{}0~+AKXGO!k)ZIj*uQG2DX`1w*V zJ8}LZWowo7<|91PJCmz@nfmyEXP})JG%mm(KVyAxRWoT0nd!#;?ouh%={3J*R9Oo9 zZ9j3by7y#TOf}9M!}sb~uV3nVKG(d>$Xz<33ST)YXN)EFxgZ*>zbADBEKrFk@ z@6@#hvWEp%+Nzu>6DvyG%*%h?tqJLTK}J22a*1)sX5b9ZJ0AxXnXT_%LY41)kASKV z`rF3R;-BgkrQRJgPoTcaF(2xW(liLp(*ocGnE3lc7vafG`aYI9N+8AmQL85HIJ8BJ zL1*rLkfb>&5jHY8+V<#@>7|u4&s(8p@wOxkMAoK5hPLtF-mxB+=+5+X+JVl8>Y!D? zxp&U?^H`82!_VJ5Xf(i=HOCEOu_Hh3lB^9`@~@a0C|0UdrP0b(zI^&Xq=eo4_SrG{ zj3PIGDx1&4FJEdq{i?e5II#N=K7wDKihqCne5G+agO|Lv%W#d+MdA6CzT!CQyXgfw zcg{)?2kG8pN@iyw?>~ROq3IK|$&7q=+fT_ebT@_C;&iFt^@k!VOZw@9ti__;fEW@n)C zYT>@U-i3JfRyS9C8%kIW(SqoAh_~!X%uS8{2X}0!nmiFcMnHel*Pwg(cGG6Y$!r4r zyG?2rTm4x$d;=JK}G(pLPY zA)Qrv0?uQ-SVq^5+c1q{ZSl^gREdx@e!l+U>4UH(HK|s+8!Gp%$-Pi!W435B;9yH1 zaFuBI@xMe}`F^mBj9HEXHX;xW*a)!FkgmWhe`pA`Hc09`f|m{ZVUx7q)z8b#c*7QX z5?@QsJKWYBb`1|KOxeojtq%T&cb^=8)Xqw*b>80T&Ef;s9o)Z9<7}bO4KqT#z{S&} zxrJqF3~wH$jK};6<^4AR{2f%S@HdZ+PEY^s%FZ&?qt3%nbi`>Rl*lrU9;7?vJ{8$ zZ&Wx0`@-f7BP%~u!Y74^dWe8z;xZc>u_wfjXkYyS%MLxgccn*TLCe8JaF6WC8&R!# z6LaayUcmLCt9{|+g#frm5gjwkf3iDfs%C84K@<|mImIF>Iui9T zMLv?J4U|?XkCUaGwX_bKWYF`_v9JU{1lxmDHEy+h^JaJkPP0jgC}s@xj;M0hEV4av z2^te9kf{8sD*;z+1!NQH5`%(*Xx1##k&oaqR4cG>1wqRNs;U#-sn>=bCr%+H6x36a z;5tPL8yt9=B_Mu$nEW^ZzTK^IyvMp2;{sJ^R(7^As6T`307^WYbeCDasDtj{vX*MN z!fRIfjDiT9kGI)BgU$_75)Os2f!`m9(Xq3?gT8qcLeeNJaLJ*n2RKk3%GMJnPeKIL zaGip@ya6Q3`O^qTKvR`rJ#`o9)4HBbfhG~@=Ai!s+@%((x1`6ZR8W30!mXlD1ycjk z-qiS0{)9j^sdD!{^gQ@LI}7FYHTVIj?%&b~H$Vr9V$Jv<=md!)LBa%CNjB@h@1p`1 zLV2Lykz^l(s!tH47LS_G(i2G_Q`LD7VVbWEOy?e2YB{q%Cm>+>29laOQuDfZ zbf`;4ogYBup~lOVP}-r=83)Q`DUk6<%gU}X*fg@_Qy4%C{q0dQ86Yt@qwpf)U`W_5 z4pI^ntD?%Y^ZTR5uriQ|4O-P+teFN_9iq`G=k1Gw$S5d&w`rnw@hZIB-d>fBf; znD(`ABH;Kzmv@a&U2MM@jM#(ba%~T9vxR+InTnT(Sw?k_KB;CKmaH}~2`&FPilSN! zs9v`1HJ@Q*WW<=luM3I93cG|ryYntIv=+b$jf8GoB;>2Wr+or*4)w>VS-uP!J(N*0 zVFL=oL%&+4j$8%!fAYJ| z#Z4UIhwVDJhq#Fw5Gd;70Xxt}i$NnwA?%#PpDc$whVUyt;kgO7D@5_>=7@p0P9HUr zlw38xdF~@U^ZZD8ER2ZD&F1+ouIL*x@NVA}u;cR!`-!g*L zZA{ zKmlm8tpFg)e}EPiYkrN~Bi@Id5KEO|3*=?J08cSFX!-oT*&%ue2&QU?{(EA3;f;?E z9lUBNi4iFWDpV8|h@b<21B^hvC*t7r8=fstY8{cseGElIX^@pKFVYznI=>ZD{+Orjp5DD3|rPmP_BM z#`~qG4t?=%$jE`Vc0`Xoo0Ap6Y~Ygzl`3DF!h<`<4qB!H7VgW%aGpt@aKZGd@OaQ| z-NtZkJKH3@S!zm`N@4gnxF4pCeTw-vWx%dQK699WBNr<&bo0?xLvu6tKOq-P|Ljjr zgx)RwZu?F#9?v+j2igv}WtuhfQifTWZ-s+hW0SBk=xgi;FKl3|1*^9-_>u(oiow(!JU{E=Tjz5PW;WWVQK zmL(bCesB}{^~g3sQ#L{)GWY#4Dgj#)@r$BAbZbLyMwXXleI%_>t;wGlZ!i6^V}YP%{>%FEZ?4oioc>4zs0<;Y=O4|K5;|E?!521LuKApQK;G zw9`J&AQ!cN4qj!Hh#eI?oiShUMVfGgtf1baiwXoq)sweK5rzc-j zz7y+KJ}}^IBPvd1so-Y0xkTa>{Lp@>R8eFbizVq`2pg5;7+S*AP$7F%?Gxg(8o7B?3)y6b+- z_{UUUWFEumVm^kCug}4I{u`NRy!|adZyZ_niV8B^XZDy^E$?)9VyHdAHW1G>lupck zEqjzhM{iT`uO0x^R zf#*k~cK)T&N`Bl$z|gC-iS<}Jxj-U+qMDA(lAL?6Txb?bfm_xmIm5I9sWRmbZnKogQfA3X z1=6+dSF69ib8#Hu$#c`!ACUa*Vv{pX#^*s75fp2Lzx!F>AEqp7IxnC+zB1$?nuS3B zyaN`pt~Tbw?R(qi4@9-acP|rWat9cUpJBrJ9IcmuNuM5aP^z!1r<>l??e>VU{=9?p zZgqyhp0<^4Zike4|0aH8bE#k1wJc4GZ8hu7C&st>qSob04l2xZzKB*`#8P%$eLdaJkgDyarm2$bW5>m|&C5j1v8LW*4;sZ! 
zSn4d9ThDtxfAP$0M)(rN!0X0MS`J(DDSnxnSyI!xY%0^ba*&n$xBRlhyR(eu6raF9h$nThVN~wCr zK`^77lk>^E?W5tY9%G&X+;J3pn+*Nb+V?Z$4+%$4UlSR+zZ2q-@BgqjZ>=)82Ix&5 zQzCyO`)*~0Xgpt%-0yKPh$ZDD=mM>;^2Hf&Ba2M8- zSoybCcDG213;Fe=ZjsO_1B3tHZ^r^qI0MXvIb_q5?nK=)iZ3)IBp~3tu9u0P2w42}%YdGSW$q%WZ(;L~rUrxaCRoFtWC zIp2Ubv9-M8XZUsKi4U~Z3A9>N@cxQ_PrRY7t9q?(JeN5Nd4lo98oVk0t{omuk^L2- zm*+9@0;d?7QgC7RKdEc||MBjTEBs&T2L2y#?ga~eT5c8x=y7nLOphxx$r6wafw$fx zVI6GI3(S6M%%q*!)aYr_TFiWi3Wk$OQHJpT(g@h$H$3v?g&Z6f2{;BPr=@N}Sx)0Io8R zYEv)NI)Q=>Tvdwm0w6D;m=gs}7&x1T&skI@zkqfr+il1G%a<>omc^|$m&YEtm)>|F z;d%g`oo0wqH2(GJIe&6flikBAYZ^g&e+a__L6vP_k*bML`7^e!mqzIjEuFQ12&KLQWw?PQO43z{d6wtp7;&7x09 zfe8VwIyejHk=~5wPzXC~3B= z3qEUMFS^vE5_3d-`Yic$w;3un7d5X4sj;7zBQF;q91tXLIKN=6sJJLz7HYt5SH1jg zBGMaAlS{*3Fr>n*?3pGVeJC(3I=O0n`4+MKganZuHPzLbkTeT&sHZ|c9K7W=9^w`iV5c0qz^Fb*KTzLfXEhQhMT~%m;-xYZ z)(0u$Z$FF|Ea79*j$`Z?jh$rm+`o?tBn)B;T_~{$SQxT#%IR8?*i&mfBk&Hb{3F*I z^k4*6r^RhKF5AmRG=}Aer|*n(nL2DX{W#=uxVNsyJtR}K8%qWTP4Q|vs;vBxf98jopW?OXR z?J-NMjePIUw44>7X`rzd0Xhs?*X8H6*5`Qxc&AD|G-s{`Z~Rd=VVx*j7@V^m(Za0x zZ#h!Jj8;`ul?cIS+v$wzDbE;)G}&v`?9wo}yR&CuAix{m>Xkb9b`H;c+7!E8Vy1-6 z8V*S<*b2>wW_#83tXeOVB33pc-|3*BJF{SII{h<%u$N}@Ggb0iE(0;(IK29rb2;gl z;gGI3%?HSRy#zcEPF9)7ohGg{)^i`;3=5>!_2N@D8fB!)>V}_ed%_&s8c}I}98M$uaJLh2$8W7E!ems|T`FRpu|?!ti~L^W-u$W(v$ zG1eI90f@K7W&m52E@}#B&NcW5a8zc2TP?1B1iE3>oTq2O4rl-;9H4BFx>MD_qauPD zzRBIL5J;- zJk$Y$S{5{p87j;~R5#y%+#uBX16b6N?-eEwCPgnC`xAtz&n8Lp_guTcxD|N)Q(<8t z7^nei)1__%$b-$gO+q)>53UJC$O8%ayUleU*mh>MoHvLPhFb?}y#fv#80B!20cxlQ z;;{YgMj+-HG#E8_GoNmG3B@oQ07ezozh*3$)1g=93SgXrMF$wdp~);Z){!AuNoL?TQy&K&j=_#m^GP8|`c za1SPivHX@?8gy?}Z_d_Of4nI#bK}`T$W(q_%0SxJk##UMQr5oS;UvXgaRb&Cy4A#9 zw?+5U!|HPRf@@3VbA}i5m)a4F40q;d>e1u$%g%iX9GHNra3!dMEj!3q1Ar4yJ$dClS{PL6-g`_H*<6!l&{zNrvwzNejAr}RFNg>4d0>2$;N__+BD!RmS2*JQ71o7| z@7!rC#itf|_U!pD{d4;1Y`X^=`6L4jUboXEPavL2I6}tz7LMmH^=sz_>NZb^b%te< z7wNsUyH-MT6p{+X2xz=82>8^G_ETrIAXfm> z>sO>WKv~#Y4)PH{w3wi1OGrsz#%^|nz z2*I?{dvF(nD)jNQJ=V0yOr-GcC5Gbw*z9(;Zh-9-a@8pYbYX3WPKM&vaTndt7`VIe z>n!)-O09@sA4PQoC!Q7qhQjpMrz0ReVt?xgGA+m%&^sm$>3r31&R+K4h3Q~9+sPhnAtel<~$5U*Oy+WART>O`d(n!8EtJd4rX;g->L$aGEACvI&TLc32c*)$4F1 z6>Ib|9aD^q(Q*JA7OFscmU2x53)u2aL?a75$~EWUT-FUu4v&LoO*|+7mfB7&fP#v} znudno`m0nA5f&@}_v3s^oz^h${dkXqeVXa|;>98)`#RT3UKXXVG2QJqqh};CoNvzJ zSY!`2aY|$CpbfFhpZS&T(v3gD+y{>pYeodJ{lXKz{IO7VH)$+dxVtTX05(%pElRO> zH}z9H;3OJp2z(uBv~RLzQX|7^ScB&-9lpy%<+#(s0g zcU2?uig=lBigs13$IXmyKOZjlD6b{9wwgC@J}PKGuQK@x>cN+=+)>4HadClCa80_H zVo`Jumc*|hfTOv;^FE|;Z@hlvvIl^&A&zZ2!(@=W41>dSKnYt=z!#X{f4B>u3Mo8o z(NuA&r#fv*&#&@=P)`Zen?D+YnS{;n$ijprgJ=b%`PGj(d#+Wd{!LK<>-=#z@-&Jwy0KYfBeHcYq>|P8>d@0I>$g_9 z?Uv<1EKF-WLy#h>P7rBYoL37kj^A}NqsN?~+D)O9aUs9uf_7rkw>|Zck^U@m!L=2! zv7EFezkJuh7d2^XyKuP69T?5BMD*e12Wk50UpcE&O<>C2C3-(T<#zgTY)|Zy%2DUj>ZLwa2k23iwX@! zkVeGLe)P^a{JjVTC!Dhr*G6Om`}|drHp`W%%1?Y4T61h7YdlNnT#}ffVM6T(jBN1e zpKiRK*mQXkSatrPc zN zHlN?_Xj1>My@c(Lt0OTpDCmLrg&6SL2;KCzH(vx$f)MZI@TBDO{JGYy z*Et0^?bXY99-0kVFbAE*H^Bb?>-!@nBpZVY4dvXxlE9D|Yc8>DPxN5(PG@P?tGT2; zn0`<^zB~H_()IBZ6=Hc_Fi+(klv-URftU^J@W3`-jTm1}NZ>+coXFNBNqX(q@Hq-@ z?Xc@tt{|-!;ZdFli~e%e-?@#qebSch-Hb`SWV+OJxjOX? 
z|L=FX&Sjyj#Mithn6K>ZR8j>S3!Dwo>m`(u<`w1NhOipp7^-4z(=fIws9}^7u0=0u zkdrL(X0nwmcpzo*&n7%4$~PN$ z)ojSr@n=l}nlK0z;`Z2T)ILGX_Vi~D_@|(Fs0N9itl8e^{-3w20QYbL8ldljT6+K$ zoNV2!xFdnpO}ajoj(*f#F4v>hh| z(xj;8r`4?^+8p9JY$J;p*04X+_tI=02GR^iF2XQSdZ7FoDvYwdKk9MX9;*3C`8IF2 zC@hsjMtrmOy9;>}+2(G>ujK8q8KU~LyT@!gAb(0T&on&KAK-`|&vJotP5;PP=MF7R z;1uIRZ(P`hW$sHWT9dH{g|TykUW)P27i9Y_$Za%N(K8;&Z^xJo$1YCa4h7*sa46sv zC#w6ecsnhod%9YaaI{uk@M3*6d`{NZxxzC5#FHQ^B3bhMdY!P*qic`8Nazj+kM6mk zMk9)~q3-I=__6IwxmhdSJ`9QaD^}vqM6S=sGG7yD*{HG)Rr#rioDITFi@bo{FGk0Z zq?fGvu1wZ#TwZte8%#$DfoIX*?Pp@|9}0MwJXLMyjmwbldh2T-8Tm?L+T z2F5{Uj}!!8AMnL5T6r)-zaOK-@>MQ4kXbhQqYF=78ek z-bbUWp~NxHb7lm+ve`{0vq!zx-J$xVk?+-XYa9w~+c`qs)llJhMZYNF~PXuOUYqL2+Pi}qo5XQlmqp3;W2?T<*OVo7V2_j z+>_LVmqp{X>pWo=8>L8tbF?oMigkn_8ZUE+;2Jiil%hku-3?}7YtCe-zT z^u)ltfdR<$_)PLL`U1d|;KgQjlDSyhDTU&nWqfQUzuy@7?-Mp=|Az2cuXO&0>*oJF zu??;ezW#HP2?CTKc=ws|z&yAMrG-kqc{Kmd7X*<4umPD#O{&4|umhnJkx*Wwk>(?f zfzJi{y0G$~mK+7|WH)p{v4MigzW z+?(S6{lW4K&h$wC#ICxF^>})ml{5}qkmNpov31i2Uk&@i3(-Z^1#fSJo8S5ByUofK zc>DUQ6GK8-(BniyZVZvRbaX_K+lK}TsjuKmdIC2NU_#sZ?o!Gf7*=rVWbn%LFj3#e zlP91b0r5~#ulXrV!b+N9#UW^E=kW!>;Q-kqYXQvOb;r9U$O1HF<#A);I{=x5HCaUw zF|6P@20ujy6eaqqgFVe|G>grhng_A}Kz;WuWE%P4bKp~y9jf&LQW=0@EXIO97i7L2AG zrGuLKDL(B#W*@AtYGtW%5oHxzDaxbr7Nff&k85+Pje|ar)T#1QXm8;}>Ca9#XI{qFtzx>^V~~E$)uYK*({8<5X#a|p7b+qu(J22H8dgL4J& zo%VY%Yjw4qq(9yEakzRe#wk?ZN^sx)P|Nw1TIBXnqQiv_Q=3T1?>vVFw@=W_B3l;J zRGWZF0a(L=aDOCjfh88+Z-&E?7P7R0&??C93m^`haMEdP5jR*LykR?{+DM3xM?r5m zFr4MZ#c`pHegbnrl4~8Z50S?Qez^%~4TN}w^V=lU;&sB^qBGH8-`jY~xd*-}*eB|s zG==5h6RbV+1Np7rj9}f8F!#p+9T_wty3lc=qrbl87Z})TZY}|E30gD+JkmH#o(&}I z_fvInKoNPcH9;?dL%^oi(cTU)Cp!im&UhHSg{Q9j<0;zH8oA;30wdpnRBQ#J^s*tW zh<*)laD?0e_7i+nHbe(9!&eyx4^I<=u8tFGSb z959U0KCN#Kdxk5VOCVWh!jFtiaMMV64VDS}+)w}Od8=AD`gSpCWk;TZ^-L!}6Ukl_)+$(fCOZ|Cg=flLyG z4BeHmRs<*pf!{x17w92$vR+4&+w}gBg0!?wOB6p5jZhe}hQY&bFZb|(E1a-%(;OJu za|E9r8X(}FBloMeZX3v8b`=&4o)WU*xI5s11I@=WA3mFUUP|4{l3FYj8t15}2*|fe ztzf478G%9wfbF16qQ&WZu6!-cMs{~p`wKrY zqv1L3$A+RX;n`I0AD23Y&z`0(yy&=;A*G0ONr3a+&%oRSY%W>I$i;e}#Qt*^DNp>! 
zyKThBS<2l~o@+F4v{`l8Ag{Jr}ljK5+IoawM$^&vXRUZf|~pC}UOYTc>t?+tVEWSZ(JSc%u}lplv#^^?Wo zpl%>Dz!E<4HLXdrs6)(^>3OHXqWEWjeNM2yY(gUuE@1cFYONPMT!u|yV^(da&{?+W zxFx{YX2TV3Fu8uO4guz}PS=%fPC?Hs9!UjI#X>|O> z@U#NAq$DKT;Ik$q*Ysfj{4?LD-${s~;{qc+SoMaQ!E*miDe-OQFhGuwYg-u>I4AiO zJ{G40>~Dw{(%10+i>RG|Y-;1L-Y;KT0x>}mJ5*@SfI`@8$O;x>5k{mx)+R-}p>hDr z1~795lf+av>rT9M6vRLsT8rSqRo1jip4ECQ-&!?QR4An0QgdKRA-5l*EOn{Nf%ijG z6rDRDWbA{!XW9sw*((2Vf1Mb!rfb7BO3b)Qbi;%{{lZ<;6(M_X2C}@6|%CZ*aqjALW=kWQBMdE zy)Zorrm+qnVK4Vip%p03-!FeDz)65>ZQsw||7Hs@#wL+%r*2np1#F6(&pH|O1(b&t zp9^Panc`y&th#0d4jgw_fm@rJ@o_)Z`$x=1tJiYSL>AY=kl0-MjwKFGk9XHsnITX5 zRBD<9rI8b2{xfQ!qiS6;f=+LhoK9oRg%*Qo&r<)8yol6-___e`kh+-@u zfi2S&z?e$IY$uJU&XEo>bLJT5h*`Cz|B4C*xJ?wDh4qdhD-hOn2$Z<|25R;PPdtty z9W8|IEklN^qglA;z&?lQf&-rDV9!Wtk|ne@L0W?x(=EA+kSZV%!5LYoJxL4wEtq{K ze00psS)pA814&U`fP!(;Ksz~*MA)JO02y?tkHU_?Zr-lRdkfkK4A2%(>;dHitEZdq z5V>L5d}J1b@g5c;-EH$N7bbwwvDsYKb%*c}y`J(fVLgRc(k7}&srwSUYLTS)dkVTq$wo z7^v8@u^Pd{+T3#TdrLG~rDUe+?r_DS#G#-wU#Q@?$$Dg&-{|I2@8<0`gJ(S%3ANKv z+0*>*KPcQAb~+tf$M$|Hzy<5Qn0}F=z5P;_IZg#P;R`${{isF46^b+3=Yqe>94c<# zy?d+A9Xj#|b5lh4v-R<|ts}$KuV?B#NtkCYOQ&SKA1SOli*wMBcDsC_l z+*KR#KD*%62XlvK?t)^f2B*>{ld(0s^Q$XcmF)Dmr(!7!pXxBQNp;9kY0j^hXP*4)<{wt86n6xvBOxXK3O^~ z(H5Qf4(HY&atKWBAHW2X4GitX$-LyNFA7hl&LwH6n#;lLxCLb~3dhly|6!4q=!r?R zUEJFXyeL=$oGvy|1I_2W&Hyj)<_n@Uvk6(=fUNn*)pl4R;lu&og~y|2dzuO% zeiI86so&N-nH2NkWOmr|Z$l>0D1H9txu~hM96J_2U!m5Ic?XD%f#QK_ibT`Hy69I5Sm(J9IU(co#FuHlZ7ivxAP$?iu8a3`OoJSWzEua)(Jl=Z4# zj_JQ1|DA_po5vQt&|#(st)le$_~<5gtEzsC^#?8;C{aoCM+~x_TwYtomN*8(|z~- z2v$SdLPtTVG(h2c%~ZM$aDWzuSOx{BBL;B@TReqg(B z`*5H83wVVEEva*!JI(IJuekemkNY4m(7%K z@qUdkpXH6#ip#OoEZpp82I#^R%0$g_(xe;fr=g<4=sl458(9%9?2^}(GT5cy0JZm2 zdrIETG(R|N!PB`IP1x*q&1R-ERm3 z1YtxXcseCrbInj9gT;m81%BFvFi`h94KhISlL=HjoFMkhG#^IpN4{$>c+nxuFq>QMU7(b3T~ zFPE;^?|#g)Xz;7n8v~jdg;v+N9w(-0`tkheXG;$_Q2BjW*wabKh$yZKp-+20t1*!X zC|F`|#jQrv4e&vQ5oAylX0{0#OH469i@=GR(gXGzwqzj;0w5okjr&lXXEkH>mx_eXhr_mrWb+lf$q85HKFL>Osee zfI%qdL2*X4W{LLzYJH_~lgkHdd&YsvVrl&@5ekcDcVQ@eQ_80L;L!JMc65 zqWIl~RX9AD-J+J~%Z>^DGVFk1J;bZ+XHGRZ#v{iu1jaWu{>w$7dguFz#O%s@#5DhQ z^I0iiAJ?c;dH%e={Ok%6Uj7f!Nq3Qkce3HjKZ?iy-Ejm3PY#ub1rENazu}@>(t^aE zcLrc5Is8hhyx|~5C+gSf(QHst^&;rS#P~H%eXAQ1+9lS;rw<%hxng?c%b|URFKN|B z!=5jOYTnlxE!Px%xLIz-j2&?h#Re8eW8Qw))nj$>xwoGYnUYnW@K2J`|-g`EOU2kjk`U;9ig!ZZRm?{bTsdZb{ZpOfc_j-|j|m ze2TPI_1h1R1te6HX7IOmM8<*(*|LArGD+Wq?`Jp9^ox948u0<~(P(FWitIg1a)j{A zK&80*wli7N_64v}$gL*ae8z1d<>!0^GE^1x>~|ZPHG`VSjZfp=+Y?h8S|nPaHq<)B zS$^-@)DtE(!GoW&t)EdBIX%|)AIpP3dSG?I3Gee1GlEu~L@K;qpH&)&n!YwvQ$MWm;oX_mAMAX~ zt&~jz)D7Pfr@0Qo@4qy4^XjXo#EBB36k9KF1OlRwB;VV5GkYtg7vIbsE2LkHGnUV} zur(d-tiH>V1@ zcIAxiNwEZBQNMaGacMV;(=zy%_yk_HJjs}ut85`VePJL^{9!o-D=X3AQPqH#Iz4Rh ztXv^FeZ(wMGQvg(4?7jo5>iRb!?WM>*3OcdW9-PWmyi@nUYFGerw$eqT}d!zfTgC+ zoI|C)*8DZ-{X=j*MR-GYJTo`lT=GFy{wnG%d?hO~sUcGF;7r)}~RMcbTq|V>oZUH7QpKv)2AgBGvr|%~Hg) z#~*}Y6Dun&5jIW>jy~bbt$5sBaqj0|UA}_2c%^rF;_-74p_P@_M=o&- zg%~RcHSQYeWkw`Ako;o+7)p(Kd8|f@c;jb^>Sf7Df^QUh!C0Ow@W}*#xpEfpYiW9DW&yB}=>X_29 zPIy%PDhzhggWN}AsPWfj`0DVB8bH|+Xp7Xk{DdHes z8C%K3Pno<^mokSX@l;km>GRSnsEA$SBtEmcBpLPskpya6)x2z)yq|T5Y%TX3JX*(H zyEJ`8uSgP`p~i4dE0gvrqkA|v{K~SRw=r}kEbl&xjK~|w^p4k318-#nzN4-+JqPF%0y58dsJUi)+_X{>hggxTocYY!24T^w)7|AN}T()}OU{A?Uf zRROk*C34<~|52Wp8v!jshK~2@eg#OXeCn)PLx<`W*duveE={y-x2(74EM8 z@W3eB8s6Gseg+w>g))KVMfT4Rz7Y{ty_npxI49G0Hf1%`H`{sohexrU?&Hsa49N%< z6=p)dDSKcXAM<V}$(G>qVUp?#_X?a)gFI z{Ek~JLOHQfCYzpb-!dVsW%cCqZeWU`mo_RmJomNht-iAr=JES_nH9%@{2E@tb}9$w zp)~pfywzjry*&&DuHsJfvo(i18@o_>czo-W+IS4-`scfyclZW4Cth$!9m;U*6vu2h z{`9zcotBi7S^Q~}-1W(;J)N*bcO`Q@~*kIGUQM^IErCYRmWY=SstF0 zJA+gL?%vQt0BtR{#s}1DUb77z}NR4mxW{ADo$PLo< 
z*w;VQ2h9F<(NrQl;TLN8-D;n^mu+>*tfTR1#Uh#6W45;AY9_2pxMU`;r7_r7apS*8 zV>R}Y9(0E{w#GoOi`M4W)Zt0<4-Oc!Kbt02!0mw+QWRnF|E1IVamccVlAPP@Z!#gr zv}Jlibcx5M!z*;{S^fLnYn3x+Ca6hR!xls6Pgmb`W37|*IU)7vaSSsV`Sj7zkFOq= z4F-=+tnTE75k2cVnVC@|m_8T6#mbd&d)dPO=~aQmYw6}^+D`i3Vd0*AnyH`1{Unj_ zDPgz_&+RPjki@5_iCD8vN8t(Qu9#mj2 z{^7au<>5S*GxU5~c4_J})9F-t33r9kvV(g_2FpaI{f!Aj|3JTl8=>42eSr5&hdzPCu>Qqf`KFS;CJW4)Gd-Lc?v=?c_#J#xW zOJP^`KTb`&SL3|h*N}}<|28q|lH_`SLw4|x(Zl({=3~)rT4bvO{ONq+Ib{C(%>2er z?ggCNzdGiXrmS(V&6Cr>`p)qRqcJFkPAx8m1L58o`7ON)Lw^N8vE}k4PYjb#yd6}@ zym2A6`}e8Wz8W1gDok2kco;VGvgV^P`#ztqZWhb_1MhxGzY8!EycoBk$o?tEnnqc^ zdDZS#<-S-zW<|Kr3nln~%V$TF=BZ*ZeM94vZXax$E5Z#N?>5sueJM`1IEv>ixgI>@ z&HjUa+$n$8wKDnm`wn5c+_ysHYuq?CHl`P=C5)PUuPa_8oZ3pb;p}rI!pe76$xux) z__y(LQ(az7bt0PE8kV^Cvo{XTZd%_NuZ1Q=Ouo+r$bVOT`ku4Bq!cQ=Pu>xcuq8~H zX<96(W=@C6HE7eB6W`*AC$O!chbR01Pmt83Dl!!3kFJXfGJEO$>p@8YP z*XJi160)TCOuCd>gY6FDnu2&vyuLSdGnZ~Xo+8;oSQp!N-x z**h$66f9PP@@);gcn?-9*`l={)O?!nSkuxlJN3>QX`RS5&X9VoxG%!aijtD zEulMOQqF5GD_4H=(Z6PPE_A=btRnm)ebv>I>tNN3WPDcwN9%j?Ois;kfp=PD*|M#_F58GiI{e93y!RGz@5TU0a`1?{- zYcUSWe_q>Z^Z&OG)?!U~KRDgob$4Sfa|=g&-M%#Xj`^AP*@ zV3oh$YFpsN#>8vj*bEjJ@Z=8Kq9^=dz|)-~V_g2Hg!=JgMO6p#tY5qL%JmDe0dL2azg%b4-112-iBNJ*2veEAZRjOEpDH1rAv zr9i+AnpOovw;yHJxHJIAT!uDS1Ox=FQ@u|>1P?OjEOF;2mzE+kGFVggprY;W?%vqa zf(_&hX*cxkmT1roFTuJ7^)@BQag;&Tm55P_42o6Fps@kz>*e3`QNynefU5$410fEb zRKDVmOMvpJ2AyvLLc%h~33+F?_;jO!GEge1e*1O?fNk!e-6#xtZ|I7ljaw_I6vd^a z@SFWi0?tSy5G4}Oq8R+6cbt9rBKgitoH`NR8b$no) z1vVV+5k8Z2M2r%rsb(BN(;*&PBV?D_3gkqgpma`O|0ML43I@a?3kQc9qcV()fyJMp zQ309(z*$aZ>2pI3_XIqg@i3DIzcz#u^1^qBn`ooW#$_%!GDd z#Nny{%b-q!4qi4;Bz&U54)a0=#ne0lf^gMioSdAHw$vPXaQ-}A_lFN<4xTMks4_De5rIGgnNMXh1#)k@$nrmk2V!Zm;uCUkSeQxU7Z}1 zWCZQ8*(Z`fV-v7DxFA)g2$DHv06Z9L@HOWwmJKO3^yv5nMQfzs*(?+b4a*5y?urf9 zKpwIA4Je0xTD&0&Hq632q)Ti^9K=eux3`=7qCh;v8yJVuqYBV4t*4&XGX`AE0y#1k z`RO2uISooasH6<2T$bVod5Cjd!@MclkA6kC_z>Rq1^Jfca6^uth(hfWe zf3Vx>(BN)$d%Jz5;YgtHlCG{Fzy?r*B5rnS*B^tszckUqE zt@Qn6zCzx6tCLFTmw;kD2h(G5^JWNeHwYa+=R(71NFhVn;2t7rxdX5sg8>3@X;l@C zLy>)qOEEq4*{$~eB?16hKaf@lE3mM#LXz<)1zGJfAFcZUL~gtR|At3M*i!4U;RF&x z4^PP$(bB_$io(LiZT#?oI6=U)87MnpP?a|^f&*>ON#$M5xPQ4qhyxpnIn&>^Nk)aKJ??Q^%yd6P7a=A3Wdy4BLrae^TD7)X60 zd&0-ZrwfAu1_M9`gir|cud0#+6-R$^Hnr0*K`?^iyOmTb>l+)+lZ{vhko`sF4NWbr zsg;!&AfiDFzT;r?6QRoildSvcA8#{ukl%Gh9>Rc_ikg~LQj(^!vJ%y`tM|&FO8^5@ z`P2Y=?$!lJ2qbrT3>pp(e}x>3P!>GTrzxfLDi0B;IJz2X3IrUL2teqcPfE)5Zf zk}?CW0T)d0gQe>2wn~OI$Hzy=4Om2*g zf%-%!=vSo#DFfLE5l%r`2NxhOU8&RGhFKA^mG77;iin^ ztkmxd0PrRX3e#4A*@0_s0kB2T9;DT?UcR&hJ{Pk8P-yo@%N@+E@?kfi*rp9{YY4ssAHE=#;GU=b+rKM^pmh$0s@W!rgcFsVO_STCSrDW7XRx(8*+nSb*8ct%uaT} z%FPzFhB}V`7Y|P}P>Y>*mQ4@VI+&&0SJV2~C%U>&LqR^bmA9^otOsvlQ|cBBHpluy zHQ5T>nUj-~-{Z#=n8tVS@JUEWz*Tq_N=n<97pzAE1N{vmWP+?~6nF;9U+c4=krb%P zN?yF-hlL$F&6d$hYunk`!6L+kMc>X_QX2qG zAggO?roi}2Yspf92N1WVJ$#(=&wiyloQMZ=TL^5l#^GTa&_LV$_E-mWu{cTk;~qa| zJJ<(zq_#uZ2Y3`LSOwEFGgFh37;w_wOgZX`SHw!BKxRo8!E!C4gP?ww0k|^M05et- zK**@43hJ(Fg(6R_sR`dBs?;GGYU*jIp-ji>GtHZ*N73V8@CXQ+K@yzpiGueo8H0p< zBl3*Q%*-iUfH`3wQ~1ytqMt_z7%ji-Y<3XHBnVda2LGSAYv2hj2k$z^pb5G-aoxnPuH|69v;tOUa;R zusT2_Tsf*f)yE_s*YW_T7yN9aL{r09KlAz_SKGqD%mdA@TGw%Qk{SlbVyD;9laSVgLXhXQ{2cU$kdhVPB6p*>NxD=swm9C#B1RPZR(Mn>d_!sr@ z9fF%epb&s2JArB}a_evhEkXDcePA-7ttTmnqqV~Sx(?Ocoh3k>nmaqsqu)bIWOfv< zBFjWgO})CdrUXEF=pr8gH;2A55kQ3FxUb}shwp%!WQJlqO>rg|T#yn8fDJeeV0ST3 z8~@7nXi+)2Q}XiiQUKYbB)|a&$$3T#`AlJ}cD&)U5W9d^g{|BSYXuD|HoJ>AG~guM z=?A&|1VM9ZW)1G;<>l4Q%~o)t=>FiQS%B{cD`~^(s}&qWJcIt`5?Wi2qw7Y75?V0Q z!ZMTtdUP1rXlNSC^z`Y|(y}ros0se~@naSA;z^;}4|GwyZXmKfDjTBSs8d zkrNaKWnqy6vk#eWz-R??UwSzSe%;B2zwYkt?p_CXr91|@*`=Vxi+l~# 
z7Exp`U{dV|LGYq|2iXTZ$^Femwr;!hsf7hRNl8hI;s{Q?A?M_zBo=Vq;3!qWwx;WH zkx@|v1Ei}3h7ssactk|4l@6mq0G(o#btk)9!wAfH>PO}kyD0SF-4 z>Rlm?vdYSKK(0iB`h^uZW^~^OpaRBUZb*)Ul{G{C%-x!?>K$P^WTcw;-oxpYcK=Of z$Bpg>t!2|Ue2Cg?5|$$Sjd3w>ChyBP&NBe(kDl;Do zCUe(MGLD`$C_J{v{yyTAMc9RvApDCe!Z|Sxd7f~OoG^0!5CI`4&$x_E(S!Q&T3mRq zES)wZ?&xWck)KISRJ?z+fOEj!fc9JZIN8f;eXLTuhc_jRVv`s0{%O48>2LV?0-Y!+ z4x3F5@pEaewAdcM1Jj&G^z7T+glZXXq9zSyn3Z!)tFKO^b%k2KV0^a2$*PW%IC8(a z^nOQkK~lN9c>Kv6iNe;WL5z*9Y#$P#$w?)OcE&d}5kw1|2 z5YDG?SfHdC5d8!^csOK6Mnb?6W_H<7HhnS-pMsFQ9zU_vmf*P-*4Rj(U$(}>WSf9h zEpBD2ea|cf<>-;?eTCc$Y`m@6Ki`+Jrwg|~_)a?Pnc+FA$ru+pmBSz+hmQlwOLM0! zyfDp|POv!LW0p$1GPPHA6+c)ehito_`Wx|PN4I}eT#e&dWsWd9b~;;mdI)6Wk-$4D8~7RIC}et)9tzPri-y(8EKUfx3e8-VaYMuWs9 z>q_$vVo%(rA$3nYGkM;G1ao@LfQVpBF5)raF&u5CccFug&a^{csEv$7s-w5n7|yVo z#a((c{*pL^*mY>!kAxy}+JYpPB{<&jT|+_1!hFU2U z-V<)Y-CYE)Qd=fx)80PdHVPE-bY~d3`Z?o6-|*B!Z?41cSyAD(-P`wGw$!O_HWX(V>3BXsGm9 ztm&#?Y_{sF>iW4%UwU3Sb{s`nqlOh@S?6yZdb-EB46qjSxcyXIqzLdyDH5ODkLS-4 z8aQ|xa_?2+@g5)9>vw03&&XqnIiD+AnB4XIF?rgJqSF|PWVn#l_#{K?T8TT~1>5>mQDf91v%Ou1%{K8KhG(1re z@ZC)6!zwLHz*^MEqS*<<)eTdgn!wE^(98X`6X^X7J2$&VlwYQ4P}To64V9GF$<>7! zlf1`z|MU%(i?)r^Rjlh-7?h^SnLgRyT|jC*?J1RR%*=gv+p#^BH>vZT-t?z!t}_~@ z{W*d7x|wIcl;b>6P@B$yKV6Us-Wr{0W5^R4H8y48m5nI6Mv>P2PD$H{AbL4Qoda{l zj`KWuOzsDK-8pMk=N;-F+$*M!7llVCa`@zzOW4vB88#O!ubCKU5_@jF4zM&n{veoF zx`MI<#jEhx{9zaqbgXdl#}K|E=jLMG(O29L<0fI;cN+V91Ift99)@*mC$7J{ER}T3 zkIIB|zvcZD{dOUda$~-#E0eW5N$}mzUKTw>f{DK?nVPV$@DdwNa&97mCnB~u#Y;GRz2Srl7kw&|x%qB~6`S)CxaYhR@DL21IOALOC z{+lH1-ze&;pY<=#`l-~{>@FpEIly9;oimIlF1?D;K*DO9Et*Pj$%-q)(c`q$DnBW@ zmp8k$Z#8&T9~iN2dv8#PTq9Lx=Ab|sB}h$2Tvtx#d0UNhxY10OxgKytX}rMvrO= zz)$O%Xc}ZoNlBr8_kS{{7w~zX{X2Qwa)It0?U%4d75Yp(v z&;bkp;&|HH+S9i1g&DWz6m+xs?h2A=WJ47YvO}`7 z41l{3s;VdQPF;X>#syl>emIKjyGWo!I$b16jAv*J!No%~#wsYNV}gT0=Qfz3_dY5? zFoY0dg(4yy1nhA3yU7(;2{#lTiNOF=ZKT4E1|gq0we{iS`Xm43*)ufeAk%nKzadxK*IZZ0Mdw5{6JKBLN2e0S$ zk8p@aAIENYwxK?6dgy#`(Vu z6N@-ER;N@TuXoWzvsV|I)AyYpe!y?-(LxRDC@j_Z$N#>*5f=xiU%=%yPm!QCqWphU z{|Gk`7(U9_(s24$k)hBE0DSJNlm69-xf+ND1n5W}fd|WZgE~Kl14;gE1l8V}m_Mu+ z!9bkrXH=(=9dNZM1fKANMt1X9jmtSZIl79g#Ds)|&cbpkaH>d@bWHtQ0Sj&mI;Mvs z1Z5(+e;17gx&Pjv!JtzoDng>i|9dr?|E082Xa#3vhZRJL@A9@sW8 zxzhgoI={^BS#HK-`DtVyEB|iDL-om49G(U|RPY2$8Sm9UD2jz&>n}Jd!*aiid`c^} z2gPJYUbCSNU7~*qPcbnnOlVr+(P@Kb7MIRb+5A6)urU>u9kb}_zb`pWc#+q+pTdgt z|Db}E|1IkwXr4`jKOv<}ep331?)8g^)~8ffx!UG6^&&R$b{v+UG~4lt@aa{Ql~wHw z1+=@)qcz^{PXAcM=pF1bL*fwnPw^iTe}`GMz2Bpa=oc8K$t)rAe*b5F3{`a4(i>U+ zI6-*Zf2pJ~uA^J}zW$%LMB~#L2qmEj-`~L_BQ4c?^DjEzf55?9zH%k~E3fv#KnJ$S z73}k8POjynt1$Q=8^uBj6_7Zk|9TM~aTElfKi`Md5xz8hs5*S->*SZ$G~N8&SePqG zNm4#@Vh=8!S6q^C5c1rlO?*Z(j24pK)~47=0(4cI0eUa9r#4E3;CrWzsQH_r2Ul=N z6nzGywNU;%xyr^LPJ|~m=6oN92A{k9_NF*wZrR3((8ttGBJ=C%|7~{8a_=#yUFj(+%lz84ec9zkL_NAL<|L_0>u_uf5jT=pn0CRTE zX*D!>SA2%OsiT9&_|YTybMQMz_-N=qlG0T7nCJ})`*|4@52~w`!IKG+P;201zyi{cu>rejkPj?$L0S04!AWp`m5yRqi&WZXV@*E?j(KVi`xzH#`6hL zQN(b^eXny0s=I99dWpt2j`R)B77rC&Wf2hp41L8(V>uVN^wr_5CDigTb#F{?)0K*n zvJu+PT~K26eEDaDm9k!DXWT5hYBM&X2hM0=EE`o2_yuq=<+h&*NFyVhWHu_RHA;9d zT(B8lrh-Bi5fnTE!Fk9Z9k@8tlAMwb1HmnenzS?wbc_O5KHv;`oscD>X(FUv&?0~# zWpQshhJI0BpV>C3YM`_&cvg`oJ_4QVy1+h>_Y<=IyvbwlV9yf&-PfvRXTm2mQ$0E? 
z5({%vU-XlNcw0(ggjUbU8$(_q1_^`9_wKJm0SG*WvjyGW{)%tohoUOor_Uc9b8#Vb z{r-s#)0cPY76fdQUGb_cxEZa1^_<^fh=T5X11veEr;yFg1FnL|s<{gEgGSPe) zeI|SRBv8;IVJRhixp}3IUrbDn8-8K8TFuH@xl|MxHWK}7Lm{N0LBZR{=OVkA*r?f7 z8;x{F+UL}5;qbTPK%*oAfm4`%1*lHBRr<1X<8RFS@|iq?ym$ADqq5SUKwFps?T0qI zk~$7q{=rQSa>t9zKke#n^_rzGy{W(R1H!lg57q*oqVisy3Fq(z(ml^NHa3pNE(d$5 zcy{ItK1(*1L|duC<Pi!5QQPx>u`{4G;TT;^Di-E12!BMR<7$A-BZz;P;j>l(|bj`M?gn`gaxm z36nRw0jH1M)ARNEO(R)+Ybc9U-uWDHzr7yYWkX#@?X`$w%a{pIE0DoZ8B+4j7?~y1 zd4!=ila$Q;foGZ+zf?KXGK(3t@;ieB3J0s$-ahJt7sAq2LhiIM2Iwb&?HTi{YT~5O?C%{gJXj124Cy9pT6sY!b*CU+3=y_c zvs%x(p!lu^iazv94F?~uw^%`RmbiYnF9qL^l9iB?3sccp+d35m1D*jfB1&g^Zo8g? zWWG^Qh-F(>JbEeAQ=%cyzx1_ayFwoRU%>qE6eRAah3a6mL6E@a0QlGFg+_%Y;Z*7A z>{0JNcDT)IWH|Xs*pG$Dd1biK_tFCc3W^sr%l>m$6JaDj*D%0k3bY=29x|GFWb^B; zn^KirqE^}O=UVd0D9RYVw&5~8H*~F<rj4c?hRF28ghtDcozBX31!+h zDTuNA8jdj5x3{=HGB#}Y>Ol*kxoJqp!sR56GhMjh|h{zc{J6JG`*KuiRg@Jarro61I$=KWn6<7I-J@dtgpB(-^fp_DD@8Mav7l%CD%6rp_GqVZL7WkkD zK!p+V-o;B2Z_QvgR)H z^YfvZh0-l9%q&V+!@BExMkJi&uNeY4c5L=hX7Mftv=rc5o1#zY(TaCnvtg`$y8{kX z;mgZ{0=vAW+#-KX4wL;J(>EDz1xC{EJ!)z?dwXe$4DGuU`G&2WT)v3$2T&`g}?z12WXEEM*_Tl!-YE6&jGzM8T2|&N8r4S1-5e zS7to+Wb82{mcNf-N5PlD34s}gSefmWOG5FDVyp8WOSF5bVJjETl~i3mS26{1m75$T z_V)IDlj9@u$Oc7fvS zH!Ms_yC_C8Y5JCvcF6k3=qO>7IHt6sOv!9K6W1+SKU_svSUEs(#_Wy&hfeYtS0%Fs zGqdm2V&xR4kw)h9j4$_~LyfFNu+HduLDrLb`6>d;vb<2O97YwMYBbHtR#W%Xd zsIe2jVDODB=yJQ@jQ{j0b!k}zl`)-xEN*pGPxo3fXWEyj6iB>Z|Ls{Lbmz+3G80%k zz@^m>4*|PI29iQIqTPnn$H;oSy8>!zf`wR2v6G0PgexFUo)s7v*oR}Xk=OZf_vX!; z04^99b*SO{qN33Kfxi??Yl@F?$8z_jm%EG41JhYf!$OuOp6D**`(`MF|1=@tT7KN_ z&V;0fkFW0fS12x;y`z|$E}ku^|8jv5_lelL_{23W8aHC7hSaWWTs!bQe6EfCrqGQ@ z_}-~CG8PA_x^c^P6BQq^I?SVD4i+{xm4#DSa_^d6MNeFYkwT-MvV`J;K$VwW(#5ZO z(qWO6R=sYEZ{=0iiSJmeZg30>J0$!YH}UcF_h;ttlC-rh_^>W4FGr(bBQg$RF({pH zy8aA?x_o58)bw$kw_AErvQt^Nzt2X1gSigiAwU}iKv?zT=g;@2j?AcE-wjd7;*OVcT{x|K zw+;#`*!OqFm8~iYJxC4-DA^9FWPuHZA7k84ewVs% zF2{=ziH9?5?d(@qUqT)sZBX3Ai*GOfkuj^Sq^&p<&*KuF@ybT&XkTaoLwVl@< z8U1X;<!2N zXZAwhpgomz9og7cH+4x&QWD+E+9Ua=0{p2{-do?rq0ddpT-o^pcY3 z_O*?tC%_h?^0e2E8ezxg`g>>;Q@0B46DAmSwyb{iDyNroOwo$`H$`*Nbq3U(3_5(@)WF@5|pf`aDri}f5>SspHm zOGZUPE_1FacgFUX>B>D{*i3(^@H$6J=x8Kz(^T~Mw~?~xDhiw3z36tZrAQAI3Jz=# zuJY|~UP{+1@G&tiK4W?Cb|%>BDmgttlID>KaZ6}S-&qdf52YtuT=M=kIbD0HHllN$ zk@5G5c{9LRS36B*^ib?UG;RUTzqxlCqP`Hl%-Z0Y;d;qTwWxqlb>_@B>j_(Rkik=0 zZ4Pd({|a&Ac);~;H~u9?-AA?6_kQZQ{@ix2qez=K;aT6u&3Hiy3DdAtD##=F+P-%A zTI)dp-I`9)Q0R%T6>_Zn4>0HdgimavIFRPLNs*&Rblrx=E~^A{%V{{5ct8Eg80%S5fl@ z2U&ll)7e0&;~D?Jl?f>oqbH{s|%(yq0+fAa+kZ7nRM zy^3?`*LQyft6*U|{Vi9h4_vc%e}8N5`dc@gUK@Jg9Z~XovPuXyX*zL#4%HZMhmaho6dNUd2)wnZS@|5QL zExxah1yBENCX+FL9x92HOF{VQ>3GD%?GMdvo*$_>=~WzNwOQ>#98MR%mjC?bD#>HE z&8VBx5d^m-c!<_@9VttG3qCQv6l-q8yV1jK|5eXV$*T6WR~t?GpO0w^62LIxvXokT zM87R^*ZQckv!{^sqk$ssO1k}1N^Qqq8COiCGBz7}p8vV;g&JKKXbjf`NLpwPj?G)p z!H^T@`;*J^C^yt>TyE0F7JvWV)6QLCrgu@EY4=6BW65Jy1?2+|MomHNlQ}wPNPWL2 z%EE)2@qyh8*0<^PIG>RnyHlI1HYB%;kLkTXA~m-k4fHu7I2rxJ(Dg}D#Qkfp%4eG%R=RM_-ffTMAD0T#V`AoT>5&>%DVA zCuT18WykfkAG1<-zEIUYyLJq>MCD!0pXW0dmvco%tmM|QM6sZ)I-jtE7U~i= zhp^jDPbA1Kt_yfl3!n+E}$1Xtd-+p@jFP0^vno2=3D8HI~q0Ka}`onNobrnB~|eN zi?vm!m6@xQ{R{!OR;_S$15s|Hn{KLR(2u(8%OdXyUv1I`F>7F9tPlCrcj;W*1@Tla zk9ZRm7>D^X7rj*ZwQ=Fw4yo#bzAw=$jmD>jUTMd9Dvu&RFH|U!{5mGMuoAi^HGCAe z8>H!eJcxDIfs>M}^7MD_E2e4@6%Q^}C3G_0zc$&!U3i7oUz5f+sF*Ke@&-@i7r#ql~`3$)xc6 z5#WnO!Cjcefx+YjQx)=Q`n~6L7Bde~Vl!nC756W?#_X!hAz&)ttLlBb$KA( zzd9)$hHfv$HMm;?cQ`c#lf@=;t(N4vr3KDyVoK&0@l+ZldKaZ14PcV$4sUL+DAef8?xv#HD}i$^h+=8NsxZm?N+NohCHiOTccer`M|-ML2^Xot-^Qn~GC zhkcs!b#?LT0~SA7>j~a~@K5ASnI^MCK?-$Ij7dP?KnHmoFZ07 zx9M5CR5B&&MEhmrveMkpi4=@={B!dwnpx~S1#0i=L~%rVnUz?d3wzrzTu##H@BTm~ 
za_01!?K=&UD3%sCt{3i1NCg548u@t0DjSVmWa@qC=q3Hg5%cP1wEL6C zr2$2!bT6m5`Fyk_bK_ukP}^(HvmR3nE>_Xfd&wqFk%X}h%(1f1u|Fsm!3^XGsu^5y ztnV8S84}OyJp7P-GlrVTJ!45QLht3RMQQ_GPYnV;_reDa-%RAA15PSVSv|=4V^_oS zpYcF3kK?BpmgpX4&8~&~1t~`3#})d3T>D$QAFhSXn)mp$j#8{^qw!(f9v_-0Ohf5KswPPnENYe-xu)bGV?E9c?n2~J;CB&T0PR=G2w6} zQ~z5CmwS=FoAogH9l5{fhwn2hfhU}&%`UmuU-fgD=wPdGHg>PqXRev|Z7n8T%pK^V zVET~VSjQ_xn}ea>R`fc-t!Zz^HZvb66{mkfu&P|;XNIqn+lt4*M%Vl7H$Rt_C9(1M zr{gZYGF=l6PjKeN+JEp)>1{oe%^iVcM)Gs#kHm3l?p)2hQI%lfV9V-x`H*aJK6*Oe z;a%3_tFBdS!KV&Eppl!)K{_LT3;Uqr$WGO)tUcCQrF?soiEZ46c#F!5+>Hxc%j}ZUp3OTa$}y^=g~4xde|zh9tyiApwGtz38781iq;uw*3&h zoOTOYVG#M&ac(k)L3`EnEk$=vC>z&x@(d=4kjMAzMM;V5ERMBRDXp552Wnp8e3&vH zIdsK~OhOQYg>{iE?uI^3(yHr~^h*lHm%oN9UG`I8)sP!cNWVmLEz6{)cyL2efmc%D zczr)T_h=wF8Cg)NXyq!&a~whE_Z1;TX3RZ)qSIx28?;j%KcVHQ=-35;-H8pWiQDXORi&WO>$bni+p>hrX+~cu|Q1 z&fcV``sVY=DE!~`#CNfO;66GwmDlk@#L0|}P7nm`7pZkIT+Htg3OQ1KAZxU*-Y9XJ zsGh$yelT+P!oh&$zl)4vUesZWhys-F{)$KoYH}w%IsO|0(B7u5JNYM<;9iECyNl0g zxz5&@i^r3;-6yXecR5yD;34vtK9$R-$z>q?>TrYQ#XJt(il#Vw@W5LOJcjwKEg*gV zv0d||&x#Dfp9`h))K&b$xk{z~|AJ9*br4lSO#K03kBb2}$CZLm@BrQ+@vN~yPdriG1yA7*l-XuR<^O>GBGi^Zf8e$?bG70%Z6|BCg+foB;=n6;?7eyQ0$pDt!^Lmxb<~^UtAeTL zys?d4KMoW$rKb~h8GLp1oA#EqmXoF;&Es6H89O;Cqe1(_nKZtrG?X=jyaF3IWvn z+D;B+F@xrIW+%DFUmlxcucN~>ZW|hR_7tk^wmn*XG_2}r$KlO)-%ER8IYaeRg0Qk6 zZd;6ABSVV@rz#|YbOHB#=45!2j;m|Ad%Qld+;|!HYoTXIaA@cWpuNHPKx1fr5`X1% zMTD6z)&q?{5Yq|*@ijmV8`pOEYVX&Iy@33j)yJ$0=XHFyeHOa*qEX5MFhD#nbzU5* zZjeJO?3VU+wH5h?YXEqj0p?($$PI$R!a~>Yj~+cLg(i8j?yHJ9P*6JsK;{;}8>cRy z1)E>MldQ}2QZ7qr7ms*veWcm0>7+YniNF;}yc|zH4>4EegJg9t!Q+DKY7sYIFuw>` zV*NQzK5I|vHvlTkHjKe(kE~3i=P33WDBQ6zSvHsM{Zaje^*4W$oq=0#K@3t~77=-@UN`Q{&2SsM* z3MHUvpoGQUyCT4mg32klF*$H!f``nz+|?c%9Ke1!BT!5)5Yn>ZeZO{`c)}0y8o^MIj2QCtgxM8FBk=VtjYHGv+dO~8pO^W3rp&;Um9-aQFiB3eJ+qu(@$pawbV)lVhRuLw_m zZy}TO4Ri8_@`V5pOFIU)1so0u?Nr%tz?-n6H26Akwhp=S}06(^tiAyv%8= zA$*Lly6LSCgBKMQ3?L&|pU@LM92l}wP#mHTCD48@g$3t!Q2QnYvR|{rf5Ucs^ZNbl z-P&Js&wz{z9KH_N>cI|&u=~)0v>h%+3{NOt&A@`b>g5A?rQ<*jgEX?F=gu;}{R)_< zfeXuEl%W0HCfS>f{r!D5K|xZk+nN%$KcYEoPZYKyx>q%9GgL^2xB~!^V`H1?KO+VG zJ=$mACji|(Z-7BbG^_D1W4HM(tb#Hia6zF&g*EPaccQTJ3hoDC5OdZZiLM4DpEJ@L z^LsST35i&L{XD)MCIeMHJ#tuHfHB8{Mr4@At)*c!g;VwA4x`jiw8z%)w1**U3+wJcdEj3y?Lv*Eh9bdJ#wO!i5Xc`vqKU;(dJkKq3H2XskvyJdr$Z+@nXV z-kiX+L%=x;nJ+CoUDDgHKpGT8s_~!>N@KWaP`a>VhI#r#dm%2FiDvJh@q-KVfnW*Aqi|&1n&feZBpRPgcdoh z3Dvf?ssgW<8R!&khz||x+7hs))ox4F;2I);O^84>&3)le$v~7#par7EiKv{=@MQ*S z6tvJMVSMG&%--St3NY$?Ct+=e78UV>C5{H(Nz33M)eE0(y5;e@NDym`r;k3?I9ZmU zKJXsJ2M;%zQKXq_47Er^kOqKAFwkZY2@+TzAtnMHa6!?DiL@!Zz#&Iwui9r{0?n$} z?T@M>Avrmm^}ExAQfJR1ey)?Vvy!W;*z!b!VBrzWjV0hL5@kI^pb=mQAW2N#-XjLN z*HHaDRp=ieY}0qzb@3w(M4xJ*Bq6VBz@{MLWhGM1Ov~D@2LiLZyA+Tj{}5teWgz+q zs1>5ifpj(g=;GJ6lF(fs3e1PCgJ}aO8K(jF8+ChuSzJYYWhmwYNjM}sJ6p;`5YPpP ztOY!l%h1*_S?qQI#2n}5rQ@^V)|%ZGp<^B{2_qf`O_*`9Nt4V)=S0dE%$xf~41=VL&S^ zDMS{Pl99o_ItpaV)37ID=Yk_f9NcfSvx`L73m>y1S62Yws|4g&r~v`(i&n%M4>TGl z7Z$>R=*cE0SJ$mpZ@@En8&iL<{vPOmy6qY90$w9o>Ic3_AhM7~~dN1q4V)ndMtSTd_4Aw*qX|y?ggC7~nM4!Y5}JKQ4P% za{>yo%bh(v?Lahl0_GfGLKaP40894|2iMsdvDQIS+jVGo9w>CcX>SFn2Mf1HU@L=T zEz8(S7wUv|`PwyXztu{d4`ei8;i3982^iiRsSRa_;~B4!EqOsN73k3>HFt4PriXX~?B-ZEveV!=#>Du>FCxH5@K#4ND!WK*tbI4y`Ce@(d=V zX|=$Gf=KD7YZwTk5H32!Y7ai?x?_olL(E!jU7i1<^pmhaQKSW?laFLyM|N2bdh6jr zJ3Ej%&#{7C(QI-Owj$CXfH@3>QyGhd1U2vrX-bL{pa=($7~SvgwY9aa>G2u|^c`JB zRd-C0g0mE?pu89U>jx!hEnlIf7*fZdegY%dXQt zK0d;*k^%TbgP=KS6~uq}m*Cd%nW3C_URCt#Ql@~%(m7CMEd}Xaz@SfqX@SD0pOgXO znS=c!d=JR!+qZAq3>TjQE^B}J?MpD-)8KBUJFFcY&2RRGt|J!(Y7nXrhXe2&$(|sn zF3@Vry{ORI;OEz3%*Yg3_E%+X0xir?h&v}iAs=CF4iEPof%x7uI?4cs=@0Q0#MHx# 
z`LjW30aZicSm73Zj^lAUkmsjYZEu0VGBlBG1P|P1XfyDV7^<0};NzE`(17oNsf6n^ z&3gecT?P#nD&F_f0mv4dF*dkJwESTwg;4PtH3D(WXMdZjecley(V=#-Fpxuv2$Q?J z?uZf%w0lI~gm)l_JhT);$lqOvt=xtr&p@OA^maAib|3D}kU~(blC7T(K%!|~e?W0U zWE=y26^UaYD~T)}h;YMBLY+~N?*;-X>P(GjcnE+8NbuHYA{x-qV6{hP<+P|M1-!d( z)%qco9@OGom&fRUUV0gV~Z$W0Uz*iSSKfWE5=n@td@ zA%G>ge*1PfTqUcB$Vr4mFTee`M>^TM9ac2*tzc}ctE(sDjm?^%tV>2o83YE66&yFS z_bL|J>O(X#9_weILqJe3Q1e+x^2GL{>sXQV!otQqEhbj}sr0mrj0{}f70!2AMg?nd z2%G{7sKbs0JyHx#A)dx2BR!bMdLU(>bI~8&5eOPg?hJQxf*i9r0XTVTZhkXeDLq;8uYh>ZB0Ih?-4rO%!*AeR9K^?SG6 zS{4wV1A)(;0nngbXxK|mO&tOxQ7LKZ@2xd&-&(;0B2u@qipm;HB4_;xdL*j>-%<*c zVaO(-X3o|3SC{~D7+j@p^SZQD@%r`Cnwpxckm5sO2h_h&xe6Ron2y44N>HS30v2o^ zFnRBKaiNF`D$OV5!EsTA8qoW6n32PY!m)0P8kp$lLY~Xq@0nZ;1qBXdpkP@j!^gt{ zYh{6>hT!o(c=d)G?EgM$aAepv_c0IruQ+A)lEgrxy196lWP!tfDJGqpg+Qw5==a3Y z6i7TvQ^=i^y9mx3%H2q~27*s?=s?R62(;DEClHZLrd#nLNSK25hVB_PsMScm3>5a@ zM8$)&Z9tL=i1er%LRd^p2k5d0yUb~J&fZN-Ohn;K2l(G4V5Viz{AIJKvI6h`F|A>I zA<+U35f3qV(Taq1cQyTjFnnCl=f!(bdp?xSzZ=RlywzmNYTwWDNJfrNs# z;P_cE%fMWTtFAWr7ce~cUzq+&aU-uRi3S&WhmCQI?gH-38{q61#cX|WtZZk_sQz0# zNCRT7Tw9_4f@=TgJjZo`Ua`RJv1;EL>i-D~-~8XB#L-RudHg>Va>@tYkvBoT^7}-v zuR~y!BUTJ-C$`h4F(4)~?UZl*Kj#T8VHp4SLR&m={eNy5v^K!pDP`+h03acw0)?Bo zuRsTh297aE_n}%NG&7S8VnbX~Qc`Cs!()FJ1XyI-8HhUDK}E>nsc!XB^8lre5ft6U}8o^@%x2PTX)yN>gh9W^4L$K{oSBy1| zMf}pxe`K)?2@8x%+96u#E3;CE&M*X^WIz_WKnqK?1GjGrq8D7My z2S{<>OD%?4z~G$M(V+=F#=ds6<9mb{G^`R5MmftR4TmQy2eL2HcyA3FgKWmh@7UPb zNW2Z4EkGOL_(K8z1)|&qXJx*{zvQ~|YmxpDuurX6e4ilL0Y_LA4gxET4B~T|&>0w%4meu0wZWxz0IkY-Wn2!$r{Lby z!Fj{!gN1|&aQFBK7#$f&2re%WhAmtr|6#<}e`Q&ZV&~p9z%dZ8N&LkE0}$w7FmMih z4Ult)*ja%Si=-d`oFnXe=v@n9Hz=k8#RE6Xm5GKJbn*d@Dm}~f+5GBNdTK$lhrmC& zK3?mAS|=caIjAByOCf~>*x*Rt2)b^UmX}k88gL_W1>Koc-pSj-|IR?Sx-dgpUq#&W zJ+kqr0T|G0)J?zzsTw%=KqmmynG<$3IQw%C9>^o?G1$_Gg7+05h=t@9KySuoytYOc z1XTUOrM3W>0Gu)$k<@{Zz6QA?6g)t+0QxQ{LFNpD0c+okvJF3v7LV>FS<-k_UbU)($!l$-`Q=jnl;ZL!AK< z){R@Y*o1`0AXbFf^>N zBjY8ARw>>L{a?(zc{tYZ*EV{W%yY<)Ol6iak<2QRkP;%9m06i0^E`!0LZ)P%2`TfK zd7g)m%tDAV?RDwV*dxIQoREgA zRLElu)3uIN${{hNdz)1*jyQ<#;5`a3>g&@RI#^HmAWt68o-b#?!D4CN^EtI zT7*YLc!Jdyla$m0dNo#C5^NywbkQIGmm}%v#N1Kvc8-TjH5trT=>fY7+*HoN?t(I9 zy#|*t@lkIo#2GS;rATld1G+5>J3B5ae<7+d_ksHw9PGNQ^+A{}7$5+3g+5fMU)tNR zLZJy6hUbei8e}8`DP>J}_pLW_`cU?xl4j=drGxFNL;YGGe1MUrZ)$MzS&{k%Ppxeb zUQ@#M>*T2X<^8pm#wdszMLDP>qPsRp0P(Ad5TjjKRV3V+H$1AZ23v8A|*w%@&-vbQ&~Y-GW#20b!Mqja6G zJMDi1l-XA|`8$khybOgZKB~b0C!02_K*m#VrQ8|L-}@j9_a58}OwXWfz=mfas0|XM zm^E;}A;cs?RX#saaT%d}eX#mp<(FiT%W`!!#N+6(tMU+!1+?rQZRmmXV%m)BT zr`+ily;{nhVIMChZXEfm&1r8xo>kx%@C>vPI6BS9C7KX`0jDj>>%eq`4aRKLGmBOJ zo;{61EC!5+_>|H$<3TP04$+g#!$lCq{5s}f^Hcku?t?)(u)I?c<&jVoa6(Tcn5a0v z`3UAFqtUuh`yw+Dbcly@CAV}OT{D@>mXm!l$9(N@W>&=_`#1_rb@@-`#7_O$I_|_r z;1?xs=)@{|T@1o}O-r)5?zPxsB4lf6{K z6LqSQ>N;)t9;1r)J;Ek?Px2Uiyv&y(6O2pP?F-}*#D246>Hngj`+FZ{x}mBH076`d z0_o~mryv`h0xm8xD1y+7Vm|=M`7Ya52>-+Y+foT865cE4be2^w_xGY4kGp)#RGH;K z=6ZLi)7!R1A4ymH*k6NHq_vBT8(m9vtiV?Dv##R~Ac**Orw&%KIX`((Dj!-11N zyqkW_UxRvy8nGCyp1-;_s$89L3y)+7>*cMQf2chc`(-;ckCn4! zY>e2PU1@@ZxsP}6-=&l-sZUll*MPW2}m)MZMz`E*hH)$BnossbT}VYY6L{Tl9>3aTp|w zq?OqO)iHX(%KJJ~56}AES;U)T5KQD}9O1)6Y%$^t;@k}{+md0&*tngkyEj@}Qq2AK zj0Z5hF-N2w8`|gkIXQX;-Pul3_<{UZTqlq6+;@w4aPMkmVxJm|Sq$#R>ryxl0#$njEwB6s)GLj5v>NfPqdUAUF4H0WD*2CWV z^;^-A!r2%j>ODdu&Iq2|yx;Q)FFD0?-bjN}c<1Zk zfj?>*f`1YcQbUI454Mq?t9`ipVJ32?4i~!BI)VS zX-^!C$6JdTOn!zbabJ-yk+kBcyWR&=$?mnfIP#eGXR^#ut&FalZP9umwGV2^KQJFF z6gggCvrfnJRE~`#g9-P773R&y-6e*EXU8uH-S{WPmblT+lH}nW*PUh0rrf~#E?R@} zmDTIZ|67I-#RJx%KX}~(xeR6)ahS~++%TEHl{iWQNQYtQ^%Y*1|3G+@-L*gVszE_bpJ0`9xEiLlnNle#r zyYHK69Qu86yko~|0=_K!a3+NKzU0Jc*4P&Uy)IY3vxVtIw&ImJnUNkqon087! 
zB|X-=!Slv5cpZ9Oo(UZFIh0`R(`&dkCUCTs;+kf0T7ji??p;SqXhTVWvz6Yux^$`U zL#ZB;>8F3(k8Zz{(f7K_7v2R}Ppipmj&!$quk~M}zhXqQ*X!2uWQ1TC#$M;!Xq2)j zC5LUzu1*T(!C{le(6GhQ&n?a2jQ%3!gj$=bE#a#NbVDa`2mP-O1;ML=wQy+s&@-jm z*mnmbMh=&VS1QFyChj;_Blhc5Y7~7Aoy7D;Y^2TF!WSFT5H4Vnozu}_P(@onf;4JA zSo0y^_XEs#xybxjhwRYUHsamrAlA6B(O2ZrDmR%H(RcW6^VyR&30_z`D~mcQ3ZEhM z^#(t$U*aruMwvc4;NIV)wP)*#xskap6_wzxLeMmDf#g4wqP+TbLJ79FSGwPDBMmD6 z*Wyu|CtO`!k$DwRw6ImG=`E-7}xI5b%GJx=`I=6Ap0N_S3668msiJq`P$_f?w|!- z$r(#kf+c%lE#cY3N=d2E09g1v|#8|+E6C2 z0}o=#X+O#9ZNx7DnMNYIO$fMFGDg zzU@*DXZ3qQwi_>Kb?e<2x`^9c676RX3;)gTMm>j+9D#NdH(U5As72~9Hcx@>W=hDy z(_+i9>YKKHMO?QrUMXE=Wik(EoY`;s#;!CMdWwbF#FJOrxSkZQp6us$wD94TBlP;6 z&MWCD4qLL^NWvSA~34`y8<>E=xr?oBWV(SEX5ieIdyRH zHBBv{ENy*0E5-VvzSasNn{mNG#rN4m{Y*DaUs3}?50Mixr9ntBFznzRCIVC@NTaJX z1xhc;D%ZvQ&;fIc;AdY{Mz}OCpU7n*$YKdwVfGD54t5W(lPaE90-Ys~Dcd!<7WZUJ zm!@pJT(`S*(Ot48o147MJ;8fR9v8doTCTv@vD2H;d)^tX39C1JOuO#vAs%++L&#bH z`jDqw+lL{=5;a@WF4rcLpXS6xp4(B?-@W7Ni+`Zct1;5?$jx5%sglQQ)`L#jmC3jE zbxI~PdG&v`=~kf~XhP+yiVM$9o2H=a<~AS1l%U zu2?EMl(%o5yga%v5he;RL{&%l-5kp^Gzl|IYSFq*T7kEM(qS|sA{Id932>c2*{=5D zteRA)JiI{395ou&z~LZ2M`U}Io@Y7w5$ripny)cfn*i-B3YHd$=+74A{X?=4WLZJI z)n4}&ZK$dm0GXmOu*g&8=H`MqY_QS*kio~ovV;GeKcyaD-=RPBcGs4do3^z03msRS zJboBMAw54Y-=9#O96#Y*74YdXw2jWl&F817GyCp+)od%RVl^V;2BKf3ECtDkbp4%=l9N_=a|mapwt z6Q0kWt-dYMmKYY^C2pH6>8mq7-x@q(FBPl#th?< za0eOkGT#$om*>jyKC(HoPqLRTM?A3^3aF~PEwOU2ub3ZtVVooJfNVr?UvVSu4Lc@P z0iG#^;JXH7=rW&d_;CdtsMd*+^&o#e4J=g=0uEfDOzU;v{lR@U3i4Q>be!t>^UNUa zxLq-41%cpnZxJ4h(kS3&_eX}Y2&DXAbaeJ(NrXgx5SQEkk6v&z+F^Eu$mRyme=7aaWavAhV7A36eCzPf7B z&BNAs)1Zv(d~tCzE2yFlooWXd4Vk_{jWke#4`KXQ@j1mxL>40;)TNgl>@2lL*koe)1Y{Nj@O)0ZQHr5?f`Y09%^a<-qRDwh zDKIb)RkeWZq?$lFQd$In6#d>K`@mWEowh{Z2XJsjb)`curw-4~nopNAY;0_-Ksk^u zu=lGp0OFTqogoh&rNhdA+d1@-MA4Gfxc6 z>9B8~fPesOk&&&?ZU%Ny&z3?LYtVB^0WJ=uOk9>JU)@ce-@N=Cpup|+IIPQl++u6> zFxX-Z0#q=P;5&<)bV}wb=pdsd7OZQSVAl=YOBE358o>F$Dm4tijzq#_E>d~X?yCUN zjBV>FBrP$_GOYz>6-}3@sg`;kif%;l z8dreuLLGy#0jnsG_}u~FWGL`9kQIeEB5*aZ1cO^}nd~~G=B@&l`vh!f016oc^TUh4 zUqDzM3%z7wWPAr)9tOtR3W%b-LD+}}$Q2+hA0JOjG-Fpz!IJKVK8EORB%|Fpu5@@+ zvwQ58Rd-%JNI*>)-qhdLeedP-u=#ZMmk8BUKcd|~wV)q=JG5X)nn`C8mJdP-pObXK+T z+L)&g;0~Y^P%M^(j(+rbR^rL{FjwDv;MdUqi||LB9v>P~G&fJkG3G&@d(c7}FP}vl z@pk=fq{9j#xszgIZJoS7D?>a6KZ%M*??Gc5HPwfwhNDbR}LPtSFE56PI`naR(GgIPZfM|GbCqdKM+Q=gk&z37fOo9f9@6W*W3FSUFdJ~|fXdoo} zd*esn4s*=>geVgZ^hB~~_EK#hR+sSqCgViZ|KSc#4H_AS;m6}yFXi4C Date: Tue, 17 Dec 2024 18:14:23 -0500 Subject: [PATCH 13/21] Type checking --- .../autogen-core/samples/adas/adas.py | 61 +++++++++++-------- .../autogen-core/samples/adas/adas_prompt.py | 58 +++++++++--------- .../autogen-core/samples/adas/utils.py | 6 +- .../samples/adas/utils_benchmark_template.py | 4 +- .../autogen-core/samples/adas/utils_drop.py | 8 +-- 5 files changed, 76 insertions(+), 61 deletions(-) diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py index e00edb81b69e..3ba1d12c07a8 100644 --- a/python/packages/autogen-core/samples/adas/adas.py +++ b/python/packages/autogen-core/samples/adas/adas.py @@ -5,7 +5,6 @@ systems. Please read the README.md for more information. 
""" -import argparse import asyncio import importlib import json @@ -14,23 +13,23 @@ import random import time import uuid -from collections import namedtuple +from argparse import ArgumentParser, Namespace from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass -from typing import Dict, List +from typing import Any, Callable, Dict, List, Sequence, Union import numpy as np from adas_prompt import get_init_archive, get_prompt, get_reflexion_prompt -from autogen_core import DefaultTopicId, RoutedAgent, SingleThreadedAgentRuntime, default_subscription, message_handler -from autogen_core.base import MessageContext -from autogen_core.components.models import ( +from autogen_core import DefaultTopicId, RoutedAgent, SingleThreadedAgentRuntime, default_subscription, message_handler, MessageContext +# from autogen_core.base import MessageContext +from autogen_core.models import ( AssistantMessage, ChatCompletionClient, LLMMessage, - SystemMessage, + # SystemMessage, # SystemMessage is not allowed in o1-preview API. TODO: Accomodate o1 model UserMessage, ) -from autogen_ext.models import AzureOpenAIChatCompletionClient +from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider from pydantic import BaseModel from tqdm import tqdm @@ -39,7 +38,13 @@ logging.basicConfig(level=logging.WARNING) logging.getLogger("autogen_core").setLevel(logging.DEBUG) -Info = namedtuple("Info", ["name", "author", "content", "iteration_idx"]) +@dataclass +class Info: + def __init__(self, name: str, author: str, content: str, iteration_idx: int) -> None: + self.name = name + self.author = author + self.content = content + self.iteration_idx = iteration_idx SEARCHING_MODE = True @@ -50,7 +55,7 @@ class ADASTask: class LLMMessageList(BaseModel): - llm_message_list: List[LLMMessage] + llm_message_list: Sequence[LLMMessage] @dataclass @@ -63,12 +68,12 @@ def __init__(self) -> None: pass -def generate_task(input_infos) -> str: +def generate_task(input_infos: List[Union[Info, tuple[str, str, str, int]]]) -> str: # construct input infos text input_infos_text = "" for input_info in input_infos: if isinstance(input_info, Info): - (field_name, author, content, iteration_idx) = input_info + (field_name, content, iteration_idx) = input_info.name, input_info.content, input_info.iteration_idx else: continue @@ -83,7 +88,7 @@ def generate_task(input_infos) -> str: return prompt -def evaluate_forward_fn(arguments, forward_str): +def evaluate_forward_fn(arguments: Namespace, forward_str: str) -> List[float]: # Dynamically import benchmark-specific module given the path to the python file. # File must contain load_dataset and compute_metrics functions print(f"Loading functions from {arguments.benchmark_specific_utils_file}") @@ -93,19 +98,20 @@ def evaluate_forward_fn(arguments, forward_str): # dynamically define forward() # modified from https://github.com/luchris429/DiscoPOP/blob/main/scripts/launch_evo.py - namespace = {} + namespace: Dict[str, Callable[[str, str], str]] = {} print(f"forward str {forward_str}") exec(forward_str, globals(), namespace) - names = list(namespace.keys()) + names: List[str] = list(namespace.keys()) if len(names) != 1: raise AssertionError(f"{len(names)} things in namespace. 
Please only provide 1") - func = namespace[names[0]] + func: Callable[[str, str], str] = namespace[names[0]] if not callable(func): raise AssertionError(f"{func} is not callable") AgentSystem.forward = func # set seed 0 for valid set - examples = module.load_dataset(arguments.data_filename)[1:-1] # first one and the last one is for few-shot examples + # first one and the last one is for few-shot examples + examples: List[Dict[str, Any]] = module.load_dataset(arguments.data_filename)[1:-1] random.seed(arguments.shuffle_seed) random.shuffle(examples) @@ -114,8 +120,8 @@ def evaluate_forward_fn(arguments, forward_str): else: examples = examples[arguments.valid_size : arguments.valid_size + arguments.test_size] * arguments.n_repeat - questions = [example["inputs"] for example in examples] - answers = [example["targets"] for example in examples] + questions: List[str] = [example["inputs"] for example in examples] + answers: List[Any] = [example["targets"] for example in examples] print(f"problem length: {len(examples)}") max_workers = min(len(examples), arguments.max_workers) if arguments.multiprocessing else 1 @@ -125,12 +131,12 @@ def evaluate_forward_fn(arguments, forward_str): taskInfo = Info("task", "User", q, -1) task_queue.append((taskInfo, AgentSystem())) - def call_forward(agent_task_queue): + def call_forward(agent_task_queue: List[tuple[Info, AgentSystem]]) -> str: taskInfo, agent = agent_task_queue print(f"taskInfo {taskInfo}") task = generate_task([taskInfo]) - result = agent.forward(task, arguments.base_agent_model_config) + result: str = agent.forward(task, arguments.base_agent_model_config) if arguments.thread_sleep: print(f"Sleeping for {arguments.thread_sleep}") time.sleep(arguments.thread_sleep) @@ -139,7 +145,7 @@ def call_forward(agent_task_queue): with ThreadPoolExecutor(max_workers=max_workers) as executor: results = list(tqdm(executor.map(call_forward, task_queue), total=len(task_queue))) - acc_list = module.compute_metrics(results, answers) + acc_list: List[float] = module.compute_metrics(results, answers) print(f"f1: {bootstrap_confidence_interval(acc_list)}") return acc_list @@ -149,7 +155,12 @@ def call_forward(agent_task_queue): class ADASAgent(RoutedAgent): """An agent that performs ADAS.""" - def __init__(self, model_client: ChatCompletionClient, system_prompt: str, args, archive) -> None: + def __init__(self, + model_client: ChatCompletionClient, + system_prompt: str, + args: Namespace, + archive: List[Dict[str, str]] = [{}] + ) -> None: super().__init__("An agent searching agent.") self._args = args self._archive = archive @@ -327,7 +338,7 @@ async def handle_adas_task(self, message: ADASTask, ctx: MessageContext) -> None json.dump(archive, json_file, indent=4) -async def main(arguments) -> None: +async def main(arguments: Namespace) -> None: token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") # Create an AzureOpenAI model client. 
client = AzureOpenAIChatCompletionClient( @@ -367,7 +378,7 @@ async def main(arguments) -> None: if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Run ADAS") + parser = ArgumentParser(description="Run ADAS") parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.") parser.add_argument("--data_filename", type=str, default="dataset/drop_v0_dev.jsonl.gz") parser.add_argument("--valid_size", type=int, default=128) diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py index edf5123bbc21..eec15243804c 100644 --- a/python/packages/autogen-core/samples/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/adas/adas_prompt.py @@ -1,7 +1,13 @@ +""" +ADAS helper to generate prompt for ADAS meta-agent. +""" +# pyright: basic import json import requests from github import Github +from github.Repository import Repository +from typing import List, Dict, Union EXAMPLE = { "thought": "**Insights:**\nYour insights on what should be the next interesting agent.\n**Overall Idea:**\nyour reasoning and the overall concept behind the agent design.\n**Implementation:**\ndescribe the implementation step by step.", @@ -13,7 +19,7 @@ } -def read_github_file(url): +def read_github_file(url: str) -> Union[str, None]: response = requests.get(url) if response.status_code == 200: return response.text @@ -21,10 +27,10 @@ def read_github_file(url): return None -def print_repo_contents(repo, path="", indent=""): +def print_repo_contents(repo: Repository, path: str = "", indent: str = "") -> List[str]: contents = repo.get_contents(path) documentation = [] - for content_file in contents: + for content_file in contents: # pyright: ignore [reportGeneralTypeIssues] if content_file.type == "dir": documentation.extend(print_repo_contents(repo, content_file.path, indent + "│ ")) else: @@ -35,7 +41,7 @@ def print_repo_contents(repo, path="", indent=""): return documentation -def get_autogen_documentation(): +def get_autogen_documentation() -> List[str]: repo_name = "microsoft/autogen" directory_name = "python/packages/autogen-core/docs/src/user-guide/core-user-guide" g = Github() @@ -63,15 +69,14 @@ def get_autogen_documentation(): import json from dataclasses import dataclass import sys - from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, ClosureContext, DefaultSubscription - from autogen_core.base import AgentId, AgentRuntime, MessageContext - from autogen_core.components.models import ( + from autogen_core import AgentId, AgentRuntime, MessageContext, SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, ClosureContext, DefaultSubscription + from autogen_core.models import ( ChatCompletionClient, LLMMessage, SystemMessage, UserMessage, ) - from autogen_ext.models import AzureOpenAIChatCompletionClient + from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from typing import List from azure.identity import DefaultAzureCredential, get_bearer_token_provider @@ -79,6 +84,7 @@ def get_autogen_documentation(): # Create an AzureOpenAI model client. 
model_client = AzureOpenAIChatCompletionClient( + azure_deployment=model_client_kwargs['azure_deployment'], model=model_client_kwargs['model'], api_version=model_client_kwargs['api_version'], azure_endpoint=model_client_kwargs['azure_endpoint'], @@ -181,16 +187,15 @@ async def output_result(_agent: ClosureContext, message: FinalResult, ctx: Messa import json from dataclasses import dataclass import sys - from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, ClosureContext, DefaultSubscription - from autogen_core.base import AgentId, AgentRuntime, MessageContext - from autogen_core.components.models import ( + from autogen_core import AgentId, AgentRuntime, MessageContext, SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, message_handler, ClosureAgent, ClosureContext, DefaultSubscription + from autogen_core.models import ( ChatCompletionClient, LLMMessage, SystemMessage, UserMessage, ) from typing import List - from autogen_ext.models import AzureOpenAIChatCompletionClient + from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") @@ -342,16 +347,15 @@ async def main(): import uuid from dataclasses import dataclass from typing import Dict, List, Union - from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime - from autogen_core import SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, TypeSubscription, DefaultSubscription, ClosureAgent, ClosureContext, message_handler, default_subscription - from autogen_core.components.models import ( + from autogen_core import MessageContext, TopicId, AgentId, AgentRuntime, SingleThreadedAgentRuntime, DefaultTopicId, RoutedAgent, TypeSubscription, DefaultSubscription, ClosureAgent, ClosureContext, message_handler, default_subscription + from autogen_core.models import ( AssistantMessage, ChatCompletionClient, LLMMessage, SystemMessage, UserMessage, ) - from autogen_ext.models import AzureOpenAIChatCompletionClient + from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") @@ -634,16 +638,15 @@ async def output_result(_agent: ClosureContext, message: WritingResult, ctx: Mes import uuid from dataclasses import dataclass from typing import Dict, List, Union - from autogen_core.base import MessageContext, TopicId, AgentId, AgentRuntime - from autogen_core import SingleThreadedAgentRuntime, RoutedAgent, default_subscription, message_handler, TypeSubscription, ClosureAgent, ClosureContext, DefaultTopicId - from autogen_core.components.models import ( + from autogen_core import MessageContext, TopicId, AgentId, AgentRuntime, SingleThreadedAgentRuntime, RoutedAgent, default_subscription, message_handler, TypeSubscription, ClosureAgent, ClosureContext, DefaultTopicId + from autogen_core.models import ( AssistantMessage, ChatCompletionClient, LLMMessage, SystemMessage, UserMessage, ) - from autogen_ext.models import AzureOpenAIChatCompletionClient + from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider token_provider = get_bearer_token_provider(DefaultAzureCredential(), 
"https://cognitiveservices.azure.com/.default") @@ -887,16 +890,15 @@ async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageCon import logging from dataclasses import dataclass from typing import List, Dict, Any - from autogen_core import SingleThreadedAgentRuntime, default_subscription, RoutedAgent, message_handler, ClosureAgent, ClosureContext, TypeSubscription, DefaultTopicId - from autogen_core.base import AgentId, AgentRuntime, MessageContext, TopicId - from autogen_core.components.models import ( + from autogen_core import AgentId, AgentRuntime, MessageContext, TopicId, SingleThreadedAgentRuntime, default_subscription, RoutedAgent, message_handler, ClosureAgent, ClosureContext, TypeSubscription, DefaultTopicId + from autogen_core.models import ( ChatCompletionClient, SystemMessage, UserMessage, AssistantMessage, LLMMessage, ) - from autogen_ext.models import AzureOpenAIChatCompletionClient + from autogen_ext.models.openai import AzureOpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider from autogen_core.application.logging import TRACE_LOGGER_NAME @@ -1692,7 +1694,7 @@ async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageCon """ -def get_init_archive(): +def get_init_archive() -> List[Dict[str, str]]: return [ COT, COT_SC, @@ -1703,7 +1705,7 @@ def get_init_archive(): # from typing import tuple -def get_prompt(current_archive, adaptive=False) -> tuple[str, str]: +def get_prompt(current_archive: List[Dict[str, str]]) -> tuple[str, str]: archive_str = ",\n".join([json.dumps(sol) for sol in current_archive]) archive_str = f"[{archive_str}]" prompt = base.replace("[ARCHIVE]", archive_str) @@ -1711,8 +1713,8 @@ def get_prompt(current_archive, adaptive=False) -> tuple[str, str]: prompt = prompt.replace("[DOCUMENTATION]", json.dumps(DOCUMENTATION)) return system_prompt, prompt - -def get_reflexion_prompt(prev_example) -> tuple[str, str, str, str]: +from typing import Dict +def get_reflexion_prompt(prev_example: Dict[str, str]) -> tuple[str, str, str, str]: prev_example_str = "Here is the previous agent you tried:\n" + json.dumps(prev_example) + "\n\n" r1 = ( Reflexion_prompt_1.replace("[EXAMPLE]", prev_example_str) diff --git a/python/packages/autogen-core/samples/adas/utils.py b/python/packages/autogen-core/samples/adas/utils.py index 1a7591286474..c413fe566a0b 100644 --- a/python/packages/autogen-core/samples/adas/utils.py +++ b/python/packages/autogen-core/samples/adas/utils.py @@ -1,11 +1,13 @@ """ Benchmark-agnostic utilities """ +# pyright: basic import random import string import numpy as np +from typing import List def random_id(length=4): @@ -14,7 +16,7 @@ def random_id(length=4): return random_id -def bootstrap_confidence_interval(data, num_bootstrap_samples=100000, confidence_level=0.95): +def bootstrap_confidence_interval(data: List[float], num_bootstrap_samples=100000, confidence_level=0.95) -> str: """ Calculate the bootstrap confidence interval for the mean of 1D accuracy data. Also returns the median of the bootstrap means. @@ -28,7 +30,7 @@ def bootstrap_confidence_interval(data, num_bootstrap_samples=100000, confidence - str: Formatted string with 95% confidence interval and median as percentages with one decimal place. 
""" # Convert data to a numpy array for easier manipulation - data = np.array(data) + data = np.array(data) # pyright: ignore # List to store the means of bootstrap samples bootstrap_means = [] diff --git a/python/packages/autogen-core/samples/adas/utils_benchmark_template.py b/python/packages/autogen-core/samples/adas/utils_benchmark_template.py index 04cf33e694da..e9c548689fb9 100644 --- a/python/packages/autogen-core/samples/adas/utils_benchmark_template.py +++ b/python/packages/autogen-core/samples/adas/utils_benchmark_template.py @@ -6,7 +6,7 @@ from typing import Any, Dict, List -def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]: +def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]: # pyright: ignore """ Calculates the score based on a list of predictions and labels. @@ -21,7 +21,7 @@ def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]: pass -def load_dataset(file_path: str) -> List[Dict[str, Any]]: +def load_dataset(file_path: str) -> List[Dict[str, Any]]: # pyright: ignore """ Loads in a dataset, with both input and targets, based on a file path. Any preprocessing, such as adding few-shot examples, must be done in this function. diff --git a/python/packages/autogen-core/samples/adas/utils_drop.py b/python/packages/autogen-core/samples/adas/utils_drop.py index a92fdaca830e..5783e0ba3f4d 100644 --- a/python/packages/autogen-core/samples/adas/utils_drop.py +++ b/python/packages/autogen-core/samples/adas/utils_drop.py @@ -6,10 +6,10 @@ Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, Matt Gardner https://arxiv.org/abs/1903.00161 """ +# pyright: basic import gzip import json -import random import re import string from typing import Any, Dict, List, Set, Tuple, Union @@ -85,7 +85,7 @@ def _answer_to_bags(answer: Union[str, List[str], Tuple[str, ...]]) -> Tuple[Lis return normalized_spans, token_bags -def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]: +def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]): """ Takes gold and predicted answer sets and first finds the optimal 1-1 alignment between them and gets maximum metric values over all the answers. 
@@ -99,7 +99,7 @@ def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]: max_scores = np.zeros([max(len(gold), len(predicted))]) for row, column in zip(row_ind, col_ind, strict=False): - max_scores[row] = max(max_scores[row], scores[row, column]) + max_scores[row] = max(max_scores[row], scores[row, column]) # pyright: ignore return max_scores @@ -152,7 +152,7 @@ def get_drop_metrics( f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) f1 = np.mean(f1_per_bag) f1 = round(f1, 2) - return exact_match, f1 + return exact_match, f1 # pyright: ignore def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: From 296daf74c9942865de1a469d96df0b45564d7f37 Mon Sep 17 00:00:00 2001 From: Andy Ye Date: Wed, 18 Dec 2024 17:51:47 -0500 Subject: [PATCH 14/21] Format; lint --- .../autogen-core/samples/adas/adas.py | 44 ++++++++++++------- .../autogen-core/samples/adas/adas_prompt.py | 9 ++-- .../autogen-core/samples/adas/utils.py | 2 +- .../samples/adas/utils_benchmark_template.py | 4 +- .../autogen-core/samples/adas/utils_drop.py | 4 +- 5 files changed, 38 insertions(+), 25 deletions(-) diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py index 3ba1d12c07a8..fa7ac7dd89cb 100644 --- a/python/packages/autogen-core/samples/adas/adas.py +++ b/python/packages/autogen-core/samples/adas/adas.py @@ -5,6 +5,7 @@ systems. Please read the README.md for more information. """ +# pyright: basic import asyncio import importlib import json @@ -20,7 +21,15 @@ import numpy as np from adas_prompt import get_init_archive, get_prompt, get_reflexion_prompt -from autogen_core import DefaultTopicId, RoutedAgent, SingleThreadedAgentRuntime, default_subscription, message_handler, MessageContext +from autogen_core import ( + DefaultTopicId, + MessageContext, + RoutedAgent, + SingleThreadedAgentRuntime, + default_subscription, + message_handler, +) + # from autogen_core.base import MessageContext from autogen_core.models import ( AssistantMessage, @@ -38,6 +47,7 @@ logging.basicConfig(level=logging.WARNING) logging.getLogger("autogen_core").setLevel(logging.DEBUG) + @dataclass class Info: def __init__(self, name: str, author: str, content: str, iteration_idx: int) -> None: @@ -46,6 +56,7 @@ def __init__(self, name: str, author: str, content: str, iteration_idx: int) -> self.content = content self.iteration_idx = iteration_idx + SEARCHING_MODE = True @@ -68,7 +79,7 @@ def __init__(self) -> None: pass -def generate_task(input_infos: List[Union[Info, tuple[str, str, str, int]]]) -> str: +def generate_task(input_infos: List[Union[Info, Any]]) -> str: # construct input infos text input_infos_text = "" for input_info in input_infos: @@ -92,8 +103,8 @@ def evaluate_forward_fn(arguments: Namespace, forward_str: str) -> List[float]: # Dynamically import benchmark-specific module given the path to the python file. 
# File must contain load_dataset and compute_metrics functions print(f"Loading functions from {arguments.benchmark_specific_utils_file}") - spec = importlib.util.spec_from_file_location("module_name", arguments.benchmark_specific_utils_file) - module = importlib.util.module_from_spec(spec) + spec = importlib.util.spec_from_file_location("module_name", arguments.benchmark_specific_utils_file) # pyright: ignore reportAttributeAccessIssue + module = importlib.util.module_from_spec(spec) # pyright: ignore reportAttributeAccessIssue spec.loader.exec_module(module) # dynamically define forward() @@ -107,11 +118,11 @@ def evaluate_forward_fn(arguments: Namespace, forward_str: str) -> List[float]: func: Callable[[str, str], str] = namespace[names[0]] if not callable(func): raise AssertionError(f"{func} is not callable") - AgentSystem.forward = func + AgentSystem.forward = func # pyright: ignore reportAttributeAccessIssue # set seed 0 for valid set # first one and the last one is for few-shot examples - examples: List[Dict[str, Any]] = module.load_dataset(arguments.data_filename)[1:-1] + examples: List[Dict[str, Any]] = list(module.load_dataset(arguments.data_filename)[1:-1]) random.seed(arguments.shuffle_seed) random.shuffle(examples) @@ -136,7 +147,7 @@ def call_forward(agent_task_queue: List[tuple[Info, AgentSystem]]) -> str: print(f"taskInfo {taskInfo}") task = generate_task([taskInfo]) - result: str = agent.forward(task, arguments.base_agent_model_config) + result: str = agent.forward(task, arguments.base_agent_model_config) # pyright: ignore reportAttributeAccessIssue if arguments.thread_sleep: print(f"Sleeping for {arguments.thread_sleep}") time.sleep(arguments.thread_sleep) @@ -155,19 +166,20 @@ def call_forward(agent_task_queue: List[tuple[Info, AgentSystem]]) -> str: class ADASAgent(RoutedAgent): """An agent that performs ADAS.""" - def __init__(self, - model_client: ChatCompletionClient, - system_prompt: str, - args: Namespace, - archive: List[Dict[str, str]] = [{}] - ) -> None: + def __init__( + self, + model_client: ChatCompletionClient, + system_prompt: str, + args: Namespace, + archive: List[Dict[str, str]] = [{}], + ) -> None: super().__init__("An agent searching agent.") self._args = args - self._archive = archive + self._archive = archive if archive else [{}] self._model_client = model_client self._session_memory: Dict[str, List[ADASTask]] = {} - self._system_messages: List[LLMMessage] = [ + self._system_messages: Sequence[LLMMessage] = [ # SystemMessage is not allowed in o1-preview API. TODO: Accomodate o1 model # SystemMessage( AssistantMessage( @@ -182,7 +194,7 @@ def __init__(self, async def handle_task(self, message: LLMMessageList, ctx: MessageContext) -> LLMResponse: print("Meta-Agent making a LLM call...") logging.info(f"{self._description} received message: {message}") - model_result = await self._model_client.create(self._system_messages + message.llm_message_list) + model_result = await self._model_client.create(self._system_messages + message.llm_message_list) # pyright: ignore reportAttributeAccessIssue assert isinstance(model_result.content, str) print(f"Model client result: {model_result.content}") diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py index eec15243804c..f88b50c464fc 100644 --- a/python/packages/autogen-core/samples/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/adas/adas_prompt.py @@ -1,13 +1,14 @@ """ ADAS helper to generate prompt for ADAS meta-agent. 
""" + # pyright: basic import json +from typing import Dict, List, Union import requests from github import Github from github.Repository import Repository -from typing import List, Dict, Union EXAMPLE = { "thought": "**Insights:**\nYour insights on what should be the next interesting agent.\n**Overall Idea:**\nyour reasoning and the overall concept behind the agent design.\n**Implementation:**\ndescribe the implementation step by step.", @@ -30,7 +31,7 @@ def read_github_file(url: str) -> Union[str, None]: def print_repo_contents(repo: Repository, path: str = "", indent: str = "") -> List[str]: contents = repo.get_contents(path) documentation = [] - for content_file in contents: # pyright: ignore [reportGeneralTypeIssues] + for content_file in contents: # pyright: ignore [reportGeneralTypeIssues] if content_file.type == "dir": documentation.extend(print_repo_contents(repo, content_file.path, indent + "│ ")) else: @@ -1713,8 +1714,8 @@ def get_prompt(current_archive: List[Dict[str, str]]) -> tuple[str, str]: prompt = prompt.replace("[DOCUMENTATION]", json.dumps(DOCUMENTATION)) return system_prompt, prompt -from typing import Dict -def get_reflexion_prompt(prev_example: Dict[str, str]) -> tuple[str, str, str, str]: + +def get_reflexion_prompt(prev_example: Union[Dict[str, str], None]) -> tuple[str, str, str, str]: prev_example_str = "Here is the previous agent you tried:\n" + json.dumps(prev_example) + "\n\n" r1 = ( Reflexion_prompt_1.replace("[EXAMPLE]", prev_example_str) diff --git a/python/packages/autogen-core/samples/adas/utils.py b/python/packages/autogen-core/samples/adas/utils.py index c413fe566a0b..f53b99bbaa5e 100644 --- a/python/packages/autogen-core/samples/adas/utils.py +++ b/python/packages/autogen-core/samples/adas/utils.py @@ -30,7 +30,7 @@ def bootstrap_confidence_interval(data: List[float], num_bootstrap_samples=10000 - str: Formatted string with 95% confidence interval and median as percentages with one decimal place. """ # Convert data to a numpy array for easier manipulation - data = np.array(data) # pyright: ignore + data = np.array(data) # pyright: ignore # List to store the means of bootstrap samples bootstrap_means = [] diff --git a/python/packages/autogen-core/samples/adas/utils_benchmark_template.py b/python/packages/autogen-core/samples/adas/utils_benchmark_template.py index e9c548689fb9..a862fe3f0bde 100644 --- a/python/packages/autogen-core/samples/adas/utils_benchmark_template.py +++ b/python/packages/autogen-core/samples/adas/utils_benchmark_template.py @@ -6,7 +6,7 @@ from typing import Any, Dict, List -def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]: # pyright: ignore +def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]: # pyright: ignore """ Calculates the score based on a list of predictions and labels. @@ -21,7 +21,7 @@ def compute_metrics(predictions: List[Any], labels: List[Any]) -> List[float]: # pass -def load_dataset(file_path: str) -> List[Dict[str, Any]]: # pyright: ignore +def load_dataset(file_path: str) -> List[Dict[str, Any]]: # pyright: ignore """ Loads in a dataset, with both input and targets, based on a file path. Any preprocessing, such as adding few-shot examples, must be done in this function. 
diff --git a/python/packages/autogen-core/samples/adas/utils_drop.py b/python/packages/autogen-core/samples/adas/utils_drop.py index 5783e0ba3f4d..8619c4ddd797 100644 --- a/python/packages/autogen-core/samples/adas/utils_drop.py +++ b/python/packages/autogen-core/samples/adas/utils_drop.py @@ -99,7 +99,7 @@ def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]): max_scores = np.zeros([max(len(gold), len(predicted))]) for row, column in zip(row_ind, col_ind, strict=False): - max_scores[row] = max(max_scores[row], scores[row, column]) # pyright: ignore + max_scores[row] = max(max_scores[row], scores[row, column]) # pyright: ignore return max_scores @@ -152,7 +152,7 @@ def get_drop_metrics( f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) f1 = np.mean(f1_per_bag) f1 = round(f1, 2) - return exact_match, f1 # pyright: ignore + return exact_match, f1 # pyright: ignore def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: From 34cc06750cf44f8454f61c201700089e8b3ad233 Mon Sep 17 00:00:00 2001 From: Andy Ye Date: Wed, 18 Dec 2024 17:56:53 -0500 Subject: [PATCH 15/21] Add NOTICE --- python/packages/autogen-core/samples/adas/NOTICE | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 python/packages/autogen-core/samples/adas/NOTICE diff --git a/python/packages/autogen-core/samples/adas/NOTICE b/python/packages/autogen-core/samples/adas/NOTICE new file mode 100644 index 000000000000..4c2e92bd93f9 --- /dev/null +++ b/python/packages/autogen-core/samples/adas/NOTICE @@ -0,0 +1,10 @@ +Work was adapted from the work here https://github.com/ShengranHu/ADAS/tree/main, which is under Apache 2.0 license. + +Reference: +@article{hu2024ADAS, +title={Automated Design of Agentic Systems}, +author={Hu, Shengran and Lu, Cong and Clune, Jeff}, +journal={arXiv preprint arXiv:2408.08435}, +year={2024} +} + From dc2fe0afa3ca0378e94fe0ec9b29c2c537756243 Mon Sep 17 00:00:00 2001 From: Andy Ye Date: Wed, 18 Dec 2024 18:16:57 -0500 Subject: [PATCH 16/21] Update --- python/packages/autogen-core/samples/adas/NOTICE | 2 -- python/packages/autogen-core/samples/adas/adas.py | 2 +- python/packages/autogen-core/samples/adas/adas_prompt.py | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/python/packages/autogen-core/samples/adas/NOTICE b/python/packages/autogen-core/samples/adas/NOTICE index 4c2e92bd93f9..5ffe8c5308f3 100644 --- a/python/packages/autogen-core/samples/adas/NOTICE +++ b/python/packages/autogen-core/samples/adas/NOTICE @@ -1,10 +1,8 @@ Work was adapted from the work here https://github.com/ShengranHu/ADAS/tree/main, which is under Apache 2.0 license. 
-Reference: @article{hu2024ADAS, title={Automated Design of Agentic Systems}, author={Hu, Shengran and Lu, Cong and Clune, Jeff}, journal={arXiv preprint arXiv:2408.08435}, year={2024} } - diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py index fa7ac7dd89cb..283fce65b9af 100644 --- a/python/packages/autogen-core/samples/adas/adas.py +++ b/python/packages/autogen-core/samples/adas/adas.py @@ -171,7 +171,7 @@ def __init__( model_client: ChatCompletionClient, system_prompt: str, args: Namespace, - archive: List[Dict[str, str]] = [{}], + archive=None, ) -> None: super().__init__("An agent searching agent.") self._args = args diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py index f88b50c464fc..4250a684494f 100644 --- a/python/packages/autogen-core/samples/adas/adas_prompt.py +++ b/python/packages/autogen-core/samples/adas/adas_prompt.py @@ -7,8 +7,8 @@ from typing import Dict, List, Union import requests -from github import Github -from github.Repository import Repository +from github import Github # pyright: ignore reportMissingImports +from github.Repository import Repository # pyright: ignore reportMissingImports EXAMPLE = { "thought": "**Insights:**\nYour insights on what should be the next interesting agent.\n**Overall Idea:**\nyour reasoning and the overall concept behind the agent design.\n**Implementation:**\ndescribe the implementation step by step.", From e5d073fc806a05431a83f75bae6d54d3d0e38716 Mon Sep 17 00:00:00 2001 From: Andy Ye Date: Thu, 19 Dec 2024 15:33:15 -0500 Subject: [PATCH 17/21] Add azure_deployment --- .../packages/autogen-core/samples/adas/README.md | 15 ++++++++------- python/packages/autogen-core/samples/adas/adas.py | 1 + .../autogen-core/samples/adas/adas_prompt.py | 5 +++++ 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/packages/autogen-core/samples/adas/README.md index 1073730882ae..66032f88c501 100644 --- a/python/packages/autogen-core/samples/adas/README.md +++ b/python/packages/autogen-core/samples/adas/README.md @@ -331,7 +331,8 @@ agent_model_kwargs = { 'azure_endpoint': 'https://-aoai1.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2024-08-01-preview', 'model_capabilities': {'function_calling': True, 'json_output': True, 'vision': True}, 'azure_ad_token_provider': 'DEFAULT', - 'model': 'gpt-35-turbo' + 'model': 'gpt-35-turbo', + 'azure_deployment': 'gpt-35-turbo' } ``` Finally, the output of this `forward` function should be the answer that the agent system comes up with. @@ -374,14 +375,14 @@ o1-preview is also reported to be great at writing code, and we suggest you try This should be passed as a JSON string to the `meta_agent_model_config` flag. 
```bash
---meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}'
+--meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12", "azure_deployment": "o1-preview-2024-09-12"}'
 ```
 
 #### Choose the LLM for the base agents used within the agent system
 The paper authors use GPT-3.5 (for cost purposes), but we recommend GPT-4o for better quality.
 This should be passed as a JSON string to the `base_agent_model_config` flag.
 ```bash
---base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06"}'
+--base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}'
 ```
 ### Run ADAS
 ```bash
 python packages/autogen-core/samples/adas/adas.py \
     --data_filename=/home//ADAS/dataset/drop_v0_dev.jsonl.gz \
     --n_generation=150 \
     --expr_name=drop_o1_preview_meta_gpt4o_base_results \
-    --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}' \
-    --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06"}' \
+    --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12", "azure_deployment": "o1-preview-2024-09-12"}' \
+    --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}' \
     --benchmark_specific_utils_file='/home//autogen/python/packages/autogen-core/samples/adas/utils_drop.py'


# For your own benchmark
@@ -399,8 +400,8 @@ python packages/autogen-core/samples/adas/adas.py \
     --data_filename=/home//my_benchmark_data.csv \
     --n_generation=150 \
     --expr_name=drop_o1_preview_meta_gpt4o_base_results \
-    --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12"}' \
-    --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06"}' \
+    --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12", "azure_deployment": "o1-preview-2024-09-12"}' \
+    --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}' \
     --benchmark_specific_utils_file='/home//autogen/python/packages/autogen-core/samples/adas/utils_my_benchmark.py'
 ```
 You can also increase the number of generations of new agents that the meta-agent tries to create. Note that if there is any compilation error, that generation will be skipped. (Potential bug, or at least confusing behavior.)
diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py
index 283fce65b9af..c92ea99bb296 100644
--- a/python/packages/autogen-core/samples/adas/adas.py
+++ b/python/packages/autogen-core/samples/adas/adas.py
@@ -354,6 +354,7 @@ async def main(arguments: Namespace) -> None:
     token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
     # Create an AzureOpenAI model client.
     client = AzureOpenAIChatCompletionClient(
+        azure_deployment=arguments.meta_agent_model_config['azure_deployment'],
         model=arguments.meta_agent_model_config["model"],
         api_version=arguments.meta_agent_model_config["api_version"],
         azure_endpoint=arguments.meta_agent_model_config["azure_endpoint"],
diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/packages/autogen-core/samples/adas/adas_prompt.py
index 4250a684494f..a12ad8c9cf58 100644
--- a/python/packages/autogen-core/samples/adas/adas_prompt.py
+++ b/python/packages/autogen-core/samples/adas/adas_prompt.py
@@ -203,6 +203,7 @@ async def output_result(_agent: ClosureContext, message: FinalResult, ctx: Messa
 
     # Create an AzureOpenAI model client.
model_client = AzureOpenAIChatCompletionClient( + azure_deployment=model_client_kwargs['azure_deployment'], model=model_client_kwargs['model'], api_version=model_client_kwargs['api_version'], azure_endpoint=model_client_kwargs['azure_endpoint'], @@ -363,6 +364,7 @@ async def main(): # Create an AzureOpenAI model client. model_client = AzureOpenAIChatCompletionClient( + azure_deployment=model_client_kwargs['azure_deployment'], model=model_client_kwargs['model'], api_version=model_client_kwargs['api_version'], azure_endpoint=model_client_kwargs['azure_endpoint'], @@ -654,6 +656,7 @@ async def output_result(_agent: ClosureContext, message: WritingResult, ctx: Mes # Create an AzureOpenAI model client. model_client = AzureOpenAIChatCompletionClient( + azure_deployment=model_client_kwargs['azure_deployment'], model=model_client_kwargs['model'], api_version=model_client_kwargs['api_version'], azure_endpoint=model_client_kwargs['azure_endpoint'], @@ -911,6 +914,7 @@ async def output_result(_agent: ClosureContext, message: Answer, ctx: MessageCon # Create an AzureOpenAI model client. model_client = AzureOpenAIChatCompletionClient( + azure_deployment=model_client_kwargs['azure_deployment'], model=model_client_kwargs['model'], api_version=model_client_kwargs['api_version'], azure_endpoint=model_client_kwargs['azure_endpoint'], @@ -1548,6 +1552,7 @@ async def main(): # Create an AzureOpenAI model client. model_client = AzureOpenAIChatCompletionClient( + azure_deployment=model_client_kwargs['azure_deployment'], model=model_client_kwargs['model'], api_version=model_client_kwargs['api_version'], azure_endpoint=model_client_kwargs['azure_endpoint'], From c6916996dfdb7681ae34bf38008dcefd488fa2dc Mon Sep 17 00:00:00 2001 From: Andy Ye Date: Thu, 19 Dec 2024 16:08:32 -0500 Subject: [PATCH 18/21] Update README --- .../autogen-core/samples/adas/README.md | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/packages/autogen-core/samples/adas/README.md index 66032f88c501..7996815a12d5 100644 --- a/python/packages/autogen-core/samples/adas/README.md +++ b/python/packages/autogen-core/samples/adas/README.md @@ -228,15 +228,24 @@ In the original paper, the discovered agents significantly outperformed state-of To see the [results](#results-for-drop-benchmark) of early experiments with ADAS in AutoGen, please see the Results section. -## ADAS in AutoGen +## Implementing ADAS using AutoGen-Core API -We have refactored the building block Agent Systems found in the original ADAS code to run using the AutoGen API. Specifically, we decided to implement these Agent Systems at the `AutoGen-Core` level of abstraction (rather than at the `AutoGen-AgentChat` level). +We have refactored the building block Agent Systems found in the original ADAS code to run using the AutoGen API. Specifically, we decided to implement these Agent Systems at the `AutoGen-Core` level of abstraction (rather than at the `AutoGen-AgentChat` level). Here is why: -The vision for going down this path is that the meta-agent can design, using `AutoGen-Core` building blocks, a new (multi-)agent system, which if proven useful (after going through a period of testing/adoption by the team), be incorporated into the official `AgentChat` API. 
+While it is generally the case that `AgentChat` makes it easier to put together a complex multi-agent system, as the API already has some preset teams that implement common multi-agent design patterns (RoundRobinGroupChat, SelectorGroupChat), the `AgentChat` API has two main drawbacks: 1) comprehensiveness and 2) low-level flexibility. -See this document for more on the design tradeoffs between AutoGen-Core and `AutoGen-AgentChat` API. +On the comprehensiveness aspect, the Mixture of Agents design (similar to chain-of-thought with self-consistency, in that we aggregate the outputs of multiple agents) is not a built-in team/orchestrator at the `AgentChat` level. As a result, it has to be implemented in the codebase, using the `AutoGen-Core` API, as an official team-level Agent, and also added as one of the seeds in the ADAS archive. This suggests that we don't even need to bother with `AutoGen-AgentChat`, and can just implement everything with the `AutoGen-Core` API. + +On the low-level flexibility aspect, `AgentChat` hides much of the flexibility needed to configure custom pub/sub topics, message handling, etc. + +In other words, the abstraction at the `AgentChat` level, while useful in the short term for quick development, may restrict the meta-agent's ability to design novel multi-agent patterns, which is really our goal. If all the meta-agent could do were build on the limited building blocks provided by `AgentChat`, it could not be as creative as it otherwise might be. + +The ultimate vision for going down this path is that the meta-agent can design, using `AutoGen-Core` building blocks, a new (multi-)agent system, which, if proven useful (after a period of testing and adoption by the team), can be incorporated into the official `AgentChat` API. + +## ADAS features in AutoGen ### 4 manually crafted Agent Systems serving as the seeds to the archive +- Please refer to the `get_init_archive()` function in the `adas_prompt.py` file for the current seed Agent Systems. - More will be added over time ### Prompt to Meta-Agent @@ -278,6 +287,7 @@ Please see the `adas.py` file for details of all available settings. - `base_agent_model_config`: JSON string of the `AzureOpenAIChatCompletionClient` settings for the Base Agent. - `n_generation`: number of generations of new agents that the meta-agent tries to discover - `expr_name`: name of the output file containing both the original/seed and newly generated agent systems, as well as their fitness scores. +- `save_dir`: the directory to which the output file named by `expr_name` will be saved. - `max_workers`: the number of threads to spin up in a `ThreadPoolExecutor`, to parallelize the execution of the particular Agent System that is currently being evaluated. ## QuickStart @@ -346,7 +356,7 @@ Note: If you add a new agent system after you’ve started generating new Agent ### Generate new Agent Systems #### Prepare your dataset -First download your dataset locally. +First download your dataset locally. It can be in any format, as long as your custom logic in the `load_dataset()` function properly reads the dataset file. Then create a copy of the file called `utils_benchmark_template.py`, and name it with a suffix corresponding to your benchmark. For example, see `utils_drop.py`. Place this under the `adas` directory. This file will later be passed to the `benchmark_specific_utils_file` flag when running the script. @@ -354,8 +364,8 @@ Under the `load_dataset` function, add logic to load in your dataset.
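For instance, if your benchmark ships as gzipped JSONL (as the DROP dev set used elsewhere in this README does), a loader along the following lines could work, before moving on to the CSV example below. This is a sketch; the `inputs` and `targets` record fields are assumptions about your data layout.

```python
# Sketch of a load_dataset implementation for a gzipped JSONL benchmark file.
# The record field names below are assumptions; adapt them to your data.
import gzip
import json
from typing import Any, Dict, List


def load_dataset(file_path: str) -> List[Dict[str, Any]]:
    data: List[Dict[str, Any]] = []
    with gzip.open(file_path, "rt", encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            data.append({"inputs": record["inputs"], "targets": record["targets"]})
    return data
```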
Do any preprocessing of the data if needed. ```python # utils_my_benchmark.py -def load_dataset(filename: str) -> List[Dict[str, Any]]: - df = pd.read_csv(filename) +def load_dataset(file_path: str) -> List[Dict[str, Any]]: + df = pd.read_csv(file_path) data = [{"inputs": "Your job is to solve this math problem: " + inputs, "targets": targets} for inputs, targets in df.itertuples(index=False)] return data ``` @@ -365,7 +375,7 @@ In the same `utils_my_benchmark.py` file, add logic to the compute_metrics function. ```python # utils_my_benchmark.py def compute_metrics(predictions: List[Any], labels: List[Any]) -> float: - return np.square(np.subtract(A, B)).mean() + return np.square(np.subtract(predictions, labels)).mean() ``` ### Choose the LLMs #### Choose the LLMs for the meta-agent @@ -391,6 +401,7 @@ python packages/autogen-core/samples/adas/adas.py \ --data_filename=/home//ADAS/dataset/drop_v0_dev.jsonl.gz \ --n_generation=150 \ --expr_name=drop_o1_preview_meta_gpt4o_base_results \ + --save_dir='results/' \ --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12", "azure_deployment": "o1-preview-2024-09-12"}' \ --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_pr ovider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}' \ --benchmark_specific_utils_file='/home//autogen/python/packages/autogen-core/samples/adas/utils_drop.py' @@ -400,6 +411,7 @@ python packages/autogen-core/samples/adas/adas.py \ --data_filename=/home//my_benchmark_data.csv \ --n_generation=150 \ --expr_name=drop_o1_preview_meta_gpt4o_base_results \ + --save_dir='results/' \ --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12", "azure_deployment": "o1-preview-2024-09-12"}' \ --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_pr ovider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}' \ --benchmark_specific_utils_file='/home//autogen/python/packages/autogen-core/samples/adas/utils_my_benchmark.py' From 95a7a98b7aa680a61bcb17a680d4ec7648cd59a8 Mon Sep 17 00:00:00 2001 From: Andy Ye Date: Thu, 19 Dec 2024 16:09:57 -0500 Subject: [PATCH 19/21] Format --- python/packages/autogen-core/samples/adas/adas.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/packages/autogen-core/samples/adas/adas.py index c92ea99bb296..c21a3f24a9e8 100644 --- a/python/packages/autogen-core/samples/adas/adas.py +++ b/python/packages/autogen-core/samples/adas/adas.py @@ -354,7 +354,7 @@
async def main(arguments: Namespace) -> None: token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") # Create an AzureOpenAI model client. client = AzureOpenAIChatCompletionClient( - azure_deployment=arguments.meta_agent_model_config['azure_deployment'], + azure_deployment=arguments.meta_agent_model_config["azure_deployment"], model=arguments.meta_agent_model_config["model"], api_version=arguments.meta_agent_model_config["api_version"], azure_endpoint=arguments.meta_agent_model_config["azure_endpoint"], From 36305dba6e1c082ae9f1869e7866df897000a061 Mon Sep 17 00:00:00 2001 From: Andy Ye Date: Fri, 20 Dec 2024 15:01:12 -0500 Subject: [PATCH 20/21] Move directories --- python/{packages/autogen-core => }/samples/adas/NOTICE | 0 python/{packages/autogen-core => }/samples/adas/README.md | 0 python/{packages/autogen-core => }/samples/adas/adas.py | 0 python/{packages/autogen-core => }/samples/adas/adas_prompt.py | 0 .../samples/adas/docs/azure_ai_studio_edit_deployment.png | 0 python/{packages/autogen-core => }/samples/adas/requirements.txt | 0 ..._preview_meta_agent_gpt3.5_base_agent_results_run_archive.json | 0 ...1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json | 0 ...view_meta_agent_gpt4o_base_agent_results_run_archive_run2.json | 0 python/{packages/autogen-core => }/samples/adas/utils.py | 0 .../autogen-core => }/samples/adas/utils_benchmark_template.py | 0 python/{packages/autogen-core => }/samples/adas/utils_drop.py | 0 12 files changed, 0 insertions(+), 0 deletions(-) rename python/{packages/autogen-core => }/samples/adas/NOTICE (100%) rename python/{packages/autogen-core => }/samples/adas/README.md (100%) rename python/{packages/autogen-core => }/samples/adas/adas.py (100%) rename python/{packages/autogen-core => }/samples/adas/adas_prompt.py (100%) rename python/{packages/autogen-core => }/samples/adas/docs/azure_ai_studio_edit_deployment.png (100%) rename python/{packages/autogen-core => }/samples/adas/requirements.txt (100%) rename python/{packages/autogen-core => }/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json (100%) rename python/{packages/autogen-core => }/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json (100%) rename python/{packages/autogen-core => }/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json (100%) rename python/{packages/autogen-core => }/samples/adas/utils.py (100%) rename python/{packages/autogen-core => }/samples/adas/utils_benchmark_template.py (100%) rename python/{packages/autogen-core => }/samples/adas/utils_drop.py (100%) diff --git a/python/packages/autogen-core/samples/adas/NOTICE b/python/samples/adas/NOTICE similarity index 100% rename from python/packages/autogen-core/samples/adas/NOTICE rename to python/samples/adas/NOTICE diff --git a/python/packages/autogen-core/samples/adas/README.md b/python/samples/adas/README.md similarity index 100% rename from python/packages/autogen-core/samples/adas/README.md rename to python/samples/adas/README.md diff --git a/python/packages/autogen-core/samples/adas/adas.py b/python/samples/adas/adas.py similarity index 100% rename from python/packages/autogen-core/samples/adas/adas.py rename to python/samples/adas/adas.py diff --git a/python/packages/autogen-core/samples/adas/adas_prompt.py b/python/samples/adas/adas_prompt.py similarity index 100% rename from python/packages/autogen-core/samples/adas/adas_prompt.py rename 
to python/samples/adas/adas_prompt.py diff --git a/python/packages/autogen-core/samples/adas/docs/azure_ai_studio_edit_deployment.png b/python/samples/adas/docs/azure_ai_studio_edit_deployment.png similarity index 100% rename from python/packages/autogen-core/samples/adas/docs/azure_ai_studio_edit_deployment.png rename to python/samples/adas/docs/azure_ai_studio_edit_deployment.png diff --git a/python/packages/autogen-core/samples/adas/requirements.txt b/python/samples/adas/requirements.txt similarity index 100% rename from python/packages/autogen-core/samples/adas/requirements.txt rename to python/samples/adas/requirements.txt diff --git a/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json b/python/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json similarity index 100% rename from python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json rename to python/samples/adas/results/drop_o1_preview_meta_agent_gpt3.5_base_agent_results_run_archive.json diff --git a/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json b/python/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json similarity index 100% rename from python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json rename to python/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive.json diff --git a/python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json b/python/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json similarity index 100% rename from python/packages/autogen-core/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json rename to python/samples/adas/results/drop_o1_preview_meta_agent_gpt4o_base_agent_results_run_archive_run2.json diff --git a/python/packages/autogen-core/samples/adas/utils.py b/python/samples/adas/utils.py similarity index 100% rename from python/packages/autogen-core/samples/adas/utils.py rename to python/samples/adas/utils.py diff --git a/python/packages/autogen-core/samples/adas/utils_benchmark_template.py b/python/samples/adas/utils_benchmark_template.py similarity index 100% rename from python/packages/autogen-core/samples/adas/utils_benchmark_template.py rename to python/samples/adas/utils_benchmark_template.py diff --git a/python/packages/autogen-core/samples/adas/utils_drop.py b/python/samples/adas/utils_drop.py similarity index 100% rename from python/packages/autogen-core/samples/adas/utils_drop.py rename to python/samples/adas/utils_drop.py From 78767e747dd557c77c807e038c7279cfe7348242 Mon Sep 17 00:00:00 2001 From: Andy Ye Date: Fri, 20 Dec 2024 15:21:08 -0500 Subject: [PATCH 21/21] Fix path names --- python/samples/adas/README.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/python/samples/adas/README.md b/python/samples/adas/README.md index 7996815a12d5..315531182f8c 100644 --- a/python/samples/adas/README.md +++ b/python/samples/adas/README.md @@ -311,7 +311,7 @@ cd autogen/python # Install autogen-core and autogen-ext in editable mode pip install -e packages/autogen-core pip install -e packages/autogen-ext -pip install -r 
packages/autogen-core/samples/adas/requirements.txt +pip install -r samples/adas/requirements.txt ``` ### Agent System code definitions @@ -397,24 +397,26 @@ This should be passed as a JSON string to the `base_agent_model_config` flag. ### Run ADAS ```bash # For DROP benchmark -python packages/autogen-core/samples/adas/adas.py \ +python samples/adas/adas.py \ --data_filename=/home//ADAS/dataset/drop_v0_dev.jsonl.gz \ --n_generation=150 \ --expr_name=drop_o1_preview_meta_gpt4o_base_results \ --save_dir='results/' \ + --max_workers=1 \ --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12", "azure_deployment": "o1-preview-2024-09-12"}' \ - --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_pr ovider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}' \ - --benchmark_specific_utils_file='/home//autogen/python/packages/autogen-core/samples/adas/utils_drop.py' + --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}' \ + --benchmark_specific_utils_file='/home//autogen/python/samples/adas/utils_drop.py' # For your own benchmark -python packages/autogen-core/samples/adas/adas.py \ +python samples/adas/adas.py \ --data_filename=/home//my_benchmark_data.csv \ --n_generation=150 \ --expr_name=drop_o1_preview_meta_gpt4o_base_results \ --save_dir='results/' \ + --max_workers=1 \ --meta_agent_model_config='{"api_version": "2024-08-01-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/o1-preview/chat/completions?api-version=2024-08-01-preview", "model_capabilities": {"function_calling": false, "json_output": false, "vision": false}, "azure_ad_token_provider": "DEFAULT", "model": "o1-preview-2024-09-12", "azure_deployment": "o1-preview-2024-09-12"}' \ - --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_pr ovider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}' \ + --base_agent_model_config='{"api_version": "2023-03-15-preview", "azure_endpoint": "https://-aoai1.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview", "model_capabilities": {"function_calling": true, "json_output": true, "vision": true}, "azure_ad_token_provider": "DEFAULT", "model": "gpt-4o-2024-08-06", "azure_deployment": "gpt-4o-2024-08-06"}' \ +
--benchmark_specific_utils_file='/home//autogen/python/samples/adas/utils_my_benchmark.py' ``` You can also increase the number of generations for the meta-agent to try creating. Note that if a generated agent system fails to compile, that generation is skipped but still counted toward the total (a potential bug, or at least confusing behavior). @@ -424,7 +426,7 @@ python3 adas.py --n_generation 100 --max_workers 1 ``` ## Results for DROP benchmark ### Best Agent System that the Meta-Agent discovered -See the files in the `adas/results` director for the full list of discovered Agent Systems. +See the files in the `adas/results` directory for the full list of discovered Agent Systems. #### Meta-Agent used o1-preview, and Base Agents used GPT3.5 ``` TODO: Testing/optimizations/reruns actively in progress.
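As background on how the fitness scores recorded in these results files can be summarized: the sample's `utils.py` exposes a `bootstrap_confidence_interval` helper. A minimal sketch of that idea follows; the actual helper's signature and return format may differ, and the function name here is illustrative.

```python
# Sketch: summarize per-example metric scores with a bootstrap confidence interval.
# The real helper in adas/utils.py may differ; this illustrates the technique only.
import random
from typing import List, Tuple


def bootstrap_ci(
    scores: List[float], n_resamples: int = 1000, alpha: float = 0.05
) -> Tuple[float, float]:
    means = []
    for _ in range(n_resamples):
        # Resample the per-example scores with replacement and record the mean.
        resample = [random.choice(scores) for _ in scores]
        means.append(sum(resample) / len(resample))
    means.sort()
    lower = means[int((alpha / 2) * n_resamples)]
    upper = means[min(int((1 - alpha / 2) * n_resamples), n_resamples - 1)]
    return lower, upper
```

A candidate agent system's fitness would then be its mean score over the validation examples, reported together with an interval like this one.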