diff --git a/README.md b/README.md index b5a564a4c..c1066056e 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ Welcome to join our community on - **[2024-06-11]** The RAG functionality is available for agents in **AgentScope** now! [**A quick introduction to RAG in AgentScope**](https://modelscope.github.io/agentscope/en/tutorial/210-rag.html) can help you equip your agent with external knowledge! -- **[2024-06-09]** We release **AgentScope** v0.0.5 now! In this new version, [**AgentScope Workstation**](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html) is open-sourced with the refactored [**AgentScope Studio**](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html)! +- **[2024-06-09]** We release **AgentScope** v0.0.5 now! In this new version, [**AgentScope Workstation**](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html) (the online version is running on [agentscope.io](https://agentscope.io)) is open-sourced with the refactored [**AgentScope Studio**](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html)!
diff --git a/README_ZH.md b/README_ZH.md index 58864e354..7f0d4290b 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -32,7 +32,7 @@ - **[2024-06-11]** RAG功能现在已经整合进 **AgentScope** 中! 大家可以根据 [**简要介绍AgentScope中的RAG**](https://modelscope.github.io/agentscope/en/tutorial/210-rag.html) ,让自己的agent用上外部知识! -- **[2024-06-09]** AgentScope v0.0.5 已经更新!在这个新版本中,我们开源了 [**AgentScope Workstation**](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html)! +- **[2024-06-09]** AgentScope v0.0.5 已经更新!在这个新版本中,我们开源了 [**AgentScope Workstation**](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html) (在线版本的网址是[agentscope.io](https://agentscope.io))!
diff --git a/docs/sphinx_doc/en/source/tutorial/104-usecase.md b/docs/sphinx_doc/en/source/tutorial/104-usecase.md index 94ba8ae92..7b0fddae1 100644 --- a/docs/sphinx_doc/en/source/tutorial/104-usecase.md +++ b/docs/sphinx_doc/en/source/tutorial/104-usecase.md @@ -46,9 +46,13 @@ To implement your own agent, you need to inherit `AgentBase` and implement the ` ```python from agentscope.agents import AgentBase +from agentscope.message import Msg + + +from typing import Optional, Union, Sequence class MyAgent(AgentBase): - def reply(self, x): + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # Do something here ... return x diff --git a/docs/sphinx_doc/en/source/tutorial/201-agent.md b/docs/sphinx_doc/en/source/tutorial/201-agent.md index 5b8563f4b..3fa916a88 100644 --- a/docs/sphinx_doc/en/source/tutorial/201-agent.md +++ b/docs/sphinx_doc/en/source/tutorial/201-agent.md @@ -39,7 +39,7 @@ class AgentBase(Operator): ) -> None: # ... [code omitted for brevity] - def observe(self, x: Union[dict, Sequence[dict]]) -> None: + def observe(self, x: Union[Msg, Sequence[Msg]]) -> None: # An optional method for updating the agent's internal state based on # messages it has observed. This method can be used to enrich the # agent's understanding and memory without producing an immediate # @@ -47,7 +47,7 @@ class AgentBase(Operator): if self.memory: self.memory.add(x) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # The core method to be implemented by custom agents. It defines the # logic for processing an input message and generating a suitable # response. @@ -86,7 +86,7 @@ Below, we provide usages of how to configure various agents from the AgentPool: * **Reply Method**: The `reply` method is where the main logic for processing input *message* and generating responses. ```python -def reply(self, x: dict = None) -> dict: +def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # Additional processing steps can occur here # Record the input if needed @@ -142,9 +142,9 @@ service_bot = DialogAgent(**dialog_agent_config) ```python def reply( self, - x: dict = None, + x: Optional[Union[Msg, Sequence[Msg]]] = None, required_keys: Optional[Union[list[str], str]] = None, -) -> dict: +) -> Msg: # Check if there is initial data to be added to memory if self.memory: self.memory.add(x) diff --git a/docs/sphinx_doc/en/source/tutorial/203-parser.md b/docs/sphinx_doc/en/source/tutorial/203-parser.md index 71fff4d59..bb2fae98e 100644 --- a/docs/sphinx_doc/en/source/tutorial/203-parser.md +++ b/docs/sphinx_doc/en/source/tutorial/203-parser.md @@ -211,7 +211,7 @@ In AgentScope, we achieve post-processing by calling the `to_content`, `to_memor ```python # ... - def reply(x: dict = None) -> None: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # ...
res = self.model(prompt, parse_func=self.parser.parse) diff --git a/docs/sphinx_doc/en/source/tutorial/204-service.md b/docs/sphinx_doc/en/source/tutorial/204-service.md index dad6fa3d9..49b8b3b16 100644 --- a/docs/sphinx_doc/en/source/tutorial/204-service.md +++ b/docs/sphinx_doc/en/source/tutorial/204-service.md @@ -262,6 +262,9 @@ import json import inspect from agentscope.service import ServiceResponse from agentscope.agents import AgentBase +from agentscope.message import Msg + +from typing import Optional, Union, Sequence def create_file(file_path: str, content: str = "") -> ServiceResponse: @@ -282,7 +285,7 @@ def create_file(file_path: str, content: str = "") -> ServiceResponse: class YourAgent(AgentBase): # ... [omitted for brevity] - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # ... [omitted for brevity] # construct a prompt to ask the agent to provide the parameters in JSON format diff --git a/docs/sphinx_doc/en/source/tutorial/209-prompt_opt.md b/docs/sphinx_doc/en/source/tutorial/209-prompt_opt.md index 9346ba5db..f1db3a248 100644 --- a/docs/sphinx_doc/en/source/tutorial/209-prompt_opt.md +++ b/docs/sphinx_doc/en/source/tutorial/209-prompt_opt.md @@ -397,6 +397,8 @@ from agentscope.agents import AgentBase from agentscope.prompt import SystemPromptOptimizer from agentscope.message import Msg +from typing import Optional, Union, Sequence + class MyAgent(AgentBase): def __init__( self, @@ -411,7 +413,7 @@ class MyAgent(AgentBase): # or model_or_model_config_name=self.model ) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: self.memory.add(x) prompt = self.model.format( diff --git a/docs/sphinx_doc/en/source/tutorial/210-rag.md b/docs/sphinx_doc/en/source/tutorial/210-rag.md index 867fdb2ec..39c3ecce0 100644 --- a/docs/sphinx_doc/en/source/tutorial/210-rag.md +++ b/docs/sphinx_doc/en/source/tutorial/210-rag.md @@ -190,6 +190,111 @@ RAG agent is an agent that can generate answers based on the retrieved knowledge Your agent will be equipped with a list of knowledge according to the `knowledge_id_list`. You can decide how to use the retrieved content and even update and refresh the index in your agent's `reply` function. +## (Optional) Setting up a local embedding model service + +For those who are interested in setting up a local embedding service, we provide the following example based on the +`sentence_transformers` package, which is a popular specialized package for embedding models (based on the `transformers` package and compatible with both HuggingFace and ModelScope models). +In this example, we will use one of the SOTA embedding models, `gte-Qwen2-7B-instruct`. + +* Step 1: Follow the instructions on [HuggingFace](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) or [ModelScope](https://www.modelscope.cn/models/iic/gte_Qwen2-7B-instruct) to download the embedding model. + (For those who cannot access HuggingFace directly, you may want to use a HuggingFace mirror by running the bash command + `export HF_ENDPOINT=https://hf-mirror.com` or adding the line `os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"` to your Python code.) +* Step 2: Set up the server. The following code is for reference.
+ +```python +import datetime +import argparse + +from flask import Flask +from flask import request +from sentence_transformers import SentenceTransformer + +def create_timestamp(format_: str = "%Y-%m-%d %H:%M:%S") -> str: + """Get current timestamp.""" + return datetime.datetime.now().strftime(format_) + +app = Flask(__name__) + +@app.route("/embedding/", methods=["POST"]) +def get_embedding() -> dict: + """Receive post request and return response""" + json = request.get_json() + + inputs = json.pop("inputs") + + global model + + if isinstance(inputs, str): + inputs = [inputs] + + embeddings = model.encode(inputs) + + return { + "data": { + "completion_tokens": 0, + "messages": {}, + "prompt_tokens": 0, + "response": { + "data": [ + { + "embedding": emb.astype(float).tolist(), + } + for emb in embeddings + ], + "created": "", + "id": create_timestamp(), + "model": "flask_model", + "object": "text_completion", + "usage": { + "completion_tokens": 0, + "prompt_tokens": 0, + "total_tokens": 0, + }, + }, + "total_tokens": 0, + "username": "", + }, + } + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model_name_or_path", type=str, required=True) + parser.add_argument("--device", type=str, default="auto") + parser.add_argument("--port", type=int, default=8000) + args = parser.parse_args() + + global model + + print("setting up for embedding model....") + model = SentenceTransformer( + args.model_name_or_path + ) + + app.run(port=args.port) +``` + +* Step 3: Start the server. +```bash +python setup_ms_service.py --model_name_or_path ${PATH_TO_gte_Qwen2_7B_instruct} +``` + + +Test whether the model is running successfully: +```python +from agentscope.models.post_model import PostAPIEmbeddingWrapper + + +model = PostAPIEmbeddingWrapper( + config_name="test_config", + api_url="http://127.0.0.1:8000/embedding/", + json_args={ + "max_length": 4096, + "temperature": 0.5 + } +) + +print(model("testing")) +``` [[Back to the top]](#210-rag-en) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md b/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md index 7727d0b0e..25608c845 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md @@ -47,9 +47,14 @@ ```python from agentscope.agents import AgentBase +from agentscope.message import Msg + +from typing import Optional, Union, Sequence + class MyAgent(AgentBase): - def reply(self, x): + + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # Do something here ... return x diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/201-agent.md b/docs/sphinx_doc/zh_CN/source/tutorial/201-agent.md index b6df3dc7a..10b29aeba 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/201-agent.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/201-agent.md @@ -48,7 +48,7 @@ class AgentBase(Operator): if self.memory: self.memory.add(x) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # The core method to be implemented by custom agents. It defines the # logic for processing an input message and generating a suitable # response.
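The change that repeats through every hunk in this patch is the `reply` signature: `x: dict = None` returning `dict` becomes `x: Optional[Union[Msg, Sequence[Msg]]] = None` returning `Msg`. Below is a minimal sketch of a downstream agent written against the new interface; the `EchoAgent` class and its echo logic are illustrative only, not part of the diff, and assume v0.0.5-era APIs such as `AgentBase.speak` and `self.memory.add`:

```python
from typing import Optional, Union, Sequence

from agentscope.agents import AgentBase
from agentscope.message import Msg


class EchoAgent(AgentBase):
    """Illustrative agent targeting the new reply signature."""

    def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg:
        # Memory accepts a Msg, a sequence of Msg, or None.
        if self.memory:
            self.memory.add(x)

        # Normalize: when a sequence arrives, reply to the last message.
        if x is not None and not isinstance(x, Msg):
            x = x[-1] if len(x) > 0 else None

        content = x.content if x is not None else "(no input)"
        msg = Msg(name=self.name, content=content, role="assistant")
        self.speak(msg)
        return msg
```

Returning a `Msg` rather than a bare `dict` is also why code in this patch switches from `x["content"]` to `x.content`, as in the Gomoku board agent hunks later in this diff.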
@@ -87,7 +87,7 @@ class AgentBase(Operator): * **回复方法**:`reply` 方法是处理输入消息和生成响应的主要逻辑所在 ```python -def reply(self, x: dict = None) -> dict: +def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # Additional processing steps can occur here # Record the input if needed @@ -143,9 +143,9 @@ service_bot = DialogAgent(**dialog_agent_config) ```python def reply( self, - x: dict = None, + x: Optional[Union[Msg, Sequence[Msg]]] = None, required_keys: Optional[Union[list[str], str]] = None, -) -> dict: +) -> Msg: # Check if there is initial data to be added to memory if self.memory: self.memory.add(x) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/203-parser.md b/docs/sphinx_doc/zh_CN/source/tutorial/203-parser.md index 1ae7fe863..8bad224e4 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/203-parser.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/203-parser.md @@ -209,7 +209,7 @@ AgentScope中,我们通过调用`to_content`,`to_memory`和`to_metadata`方 ```python # ... - def reply(x: dict = None) -> None: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # ... res = self.model(prompt, parse_func=self.parser.parse) diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/204-service.md b/docs/sphinx_doc/zh_CN/source/tutorial/204-service.md index 788d2bdad..187612bc2 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/204-service.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/204-service.md @@ -262,7 +262,7 @@ def create_file(file_path: str, content: str = "") -> ServiceResponse: class YourAgent(AgentBase): # ... [为简洁起见省略代码] - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # ... [为简洁起见省略代码] # 构造提示,让代理提供 JSON 格式的参数 diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/209-prompt_opt.md b/docs/sphinx_doc/zh_CN/source/tutorial/209-prompt_opt.md index 47a972da5..7b7cd72a1 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/209-prompt_opt.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/209-prompt_opt.md @@ -392,7 +392,7 @@ class MyAgent(AgentBase): # 或是 model_or_model_config_name=self.model ) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: self.memory.add(x) prompt = self.model.format( diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/210-rag.md b/docs/sphinx_doc/zh_CN/source/tutorial/210-rag.md index 7a0efd7d0..7921dd31d 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/210-rag.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/210-rag.md @@ -174,6 +174,113 @@ RAG 智能体是可以基于检索到的知识生成答案的智能体。 **自己搭建 RAG 智能体.** 只要您的智能体配置具有`knowledge_id_list`,您就可以将一个agent和这个列表传递给`KnowledgeBank.equip`;这样该agent就是被装配`knowledge_id`。 您可以在`reply`函数中自己决定如何从`Knowledge`对象中提取和使用信息,甚至通过`Knowledge`修改知识库。 + +## (拓展) 架设自己的embedding model服务 + +我们在此也为对架设本地embedding model服务感兴趣的用户提供以下样例。 +以下样例基于在embedding model领域很受欢迎的`sentence_transformers` 包(基于`transformers` 而且兼容HuggingFace和ModelScope的模型)。 +这个样例中,我们会使用当下最好的文本向量模型之一`gte-Qwen2-7B-instruct`。 + + +* 第一步: 遵循在 [HuggingFace](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) 或者 [ModelScope](https://www.modelscope.cn/models/iic/gte_Qwen2-7B-instruct)的指示下载模型。 + (如果无法直接从HuggingFace下载模型,也可以考虑使用HuggingFace镜像:bash命令行`export HF_ENDPOINT=https://hf-mirror.com`,或者在Python代码中加入`os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"`) +* 第二步: 设置服务器。以下是一段参考代码。 + +```python +import datetime +import argparse + +from flask import Flask +from flask import request +from sentence_transformers import SentenceTransformer + +def create_timestamp(format_: str = "%Y-%m-%d %H:%M:%S") -> str:
%H:%M:%S") -> str: + """Get current timestamp.""" + return datetime.datetime.now().strftime(format_) + +app = Flask(__name__) + +@app.route("/embedding/", methods=["POST"]) +def get_embedding() -> dict: + """Receive post request and return response""" + json = request.get_json() + + inputs = json.pop("inputs") + + global model + + if isinstance(inputs, str): + inputs = [inputs] + + embeddings = model.encode(inputs) + + return { + "data": { + "completion_tokens": 0, + "messages": {}, + "prompt_tokens": 0, + "response": { + "data": [ + { + "embedding": emb.astype(float).tolist(), + } + for emb in embeddings + ], + "created": "", + "id": create_timestamp(), + "model": "flask_model", + "object": "text_completion", + "usage": { + "completion_tokens": 0, + "prompt_tokens": 0, + "total_tokens": 0, + }, + }, + "total_tokens": 0, + "username": "", + }, + } + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model_name_or_path", type=str, required=True) + parser.add_argument("--device", type=str, default="auto") + parser.add_argument("--port", type=int, default=8000) + args = parser.parse_args() + + global model + + print("setting up for embedding model....") + model = SentenceTransformer( + args.model_name_or_path + ) + + app.run(port=args.port) +``` + +* 第三部:启动服务器。 +```bash +python setup_ms_service.py --model_name_or_path {$PATH_TO_gte_Qwen2_7B_instruct} +``` + + +测试服务是否成功启动。 +```python +from agentscope.models.post_model import PostAPIEmbeddingWrapper + + +model = PostAPIEmbeddingWrapper( + config_name="test_config", + api_url="http://127.0.0.1:8000/embedding/", + json_args={ + "max_length": 4096, + "temperature": 0.5 + } +) + +print(model("testing")) +``` + [[回到顶部]](#210-rag-zh) diff --git a/examples/conversation_with_RAG_agents/README.md b/examples/conversation_with_RAG_agents/README.md index c8f9fb5c1..2c565abae 100644 --- a/examples/conversation_with_RAG_agents/README.md +++ b/examples/conversation_with_RAG_agents/README.md @@ -29,9 +29,9 @@ However, you are welcome to replace the Dashscope language and embedding model w ``` -* **AS studio:** If you want to have more organized, clean UI, you can also run with our `as_studio`. +* **AS gradio:** If you want to have more organized, clean UI, you can also run with our `as_gradio`. 
```bash - as_studio ./rag_example.py + as_gradio ./rag_example.py ``` ### Agents in the example diff --git a/examples/conversation_with_langchain/conversation_with_langchain.py b/examples/conversation_with_langchain/conversation_with_langchain.py index 5d9f9c6b3..16b678892 100644 --- a/examples/conversation_with_langchain/conversation_with_langchain.py +++ b/examples/conversation_with_langchain/conversation_with_langchain.py @@ -2,7 +2,7 @@ """A simple example of using langchain to create an assistant agent in AgentScope.""" import os -from typing import Optional +from typing import Optional, Union, Sequence from langchain_openai import OpenAI from langchain.memory import ConversationBufferMemory @@ -52,7 +52,7 @@ def __init__(self, name: str) -> None: ) # [END] BY LANGCHAIN - def reply(self, x: Optional[dict] = None) -> Msg: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: # [START] BY LANGCHAIN # Generate response diff --git a/examples/conversation_with_mentions/README.md b/examples/conversation_with_mentions/README.md index 858915710..deea8aa41 100644 --- a/examples/conversation_with_mentions/README.md +++ b/examples/conversation_with_mentions/README.md @@ -18,7 +18,7 @@ These models are tested in this example. For other models, some modifications ma Fill the next cell to meet the following requirements: - Set your `api_key` in the `configs/model_configs.json` file -- Optional: Launch agentscope studio with `as_studio main.py` +- Optional: Launch the Gradio UI with `as_gradio main.py` ## How to Use diff --git a/examples/conversation_with_swe-agent/swe_agent.py b/examples/conversation_with_swe-agent/swe_agent.py index 3b55431d5..6d2c49424 100644 --- a/examples/conversation_with_swe-agent/swe_agent.py +++ b/examples/conversation_with_swe-agent/swe_agent.py @@ -11,7 +11,7 @@ from agentscope.message import Msg from agentscope.exception import ResponseParsingError from agentscope.parsers import MarkdownJsonDictParser -from typing import List, Callable +from typing import List, Callable, Optional, Union, Sequence import json from agentscope.service import ( ServiceFactory, @@ -206,7 +206,7 @@ def step(self) -> Msg: self.running_memory.append(str(action) + str(obs)) return msg_res - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: action_name = None self.main_goal = x.content while not action_name == "exit": diff --git a/examples/distributed_debate/README.md b/examples/distributed_debate/README.md index 502485806..b5bca2768 100644 --- a/examples/distributed_debate/README.md +++ b/examples/distributed_debate/README.md @@ -1,54 +1,80 @@ -# Distributed debate competition +# Distributed Debate Competition -This example simulate a debate competition with three participant agents, including the affirmative side (**Pro**), the negative side (**Con**), and the adjudicator (**Judge**). -**You can join in the debate as Pro or Con or both.** +This example demonstrates: +- How to simulate a debate competition with three participant agents +- How to allow human participation in the debate -Pro believes that AGI can be achieved using the GPT model framework, while Con contests it. -Judge listens to both sides' arguments and provides an analytical judgment on which side presented a more compelling and reasonable case. +## Background -Each agent is an independent process and can run on different machines. -You can join the debate as Pro or Con by providing the `--is-human` argument.
-Messages generated by any agents can be observed by other agents in the debate. +This example simulates a debate competition with three participant agents: +1. The affirmative side (**Pro**) +2. The negative side (**Con**) +3. The adjudicator (**Judge**) -Before running the example, please install the distributed version of AgentScope, fill in your model configuration correctly in `configs/model_configs.json`, and modify the `model_config_name` field in `configs/debate_agent_configs.json` accordingly. +The debate topic is whether AGI can be achieved using the GPT model framework. Pro argues in favor, while Con contests it. Judge listens to both sides' arguments and provides an analytical judgment on which side presented a more compelling case. -### Step 1: setup Pro, Con agent servers +Each agent is an independent process and can run on different machines. Human participants can join as Pro or Con. +## Tested Models + +These models are tested in this example. For other models, some modifications may be needed. +- Ollama Chat (qwen2:1.5b) +- Dashscope Chat (qwen-Max) +- Gemini Chat (gemini-pro) + +## Prerequisites + +Before running the example: +- Install the distributed version of AgentScope by running +```bash +# On windows +pip install -e .[distribute] +# On mac / linux +pip install -e .\[distribute\] +``` +- Fill in your model configuration correctly in `configs/model_configs.json` +- Modify the `model_config_name` field in `configs/debate_agent_configs.json` accordingly +- Ensure the specified ports are available and IP addresses are accessible + +## Setup and Execution + +### Step 1: Setup Pro and Con agent servers + +For an LLM-based Pro: ```shell cd examples/distributed_debate -# setup LLM-based Pro python distributed_debate.py --role pro --pro-host localhost --pro-port 12011 -# or join the debate as Pro by yourself -# python distributed_debate.py --role pro --pro-host localhost --pro-port 12011 --is-human ``` +(Alternatively) for human participation as Pro: +```shell +python distributed_debate.py --role pro --pro-host localhost --pro-port 12011 --is-human +``` + +For an LLM-based Con: ```shell -# setup LLM-base Con python distributed_debate.py --role con --con-host localhost --con-port 12012 -# or join the debate as Con by yourself -# python distributed_debate.py --role con --con-host localhost --con-port 12012 --is-human ``` -> Please make sure the ports are available and the ip addresses are accessible, here we use localhost as an example. -> If you run all agent servers on the same machine, you can ignore the host field, it will use localhost by default. +(Alternatively) for human participation as Con: +```shell +python distributed_debate.py --role con --con-host localhost --con-port 12012 --is-human +``` -### step 2: run the main process +### Step 2: Run the main process ```shell -# setup main (Judge is in it) python distributed_debate.py --role main --pro-host localhost --pro-port 12011 --con-host localhost --con-port 12012 ``` -### step 3: watch or join in the debate in your terminal +### Step 3: Watch or join the debate in your terminal -Suppose you join the debate as Con, you will see the following in your command line. +If you join as Con, you'll see something like: ```text System: Welcome to the debate on whether Artificial General Intelligence (AGI) can be achieved ... - Pro: Thank you. I argue that AGI can be achieved using the GPT model framework. ... 
- User Input: ``` diff --git a/examples/distributed_debate/user_proxy_agent.py b/examples/distributed_debate/user_proxy_agent.py index cb27f3386..37f4b6d28 100644 --- a/examples/distributed_debate/user_proxy_agent.py +++ b/examples/distributed_debate/user_proxy_agent.py @@ -4,6 +4,7 @@ from typing import Optional from agentscope.agents import UserAgent +from agentscope.message import Msg class UserProxyAgent(UserAgent): @@ -11,9 +12,9 @@ class UserProxyAgent(UserAgent): def reply( # type: ignore[override] self, - x: dict = None, + x: Optional[Union[Msg, Sequence[Msg]]] = None, required_keys: Optional[Union[list[str], str]] = None, - ) -> dict: + ) -> Msg: """ Reply with `self.speak(x)` """ diff --git a/examples/distributed_parallel_optimization/answerer_agent.py b/examples/distributed_parallel_optimization/answerer_agent.py index 56ba014e9..e44551d01 100644 --- a/examples/distributed_parallel_optimization/answerer_agent.py +++ b/examples/distributed_parallel_optimization/answerer_agent.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- """Answerer Agent.""" +from typing import Optional, Union, Sequence from agentscope.message import Msg from agentscope.agents import AgentBase @@ -22,7 +23,7 @@ def __init__( use_memory=False, ) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: response = load_web( url=x.url, keep_raw=False, diff --git a/examples/distributed_parallel_optimization/searcher_agent.py b/examples/distributed_parallel_optimization/searcher_agent.py index 9a1c9b435..eb1ad2f23 100644 --- a/examples/distributed_parallel_optimization/searcher_agent.py +++ b/examples/distributed_parallel_optimization/searcher_agent.py @@ -2,6 +2,8 @@ """Searcher agent.""" from functools import partial +from typing import Optional, Union, Sequence + from agentscope.message import Msg from agentscope.agents import AgentBase from agentscope.service import google_search, bing_search @@ -56,7 +58,7 @@ def __init__( assert api_key is not None, "bing search requires 'api_key'" self.search = partial(bing_search, api_key=api_key) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: prompt = self.model.format( Msg(name="system", role="system", content=self.sys_prompt), x, diff --git a/examples/distributed_simulation/participant.py b/examples/distributed_simulation/participant.py index dac3d17bf..f017d4de3 100644 --- a/examples/distributed_simulation/participant.py +++ b/examples/distributed_simulation/participant.py @@ -3,6 +3,8 @@ import random import time import re +from typing import Optional, Union, Sequence + from loguru import logger from agentscope.message import Msg @@ -30,7 +32,7 @@ def generate_random_response(self) -> str: time.sleep(self.sleep_time) return str(random.randint(0, self.max_value)) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Generate a random value""" # generate a response in content response = self.generate_random_response() @@ -74,7 +76,7 @@ def parse_value(self, txt: str) -> str: else: return numbers[-1] - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Generate a value by LLM""" if self.memory: self.memory.add(x) @@ -134,7 +136,7 @@ def __init__( for config in part_configs ] - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: results = [] msg = Msg( 
name="moderator", diff --git a/examples/game_gomoku/code/board_agent.py b/examples/game_gomoku/code/board_agent.py index 6cbef4ced..b0cde430f 100644 --- a/examples/game_gomoku/code/board_agent.py +++ b/examples/game_gomoku/code/board_agent.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- """A board agent class that can host a Gomoku game, and a function to convert the board to an image.""" +from typing import Optional, Union, Sequence import numpy as np from matplotlib import pyplot as plt, patches @@ -81,7 +82,7 @@ def __init__(self, name: str) -> None: # Record the status of the game self.game_end = False - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: if x is None: # Beginning of the game content = ( @@ -89,12 +90,12 @@ def reply(self, x: dict = None) -> dict: "Please make your move." ) else: - row, col = x["content"] + row, col = x.content self.assert_valid_move(row, col) # change the board - self.board[row, col] = NAME_TO_PIECE[x["name"]] + self.board[row, col] = NAME_TO_PIECE[x.name] # check if the game ends if self.check_draw(): @@ -102,15 +103,15 @@ def reply(self, x: dict = None) -> dict: self.game_end = True else: next_player_name = ( - NAME_BLACK if x["name"] == NAME_WHITE else NAME_WHITE + NAME_BLACK if x.name == NAME_WHITE else NAME_WHITE ) content = CURRENT_BOARD_PROMPT_TEMPLATE.format( board=self.board2text(), player=next_player_name, ) - if self.check_win(row, col, NAME_TO_PIECE[x["name"]]): - content = f"The game ends, {x['name']} wins!" + if self.check_win(row, col, NAME_TO_PIECE[x.name]): + content = f"The game ends, {x.name} wins!" self.game_end = True msg_host = Msg(self.name, content, role="assistant") diff --git a/examples/game_gomoku/code/gomoku_agent.py b/examples/game_gomoku/code/gomoku_agent.py index 16e05cc36..0615f7909 100644 --- a/examples/game_gomoku/code/gomoku_agent.py +++ b/examples/game_gomoku/code/gomoku_agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """A Gomoku agent that can play the game with another agent.""" -from typing import Optional +from typing import Optional, Union, Sequence import json @@ -48,7 +48,7 @@ def __init__( self.memory.add(Msg("system", sys_prompt, role="system")) - def reply(self, x: Optional[dict] = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: if self.memory: self.memory.add(x) diff --git a/examples/game_gomoku/main.ipynb b/examples/game_gomoku/main.ipynb index b3b31831a..a0e3d996f 100644 --- a/examples/game_gomoku/main.ipynb +++ b/examples/game_gomoku/main.ipynb @@ -130,6 +130,7 @@ "import numpy as np\n", "from agentscope.message import Msg\n", "from agentscope.agents import AgentBase\n", + "from typing import Optional, Union, Sequence\n", "\n", "CURRENT_BOARD_PROMPT_TEMPLATE = \"\"\"The current board is as follows:\n", "{board}\n", @@ -158,7 +159,7 @@ " # Record the status of the game\n", " self.game_end = False\n", " \n", - " def reply(self, x: dict = None) -> dict:\n", + " def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg:\n", " if x is None:\n", " # Beginning of the game\n", " content = (\n", @@ -263,7 +264,7 @@ "source": [ "import json\n", "from agentscope.models import ModelResponse\n", - "from typing import Optional\n", + "from typing import Optional, Union, Sequence\n", "\n", "SYS_PROMPT_TEMPLATE = \"\"\"\n", "You're a skillful Gomoku player. 
You should play against your opponent according to the following rules:\n", @@ -304,7 +305,7 @@ " \n", " self.memory.add(Msg(\"system\", sys_prompt, role=\"system\"))\n", " \n", - " def reply(self, x: Optional[dict] = None) -> dict:\n", + " def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg:\n", " if self.memory:\n", " self.memory.add(x)\n", " \n", diff --git a/src/agentscope/agents/agent.py b/src/agentscope/agents/agent.py index 7ae48a5b7..a7a25485e 100644 --- a/src/agentscope/agents/agent.py +++ b/src/agentscope/agents/agent.py @@ -252,15 +252,16 @@ def register_agent_class(cls, agent_class: Type[AgentBase]) -> None: else: cls._registry[agent_class_name] = agent_class - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Define the actions taken by this agent. Args: - x (`dict`, defaults to `None`): - Dialog history and some environment information + x (`Optional[Union[Msg, Sequence[Msg]]]`, defaults to `None`): + The input message(s) to the agent, which also can be omitted if + the agent doesn't need any input. Returns: - The agent's response to the input. + `Msg`: The output message generated by the agent. Note: Given that some agents are in an adversarial environment, diff --git a/src/agentscope/agents/dialog_agent.py b/src/agentscope/agents/dialog_agent.py index 1ece870b6..28f50d58a 100644 --- a/src/agentscope/agents/dialog_agent.py +++ b/src/agentscope/agents/dialog_agent.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """A general dialog agent.""" -from typing import Optional +from typing import Optional, Union, Sequence from ..message import Msg from .agent import AgentBase @@ -42,21 +42,19 @@ def __init__( memory_config=memory_config, ) - # TODO change typing from dict to MSG - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Reply function of the agent. Processes the input data, generates a prompt using the current dialogue memory and system prompt, and invokes the language model to produce a response. The response is then formatted and added to the dialogue memory. Args: - x (`dict`, defaults to `None`): - A dictionary representing the user's input to the agent. This - input is added to the dialogue memory if provided. Defaults to - None. + x (`Optional[Union[Msg, Sequence[Msg]]]`, defaults to `None`): + The input message(s) to the agent, which also can be omitted if + the agent doesn't need any input. + Returns: - A dictionary representing the message generated by the agent in - response to the user's input. + `Msg`: The output message generated by the agent. """ # record the input if needed if self.memory: diff --git a/src/agentscope/agents/dict_dialog_agent.py b/src/agentscope/agents/dict_dialog_agent.py index eb16690e0..5ce4ce065 100644 --- a/src/agentscope/agents/dict_dialog_agent.py +++ b/src/agentscope/agents/dict_dialog_agent.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """An agent that replies in a dictionary format.""" -from typing import Optional +from typing import Optional, Union, Sequence from ..message import Msg from .agent import AgentBase @@ -64,7 +64,7 @@ def set_parser(self, parser: ParserBase) -> None: """ self.parser = parser - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Reply function of the agent. 
Processes the input data, generates a prompt using the current dialogue memory and system prompt, and invokes the language @@ -72,14 +72,13 @@ def reply(self, x: dict = None) -> dict: and added to the dialogue memory. Args: - x (`dict`, defaults to `None`): - A dictionary representing the user's input to the agent. - This input is added to the dialogue memory if provided. + x (`Optional[Union[Msg, Sequence[Msg]]]`, defaults to `None`): + The input message(s) to the agent, which also can be omitted if + the agent doesn't need any input. + + Returns: - A dictionary representing the message generated by the agent in - response to the user's input. It contains at least a 'speak' key - with the textual response and may include other keys such as - 'agreement' if provided by the language model. + `Msg`: The output message generated by the agent. Raises: `json.decoder.JSONDecodeError`: diff --git a/src/agentscope/agents/rag_agent.py b/src/agentscope/agents/rag_agent.py index 59012b17c..63a23fdcd 100644 --- a/src/agentscope/agents/rag_agent.py +++ b/src/agentscope/agents/rag_agent.py @@ -6,7 +6,7 @@ Notice, this is a Beta version of RAG agent. """ -from typing import Any +from typing import Any, Optional, Union, Sequence from loguru import logger from agentscope.agents.agent import AgentBase @@ -82,7 +82,7 @@ def __init__( self.recent_n_mem_for_retrieve = recent_n_mem_for_retrieve self.description = kwargs.get("description", "") - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """ Reply function of the RAG agent. Processes the input data, @@ -93,13 +93,12 @@ def reply(self, x: dict = None) -> dict: response is then formatted and added to the dialogue memory. Args: - x (`dict`, defaults to `None`): - A dictionary representing the user's input to the agent. This - input is added to the memory if provided. Defaults to - None. + x (`Optional[Union[Msg, Sequence[Msg]]]`, defaults to `None`): + The input message(s) to the agent, which also can be omitted if + the agent doesn't need any input. + Returns: - A dictionary representing the message generated by the agent in - response to the user's input. + `Msg`: The output message generated by the agent. """ retrieved_docs_to_string = "" # record the input if needed @@ -118,7 +117,7 @@ def reply(self, x: dict = None) -> dict: else str(history) ) elif x is not None: - query = x["content"] + query = x.content else: query = "" diff --git a/src/agentscope/agents/react_agent.py b/src/agentscope/agents/react_agent.py index cdc81788b..cf0522e70 100644 --- a/src/agentscope/agents/react_agent.py +++ b/src/agentscope/agents/react_agent.py @@ -3,7 +3,7 @@ and act iteratively to solve problems. More details can be found in the paper https://arxiv.org/abs/2210.03629. """ -from typing import Any +from typing import Any, Optional, Union, Sequence from loguru import logger @@ -140,7 +140,7 @@ def __init__( keys_to_content=True if self.verbose else "speak", ) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """The reply function that achieves the ReAct algorithm. 
The more details please refer to https://arxiv.org/abs/2210.03629""" diff --git a/src/agentscope/agents/rpc_agent.py b/src/agentscope/agents/rpc_agent.py index ec6cc39ba..b175fa781 100644 --- a/src/agentscope/agents/rpc_agent.py +++ b/src/agentscope/agents/rpc_agent.py @@ -6,6 +6,7 @@ from agentscope.message import ( PlaceholderMessage, serialize, + Msg, ) from agentscope.rpc import RpcAgentClient from agentscope.server.launcher import RpcAgentServerLauncher @@ -105,7 +106,7 @@ def _launch_server(self) -> None: ) self.client.create_agent(self.agent_configs) - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: if self.client is None: self._launch_server() return PlaceholderMessage( diff --git a/src/agentscope/agents/text_to_image_agent.py b/src/agentscope/agents/text_to_image_agent.py index f31fb55df..00519a404 100644 --- a/src/agentscope/agents/text_to_image_agent.py +++ b/src/agentscope/agents/text_to_image_agent.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- """An agent that convert text to image.""" -from typing import Optional +from typing import Optional, Union, Sequence + +from loguru import logger from .agent import AgentBase from ..message import Msg @@ -42,7 +44,12 @@ def __init__( memory_config=memory_config, ) - def reply(self, x: dict = None) -> dict: + logger.warning( + "The `TextToImageAgent` will be deprecated in v0.0.6, " + "please use `text_to_image` service and `ReActAgent` instead.", + ) + + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: if self.memory: self.memory.add(x) if x is None: @@ -50,13 +57,16 @@ def reply(self, x: dict = None) -> dict: if self.memory and self.memory.size() > 0: x = self.memory.get_memory()[-1] else: - # if no message find, just return None - return {} + return Msg( + self.name, + content="Please provide a text prompt to generate image.", + role="assistant", + ) image_urls = self.model(x.content).image_urls # TODO: optimize the construction of content msg = Msg( self.name, - content="This is the generated image ", + content="This is the generated image", role="assistant", url=image_urls, ) diff --git a/src/agentscope/agents/user_agent.py b/src/agentscope/agents/user_agent.py index c18889229..b76cf28d5 100644 --- a/src/agentscope/agents/user_agent.py +++ b/src/agentscope/agents/user_agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """User Agent class""" import time -from typing import Union +from typing import Union, Sequence from typing import Optional from loguru import logger @@ -33,10 +33,10 @@ def __init__(self, name: str = "User", require_url: bool = False) -> None: def reply( self, - x: dict = None, + x: Optional[Union[Msg, Sequence[Msg]]] = None, required_keys: Optional[Union[list[str], str]] = None, timeout: Optional[int] = None, - ) -> dict: + ) -> Msg: """ Processes the input provided by the user and stores it in memory, potentially formatting it with additional provided details. @@ -47,9 +47,9 @@ def reply( added to the object's memory. Arguments: - x (`dict`, defaults to `None`): - A dictionary containing initial data to be added to memory. - Defaults to None. + x (`Optional[Union[Msg, Sequence[Msg]]]`, defaults to `None`): + The input message(s) to the agent, which also can be omitted if + the agent doesn't need any input. required_keys \ (`Optional[Union[list[str], str]]`, defaults to `None`): Strings that requires user to input, which will be used as @@ -59,9 +59,7 @@ def reply( for no limit. 
Returns: - `dict`: A dictionary representing the message object that contains - the user's input and any additional details. This is also - stored in the object's memory. + `Msg`: The output message generated by the agent. """ if self.memory: self.memory.add(x) @@ -149,4 +147,4 @@ def speak( f"object, got {type(content)} instead.", ) - logger.chat(msg) + logger.chat(msg, disable_gradio=True) diff --git a/src/agentscope/logging.py b/src/agentscope/logging.py index 61a89e43d..eff7f21b4 100644 --- a/src/agentscope/logging.py +++ b/src/agentscope/logging.py @@ -73,7 +73,7 @@ def _get_speaker_color(speaker: str) -> tuple[str, str]: def _chat( message: dict, *args: Any, - disable_studio: bool = False, + disable_gradio: bool = False, **kwargs: Any, ) -> None: """ @@ -142,15 +142,15 @@ def _chat( **kwargs, ) - if hasattr(thread_local_data, "uid") and not disable_studio: - log_studio(message, thread_local_data.uid, **kwargs) + if hasattr(thread_local_data, "uid") and not disable_gradio: + log_gradio(message, thread_local_data.uid, **kwargs) return logger.log(LEVEL_DISPLAY_MSG, message, *args, **kwargs) logger.log(LEVEL_SAVE_LOG, message, *args, **kwargs) -def log_studio(message: dict, uid: str, **kwargs: Any) -> None: +def log_gradio(message: dict, uid: str, **kwargs: Any) -> None: """Send chat message to studio. Args: diff --git a/src/agentscope/prompt/_prompt_comparer.py b/src/agentscope/prompt/_prompt_comparer.py index c1d6c4dce..bc382eae2 100644 --- a/src/agentscope/prompt/_prompt_comparer.py +++ b/src/agentscope/prompt/_prompt_comparer.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """The Abtest module to show how different system prompt performs""" -from typing import List +from typing import List, Optional, Union, Sequence from loguru import logger from agentscope.models import load_model_by_config_name @@ -41,7 +41,7 @@ def enable_display(self) -> None: """Enable the display of the output message.""" self.display = True - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Reply the message with the given system prompt.""" self.memory.add(x) diff --git a/src/agentscope/studio/static/css/workstation.css b/src/agentscope/studio/static/css/workstation.css index 9ad7ac91a..b7fe4f555 100644 --- a/src/agentscope/studio/static/css/workstation.css +++ b/src/agentscope/studio/static/css/workstation.css @@ -663,10 +663,10 @@ pre[class*="language-"] { .new-action { cursor: pointer; padding: 8px; - margin-left: 10px; + margin-left: auto; border: none; border-radius: 20px; - background-color: #007bff; + background-color: var(--main-color); color: white; transition: all 0.3s ease; font-size: 14px; @@ -682,12 +682,12 @@ pre[class*="language-"] { .new-action:hover, .new-action:focus { width: auto; padding: 8px 16px; - background-color: #0056b3; + background-color: var(--main-color); box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2); } .new-action:active { - background-color: #00488a; + background-color: var(--main-color); box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); } @@ -724,7 +724,7 @@ pre[class*="language-"] { letter-spacing: 0.05em; line-height: 1.6; color: #333; - background-color: #f8f9fa; + background-color: rgba(173, 216, 230, 0.5); box-shadow: 5px 5px 15px rgba(0, 0, 0, 0.1); cursor: pointer; } @@ -758,14 +758,14 @@ pre[class*="language-"] { margin: 4px; border: none; border-radius: 4px; - background-color: #007bff; + background-color: var(--main-color); color: white; cursor: pointer; font-size: 16px; } #import-prev:hover, #import-next:hover, 
#import-skip:hover, #import-quit:hover { - background-color: #0056b3; + background-color: var(--main-color); transform: translateY(-2px); } diff --git a/src/agentscope/studio/static/js/workstation.js b/src/agentscope/studio/static/js/workstation.js index 88d71bfc1..e5c303eda 100644 --- a/src/agentscope/studio/static/js/workstation.js +++ b/src/agentscope/studio/static/js/workstation.js @@ -1500,7 +1500,7 @@ function showExportPyPopup() { '

Save as main.py<br>' + 'Then run the following command in your terminal:<br>' + '<code>python main.py</code><br>' + - 'or <code>as_studio main.py</code><br>' + + 'or <code>as_gradio main.py</code><br>' + '<pre><code class="language-python">' + data.py_code + '</code></pre>
', @@ -1863,6 +1863,7 @@ function importExample(index) { editor.import(dataToImport); Object.keys(dataToImport.drawflow.Home.data).forEach((nodeId) => { setupNodeListeners(nodeId); + setupTextInputListeners(nodeId); const nodeElement = document.getElementById(`node-${nodeId}`); if (nodeElement) { const copyButton = nodeElement.querySelector('.button.copy-button'); @@ -1879,11 +1880,13 @@ function importExample(index) { function importExample_step(index) { fetchExample(index, data => { const dataToImportStep = data.json; - clearModuleSelected(); - descriptionStep = ["Readme", "Model", "UserAgent", - "DialogAgent"]; - initializeImport(dataToImportStep); - }) + addHtmlAndReplacePlaceHolderBeforeImport(dataToImportStep).then(() => { + clearModuleSelected(); + descriptionStep = ["Readme", "Model", "UserAgent", + "DialogAgent"]; + initializeImport(dataToImportStep); + }) + }); } @@ -1907,13 +1910,13 @@ function createElement(tag, id, html = '', parent = document.body) { function initializeImport(data) { - ['buttons-container', 'buttons-container-html'].forEach(cls => { + ['menu-btn', 'menu-btn-svg'].forEach(cls => { let containers = document.getElementsByClassName(cls); Array.from(containers).forEach(container => container.style.display = 'none'); }); createElement('div', 'left-sidebar-blur', '', document.body).style.cssText = ` - position: fixed; top: 67px; left: 0; bottom: 0; width: 250px; + position: fixed; top: 60px; left: 0; bottom: 0; width: 250px; background: rgba(128, 128, 128, 0.7); filter: blur(2px); z-index: 1000; cursor: not-allowed; `; @@ -1926,19 +1929,19 @@ function initializeImport(data) { const importButtonsDiv = document.getElementById('import-buttons'); createElement('div', 'step-info', '', importButtonsDiv); createElement('button', 'import-prev', - ' {{_("Previous")}}', + ' Previous', importButtonsDiv).onclick = importPreviousComponent; createElement('button', 'import-next', - ' {{_("Next")}}', + ' Next', importButtonsDiv).onclick = importNextComponent; createElement('button', 'import-skip', - ' {{_("Skip")}}', + ' Skip', importButtonsDiv).onclick = importSkipComponent; createElement('button', 'import-quit', - ' {{_("Quit")}}', + ' Quit', importButtonsDiv).onclick = importQuitComponent; createElement('div', 'step-warning', - '{{_("Caution: You are currently in the tutorial mode where modifications are restricted.
Please click Quit to exit and start creating your custom multi-agent applications.")}}', document.body); + 'Caution: You are currently in the tutorial mode where modifications are restricted.
Please click Quit to exit and start creating your custom multi-agent applications.', document.body); accumulatedImportData = {}; currentImportIndex = 0; @@ -1962,6 +1965,7 @@ function importPreviousComponent() { function importNextComponent() { const nodeId = importQueue[currentImportIndex]; accumulatedImportData[nodeId] = dataToImportStep.drawflow.Home.data[nodeId]; + editor.import({drawflow: {Home: {data: accumulatedImportData}}}); currentImportIndex++; updateStepInfo(); @@ -1980,7 +1984,7 @@ function importSkipComponent() { function importQuitComponent() { clearModuleSelected(); - ['buttons-container', 'buttons-container-html'].forEach(cls => { + ['menu-btn', 'menu-btn-svg'].forEach(cls => { let containers = document.getElementsByClassName(cls); Array.from(containers).forEach(container => container.style.display = ''); }); @@ -1991,7 +1995,7 @@ function updateStepInfo() { let stepInfoDiv = document.getElementById('step-info'); if (stepInfoDiv && currentImportIndex > 0) { stepInfoDiv.innerHTML = - `{{_("Current Step")}} (${currentImportIndex}/${importQueue.length})
${descriptionStep[currentImportIndex - 1]}`; + `Current Step (${currentImportIndex}/${importQueue.length})
${descriptionStep[currentImportIndex - 1]}`; } else if (stepInfoDiv) { stepInfoDiv.innerHTML = 'No steps to display.'; } diff --git a/src/agentscope/studio/templates/workstation.html b/src/agentscope/studio/templates/workstation.html index 1bc264937..741a7848c 100644 --- a/src/agentscope/studio/templates/workstation.html +++ b/src/agentscope/studio/templates/workstation.html @@ -51,15 +51,35 @@ diff --git a/tests/msghub_test.py b/tests/msghub_test.py index 75f61dccc..9859c364e 100644 --- a/tests/msghub_test.py +++ b/tests/msghub_test.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """ Unit test for msghub.""" import unittest -from typing import Optional +from typing import Optional, Union, Sequence from agentscope.agents import AgentBase from agentscope import msghub @@ -11,7 +11,7 @@ class TestAgent(AgentBase): """Test agent class for msghub.""" - def reply(self, x: Optional[dict] = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Reply function for agent.""" if x is not None: self.memory.add(x) diff --git a/tests/rpc_agent_test.py b/tests/rpc_agent_test.py index 321087a44..7462dd388 100644 --- a/tests/rpc_agent_test.py +++ b/tests/rpc_agent_test.py @@ -6,6 +6,8 @@ import time import os import shutil +from typing import Optional, Union, Sequence + from loguru import logger import agentscope @@ -26,7 +28,7 @@ def __init__(self, **kwargs) -> None: # type: ignore[no-untyped-def] super().__init__(**kwargs) self.id = 0 - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Response after 2s""" x.id = self.id self.id += 1 @@ -37,7 +39,7 @@ def reply(self, x: dict = None) -> dict: class DemoRpcAgentAdd(AgentBase): """A demo Rpc agent for test usage""" - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """add the value, wait 1s""" x.content["value"] += 1 time.sleep(1) @@ -47,7 +49,7 @@ def reply(self, x: dict = None) -> dict: class DemoLocalAgentAdd(AgentBase): """A demo local agent for test usage""" - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """add the value, wait 1s""" x.content["value"] += 1 time.sleep(1) @@ -57,7 +59,7 @@ def reply(self, x: dict = None) -> dict: class DemoRpcAgentWithMemory(AgentBase): """A demo Rpc agent that count its memory""" - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: self.memory.add(x) msg = Msg( name=self.name, @@ -72,7 +74,7 @@ def reply(self, x: dict = None) -> dict: class DemoRpcAgentWithMonitor(AgentBase): """A demo Rpc agent that use monitor""" - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: monitor = MonitorFactory.get_monitor() try: monitor.update({"msg_num": 1}) @@ -103,7 +105,7 @@ def __init__(self, name: str, value: int) -> None: super().__init__(name) self.value = value - def reply(self, _: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: time.sleep(1) return Msg( name=self.name, @@ -126,7 +128,7 @@ def __init__( super().__init__(name, to_dist=to_dist) self.agents = agents - def reply(self, _: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: result = [] stime = time.time() for agent in self.agents: @@ -148,7 +150,7 @@ def reply(self, _: dict = None) -> dict: class DemoErrorAgent(AgentBase): """A 
demo Rpc agent that raise Error""" - def reply(self, x: dict = None) -> dict: + def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: raise RuntimeError("Demo Error")
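As a closing note on the embedding service introduced in the RAG tutorial hunks above: the Flask endpoint can be smoke-tested without AgentScope at all. The sketch below is hedged — it assumes the Step 2 server is running locally on port 8000, and the `requests` package is an assumption of this example, not a dependency of the patch:

```python
# Smoke test for the Flask embedding server sketched in the RAG tutorial.
# Assumes the server is listening on 127.0.0.1:8000; `requests` is an
# external dependency used only for this check.
import requests

resp = requests.post(
    "http://127.0.0.1:8000/embedding/",
    json={"inputs": ["testing"]},
    timeout=60,
)
resp.raise_for_status()

# The key path mirrors the response dict built in get_embedding():
# data -> response -> data -> [{"embedding": [...]}].
vectors = resp.json()["data"]["response"]["data"]
print(len(vectors), "embedding(s), dim =", len(vectors[0]["embedding"]))
```

If the dimensions print as expected, `PostAPIEmbeddingWrapper` pointed at the same URL (as shown in the tutorial's test snippet) should work as well.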