diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml index 9e4b9ba63..7c7d802ad 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unittest.yml @@ -50,14 +50,14 @@ jobs: run: | pip install -q -e .[full] pip install coverage + - name: CPP gRPC Server Tests + if: ${{ matrix.os != 'windows-latest' }} + run: | + redis-server &> redis.log & + AGENTSCOPE_ENABLE_PY_LOGGER=True AGENTSCOPE_USE_CPP_SERVER=YES AGENTSCOPE_NUM_WORKERS=5 python -m unittest tests/rpc_agent_test.py - name: Run tests with coverage run: | coverage run tests/run.py - name: Generate coverage report run: | coverage report -m - - name: CPP gRPC Server Tests - if: ${{ matrix.os != 'windows-latest' }} - run: | - redis-server &> redis.log & - AGENTSCOPE_ENABLE_PY_LOGGER=True AGENTSCOPE_USE_CPP_SERVER=YES AGENTSCOPE_NUM_WORKERS=5 python -m unittest tests/rpc_agent_test.py diff --git a/README.md b/README.md index a5fc5eb90..f15df7418 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -English | [**中文**](https://github.com/modelscope/agentscope/blob/main/README_ZH.md) +English | [**中文**](https://github.com/modelscope/agentscope/blob/main/README_ZH.md) | [**日本語**](https://github.com/modelscope/agentscope/blob/main/README_JA.md) # AgentScope diff --git a/README_JA.md b/README_JA.md new file mode 100644 index 000000000..0f1fd2c78 --- /dev/null +++ b/README_JA.md @@ -0,0 +1,396 @@ +[English](https://github.com/modelscope/agentscope/blob/main/README.md) | [中文](https://github.com/modelscope/agentscope/blob/main/README_ZH.md) | 日本語 + +# AgentScope + +

+agentscope-logo +

+
+LLMを活用したマルチエージェントアプリケーションをより簡単に構築する。
+
+[![](https://img.shields.io/badge/cs.MA-2402.14034-B31C1C?logo=arxiv&logoColor=B31C1C)](https://arxiv.org/abs/2402.14034)
+[![](https://img.shields.io/badge/python-3.9+-blue)](https://pypi.org/project/agentscope/)
+[![](https://img.shields.io/badge/pypi-v0.1.0-blue?logo=pypi)](https://pypi.org/project/agentscope/)
+[![](https://img.shields.io/badge/Docs-English%7C%E4%B8%AD%E6%96%87-blue?logo=markdown)](https://modelscope.github.io/agentscope/#welcome-to-agentscope-tutorial-hub)
+[![](https://img.shields.io/badge/Docs-API_Reference-blue?logo=markdown)](https://modelscope.github.io/agentscope/)
+[![](https://img.shields.io/badge/ModelScope-Demos-4e29ff.svg?logo=data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjI0IDEyMS4zMyIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KCTxwYXRoIGQ9Im0wIDQ3Ljg0aDI1LjY1djI1LjY1aC0yNS42NXoiIGZpbGw9IiM2MjRhZmYiIC8+Cgk8cGF0aCBkPSJtOTkuMTQgNzMuNDloMjUuNjV2MjUuNjVoLTI1LjY1eiIgZmlsbD0iIzYyNGFmZiIgLz4KCTxwYXRoIGQ9Im0xNzYuMDkgOTkuMTRoLTI1LjY1djIyLjE5aDQ3Ljg0di00Ny44NGgtMjIuMTl6IiBmaWxsPSIjNjI0YWZmIiAvPgoJPHBhdGggZD0ibTEyNC43OSA0Ny44NGgyNS42NXYyNS42NWgtMjUuNjV6IiBmaWxsPSIjMzZjZmQxIiAvPgoJPHBhdGggZD0ibTAgMjIuMTloMjUuNjV2MjUuNjVoLTI1LjY1eiIgZmlsbD0iIzM2Y2ZkMSIgLz4KCTxwYXRoIGQ9Im0xOTguMjggNDcuODRoMjUuNjV2MjUuNjVoLTI1LjY1eiIgZmlsbD0iIzYyNGFmZiIgLz4KCTxwYXRoIGQ9Im0xOTguMjggMjIuMTloMjUuNjV2MjUuNjVoLTI1LjY1eiIgZmlsbD0iIzM2Y2ZkMSIgLz4KCTxwYXRoIGQ9Im0xNTAuNDQgMHYyMi4xOWgyNS42NXYyNS42NWgyMi4xOXYtNDcuODR6IiBmaWxsPSIjNjI0YWZmIiAvPgoJPHBhdGggZD0ibTczLjQ5IDQ3Ljg0aDI1LjY1djI1LjY1aC0yNS42NXoiIGZpbGw9IiMzNmNmZDEiIC8+Cgk8cGF0aCBkPSJtNDcuODQgMjIuMTloMjUuNjV2LTIyLjE5aC00Ny44NHY0Ny44NGgyMi4xOXoiIGZpbGw9IiM2MjRhZmYiIC8+Cgk8cGF0aCBkPSJtNDcuODQgNzMuNDloLTIyLjE5djQ3Ljg0aDQ3Ljg0di0yMi4xOWgtMjUuNjV6IiBmaWxsPSIjNjI0YWZmIiAvPgo8L3N2Zz4K)](https://modelscope.cn/studios?name=agentscope&page=1&sort=latest)
+
+[![](https://img.shields.io/badge/Drag_and_drop_UI-WorkStation-blue?logo=html5&logoColor=green&color=dark-green)](https://agentscope.io/)
+[![](https://img.shields.io/badge/license-Apache--2.0-black)](./LICENSE)
+[![](https://img.shields.io/badge/Contribute-Welcome-green)](https://modelscope.github.io/agentscope/tutorial/contribute.html)
+
+- 私たちの仕事が役に立った場合は、[論文](https://arxiv.org/abs/2402.14034)を引用してください。
+
+- [agentscope.io](https://agentscope.io/)にアクセスして、ドラッグアンドドロップでマルチエージェントアプリケーションを構築してください。
+
+ + agentscope-workstation + +
+ +- 私たちのコミュニティに参加してください + +| [Discord](https://discord.gg/eYMpfnkG8h) | DingTalk | +|---------|----------| +| | | + +---- + +## ニュース + +- new**[2024-09-06]** AgentScopeバージョン0.1.0がリリースされました。 + +- new**[2024-09-03]** AgentScopeは**Webブラウザ制御**をサポートしています。詳細については、[例](https://github.com/modelscope/agentscope/tree/main/examples/conversation_with_web_browser_agent)を参照してください。 + +
+ +
+ +- new**[2024-07-18]** AgentScopeはストリーミングモードをサポートしています。詳細については、[チュートリアル](https://modelscope.github.io/agentscope/en/tutorial/203-stream.html)および[ストリーミングモードでの会話の例](https://github.com/modelscope/agentscope/tree/main/examples/conversation_in_stream_mode)を参照してください。 + +
+agentscope-logo +agentscope-logo +
+ +- new**[2024-07-15]** AgentScopeはMixture-of-Agentsアルゴリズムを実装しました。詳細については、[MoAの例](https://github.com/modelscope/agentscope/blob/main/examples/conversation_mixture_of_agents)を参照してください。 + +- **[2024-06-14]** 新しいプロンプトチューニングモジュールがAgentScopeに追加され、開発者がエージェントのシステムプロンプトを生成および最適化するのに役立ちます。詳細については、[チュートリアル](https://modelscope.github.io/agentscope/en/tutorial/209-prompt_opt.html)を参照してください。 + +- **[2024-06-11]** RAG機能が**AgentScope**に追加されました。エージェントに外部知識を装備するための[**AgentScopeのRAGの簡単な紹介**](https://modelscope.github.io/agentscope/en/tutorial/210-rag.html)を参照してください。 + +- **[2024-06-09]** **AgentScope** v0.0.5がリリースされました。この新しいバージョンでは、[**AgentScope Workstation**](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html)(オンラインバージョンは[agentscope.io](https://agentscope.io)で実行されています)がリファクタリングされた[**AgentScope Studio**](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html)とともにオープンソース化されました。 + +
+完全なニュース + +- **[2024-05-24]** **AgentScope Workstation**に関連する機能がまもなくオープンソース化されることをお知らせします。オンラインウェブサイトサービスは一時的にオフラインになっています。オンラインウェブサイトサービスはアップグレードされ、まもなく再開されます。お楽しみに... + +- **[2024-05-15]** **フォーマットされた応答**のための新しい**パーサーモジュール**がAgentScopeに追加されました。詳細については、[チュートリアル](https://modelscope.github.io/agentscope/en/tutorial/203-parser.html)を参照してください。[`DictDialogAgent`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/agents/dict_dialog_agent.py)および[人狼ゲーム](https://github.com/modelscope/agentscope/tree/main/examples/game_werewolf)の例も同時に更新されました。 + +- **[2024-05-14]** 親愛なるAgentScopeユーザーの皆様、**AgentScope Workstation & Copilot**のユーザーエクスペリエンスに関するアンケートを実施しています。現在、AgentScopeのドラッグアンドドロップマルチエージェントアプリケーション開発とCopilotのエクスペリエンスを改善するために、貴重なフィードバックが必要です。フィードバックは貴重であり、アンケートには約3〜5分かかります。アンケート調査に参加するには、[URL](https://survey.aliyun.com/apps/zhiliao/vgpTppn22)をクリックしてください。ご支援とご協力に感謝します。 + +- **[2024-05-14]** AgentScopeは**gpt-4o**および他のOpenAIビジョンモデルをサポートしています。gpt-4oを[モデル構成](./examples/model_configs_template/openai_chat_template.json)と新しい例[Conversation with gpt-4o](./examples/conversation_with_gpt-4o)で試してください。 + +- **[2024-04-30]** **AgentScope** v0.0.4がリリースされました。 + +- **[2024-04-27]** [AgentScope Workstation](https://agentscope.io/)がオンラインになりました。*ドラッグアンドドロッププラットフォーム*を使用してマルチエージェントアプリケーションを構築し、*copilot*にAgentScopeに関する質問をすることができます。 + +- **[2024-04-19]** AgentScopeはLlama3をサポートしています。クイックセットアップのための[スクリプト](https://github.com/modelscope/agentscope/blob/main/examples/model_llama3)と[モデル構成](https://github.com/modelscope/agentscope/blob/main/examples/model_llama3)を提供しています。例でllama3を試してみてください。 + +- **[2024-04-06]** **AgentScope** v0.0.3がリリースされました。 + +- **[2024-04-06]** 新しい例[五目並べ](https://github.com/modelscope/agentscope/blob/main/examples/game_gomoku)、[ReActエージェントとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_react_agent)、[RAGエージェントとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_RAG_agents)、および[分散並列最適化](https://github.com/modelscope/agentscope/blob/main/examples/distributed_parallel_optimization)が利用可能になりました。 + +- **[2024-03-19]** **AgentScope** v0.0.2がリリースされました。この新しいバージョンでは、AgentScopeは[ollama](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#supported-models)(ローカルCPU推論エンジン)、[DashScope](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#supported-models)およびGoogle[Gemini](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#supported-models)APIをサポートしています。 + +- **[2024-03-19]** 新しい例「[メンション付きの自律会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_mentions)」および「[LangChainライブラリを使用した基本的な会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_langchain)」が利用可能になりました。 + +- **[2024-03-19]** AgentScopeの[中国語チュートリアル](https://modelscope.github.io/agentscope/zh_CN/index.html)がオンラインになりました。 + +- **[2024-02-27]** **AgentScope v0.0.1**がリリースされました。これは[PyPI](https://pypi.org/project/agentscope/)でも利用可能です。 + +- **[2024-02-14]** 私たちは論文「[AgentScope: A Flexible yet Robust Multi-Agent Platform](https://arxiv.org/abs/2402.14034)」を[arXiv](https://arxiv.org/abs/2402.14034)に発表しました。 + +
+ +--- + +## AgentScopeとは? + +AgentScopeは、開発者が大規模モデルを使用してマルチエージェントアプリケーションを構築する能力を提供する革新的なマルチエージェントプラットフォームです。 +それは3つの高レベルの機能を備えています: + +- 🤝 **使いやすさ**:開発者向けに設計されており、[豊富なコンポーネント](https://modelscope.github.io/agentscope/en/tutorial/204-service.html#)、[包括的なドキュメント](https://modelscope.github.io/agentscope/en/index.html)、および広範な互換性を提供します。さらに、[AgentScope Workstation](https://agentscope.io/)は、初心者向けの*ドラッグアンドドロッププログラミングプラットフォーム*と*copilot*を提供します。 + +- ✅ **高い堅牢性**:カスタマイズ可能なフォールトトレランス制御と再試行メカニズムをサポートし、アプリケーションの安定性を向上させます。 + +- 🚀 **アクターベースの分散**:集中型プログラミング方式で分散マルチエージェントアプリケーションを構築し、開発を簡素化します。 + +**サポートされているモデルライブラリ** + +AgentScopeは、ローカルモデルサービスとサードパーティのモデルAPIの両方をサポートするための`ModelWrapper`のリストを提供します。 + +| API | タスク | モデルラッパー | 構成 | サポートされているモデルの一部 | +|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------| +| OpenAI API | チャット | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4o, gpt-4, gpt-3.5-turbo, ... | +| | 埋め込み | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... | +| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 | +| DashScope API | チャット | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... | +| | 画像生成 | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 | +| | テキスト埋め込み | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... | +| | マルチモーダル | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat | +| Gemini API | チャット | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... | +| | 埋め込み | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... | +| ZhipuAI API | チャット | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_chat_template.json) | glm-4, ... | +| | 埋め込み | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_embedding_template.json) | embedding-2, ... | +| ollama | チャット | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama3, llama2, Mistral, ... | +| | 埋め込み | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... | +| | 生成 | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... | +| LiteLLM API | チャット | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [litellmがサポートするモデル](https://docs.litellm.ai/docs/)... | +| Yi API | チャット | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/yi_chat_template.json) | yi-large, yi-medium, ... | +| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - | + +**サポートされているローカルモデルのデプロイ** + +AgentScopeは、次のライブラリを使用してローカルモデルサービスを迅速にデプロイするためのサポートを提供します。 + +- [ollama (CPU推論)](https://github.com/modelscope/agentscope/blob/main/scripts/README.md#ollama) +- [Flask + Transformers](https://github.com/modelscope/agentscope/blob/main/scripts/README.md#with-transformers-library) +- [Flask + ModelScope](https://github.com/modelscope/agentscope/blob/main/scripts/README.md#with-modelscope-library) +- [FastChat](https://github.com/modelscope/agentscope/blob/main/scripts/README.md#fastchat) +- [vllm](https://github.com/modelscope/agentscope/blob/main/scripts/README.md#vllm) + +**サポートされているサービス** + +- ウェブ検索 +- データクエリ +- 検索 +- コード実行 +- ファイル操作 +- テキスト処理 +- マルチモーダル生成 +- Wikipedia検索と検索 +- TripAdvisor検索 +- ウェブブラウザ制御 + +**例のアプリケーション** + +- モデル + - [AgentScopeでLlama3を使用する](https://github.com/modelscope/agentscope/blob/main/examples/model_llama3) + +- 会話 + - [基本的な会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_basic) + - [メンション付きの自律会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_mentions) + - [自己組織化会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_self_organizing) + - [LangChainライブラリを使用した基本的な会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_langchain) + - [ReActエージェントとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_react_agent) + - [自然言語でSQLをクエリする会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_nl2sql/) + - [RAGエージェントとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_RAG_agents) + - [gpt-4oとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_gpt-4o) + - [ソフトウェアエンジニアリングエージェントとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_swe-agent/) + - [カスタマイズされたツールとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_customized_services/) + - new[Mixture of Agentsアルゴリズム](https://github.com/modelscope/agentscope/blob/main/examples/conversation_mixture_of_agents/) + - new[ストリーミングモードでの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_in_stream_mode/) + - new[CodeActエージェントとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_codeact_agent/) + - new[Routerエージェントとの会話](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_router_agent/) + +- ゲーム + - [五目並べ](https://github.com/modelscope/agentscope/blob/main/examples/game_gomoku) + - [人狼](https://github.com/modelscope/agentscope/blob/main/examples/game_werewolf) + +- 分散 + - [分散会話](https://github.com/modelscope/agentscope/blob/main/examples/distributed_conversation) + - [分散ディベート](https://github.com/modelscope/agentscope/blob/main/examples/distributed_debate) + - [分散並列最適化](https://github.com/modelscope/agentscope/blob/main/examples/distributed_parallel_optimization) + - [分散大規模シミュレーション](https://github.com/modelscope/agentscope/blob/main/examples/distributed_simulation) + +さらに多くのモデル、サービス、および例が近日公開予定です。 + +## インストール + +AgentScopeは**Python 3.9**以上が必要です。 + +***注:このプロジェクトは現在アクティブに開発中であり、AgentScopeをソースからインストールすることをお勧めします。*** + +### ソースから + +- AgentScopeを編集モードでインストールします: + +```bash +# GitHubからソースコードを取得 +git clone https://github.com/modelscope/agentscope.git + +# パッケージを編集モードでインストール +cd agentscope +pip install -e . 
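+# (任意)編集モードでのインストールがimportできることを確認するサニティチェック
+python -c "import agentscope"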
+``` + +### pipを使用 + +- pipからAgentScopeをインストールします: + +```bash +pip install agentscope +``` + +### 追加の依存関係 + +さまざまなデプロイメントシナリオをサポートするために、AgentScopeはいくつかのオプションの依存関係を提供します。オプションの依存関係の完全なリストは、[チュートリアル](https://doc.agentscope.io/en/tutorial/102-installation.html)を参照してください。分散モードを例にとると、次のように依存関係をインストールできます: + +#### Windowsの場合 + +```bash +# ソースから +pip install -e .[distribute] +# pypiから +pip install agentscope[distribute] +``` + +#### Mac & Linuxの場合 + +```bash +# ソースから +pip install -e .\[distribute\] +# pypiから +pip install agentscope\[distribute\] +``` + +## クイックスタート + +### 構成 + +AgentScopeでは、モデルのデプロイメントと呼び出しは`ModelWrapper`によってデカップリングされています。 + +これらのモデルラッパーを使用するには、次のようなモデル構成ファイルを準備する必要があります。 + +```python +model_config = { + # 構成の識別子と使用されるモデルラッパー + "config_name": "{your_config_name}", # 構成を識別する名前 + "model_type": "{model_type}", # モデルラッパーを識別するタイプ + + # モデルラッパーを初期化するための詳細なパラメータ + # ... +} +``` + +OpenAI Chat APIを例にとると、モデル構成は次のようになります: + +```python +openai_model_config = { + "config_name": "my_openai_config", # 構成を識別する名前 + "model_type": "openai_chat", # モデルラッパーを識別するタイプ + + # モデルラッパーを初期化するための詳細なパラメータ + "model_name": "gpt-4", # OpenAI APIで使用されるモデル名(例:gpt-4、gpt-3.5-turboなど) + "api_key": "xxx", # OpenAI APIのAPIキー。設定されていない場合、環境変数OPENAI_API_KEYが使用されます。 + "organization": "xxx", # OpenAI APIの組織。設定されていない場合、環境変数OPENAI_ORGANIZATIONが使用されます。 +} +``` + +ローカルモデルサービスのセットアップ方法やモデル構成の準備方法の詳細については、[チュートリアル](https://modelscope.github.io/agentscope/index.html#welcome-to-agentscope-tutorial-hub)を参照してください。 + +### エージェントの作成 + +次のように組み込みのユーザーエージェントとアシスタントエージェントを作成します。 + +```python +from agentscope.agents import DialogAgent, UserAgent +import agentscope + +# モデル構成を読み込む +agentscope.init(model_configs="./model_configs.json") + +# ダイアログエージェントとユーザーエージェントを作成する +dialog_agent = DialogAgent(name="assistant", + model_config_name="my_openai_config") +user_agent = UserAgent() +``` + +### 会話の構築 + +AgentScopeでは、**メッセージ**はエージェント間の橋渡しであり、**dict**であり、2つの必要なフィールド`name`と`content`、およびローカルファイル(画像、ビデオ、またはオーディオ)またはウェブサイトへのオプションのフィールド`url`を含みます。 + +```python +from agentscope.message import Msg + +x = Msg(name="Alice", content="Hi!") +x = Msg("Bob", "What about this picture I took?", url="/path/to/picture.jpg") +``` + +次のコードを使用して、2つのエージェント(例:dialog_agentとuser_agent)間の会話を開始します: + +```python +x = None +while True: + x = dialog_agent(x) + x = user_agent(x) + if x.content == "exit": # ユーザーが"exit"と入力して会話を終了する + break +``` + +### AgentScope Studio + +AgentScopeは、テキスト、画像、オーディオ、ビデオなどのマルチモーダル出力をフロントエンドで表示できる使いやすいランタイムユーザーインターフェースを提供します。 + +詳細については、[チュートリアル](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html)を参照してください。 + +
+agentscope-logo +
+
+## チュートリアル
+
+- [AgentScopeについて](https://modelscope.github.io/agentscope/zh_CN/tutorial/101-agentscope.html)
+- [インストール](https://modelscope.github.io/agentscope/zh_CN/tutorial/102-installation.html)
+- [クイックスタート](https://modelscope.github.io/agentscope/zh_CN/tutorial/103-example.html)
+- [モデル](https://modelscope.github.io/agentscope/zh_CN/tutorial/203-model.html)
+- [プロンプトエンジニアリング](https://modelscope.github.io/agentscope/zh_CN/tutorial/206-prompt.html)
+- [エージェント](https://modelscope.github.io/agentscope/zh_CN/tutorial/201-agent.html)
+- [メモリ](https://modelscope.github.io/agentscope/zh_CN/tutorial/205-memory.html)
+- [応答パーサー](https://modelscope.github.io/agentscope/zh_CN/tutorial/203-parser.html)
+- [ツール](https://modelscope.github.io/agentscope/zh_CN/tutorial/204-service.html)
+- [パイプラインとMsgHub](https://modelscope.github.io/agentscope/zh_CN/tutorial/202-pipeline.html)
+- [分散](https://modelscope.github.io/agentscope/zh_CN/tutorial/208-distribute.html)
+- [AgentScope Studio](https://modelscope.github.io/agentscope/zh_CN/tutorial/209-gui.html)
+- [ログ](https://modelscope.github.io/agentscope/zh_CN/tutorial/105-logging.html)
+- [モニター](https://modelscope.github.io/agentscope/zh_CN/tutorial/207-monitor.html)
+- [例:人狼ゲーム](https://modelscope.github.io/agentscope/zh_CN/tutorial/104-usecase.html)
+
+## ライセンス
+
+AgentScopeはApache License 2.0の下でリリースされています。
+
+## 貢献
+
+貢献は常に歓迎されます!
+
+公式バージョンと比較して、追加のプリコミットフックを使用してチェックを実行する開発者バージョンを提供します:
+
+```bash
+# Windowsの場合
+pip install -e .[dev]
+# Macの場合
+pip install -e .\[dev\]
+
+# プリコミットフックをインストール
+pre-commit install
+```
+
+詳細については、[貢献ガイド](https://modelscope.github.io/agentscope/en/tutorial/302-contribute.html)を参照してください。
+
+## 出版物
+
+私たちの仕事があなたの研究やアプリケーションに役立つ場合は、私たちの論文を引用してください。
+
+1. [AgentScope: A Flexible yet Robust Multi-Agent Platform](https://arxiv.org/abs/2402.14034)
+
+    ```
+    @article{agentscope,
+        author  = {Dawei Gao and
+                   Zitao Li and
+                   Xuchen Pan and
+                   Weirui Kuang and
+                   Zhijian Ma and
+                   Bingchen Qian and
+                   Fei Wei and
+                   Wenhao Zhang and
+                   Yuexiang Xie and
+                   Daoyuan Chen and
+                   Liuyi Yao and
+                   Hongyi Peng and
+                   Ze Yu Zhang and
+                   Lin Zhu and
+                   Chen Cheng and
+                   Hongzhu Shi and
+                   Yaliang Li and
+                   Bolin Ding and
+                   Jingren Zhou},
+        title   = {AgentScope: A Flexible yet Robust Multi-Agent Platform},
+        journal = {CoRR},
+        volume  = {abs/2402.14034},
+        year    = {2024},
+    }
+    ```
diff --git a/README_ZH.md b/README_ZH.md
index 27729bb7f..550eaf407 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -1,4 +1,4 @@
-[English](https://github.com/modelscope/agentscope/blob/main/README.md) | 中文
+[English](https://github.com/modelscope/agentscope/blob/main/README.md) | 中文 | [日本語](https://github.com/modelscope/agentscope/blob/main/README_JA.md)
 
 # AgentScope
 
diff --git a/WORKSPACE b/WORKSPACE
new file mode 100644
index 000000000..a5f3f9473
--- /dev/null
+++ b/WORKSPACE
@@ -0,0 +1,75 @@
+# WORKSPACE
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+# http_archive(
+#     name = "build_bazel_rules_swift",
+#     urls = ["https://github.com/bazelbuild/rules_swift/archive/refs/tags/2.1.1.zip"],
+#     strip_prefix = "rules_swift-2.1.1",
+# )
+
+# Pull in gRPC
+http_archive(
+    name = "com_github_grpc_grpc",
+    urls = ["https://github.com/grpc/grpc/archive/refs/tags/v1.66.0.zip"],  # replace with the latest version
+    strip_prefix = "grpc-1.66.0",
+)
+
+# Pull in Protocol Buffers
+http_archive(
+    name = "com_google_protobuf",
+    urls = ["https://github.com/protocolbuffers/protobuf/archive/refs/tags/v28.2.zip"],  # replace with the latest version
+    strip_prefix = "protobuf-28.2",
+)
+
+# http_archive(
+#     name = "rules_swift",
urls = ["https://github.com/bazelbuild/rules_swift/archive/refs/tags/2.1.1.zip"], +# strip_prefix = "rules_swift-2.1.1", +# ) +# load("@rules_swift//swift:defs.bzl", "swift_repositories") +# swift_repositories() + +# 引入 gRPC 相关的构建设定 +# load("@grpc//:build_defs.bzl", "grpc_repositories") +# grpc_repositories() + +http_archive( + name = "rules_python_internal", + urls = ["https://github.com/bazelbuild/rules_python/archive/refs/tags/0.36.0.zip"], + strip_prefix = "rules_python-0.36.0", +) +load("@rules_python_internal//python:repositories.bzl", "python_register_toolchains") + +python_register_toolchains( + name = "python3_12", + python_version = "3.12", +) + +# load("@python3_12//:defs.bzl", "interpreter") + +# load("@rules_python//python:pip.bzl", "pip_parse") + +# pip_parse( +# python_interpreter_target = interpreter, +# ) + + +http_archive( + name = "pybind11_bazel", + urls = ["https://github.com/pybind/pybind11_bazel/archive/refs/tags/v2.12.0.zip"], + strip_prefix = "pybind11_bazel-2.12.0", +) +http_archive( + name = "pybind11", + build_file = "@pybind11_bazel//:pybind11-BUILD.bazel", + urls = ["https://github.com/pybind/pybind11/archive/refs/tags/v2.13.6.zip"], # 适配你的版本 + strip_prefix = "pybind11-2.13.6", +) +# load("@pybind11_bazel//:python_configure.bzl", "python_configure") +# load("@pybind11_bazel//:build_defs.bzl", "pybind_extension") + + +load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps") +grpc_deps() +# load("@com_google_protobuf//:protobuf.bzl", "protobuf_repositories") +# protobuf_repositories() \ No newline at end of file diff --git a/docs/sphinx_doc/zh_CN/source/index.rst b/docs/sphinx_doc/zh_CN/source/index.rst index ca674a0e1..0a30339da 100644 --- a/docs/sphinx_doc/zh_CN/source/index.rst +++ b/docs/sphinx_doc/zh_CN/source/index.rst @@ -58,6 +58,7 @@ AgentScope 文档 agentscope.service agentscope.rpc agentscope.server + agentscope.environment agentscope.web agentscope.prompt agentscope.utils \ No newline at end of file diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/206-prompt.md b/docs/sphinx_doc/zh_CN/source/tutorial/206-prompt.md index 12a70cb44..1008214ff 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/206-prompt.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/206-prompt.md @@ -11,7 +11,7 @@ AgentScope内置策略的目标是**使初学者能够顺利调用模型API , ## 构建提示面临的挑战 -在多智能体应用中,LLM通常在对话中扮演不同的角色。当使用模型的Chat API时,时长会面临以下挑战: +在多智能体应用中,LLM通常在对话中扮演不同的角色。当使用模型的Chat API时,时常会面临以下挑战: 1. 
大多数Chat类型的模型API是为聊天机器人场景设计的,`role`字段只支持`"user"`和`"assistant"`,不支持`name`字段,即API本身不支持角色扮演。 diff --git a/examples/distributed_simulation/participant.py b/examples/distributed_simulation/participant.py index 06dbf6a81..3dc147a3f 100644 --- a/examples/distributed_simulation/participant.py +++ b/examples/distributed_simulation/participant.py @@ -4,7 +4,6 @@ import time import re from typing import Optional, Union, Sequence -import concurrent.futures from loguru import logger diff --git a/examples/environments/chatroom/chatroom_example.py b/examples/environments/chatroom/chatroom_example.py index 7d192654e..f552f6c17 100644 --- a/examples/environments/chatroom/chatroom_example.py +++ b/examples/environments/chatroom/chatroom_example.py @@ -59,22 +59,17 @@ def main(args: argparse.Namespace) -> None: ), role="system", ) - r = ChatRoom(name="chat", announcement=ann, to_dist=args.use_dist) + r = ChatRoom( + name="chat", + announcement=ann, + model_config_name=YOUR_MODEL_CONFIGURATION_NAME, + to_dist=args.use_dist, + ) # Setup the persona of Alice, Bob and Carol alice = ChatRoomAgent( # Game Art Designer name="Alice", - sys_prompt=r"""You are a game art designer named Alice. """ - r"""Programmer Bob and game planner Carol are your colleagues, """ - r"""and you need to collaborate with them to complete an open """ - r"""world game. Please ask appropriate question to planner or """ - r"""generate appropriate responses in this work group based on """ - r"""the following chat history. When you need to mention someone, """ - r"""you can use @ to remind them. You only need to output Alice's """ - r"""possible replies, without giving anyone else's replies or """ - r"""continuing the conversation. When the discussion is complete, """ - r"""you need to reply with a message containing 'Goodbye' to """ - r"""indicate exiting the conversation.""", + sys_prompt=r"""You are a game art designer named Alice.""", model_config_name=YOUR_MODEL_CONFIGURATION_NAME, to_dist=args.use_dist, ) @@ -82,17 +77,7 @@ def main(args: argparse.Namespace) -> None: bob = ChatRoomAgent( # Game Programmer name="Bob", - sys_prompt=r"""You are a game programmer named Bob. """ - r"""Art designer Alice and game planner Carol are your colleagues, """ - r"""and you need to collaborate with them to complete an open """ - r"""world game. Please ask appropriate questions or generate """ - r"""appropriate responses in the work group based on the following """ - r"""historical records. When you need to mention someone, you can """ - r"""use @ to remind them. You only need to output Bob's possible """ - r"""replies, without giving anyone else's replies or continuing """ - r"""the conversation. When the discussion is complete, you need """ - r"""to reply with a message containing 'Goodbye' to indicate """ - r"""exiting the conversation.""", + sys_prompt=r"""You are a game programmer named Bob.""", model_config_name=YOUR_MODEL_CONFIGURATION_NAME, to_dist=args.use_dist, ) @@ -100,16 +85,7 @@ def main(args: argparse.Namespace) -> None: carol = ChatRoomAgent( # Game Designer name="Carol", - sys_prompt=r"""You are a game planner named Carol. """ - r"""Programmer Bob and art designer Alice are your colleagues, """ - r"""and you need to guide them in developing an open world game. """ - r"""Please generate a suitable response in this work group based """ - r"""on the following chat history. When you need to mention """ - r"""someone, you can use @ to remind them. 
You only need to output """ - r"""Carol's possible replies, without giving anyone else's replies """ - r"""or continuing the conversation. When the discussion is """ - r"""complete, you need to reply with a message containing """ - r"""'Goodbye' to indicate exiting the conversation.""", + sys_prompt=r"""You are a game planner named Carol.""", model_config_name=YOUR_MODEL_CONFIGURATION_NAME, to_dist=args.use_dist, ) diff --git a/examples/environments/chatroom/chatroom_with_assistant_example.py b/examples/environments/chatroom/chatroom_with_assistant_example.py index 599c34947..3983a2711 100644 --- a/examples/environments/chatroom/chatroom_with_assistant_example.py +++ b/examples/environments/chatroom/chatroom_with_assistant_example.py @@ -70,12 +70,12 @@ def main(args: argparse.Namespace) -> None: bob = ChatRoomAgentWithAssistant( name="Bob", - sys_prompt=r"""You are Bob's chat room assistant and he is """ + sys_prompt=r"""You are Bob's chat room assistant and Bob is """ r"""currently unable to reply to messages. Please generate a """ - r"""suitable response based on the following chat history. """ - r"""The content you reply to must be based on the chat history. """ - r"""Please refuse to reply to questions that are beyond the scope """ - r"""of the chat history.""", + r"""suitable response based on the following chat history without """ + r"""reasoning. The content you reply to must be based on the chat """ + r"""history. Please refuse to reply to questions that are beyond """ + r"""the scope of the chat history.""", model_config_name=YOUR_MODEL_CONFIGURATION_NAME, to_dist=args.use_dist, timeout=args.timeout, @@ -176,18 +176,21 @@ def main(args: argparse.Namespace) -> None: # Setup the persona of Carol carol = ChatRoomAgent( name="Carol", - sys_prompt=r"""You are Carol, and now you need to interview Bob. """ - r"""Just ask him where he is from, which school he graduated from, """ - r"""his profession, and his hobbies. At the end of the interview, """ - r"""please output a reply containing Goodbye to indicate the end """ - r"""of the conversation.""", + sys_prompt="""You are Carol, and now you need to interview Bob. """ + """Just ask him where he is from, which school he graduated from, """ + """his profession, and his hobbies. 
You'd better only ask one """ + """question at a time.""", model_config_name=YOUR_MODEL_CONFIGURATION_NAME, to_dist=args.use_dist, ) carol.join(r) # Start the chat - r.chatting(delay={carol.agent_id: 0, bob.agent_id: 5}) + r.chat_freely( + delay=10, + interval=10, + max_round=10, + ) if __name__ == "__main__": diff --git a/examples/environments/chatroom/envs/chatroom.py b/examples/environments/chatroom/envs/chatroom.py index 430fc15c3..7e1104199 100644 --- a/examples/environments/chatroom/envs/chatroom.py +++ b/examples/environments/chatroom/envs/chatroom.py @@ -21,6 +21,7 @@ event_func, ) from agentscope.models import ModelResponse +from agentscope.manager import ModelManager from agentscope.studio._client import _studio_client from agentscope.web.gradio.utils import user_input @@ -38,6 +39,22 @@ """ +def format_messages(msgs: Union[Msg, List[Msg]]) -> list[dict]: + """Format the messages""" + messages = [] + if isinstance(msgs, Msg): + msgs = [msgs] + for msg in msgs: + messages.append( + { + "role": msg.role, + "name": msg.name, + "content": str(msg.content), + }, + ) + return messages + + class ChatRoomMember(BasicEnv): """A member of chatroom.""" @@ -92,6 +109,7 @@ class ChatRoom(BasicEnv): def __init__( self, name: str = None, + model_config_name: str = None, announcement: Msg = None, participants: List[AgentBase] = None, all_history: bool = False, @@ -126,6 +144,12 @@ def __init__( ) self.history = [] self.announcement = announcement + self.member_introduction = {} + if model_config_name is not None: + model_manager = ModelManager.get_instance() + self.model = model_manager.get_model_by_config_name( + model_config_name, + ) @event_func def join(self, agent: AgentBase) -> bool: @@ -137,15 +161,17 @@ def join(self, agent: AgentBase) -> bool: agent=agent, history_idx=len(self.history), ) + self.member_introduction[agent.name] = agent.introduction self.add_listener("speak", Notifier()) return True @event_func def leave(self, agent: AgentBase) -> bool: """Remove the participant agent from the chatroom.""" - if agent.agent_id not in self.children: + if agent.name not in self.children: return False - del self.children[agent.agent_id] + del self.children[agent.name] + del self.member_introduction[agent.name] return True @event_func @@ -166,9 +192,33 @@ def get_history(self, agent_name: str) -> List[Msg]: history_idx = self.children[agent_name].history_idx return deepcopy(self.history[history_idx:]) + def get_history_length(self, agent_name: str) -> int: + """Get the length of the history of the agent.""" + if agent_name not in self.children: + return 0 + if self.all_history: + history_idx = 0 + else: + history_idx = self.children[agent_name].history_idx + return len(self.history) - history_idx + def describe(self, agent_name: str, **kwargs: Any) -> str: """Get the description of the chatroom.""" - ann = self.announcement if self.announcement else "EMPTY" + ann = self.announcement.content if self.announcement.content else "" + members_introduction = "\n\n".join( + [ + f"{name}: {introduction}" + for name, introduction in self.member_introduction.items() + ], + ) + ann += f"\n{members_introduction}\n\n" + ann += ( + """Please generate a suitable response in this work group based""" + """ on the following chat history. When you need to mention """ + """someone, you can use @ to remind them. 
You only need to """
+            f"""output {agent_name}'s possible replies, without giving """
+            """anyone else's replies or continuing the conversation."""
+        )
         history = "\n\n".join(
             [
                 f"{msg.name}: {msg.content}"
@@ -238,7 +288,7 @@ def chatting_parse_func(self, response: ModelResponse) -> ModelResponse:
         pattern = re.compile(pattern_str, re.DOTALL)
         logger.debug(repr(pattern_str))
         logger.debug(response.text)
-        texts = [s.strip() for s in pattern.split(response.text)]
+        texts = [s.strip() for s in pattern.split(response.text) if s.strip()]
         logger.debug(texts)
         return ModelResponse(text=texts[0])

@@ -247,10 +297,13 @@ def chat_freely(
         self,
         delay: float = 1,
         interval: float = 5,
         max_round: int = 10,
+        agent_name_list: List[str] = None,
     ) -> None:
         """Let all agents to chat freely without any preset order"""
         tasks = []
-        for agent_name in self.children.keys():
+        if agent_name_list is None:
+            agent_name_list = list(self.children.keys())
+        for agent_name in agent_name_list:
             task = threading.Thread(
                 target=self.children[agent_name].chat_freely,
                 kwargs={
@@ -265,11 +318,12 @@
             task.join()

     def chat_in_sequence(self, agent_name_order: List[str] = None) -> None:
-        """Let all agents to chat in a sequence
+        """Let all agents chat in sequence

         Args:
-            sequence (`List[str]`): Order of speakers' names.
+            agent_name_order (`List[str]`): Order of speakers' names.
         """
+        agent_name_order = agent_name_order or list(self.children.keys())
         for agent_name in agent_name_order:
             self.children[agent_name].chat()

@@ -285,6 +339,7 @@ def __init__(
         self,
     ) -> None:

     def __call__(self, room: Env, event: Event) -> None:
         names = self.pattern.findall(str(event.args["message"].content))
+        names = list(set(names))
         for name in names:
             if name in room.children:
@@ -313,6 +368,35 @@ def __init__(  # pylint: disable=W0613
             sys_prompt=sys_prompt,
             model_config_name=model_config_name,
         )
+        if self.sys_prompt:
+            prompt = format_messages(
+                [
+                    Msg(
+                        name="user",
+                        role="user",
+                        content=(
+                            f"Please generate a brief character introduction "
+                            f"in one sentence, which based on the following "
+                            f"prompt:\n"
+                            f"Prompt: {sys_prompt}\n"
+                            f"The generated description needs to follow the "
+                            f"following format:\n"
+                            f"[PERSONA BEGIN]\n"
+                            f"Description: One sentence introduction\n"
+                            f"[PERSONA END]"
+                        ),
+                    ),
+                ],
+            )
+            raw_introduction = self.model(prompt).text
+            raw_introduction = raw_introduction.split("[PERSONA BEGIN]", 1)[1]
+            raw_introduction = raw_introduction.split("[PERSONA END]")[0]
+            self.introduction = raw_introduction.strip()
+        else:
+            self.introduction = ""
+        logger.info(f"introduction: {self.introduction}")
+        self.room_history_length = 0
+        self.room_silent_count = 0
         self.room = None
         self.mentioned_messages = []
         self.mentioned_messages_lock = threading.Lock()
@@ -325,6 +409,7 @@ def add_mentioned_message(self, msg: Msg) -> None:

     def join(self, room: ChatRoom) -> bool:
         """Join a room"""
         self.room = room
+        self.room_history_length = self.room.get_history_length(self.name)
         return room.join(self)

     def _is_mentioned(self) -> bool:
@@ -342,28 +427,35 @@ def _generate_mentioned_prompt(self) -> Tuple[bool, str]:
                 for msg in self.mentioned_messages
             ],
         )
+            self.mentioned_messages = []
             return True, hint
         return False, ""

     def _want_to_speak(self, hint: str) -> bool:
         """Check whether the agent want to speak currently"""
-        prompt = self.model.format(
-            Msg(name="system", role="system", content=hint),
-            Msg(
-                name="user",
-                role="user",
-                content="Based on the CHATROOM."
- " Do you want to speak in the chatroom now?\n" - "Speak yes or no.", - ), + hint = ( + f"{self.sys_prompt}\n\nYou are participating in a chatroom.\n" + + hint + ) + prompt = format_messages( + [ + Msg(name="system", role="system", content=hint), + Msg( + name="user", + role="user", + content="Based on the CHATROOM." + " Do you want to or need to speak in the chatroom now?\n" + "Return yes or no.", + ), + ], ) + logger.debug(prompt) response = self.model( prompt, max_retries=3, ).text - speak = "yes" in response.lower() - logger.debug(f"[SPEAK OR NOT] {self.name}: {response}") - return speak + logger.info(f"[SPEAK OR NOT] {self.name}: {response}") + return "yes" in response.lower() def speak( self, @@ -381,44 +473,45 @@ def speak( def reply(self, x: Msg = None) -> Msg: """Generate reply to chat room""" + room_history_length = self.room.get_history_length(self.name) + if room_history_length != self.room_history_length: + self.room_history_length = room_history_length + self.room_slient_count = 0 + else: + self.room_slient_count += 1 room_info = self.room.describe(self.name) - system_hint = ( - f"{self.sys_prompt}\n\nYou are participating in a chatroom.\n" - f"\n{room_info}" - ) + reply_hint = "" mentioned, mentioned_hint = self._generate_mentioned_prompt() if mentioned: - # if mentioned, response directly - prompt = self.model.format( - Msg( - name="system", - role="system", - content=system_hint, - ), - Msg( - name="user", - role="user", - content=mentioned_hint, - ), - ) + reply_hint = f"{mentioned_hint}\n{self.name}:" else: # decide whether to speak - if self._want_to_speak(room_info): - prompt = self.model.format( - Msg( - name="system", - role="system", - content=system_hint, - ), - Msg( - name="user", - role="user", - content="Please generate a response based on the " - "CHATROOM.", - ), + if self.room_history_length <= 3 or ( + self.room_slient_count <= 2 and self._want_to_speak(room_info) + ): + reply_hint = ( + f"Please generate a response based on the" + f" CHATROOM. 
You need only generate response without " + f"reasoning.\n{self.name}:" ) else: return Msg(name="assistant", role="assistant", content="") + user_hint = ( + # f"{self.sys_prompt}\n\n" + f"You are participating in a chatroom.\n" + f"\n{room_info}\n{reply_hint}" + ) + prompt = format_messages( + [ + Msg( + name="system", + role="system", + content=self.sys_prompt, + ), + Msg(name="user", role="user", content=user_hint), + ], + ) + prompt[-1]["content"] = prompt[-1]["content"].strip() logger.debug(prompt) response = self.model( prompt, @@ -428,6 +521,7 @@ def reply(self, x: Msg = None) -> Msg: msg = Msg(name=self.name, content=response, role="assistant") if response: self.speak(msg) + self.room_history_length = self.room.get_history_length(self.name) return msg @@ -441,6 +535,7 @@ def __init__( ) -> None: super().__init__(**kwargs) self.timeout = timeout + self.room_history_length = 0 def reply(self, x: Msg = None) -> Msg: if _studio_client.active: @@ -471,14 +566,34 @@ def reply(self, x: Msg = None) -> Msg: if content is not None: # user input response = content else: # assistant reply - msg_hint = self._generate_mentioned_prompt() - self_msg = Msg(name=self.name, content="", role="assistant") - - history = self.room.get_history(self.agent_id) - prompt = self.model.format( - msg_hint, - history, - self_msg, + room_history_length = self.room.get_history_length(self.name) + if room_history_length == self.room_history_length: + return Msg(name="assistant", role="assistant", content="") + self.room_history_length = room_history_length + room_info = self.room.describe(self.name) + reply_hint = "" + mentioned, mentioned_hint = self._generate_mentioned_prompt() + if mentioned: + reply_hint = f"{mentioned_hint}\n{self.name}:" + else: + reply_hint = ( + f"Please generate a response based on the CHATROOM." 
+ f"\n{self.name}:" + ) + system_hint = ( + f"You are participating in a chatroom.\n" + f"\n{room_info}\n{reply_hint}" + ) + + prompt = format_messages( + [ + Msg( + name=self.name, + content=self.sys_prompt, + role="system", + ), + Msg(name="user", content=system_hint, role="user"), + ], ) logger.debug(prompt) response = self.model( @@ -490,4 +605,5 @@ def reply(self, x: Msg = None) -> Msg: response = "[auto reply] " + response msg = Msg(name=self.name, content=response, role="user") self.speak(msg) + self.room_history_length = self.room.get_history_length(self.name) return msg diff --git a/examples/paper_large_scale_simulation/tools/edu.json b/examples/paper_large_scale_simulation/tools/edu.json new file mode 100644 index 000000000..d577532f0 --- /dev/null +++ b/examples/paper_large_scale_simulation/tools/edu.json @@ -0,0 +1,43 @@ +{ + "total_num": 1000, + "distributions": [ + { + "distribution_name": "Gender", + "categories": [ + { + "category_name": "Male", + "percentage": 0.5 + }, + { + "category_name": "Female", + "percentage": 0.5 + } + ] + }, + { + "distribution_name": "Education Level", + "categories": [ + { + "category_name": "Elementary School", + "percentage": 0.2 + }, + { + "category_name": "High School", + "percentage": 0.2 + }, + { + "category_name": "Bachelor's Degree", + "percentage": 0.2 + }, + { + "category_name": "Master's Degree", + "percentage": 0.2 + }, + { + "category_name": "Ph.D.", + "percentage": 0.2 + } + ] + } + ] +} \ No newline at end of file diff --git a/examples/paper_large_scale_simulation/tools/job.json b/examples/paper_large_scale_simulation/tools/job.json new file mode 100644 index 000000000..dddb988b2 --- /dev/null +++ b/examples/paper_large_scale_simulation/tools/job.json @@ -0,0 +1,47 @@ +{ + "total_num": 600, + "distributions": [ + { + "distribution_name": "Gender", + "categories": [ + { + "category_name": "Male", + "percentage": 0.5 + }, + { + "category_name": "Female", + "percentage": 0.5 + } + ] + }, + { + "distribution_name": "Profession", + "categories": [ + { + "category_name": "Professor of Game Theory", + "percentage": 0.2 + }, + { + "category_name": "Economists", + "percentage": 0.2 + }, + { + "category_name": "Psychologists", + "percentage": 0.2 + }, + { + "category_name": "Athletes", + "percentage": 0.2 + }, + { + "category_name": "Artists", + "percentage": 0.2 + }, + { + "category_name": "Writers", + "percentage": 0.2 + } + ] + } + ] +} \ No newline at end of file diff --git a/examples/paper_large_scale_simulation/tools/persona_generator.py b/examples/paper_large_scale_simulation/tools/persona_generator.py new file mode 100644 index 000000000..9adb044e5 --- /dev/null +++ b/examples/paper_large_scale_simulation/tools/persona_generator.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- +"""Generate Persona with LLM""" +import os +import json +import argparse +from typing import Any +from tqdm import tqdm + +from loguru import logger +import numpy as np +import agentscope +from agentscope.agents import AgentBase +from agentscope.message import Msg +from agentscope.server import RpcAgentServerLauncher +from agentscope.rpc.retry_strategy import RetryFixedTimes + +MODEL_CONFIG_NAME = "my_model" +MODEL_CONFIG = { + "model_type": "dashscope_chat", + "config_name": MODEL_CONFIG_NAME, + "model_name": "qwen-max", + "api_key": os.environ.get("DASH_API_KEY", ""), +} + +BEGIN_TAG = "[PERSONA BEGIN]" +END_TAG = "[PERSONA END]" + +SYS_PROMPT_ZH = """你是一个角色人格描述生成小助手,你需要基于用户提供的 JSON 格式的提示信息,将其扩展为完整的角色人格描述。生成的描述需要遵循如下格式: + +``` + [PERSONA BEGIN] + - 姓名:必填 + - 
性别:男/女/不愿透露 + - 年龄:xx 岁/不愿透露 + - 人格描述:一段话简述该角色的人格 + [PERSONA END] +``` +""" # noqa + +SYS_PROMPT_EN = """ +You are a role personality description assistant, you need to generate a complete role personality description based on the provided JSON. The generated description should follow the following format: + +``` + [PERSONA BEGIN] + - Name: Required + - Gender: Male/Female/I don't want to disclose + - Age: xx years old/I don't want to disclose + - Personality Description: A brief description of the role's personality + [PERSONA END] +``` +""" # noqa + +USER_PROMPT_ZH = "请基于如下 JSON 生成角色的人格描述:\n" +USER_PROMPT_EN = ( + "Please generate a role persona based on the following JSON:\n" +) + + +class PersonaGenerator(AgentBase): + """An agent that can generate persona""" + + def __init__( + self, + name: str, + model_config_name: str = None, + language: str = "en", + ): + super().__init__( + name, + sys_prompt=None, + model_config_name=model_config_name, + use_memory=False, + ) + self.sys_prompt = Msg( + name="system", + role="system", + content=SYS_PROMPT_EN if language == "en" else SYS_PROMPT_ZH, + ) + self.user_prompt = ( + USER_PROMPT_EN if language == "en" else USER_PROMPT_ZH + ) + + def _extract_persona(self, content: str) -> str: + if BEGIN_TAG in content and END_TAG in content: + return content[ + content.find(BEGIN_TAG) + + len(BEGIN_TAG) : content.find(END_TAG) + ] + else: + raise ValueError("Invalid persona format") + + def reply(self, x: Msg) -> Msg: # pylint: disable=W0222 + desc = x.content + assert isinstance(desc, dict), "Persona description should be a dict" + prompt = self.model.format( + self.sys_prompt, + Msg( + name="user", + role="user", + content=self.user_prompt + + json.dumps(desc, indent=2, ensure_ascii=False), + ), + ) + response = self.model(prompt) + persona = RetryFixedTimes(max_retries=5, delay=2).retry( + self._extract_persona, + response.text, + ) + logger.debug(persona) + return Msg(name=self.name, role="assistant", content=persona) + + +def generate_samples(config_path: str) -> list: + """Generate samples based on the given config""" + with open(config_path, "r", encoding="utf-8") as f: + config = json.load(f) + total_num = config["total_num"] + samples = [{} for _ in range(total_num)] + for distribution in config["distributions"]: + distribution_name = distribution["name"] + categories = distribution["categories"] + + # Extract category names and percentages + category_names = [category["category_name"] for category in categories] + percentages = [category["percentage"] for category in categories] + attributes = { + category["category_name"]: category.get( + "attributes", + {distribution_name: category["category_name"]}, + ) + for category in categories + } + + # Convert percentages to actual numbers of samples + num_samples_per_category = (np.array(percentages) * total_num).astype( + int, + ) + + # Adjust any rounding errors to ensure total_num samples + while num_samples_per_category.sum() < total_num: + diff = total_num - num_samples_per_category.sum() + for i in range(diff): + # Add one to the first category that needs more samples + num_samples_per_category[ + i % len(num_samples_per_category) + ] += 1 + while num_samples_per_category.sum() > total_num: + diff = num_samples_per_category.sum() - total_num + for i in range(diff): + # Subtract one from the first category that has more samples + num_samples_per_category[ + i % len(num_samples_per_category) + ] -= 1 + + # Create samples for current distribution + category_samples = [] + for category, count in 
zip(category_names, num_samples_per_category): + category_samples.extend([category] * count) + + # Shuffle to avoid ordering biases + np.random.shuffle(category_samples) + + # Assign the generated samples to the overall sample list + for i in range(total_num): + samples[i].update(attributes[category_samples[i]]) + + return samples + + +def main( + config_path: str, + save_path: str, + worker_num: int = 5, + language: str = "en", +) -> None: + """The main function to generate persona""" + agentscope.init( + project="simulation", + name="persona_generation", + model_configs=MODEL_CONFIG, + ) + launcher = RpcAgentServerLauncher(custom_agent_classes=[PersonaGenerator]) + launcher.launch() + workers = [ + PersonaGenerator( + name="Generator", + model_config_name=MODEL_CONFIG_NAME, + language=language, + ).to_dist(host=launcher.host, port=launcher.port) + for _ in range(worker_num) + ] + samples = generate_samples(config_path) + print(samples) + results = [] + for i, sample in enumerate(samples): + results.append( + workers[i % worker_num]( + Msg( + name="system", + role="system", + content=sample, + ), + ), + ) + with open(save_path, "w", encoding="utf-8") as f: + for result in tqdm(results): + f.write( + json.dumps({"prompt": result.content}, ensure_ascii=False) + + "\n", + ) + launcher.shutdown() + + +def parse_args() -> Any: + """Parse args""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--config-path", + "-c", + type=str, + help="path of the config file", + ) + parser.add_argument( + "--save-path", + "-o", + type=str, + help="path of the output file", + ) + parser.add_argument( + "--worker-num", + "-w", + type=int, + default=5, + help="number of workers", + ) + parser.add_argument( + "--language", + choices=["en", "zh"], + default="en", + help="language of the config file", + ) + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + main(args.config_path, args.save_path, args.worker_num, args.language) diff --git a/setup.py b/setup.py index 1177bf54a..196f6669a 100644 --- a/setup.py +++ b/setup.py @@ -128,7 +128,13 @@ class CMakeExtension(Extension): """An extension over CMake.""" def __init__(self, name: str, sourcedir: str = "."): - Extension.__init__(self, name, sources=[], language="c++") + Extension.__init__( + self, + name, + sources=[], + language="c++", + optional=True, + ) self.sourcedir = os.path.abspath(sourcedir) @@ -157,34 +163,34 @@ def run(self) -> None: def build_extension(self, ext: Extension) -> None: """Build CPP server.""" - try: - extdir = os.path.abspath( - os.path.dirname(self.get_ext_fullpath(ext.name)), - ) - cmake_args = [ - "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir, - "-DPython3_EXECUTABLE=" + sys.executable, - "-Dpybind11_DIR=" - + os.path.join( - site.getsitepackages()[0], - "pybind11", - "share", - "cmake", - "pybind11", - ), - ] + extdir = os.path.abspath( + os.path.dirname(self.get_ext_fullpath(ext.name)), + ) + cmake_args = [ + "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir, + "-DPython3_EXECUTABLE=" + sys.executable, + "-Dpybind11_DIR=" + + os.path.join( + site.getsitepackages()[0], + "pybind11", + "share", + "cmake", + "pybind11", + ), + ] - cfg = "Release" - build_args = ["--config", cfg] - if self.debug: - cmake_args += ["-DCMAKE_CXX_FLAGS=-g -DDEBUG"] - else: - cmake_args += ["-DCMAKE_CXX_FLAGS=-O3"] + cfg = "Release" + build_args = ["--config", cfg] + if self.debug: + cmake_args += ["-DCMAKE_CXX_FLAGS=-g -DDEBUG"] + else: + cmake_args += ["-DCMAKE_CXX_FLAGS=-O3"] - cmake_args += ["-DCMAKE_BUILD_TYPE=" + 
cfg] + cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg] - if not os.path.exists(self.build_temp): - os.makedirs(self.build_temp) + if not os.path.exists(self.build_temp): + os.makedirs(self.build_temp) + try: subprocess.check_call( ["cmake", "-B", "build", ext.sourcedir] + cmake_args, cwd=self.build_temp, @@ -198,7 +204,7 @@ def build_extension(self, ext: Extension) -> None: cwd=self.build_temp, ) except Exception as e: - print(f"CPP server build failed with {e}.") + print(f"CPP server build failed with {e}.", file=sys.stderr) setuptools.setup( diff --git a/src/agentscope/agents/agent.py b/src/agentscope/agents/agent.py index c4e02fa4b..85d3fba5f 100644 --- a/src/agentscope/agents/agent.py +++ b/src/agentscope/agents/agent.py @@ -7,7 +7,6 @@ from typing import Sequence from typing import Union from typing import Any -from typing import Type import json import uuid from loguru import logger @@ -108,39 +107,6 @@ def generate_agent_id(cls) -> str: # TODO: change cls.__name__ into a global unique agent_type return uuid.uuid4().hex - # todo: add a unique agent_type field to distinguish different agent class - @classmethod - def get_agent_class(cls, agent_class_name: str) -> Type[AgentBase]: - """Get the agent class based on the specific agent class name. - - Args: - agent_class_name (`str`): the name of the agent class. - - Raises: - ValueError: Agent class name not exits. - - Returns: - Type[AgentBase]: the AgentBase subclass. - """ - if agent_class_name not in cls._registry: - raise ValueError(f"Agent class <{agent_class_name}> not found.") - return cls._registry[agent_class_name] # type: ignore[return-value] - - @classmethod - def register_agent_class(cls, agent_class: Type[AgentBase]) -> None: - """Register the agent class into the registry. - - Args: - agent_class (Type[AgentBase]): the agent class to be registered. - """ - agent_class_name = agent_class.__name__ - if agent_class_name in cls._registry: - logger.info( - f"Agent class with name [{agent_class_name}] already exists.", - ) - else: - cls._registry[agent_class_name] = agent_class - @async_func def reply(self, x: Optional[Union[Msg, Sequence[Msg]]] = None) -> Msg: """Define the actions taken by this agent. 
diff --git a/src/agentscope/constants.py b/src/agentscope/constants.py
index e5421d68f..7fb31338f 100644
--- a/src/agentscope/constants.py
+++ b/src/agentscope/constants.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 """ Some constants used in the project"""
+import os
 from numbers import Number
 from enum import IntEnum
 
@@ -19,7 +20,16 @@
 _DEFAULT_SUBDIR_CODE = "code"
 _DEFAULT_SUBDIR_FILE = "file"
 _DEFAULT_SUBDIR_INVOKE = "invoke"
-_DEFAULT_CACHE_DIR = str(Path.home() / ".cache" / "agentscope")
+_DEFAULT_CACHE_DIR = str(
+    Path(
+        os.environ.get(
+            "AS_HOME_PATH",
+            str(Path.home()),
+        ),
+    )
+    / ".cache"
+    / "agentscope",
+)
 _DEFAULT_CFG_NAME = ".config"
 _DEFAULT_IMAGE_NAME = "image_{}_{}.png"
 _DEFAULT_SQLITE_DB_NAME = "agentscope.db"
diff --git a/src/agentscope/cpp_server/BUILD b/src/agentscope/cpp_server/BUILD
new file mode 100644
index 000000000..56677fd4c
--- /dev/null
+++ b/src/agentscope/cpp_server/BUILD
@@ -0,0 +1,19 @@
+load("@pybind11_bazel//:build_defs.bzl", "pybind_library")
+# load("@pybind11//:workspace.bzl", "pybind11_library")
+
+# pybind11_library(
+pybind_library(
+    name = "cpp_server",
+    srcs = [
+        "worker.cc",
+        "rpc_agent_servicer.cc",
+    ],
+    hdrs = ["worker.h"],
+    deps = [
+        "//src/agentscope/rpc:worker_cc_proto",
+        "//src/agentscope/rpc:server_grpc",
+        # "@pybind11//:pybind11",
+        # "@rules_python//python/cc:current_py_cc_libs",
+        "@com_github_grpc_grpc//:grpc++"
+    ],
+)
\ No newline at end of file
diff --git a/src/agentscope/cpp_server/worker.cc b/src/agentscope/cpp_server/worker.cc
index 136e05eba..a25ad4fb4 100644
--- a/src/agentscope/cpp_server/worker.cc
+++ b/src/agentscope/cpp_server/worker.cc
@@ -757,6 +757,7 @@ string Worker::call_get_agent_list() {
     LOG(FORMAT(result_list.size()));
     py::gil_scoped_acquire acquire;
     string final_result = _serialize(result_list).cast<string>();
+    PY_LOG("info", "call_get_agent_list = [" + final_result + "]");
     return final_result;
 }
 
diff --git a/src/agentscope/environment/env.py b/src/agentscope/environment/env.py
index e8b6c6af5..5b3a5ca68 100644
--- a/src/agentscope/environment/env.py
+++ b/src/agentscope/environment/env.py
@@ -5,6 +5,7 @@
 from typing import Any, List, Callable
 from concurrent.futures import ThreadPoolExecutor
 import inspect
+from loguru import logger
 from ..exception import (
     EnvNotFoundError,
     EnvAlreadyExistError,
@@ -270,6 +271,8 @@ def set_parent(self, parent: Env) -> None:
         Args:
             parent (`Env`): The parent env.
         """
+        if self.parent is not None:
+            self.parent.remove_child(self.name)
         self.parent = parent
 
     def get_children(self) -> dict[str, Env]:
@@ -326,6 +329,13 @@ def add_listener(self, target_event: str, listener: EventListener) -> bool:
             if listener.name not in self.event_listeners[target_event]:
                 self.event_listeners[target_event][listener.name] = listener
                 return True
+            else:
+                logger.warning(
+                    f"Listener {listener.name} already "
+                    f"exists in {target_event}",
+                )
+        else:
+            logger.warning(f"Event {target_event} does not exist")
         return False
 
     def remove_listener(self, target_event: str, listener_name: str) -> bool:
@@ -342,6 +352,13 @@ def remove_listener(self, target_event: str, listener_name: str) -> bool:
             if listener_name in self.event_listeners[target_event]:
                 del self.event_listeners[target_event][listener_name]
                 return True
+            else:
+                logger.warning(
+                    f"Listener {listener_name} does not"
+                    f" exist in {target_event}",
+                )
+        else:
+            logger.warning(f"Event {target_event} does not exist")
         return False
 
     def get_listeners(self, target_event: str) -> List[EventListener]:
@@ -376,5 +393,6 @@ def __setitem__(self, env_name: str, env: Env) -> None:
         if env_name not in self.children:
             self.children[env_name] = env
             env.set_parent(self)
+            logger.debug(f"Set Env[{env_name}] as child of Env[{self.name}]")
         else:
             raise EnvAlreadyExistError(env_name)
diff --git a/src/agentscope/models/response.py b/src/agentscope/models/response.py
index b034a4197..661ccaeac 100644
--- a/src/agentscope/models/response.py
+++ b/src/agentscope/models/response.py
@@ -53,7 +53,7 @@ def text(self) -> str:
         if self._text is None:
             if self.stream is not None:
                 for _, chunk in self.stream:
-                    self._text += chunk
+                    self._text = chunk
         return self._text
 
     @text.setter
diff --git a/src/agentscope/rag/llama_index_knowledge.py b/src/agentscope/rag/llama_index_knowledge.py
index 142f71068..b886825ff 100644
--- a/src/agentscope/rag/llama_index_knowledge.py
+++ b/src/agentscope/rag/llama_index_knowledge.py
@@ -203,8 +203,9 @@ def __init__(
         )
 
         if persist_root is None:
-            persist_root = FileManager.get_instance().run_dir or "./"
+            persist_root = FileManager.get_instance().cache_dir or "./"
         self.persist_dir = os.path.join(persist_root, knowledge_id)
+        logger.info(f"** persist_dir: {self.persist_dir}")
         self.emb_model = emb_model
         self.overwrite_index = overwrite_index
         self.showprogress = showprogress
diff --git a/src/agentscope/rpc/BUILD b/src/agentscope/rpc/BUILD
new file mode 100644
index 000000000..8081e148d
--- /dev/null
+++ b/src/agentscope/rpc/BUILD
@@ -0,0 +1,40 @@
+# load("@io_bazel_rules_protobuf//:protobuf.bzl", "proto_library", "cc_proto_library")
+# load("@com_github_grpc_grpc//bazel:grpc_build_system.bzl", "grpc_cc_library")
+# load("@com_github_grpc_grpc//:build_defs.bzl", "grpc_cc_library")
+load("@com_google_protobuf//:protobuf.bzl", "proto_library")
+load("@com_github_grpc_grpc//bazel:grpc_build_system.bzl", "grpc_cc_library")
+
+# Generate protobuf files
+proto_library(
+    name = "server_proto",
+    srcs = ["rpc_agent.proto"],
+)
+
+proto_library(
+    name = "worker_proto",
+    srcs = ["worker_args.proto"],
+)
+# proto_library(
+#     name = "server_proto",
+#     srcs = ["rpc_agent.proto", "worker_args.proto"],
+# )
+
+cc_proto_library(
+    name = "server_cc_proto",
+    deps = [":server_proto"]
+)
+
+cc_proto_library(
+    name = "worker_cc_proto",
+    deps = [":worker_proto"]
+)
+
+# Generate gRPC C++ files
+grpc_cc_library(
+    name = "server_grpc",
+    srcs = [":server_proto"],
+    deps = [
+        "@com_github_grpc_grpc//:grpc",
+        "@com_google_protobuf//:protobuf",
+    ],
+)
\ No newline at end of file
diff --git a/src/agentscope/rpc/retry_strategy.py b/src/agentscope/rpc/retry_strategy.py
index 230b57917..2646e0fd3 100644
--- a/src/agentscope/rpc/retry_strategy.py
+++ b/src/agentscope/rpc/retry_strategy.py
@@ -5,6 +5,7 @@
 from __future__ import annotations
 import time
 import random
+import inspect
 from abc import ABC, abstractmethod
 from typing import Callable, Any
 from functools import partial
@@ -71,18 +72,26 @@ def retry(  # pylint: disable=R1710
         *args: Any,
         **kwargs: Any,
     ) -> Any:
+        exception_type = kwargs.pop("expect_exception_type", Exception)
         func = partial(func, *args, **kwargs)
         for attempt in range(self.max_retries + 1):
             try:
                 return func()
-            except Exception as e:
+            except exception_type as e:
                 if attempt == self.max_retries:
                     raise TimeoutError("Max timeout exceeded.") from e
                 random_delay = (random.random() + 0.5) * self.delay
+                frame_info = inspect.getframeinfo(
+                    inspect.currentframe().f_back,  # type: ignore[arg-type]
+                )
+                file_name = frame_info.filename
+                line_number = frame_info.lineno
                 logger.info(
-                    f"Attempt {attempt + 1} failed: {e}. Retrying in {random_delay} seconds...",
+                    f"Attempt {attempt + 1} at [{file_name}:{line_number}] failed:"
+                    f"\n{e}.\nRetrying in {random_delay:.2f} seconds...",
                 )
                 time.sleep(random_delay)
+        raise TimeoutError("Max retry exceeded.")
 
 
 class RetryExpential(RetryBase):
@@ -130,23 +139,31 @@ def retry(  # pylint: disable=R1710
         *args: Any,
         **kwargs: Any,
     ) -> Any:
+        exception_type = kwargs.pop("expect_exception_type", Exception)
         func = partial(func, *args, **kwargs)
         delay = self.base_delay
         for attempt in range(self.max_retries + 1):
             try:
                 return func()
-            except Exception as e:
+            except exception_type as e:
                 if attempt == self.max_retries:
                     raise TimeoutError("Max timeout exceeded.") from e
                 random_delay = min(
                     (random.random() + 0.5) * delay,
                     self.max_delay,
                 )
+                frame_info = inspect.getframeinfo(
+                    inspect.currentframe().f_back,  # type: ignore[arg-type]
+                )
+                file_name = frame_info.filename
+                line_number = frame_info.lineno
                 logger.info(
-                    f"Attempt {attempt + 1} failed: {e}. Retrying in {random_delay} seconds...",
+                    f"Attempt {attempt + 1} at [{file_name}:{line_number}] failed:"
+                    f"\n{e}.\nRetrying in {random_delay:.2f} seconds...",
                 )
                 time.sleep(random_delay)
                 delay *= 2
+        raise TimeoutError("Max retry exceeded.")
 
 
 _DEAFULT_RETRY_STRATEGY = RetryFixedTimes(max_retries=10, delay=5)
diff --git a/src/agentscope/rpc/rpc_meta.py b/src/agentscope/rpc/rpc_meta.py
index b03adc11c..6f0668bca 100644
--- a/src/agentscope/rpc/rpc_meta.py
+++ b/src/agentscope/rpc/rpc_meta.py
@@ -4,7 +4,6 @@
 from typing import Any, Callable
 import uuid
 import copy
-import os
 from loguru import logger
 
 from .rpc_object import RpcObject, _ClassInfo
@@ -55,7 +54,16 @@ def generate_oid() -> str:
 
 
 class RpcMeta(ABCMeta):
-    """The metaclass for all classes that can run on rpc server."""
+    """The metaclass for all classes that can use `to_dist` and other
+    distributed-related functionality.
+
+    Note:
+
+        The `RpcMeta` will automatically add the `to_dist` method and
+        initialization parameter to its subclasses. It will also
+        detect all public functions and register them into the `_info`
+        attribute of the class.
+    """
 
     _REGISTRY = {}
     _SERVER_CONFIG = {}
diff --git a/src/agentscope/rpc/rpc_object.py b/src/agentscope/rpc/rpc_object.py
index 10def1aa9..7d4ba5a50 100644
--- a/src/agentscope/rpc/rpc_object.py
+++ b/src/agentscope/rpc/rpc_object.py
@@ -74,11 +74,20 @@ def detect(self, attrs: dict) -> None:
 
 
 class RpcObject(ABC):
-    """A proxy object which represent an object located in a rpc server."""
+    """A proxy object which represents an object located in an RPC server.
+
+    Note:
+
+        When `to_dist` is called on an object or when using the `to_dist`
+        parameter to create an object, the object is moved to an RPC server,
+        and an `RpcObject` instance is left behind in the local process.
+        The `RpcObject` will automatically forward all public method
+        invocations to the original object in the RPC server.
+    """
 
     def __init__(  # pylint: disable=R0912
         self,
-        cls: Union[type, _ClassInfo],
+        cls: type,
         oid: str,
         host: str,
         port: int,
@@ -117,10 +126,7 @@ def __init__(  # pylint: disable=R0912
         self.host = host
         self.port = port
         self._oid = oid
-        if isinstance(cls, _ClassInfo):
-            self._cls = cls
-        else:
-            self._cls = cls._info
+        self._cls = cls
         self.connect_existing = connect_existing
         self.executor = ThreadPoolExecutor(max_workers=1)
         if isinstance(retry_strategy, RetryBase):
@@ -149,10 +155,6 @@ def __init__(  # pylint: disable=R0912
             studio_url = None
             if _studio_client.active:
                 studio_url = _studio_client.studio_url
-            assert isinstance(
-                cls,
-                type,
-            ), "RpcAgentServer need a class as input"
             self.server_launcher = RpcAgentServerLauncher(
                 host=self.host,
                 port=self.port,
@@ -184,7 +186,7 @@ def create(self, configs: dict) -> None:
 
     def __call__(self, *args: Any, **kwargs: Any) -> Any:
         self._check_created()
-        if "__call__" in self._cls.async_func:
+        if "__call__" in self._cls._info.async_func:
             return self._async_func("__call__")(*args, **kwargs)
         else:
             return self._call_func(
@@ -261,11 +263,11 @@ def sync_wrapper(*args, **kwargs) -> Any:  # type: ignore[no-untyped-def]
 
     def __getattr__(self, name: str) -> Callable:
         self._check_created()
-        if name in self._cls.async_func:
+        if name in self._cls._info.async_func:
             # for async functions
             return self._async_func(name)
 
-        elif name in self._cls.sync_func:
+        elif name in self._cls._info.sync_func:
             # for sync functions
             return self._sync_func(name)
 
diff --git a/src/agentscope/server/launcher.py b/src/agentscope/server/launcher.py
index a755667ed..12f6038fe 100644
--- a/src/agentscope/server/launcher.py
+++ b/src/agentscope/server/launcher.py
@@ -235,11 +235,11 @@ async def _setup_agent_server_async(  # pylint: disable=R0912,R0915
     # update agent registry
     for cls in custom_classes:
         RpcMeta.register_class(cls)
-    RpcMeta._SERVER_CONFIG = {
+    RpcMeta._SERVER_CONFIG = {  # pylint: disable=W0212
        "host": host,
         "port": port,
     }
-    RpcMeta._AUTO_DIST = auto_dist
+    RpcMeta._AUTO_DIST = auto_dist  # pylint: disable=W0212
 
     async def shutdown_signal_handler() -> None:
         logger.info(
@@ -301,11 +301,11 @@ async def shutdown_signal_handler() -> None:
             start_event.set()
             if stop_event is not None:
                 stop_event.wait()
+                shutdown_cpp_server()
                 logger.info(
                     f"CPP agent server [{server_id}] at {host}:{port} "
                     "stopped successfully",
                 )
-                shutdown_cpp_server()
             return
 
     servicer = AgentServerServicer(
@@ -603,7 +603,7 @@ def shutdown(self) -> None:
         if self.stop_event is not None:
             self.stop_event.set()
             self.stop_event = None
-        self.server.join()
+        self.server.join(10)
         if self.server.is_alive():
             self.server.kill()
             logger.info(
diff --git a/src/agentscope/studio/_app.py b/src/agentscope/studio/_app.py
index e7f9bad73..c474b805e 100644
--- a/src/agentscope/studio/_app.py
+++ b/src/agentscope/studio/_app.py
@@ -30,6 +30,7 @@
 from flask_socketio import SocketIO, join_room, leave_room
 
 from ..constants import (
+    _DEFAULT_CACHE_DIR,
     _DEFAULT_SUBDIR_CODE,
     _DEFAULT_SUBDIR_INVOKE,
     FILE_SIZE_LIMIT,
@@ -47,7 +48,7 @@
 _app = Flask(__name__)
 
 # Set the cache directory
-_cache_dir = Path.home() / ".cache" / "agentscope-studio"
+_cache_dir = Path(_DEFAULT_CACHE_DIR) / "studio"
 _cache_db = _cache_dir / "agentscope.db"
 os.makedirs(str(_cache_dir), exist_ok=True)
 
diff --git a/tests/environment_test.py b/tests/environment_test.py
index a3fa2d982..636cc5ca5 100644
--- a/tests/environment_test.py
+++ b/tests/environment_test.py
@@ -73,6 +73,7 @@ def __init__(  # pylint: disable=W0613
     ) -> None:
         super().__init__(name=name)
         self.room = None
+        self.introduction = ""
         self.event_list = []
 
     def join(self, room: ChatRoom) -> bool:
diff --git a/tests/rpc_agent_test.py b/tests/rpc_agent_test.py
index 5b7fbff07..fdd541332 100644
--- a/tests/rpc_agent_test.py
+++ b/tests/rpc_agent_test.py
@@ -931,8 +931,11 @@ def test_agent_server_management_funcs(self) -> None:
             },
         )
         self.assertIsNotNone(dia_agent)
+        logger.info("test_agent_server_management_funcs 1.")
         self.assertTrue(client.delete_all_agent())
+        logger.info("test_agent_server_management_funcs 2.")
         self.assertEqual(len(client.get_agent_list()), 0)
+        logger.info("test_agent_server_management_funcs passed.")
         # client.stop()
         # time.sleep(1)
         # self.assertFalse(client.is_alive())
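A behavioral note on the `retry_strategy.py` hunk above: `retry` now pops an `expect_exception_type` keyword before binding the wrapped callable, so only the given exception type triggers a retry (anything else propagates immediately), and the new trailing `raise TimeoutError("Max retry exceeded.")` guards the fall-through path. A minimal usage sketch, assuming the module path of the file touched in this patch; `flaky_call` is hypothetical:

    import random

    from agentscope.rpc.retry_strategy import RetryFixedTimes

    def flaky_call() -> str:
        # Fail randomly so the retry loop has work to do.
        if random.random() < 0.5:
            raise ConnectionError("transient failure")
        return "ok"

    retry = RetryFixedTimes(max_retries=3, delay=1)
    # Only ConnectionError is retried; other exceptions propagate at once.
    result = retry.retry(flaky_call, expect_exception_type=ConnectionError)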