Skip to content

Commit

Permalink
feat: merge main
Browse files Browse the repository at this point in the history
  • Loading branch information
莘权 马 committed Aug 6, 2024
2 parents 9086ccc + 22e1009 commit 8a92fa0
Show file tree
Hide file tree
Showing 404 changed files with 20,078 additions and 1,165 deletions.
7 changes: 7 additions & 0 deletions .coveragerc
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
[run]
source =
./metagpt/
omit =
*/metagpt/environment/android/*
*/metagpt/ext/android_assistant/*
*/metagpt/ext/werewolf/*
4 changes: 2 additions & 2 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
{
"name": "Python 3",
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
"image": "mcr.microsoft.com/devcontainers/python:0-3.11",
"image": "metagpt/metagpt:latest",

// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},
Expand All @@ -18,7 +18,7 @@
]
}
},

// Use 'postCreateCommand' to run commands after the container is created.
"postCreateCommand": "./.devcontainer/postCreateCommand.sh"

Expand Down
1 change: 1 addition & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
*.ico binary
*.jpeg binary
*.mp3 binary
*.mp4 binary
*.zip binary
*.bin binary

Expand Down
1 change: 1 addition & 0 deletions .github/ISSUE_TEMPLATE/show_me_the_bug.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ assignees: ''
- LLM type and model name:
- System version:
- Python version:
- MetaGPT version or branch:

<!-- Dependent packages: the package versions that cause the bug (like `pydantic 1.10.8`), installation method (like `pip install metagpt` or `pip install from source` or `run in docker`) -->

Expand Down
3 changes: 2 additions & 1 deletion .github/workflows/build-package.yaml
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
name: Build and upload python package

on:
workflow_dispatch:
release:
types: [created]
types: [created, published]

jobs:
deploy:
Expand Down
5 changes: 4 additions & 1 deletion .github/workflows/fulltest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,10 @@ jobs:
cache: 'pip'
- name: Install dependencies
run: |
sh tests/scripts/run_install_deps.sh
python -m pip install --upgrade pip
pip install -e .[test]
npm install -g @mermaid-js/mermaid-cli
playwright install --with-deps
- name: Run reverse proxy script for ssh service
if: contains(github.ref, '-debugger')
continue-on-error: true
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/pre-commit.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ on:
jobs:
pre-commit-check:
runs-on: ubuntu-latest
environment: pre-commit
steps:
- name: Checkout Source Code
uses: actions/checkout@v2
Expand Down
45 changes: 41 additions & 4 deletions .github/workflows/unittest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -27,20 +27,57 @@ jobs:
cache: 'pip'
- name: Install dependencies
run: |
sh tests/scripts/run_install_deps.sh
python -m pip install --upgrade pip
pip install -e .[test]
npm install -g @mermaid-js/mermaid-cli
playwright install --with-deps
- name: Test with pytest
run: |
export ALLOW_OPENAI_API_CALL=0
mkdir -p ~/.metagpt && cp tests/config2.yaml ~/.metagpt/config2.yaml
pytest tests/ --doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov --durations=20 | tee unittest.txt
pytest --continue-on-collection-errors tests/ \
--ignore=tests/metagpt/environment/android_env \
--ignore=tests/metagpt/ext/android_assistant \
--ignore=tests/metagpt/ext/stanford_town \
--ignore=tests/metagpt/provider/test_bedrock_api.py \
--ignore=tests/metagpt/rag/factories/test_embedding.py \
--ignore=tests/metagpt/ext/werewolf/actions/test_experience_operation.py \
--ignore=tests/metagpt/provider/test_openai.py \
--ignore=tests/metagpt/planner/test_action_planner.py \
--ignore=tests/metagpt/planner/test_basic_planner.py \
--ignore=tests/metagpt/actions/test_project_management.py \
--ignore=tests/metagpt/actions/test_write_code.py \
--ignore=tests/metagpt/actions/test_write_code_review.py \
--ignore=tests/metagpt/actions/test_write_prd.py \
--ignore=tests/metagpt/environment/werewolf_env/test_werewolf_ext_env.py \
--ignore=tests/metagpt/memory/test_brain_memory.py \
--ignore=tests/metagpt/roles/test_assistant.py \
--ignore=tests/metagpt/roles/test_engineer.py \
--ignore=tests/metagpt/serialize_deserialize/test_write_code_review.py \
--ignore=tests/metagpt/test_environment.py \
--ignore=tests/metagpt/test_llm.py \
--ignore=tests/metagpt/tools/test_metagpt_oas3_api_svc.py \
--ignore=tests/metagpt/tools/test_moderation.py \
--ignore=tests/metagpt/tools/test_search_engine.py \
--ignore=tests/metagpt/tools/test_tool_convert.py \
--ignore=tests/metagpt/tools/test_web_browser_engine_playwright.py \
--ignore=tests/metagpt/utils/test_mermaid.py \
--ignore=tests/metagpt/utils/test_redis.py \
--ignore=tests/metagpt/utils/test_tree.py \
--ignore=tests/metagpt/serialize_deserialize/test_sk_agent.py \
--ignore=tests/metagpt/utils/test_text.py \
--ignore=tests/metagpt/actions/di/test_write_analysis_code.py \
--ignore=tests/metagpt/provider/test_ark.py \
--doctest-modules --cov=./metagpt/ --cov-report=xml:cov.xml --cov-report=html:htmlcov \
--durations=20 | tee unittest.txt
- name: Show coverage report
run: |
coverage report -m
- name: Show failed tests and overall summary
run: |
grep -E "FAILED tests|ERROR tests|[0-9]+ passed," unittest.txt
failed_count=$(grep -E "FAILED|ERROR" unittest.txt | wc -l)
if [[ "$failed_count" -gt 0 ]]; then
failed_count=$(grep -E "FAILED tests|ERROR tests" unittest.txt | wc -l | tr -d '[:space:]')
if [[ $failed_count -gt 0 ]]; then
echo "$failed_count failed lines found! Task failed."
exit 1
fi
Expand Down
3 changes: 3 additions & 0 deletions MANIFEST.in
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
recursive-include metagpt/ext/stanford_town/prompts *.txt
recursive-include metagpt/ext/stanford_town/static_dirs *.csv
recursive-include metagpt/ext/stanford_town/static_dirs *.json
39 changes: 19 additions & 20 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,18 +26,16 @@
</p>

## News
🚀 Mar. 29, 2024: [v0.8.0](https://github.com/geekan/MetaGPT/releases/tag/v0.8.0) released. Now you can use Data Interpreter via pypi package import. Meanwhile, we integrated RAG module and supported multiple new LLMs.

🚀 Mar. 14, 2024: Our **Data Interpreter** paper is on [arxiv](https://arxiv.org/abs/2402.18679). Check the [example](https://docs.deepwisdom.ai/main/en/DataInterpreter/) and [code](https://github.com/geekan/MetaGPT/tree/main/examples/di)!
🚀 Mar. 29, 2024: [v0.8.0](https://github.com/geekan/MetaGPT/releases/tag/v0.8.0) released. Now you can use Data Interpreter ([arxiv](https://arxiv.org/abs/2402.18679), [example](https://docs.deepwisdom.ai/main/en/DataInterpreter/), [code](https://github.com/geekan/MetaGPT/tree/main/examples/di)) via pypi package import. Meanwhile, we integrated RAG module and supported multiple new LLMs.

🚀 Feb. 08, 2024: [v0.7.0](https://github.com/geekan/MetaGPT/releases/tag/v0.7.0) released, supporting assigning different LLMs to different Roles. We also introduced [Data Interpreter](https://github.com/geekan/MetaGPT/blob/main/examples/di/README.md), a powerful agent capable of solving a wide range of real-world problems.

🚀 Jan. 16, 2024: Our paper [MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework
](https://arxiv.org/abs/2308.00352) accepted for oral presentation **(top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.
](https://openreview.net/forum?id=VtmBAGCN7o) accepted for **oral presentation (top 1.2%)** at ICLR 2024, **ranking #1** in the LLM-based Agent category.

🚀 Jan. 03, 2024: [v0.6.0](https://github.com/geekan/MetaGPT/releases/tag/v0.6.0) released, new features include serialization, upgraded OpenAI package and supported multiple LLM, provided [minimal example for debate](https://github.com/geekan/MetaGPT/blob/main/examples/debate_simple.py) etc.

🚀 Dec. 15, 2023: [v0.5.0](https://github.com/geekan/MetaGPT/releases/tag/v0.5.0) released, introducing some experimental features such as **incremental development**, **multilingual**, **multiple programming languages**, etc.
🚀 Dec. 15, 2023: [v0.5.0](https://github.com/geekan/MetaGPT/releases/tag/v0.5.0) released, introducing some experimental features such as incremental development, multilingual, multiple programming languages, etc.

🔥 Nov. 08, 2023: MetaGPT is selected into [Open100: Top 100 Open Source achievements](https://www.benchcouncil.org/evaluation/opencs/annual.html).

Expand Down Expand Up @@ -85,8 +83,8 @@ You can configure `~/.metagpt/config2.yaml` according to the [example](https://g

```yaml
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
api_type: "openai" # or azure / ollama / groq etc. Check LLMType for more options
model: "gpt-4-turbo" # or gpt-3.5-turbo
base_url: "https://api.openai.com/v1" # or forward url / other llm url
api_key: "YOUR_API_KEY"
```
Expand All @@ -107,7 +105,7 @@ repo: ProjectRepo = generate_repo("Create a 2048 game") # or ProjectRepo("<path
print(repo) # it will print the repo structure with files
```

You can also use its [Data Interpreter](https://github.com/geekan/MetaGPT/tree/main/examples/di)
You can also use [Data Interpreter](https://github.com/geekan/MetaGPT/tree/main/examples/di) to write code:

```python
import asyncio
Expand Down Expand Up @@ -147,10 +145,13 @@ https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace

## Support

### Discard Join US
📢 Join Our [Discord Channel](https://discord.gg/ZRHeExS6xv)!
### Discord Join US

📢 Join Our [Discord Channel](https://discord.gg/ZRHeExS6xv)! Looking forward to seeing you there! 🎉

Looking forward to seeing you there! 🎉
### Contributor form

📝 [Fill out the form](https://airtable.com/appInfdG0eJ9J4NNL/pagK3Fh1sGclBvVkV/form) to become a contributor. We are looking forward to your participation!

### Contact Information

Expand All @@ -165,16 +166,15 @@ We will respond to all questions within 2-3 business days.

To stay updated with the latest research and development, follow [@MetaGPT_](https://twitter.com/MetaGPT_) on Twitter.

To cite [MetaGPT](https://arxiv.org/abs/2308.00352) or [Data Interpreter](https://arxiv.org/abs/2402.18679) in publications, please use the following BibTeX entries.
To cite [MetaGPT](https://openreview.net/forum?id=VtmBAGCN7o) or [Data Interpreter](https://arxiv.org/abs/2402.18679) in publications, please use the following BibTeX entries.

```bibtex
@misc{hong2023metagpt,
title={MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework},
author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Ceyao Zhang and Jinlin Wang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and Jürgen Schmidhuber},
year={2023},
eprint={2308.00352},
archivePrefix={arXiv},
primaryClass={cs.AI}
@inproceedings{hong2024metagpt,
title={Meta{GPT}: Meta Programming for A Multi-Agent Collaborative Framework},
author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Jinlin Wang and Ceyao Zhang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and J{\"u}rgen Schmidhuber},
booktitle={The Twelfth International Conference on Learning Representations},
year={2024},
url={https://openreview.net/forum?id=VtmBAGCN7o}
}
@misc{hong2024data,
title={Data Interpreter: An LLM Agent For Data Science},
Expand All @@ -184,6 +184,5 @@ To cite [MetaGPT](https://arxiv.org/abs/2308.00352) or [Data Interpreter](https:
archivePrefix={arXiv},
primaryClass={cs.AI}
}
```

51 changes: 41 additions & 10 deletions config/config2.example.yaml
Original file line number Diff line number Diff line change
@@ -1,17 +1,24 @@
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
api_type: "openai" # or azure / ollama / groq etc.
base_url: "YOUR_BASE_URL"
api_key: "YOUR_API_KEY"
model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
model: "gpt-4-turbo" # or gpt-3.5-turbo
proxy: "YOUR_PROXY" # for LLM API requests
# timeout: 600 # Optional. If set to 0, default value is 300.
pricing_plan: "" # Optional. If invalid, it will be automatically filled in with the value of the `model`.
# Azure-exclusive pricing plan mappings:
# - gpt-3.5-turbo 4k: "gpt-3.5-turbo-1106"
# - gpt-4-turbo: "gpt-4-turbo-preview"
# - gpt-4-turbo-vision: "gpt-4-vision-preview"
# - gpt-4 8k: "gpt-4"
# See for more: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
# Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's


# RAG Embedding.
# For backward compatibility, if the embedding is not set and the llm's api_type is either openai or azure, the llm's config will be used.
embedding:
api_type: "" # openai / azure / gemini / ollama etc. Check EmbeddingType for more options.
base_url: ""
api_key: ""
model: ""
api_version: ""
embed_batch_size: 100
dimensions: # output dimension of embedding model

repair_llm_output: true # when the output is not a valid json, try to repair it

Expand All @@ -28,7 +35,7 @@ browser:

mermaid:
engine: "pyppeteer"
path: "/Applications/Google Chrome.app"
pyppeteer_path: "/Applications/Google Chrome.app"

redis:
host: "YOUR_HOST"
Expand All @@ -52,3 +59,27 @@ iflytek_api_key: "YOUR_API_KEY"
iflytek_api_secret: "YOUR_API_SECRET"

metagpt_tti_url: "YOUR_MODEL_URL"

omniparse:
api_key: "YOUR_API_KEY"
base_url: "YOUR_BASE_URL"

models:
# "YOUR_MODEL_NAME_1 or YOUR_API_TYPE_1": # model: "gpt-4-turbo" # or gpt-3.5-turbo
# api_type: "openai" # or azure / ollama / groq etc.
# base_url: "YOUR_BASE_URL"
# api_key: "YOUR_API_KEY"
# proxy: "YOUR_PROXY" # for LLM API requests
# # timeout: 600 # Optional. If set to 0, default value is 300.
# # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
# pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
# "YOUR_MODEL_NAME_2 or YOUR_API_TYPE_2": # api_type: "openai" # or azure / ollama / groq etc.
# api_type: "openai" # or azure / ollama / groq etc.
# base_url: "YOUR_BASE_URL"
# api_key: "YOUR_API_KEY"
# proxy: "YOUR_PROXY" # for LLM API requests
# # timeout: 600 # Optional. If set to 0, default value is 300.
# # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
# pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's

agentops_api_key: "YOUR_AGENTOPS_API_KEY" # get key from https://app.agentops.ai/settings/projects
5 changes: 3 additions & 2 deletions config/config2.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
# Full Example: https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml
# Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
# Config Docs: https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
api_type: "openai" # or azure / ollama / groq etc.
model: "gpt-4-turbo" # or gpt-3.5-turbo
base_url: "https://api.openai.com/v1" # or forward url / other llm url
api_key: "YOUR_API_KEY"
5 changes: 5 additions & 0 deletions config/examples/anthropic-claude-3-5-sonnet.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
llm:
api_type: 'claude' # or anthropic
base_url: 'https://api.anthropic.com'
api_key: 'YOUR_API_KEY'
model: 'claude-3-5-sonnet-20240620' # or 'claude-3-opus-20240229'
10 changes: 10 additions & 0 deletions config/examples/aws-bedrock.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
llm:
api_type: 'bedrock'
access_key: 'YOUR_API_KEY'
secret_key: 'YOUR_API_SECRET'

region_name: "us-east-1"
model: "meta.llama2-70b-chat-v1"
# model: "anthropic.claude-3-sonnet-20240229-v1:0"
# model: "mistral.mixtral-8x7b-instruct-v0:1"
# model: "meta.llama2-13b-chat-v1"
4 changes: 4 additions & 0 deletions config/examples/google-gemini.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
llm:
api_type: 'gemini'
api_key: 'YOUR_API_KEY'
model: 'gemini-pro'
5 changes: 5 additions & 0 deletions config/examples/groq-llama3-70b.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
llm:
# Visit https://console.groq.com/keys to create api key
base_url: "https://api.groq.com/openai/v1"
api_key: "YOUR_API_KEY"
model: "llama3-70b-8192" # llama3-8b-8192,llama3-70b-8192,llama2-70b-4096 ,mixtral-8x7b-32768,gemma-7b-it
5 changes: 5 additions & 0 deletions config/examples/huoshan_ark.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
llm:
api_type: "ark"
model: "" # your model endpoint like ep-xxx
base_url: "https://ark.cn-beijing.volces.com/api/v3"
api_key: "" # your api-key like ey……
5 changes: 5 additions & 0 deletions config/examples/openai-gpt-3.5-turbo.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
llm:
api_key: "YOUR_API_KEY"
model: "gpt-3.5-turbo"
#proxy: "http://<ip>:<port>"
#base_url: "https://<forward_url>/v1"
6 changes: 6 additions & 0 deletions config/examples/openai-gpt-4-turbo.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
llm:
api_key: "YOUR_API_KEY"
model: "gpt-4-turbo"
#proxy: "http://<ip>:<port>"
#base_url: "https://<forward_url>/v1"

5 changes: 5 additions & 0 deletions config/examples/openrouter-llama3-70b-instruct.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
llm:
api_type: openrouter
base_url: "https://openrouter.ai/api/v1"
api_key: "YOUR_API_KEY"
model: meta-llama/llama-3-70b-instruct
10 changes: 10 additions & 0 deletions config/examples/spark_lite.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# 适用于讯飞星火的spark-lite 参考 https://www.xfyun.cn/doc/spark/Web.html#_2-function-call%E8%AF%B4%E6%98%8E

llm:
api_type: "spark"
# 对应模型的url 参考 https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E
base_url: "ws(s)://spark-api.xf-yun.com/v1.1/chat"
app_id: ""
api_key: ""
api_secret: ""
domain: "general" # 取值为 [general,generalv2,generalv3,generalv3.5] 和url一一对应
8 changes: 3 additions & 5 deletions config/puppeteer-config.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
{
"executablePath": "/usr/bin/chromium",
"args": [
"--no-sandbox"
]
}
"executablePath": "/usr/bin/chromium",
"args": ["--no-sandbox"]
}
Loading

0 comments on commit 8a92fa0

Please sign in to comment.