# Commit 7723b6c

Merge pull request #6 from codefuse-ai/pr_version_update

[bugfix] v0.0.4 chat and codechat bug

2 parents: 8225f6b + 2c3eab3 — commit 7723b6c

File tree: 9 files changed (+52 −33 lines)

**`.gitignore`** (+2 −1)

```diff
@@ -13,4 +13,5 @@ package.sh
 setup_test.py
 build
 *egg-info
-dist
+dist
+.ipynb_checkpoints
```

**`docs/overview/o1.muagent.md`** (+2 −2)

```diff
@@ -21,7 +21,7 @@ aliases:
 本项目的Mutli-Agent框架汲取兼容了多个框架的优秀设计,比如metaGPT中的消息池(message pool)、autogen中的代理选择器(agent selector)等。

 <div align=center>
-<img src="/docs/resources/muagent_framework.png" alt="图片" style="width: 500px; height:auto;">
+<img src="/docs/resources/muAgent_framework.png" alt="图片" style="width: 500px; height:auto;">
 </div>

@@ -42,7 +42,7 @@ aliases:
 1. BaseAgent:提供基础问答、工具使用、代码执行的功能,根据Prompt格式实现 输入 => 输出

 <div align=center>
-<img src="/docs/resources/baseagent.png" alt="图片" style="width: 500px; height:auto;">
+<img src="/docs/resources/BaseAgent.png" alt="图片" style="width: 500px; height:auto;">
 </div>

 2. ReactAgent:提供标准React的功能,根据问题实现当前任务
```

**`examples/muagent_examples/baseGroup_example.py`** (−1)

```diff
@@ -70,7 +70,6 @@
 source_file = 'D://project/gitlab/llm/external/ant_code/Codefuse-chatbot/jupyter_work/employee_data.csv'
 shutil.copy(source_file, JUPYTER_WORK_PATH)

-
 # round-1
 query_content = "确认本地是否存在employee_data.csv,并查看它有哪些列和数据类型;然后画柱状图"
 query = Message(
```

**`muagent/chat/agent_chat.py`** (+9 −4)

```diff
@@ -79,6 +79,7 @@ def chat(
 model_name: str = Body("", description="llm模型名称"),
 temperature: float = Body(0.2, description=""),
 chat_index: str = "",
+local_graph_path: str = "",
 **kargs
 ) -> Message:

@@ -122,7 +123,8 @@ def chat(
 code_engine_name=code_engine_name,
 score_threshold=score_threshold, top_k=top_k,
 history_node_list=history_node_list,
-tools=tools
+tools=tools,
+local_graph_path=local_graph_path
 )
 # history memory mangemant
 history = Memory(messages=[

@@ -223,6 +225,7 @@ def achat(
 model_name: str = Body("", description="llm模型名称"),
 temperature: float = Body(0.2, description=""),
 chat_index: str = "",
+local_graph_path: str = "",
 **kargs
 ) -> Message:

@@ -264,7 +267,8 @@ def achat(
 cb_search_type=cb_search_type,
 score_threshold=score_threshold, top_k=top_k,
 history_node_list=history_node_list,
-tools=tools
+tools=tools,
+local_graph_path=local_graph_path
 )
 # history memory mangemant
 history = Memory(messages=[

@@ -292,7 +296,8 @@ def achat(

 def chat_iterator(message: Message, local_memory: Memory, isDetailed=False):
 step_content = local_memory.to_str_messages(content_key='step_content', filter_roles=["human"])
-step_content = "\n\n".join([f"{v}" for parsed_output in local_memory.get_parserd_output_list()[1:] for k, v in parsed_output.items() if k not in ["Action Status"]])
+step_content = "\n\n".join([f"{v}" for parsed_output in local_memory.get_parserd_output_list() for k, v in parsed_output.items() if k not in ["Action Status", "human", "user"]])
+# logger.debug(f"{local_memory.get_parserd_output_list()}")
 final_content = message.role_content
 result = {
 "answer": "",

@@ -311,7 +316,7 @@ def chat_iterator(message: Message, local_memory: Memory, isDetailed=False):
 if node not in has_nodes:
 related_nodes.append(node)
 result["related_nodes"] = related_nodes
-
+
 # logger.debug(f"{result['figures'].keys()}, isDetailed: {isDetailed}")
 message_str = step_content
 if self.stream:
```

**`muagent/chat/code_chat.py`** (+9 −7)

```diff
@@ -53,7 +53,7 @@ def check_service_status(self) -> BaseResponse:
 return BaseResponse(code=404, msg=f"未找到代码库 {self.engine_name}")
 return BaseResponse(code=200, msg=f"找到代码库 {self.engine_name}")

-def _process(self, query: str, history: List[History], model, llm_config: LLMConfig, embed_config: EmbedConfig):
+def _process(self, query: str, history: List[History], model, llm_config: LLMConfig, embed_config: EmbedConfig, local_graph_path=""):
 '''process'''

 codes_res = search_code(query=query, cb_name=self.engine_name, code_limit=self.code_limit,

@@ -67,7 +67,8 @@ def _process(self, query: str, history: List[History], model, llm_config: LLMCon
 embed_model_path=embed_config.embed_model_path,
 embed_engine=embed_config.embed_engine,
 model_device=embed_config.model_device,
-embed_config=embed_config
+embed_config=embed_config,
+local_graph_path=local_graph_path
 )

 context = codes_res['context']

@@ -115,6 +116,7 @@ def chat(
 model_name: str = Body("", ),
 temperature: float = Body(0.5, ),
 model_device: str = Body("", ),
+local_graph_path: str=Body(", "),
 **kargs
 ):
 params = locals()

@@ -127,9 +129,9 @@ def chat(
 self.local_doc_url = local_doc_url if isinstance(local_doc_url, bool) else local_doc_url.default
 self.request = request
 self.cb_search_type = cb_search_type
-return self._chat(query, history, llm_config, embed_config, **kargs)
+return self._chat(query, history, llm_config, embed_config, local_graph_path, **kargs)

-def _chat(self, query: str, history: List[History], llm_config: LLMConfig, embed_config: EmbedConfig, **kargs):
+def _chat(self, query: str, history: List[History], llm_config: LLMConfig, embed_config: EmbedConfig, local_graph_path: str, **kargs):
 history = [History(**h) if isinstance(h, dict) else h for h in history]

 service_status = self.check_service_status()

@@ -140,7 +142,7 @@ def chat_iterator(query: str, history: List[History]):
 # model = getChatModel()
 model = getChatModelFromConfig(llm_config)

-result, content = self.create_task(query, history, model, llm_config, embed_config, **kargs)
+result, content = self.create_task(query, history, model, llm_config, embed_config, local_graph_path, **kargs)
 # logger.info('result={}'.format(result))
 # logger.info('content={}'.format(content))

@@ -156,9 +158,9 @@ def chat_iterator(query: str, history: List[History]):
 return StreamingResponse(chat_iterator(query, history),
 media_type="text/event-stream")

-def create_task(self, query: str, history: List[History], model, llm_config: LLMConfig, embed_config: EmbedConfig):
+def create_task(self, query: str, history: List[History], model, llm_config: LLMConfig, embed_config: EmbedConfig, local_graph_path: str):
 '''构建 llm 生成任务'''
-chain, context, result = self._process(query, history, model, llm_config, embed_config)
+chain, context, result = self._process(query, history, model, llm_config, embed_config, local_graph_path)
 logger.info('chain={}'.format(chain))
 try:
 content = chain({"context": context, "question": query})
```

**`muagent/codechat/code_analyzer/code_intepreter.py`** (+4 −2)

```diff
@@ -54,9 +54,11 @@ def get_intepretation_batch(self, code_list):
 messages.append(message)

 try:
-chat_ress = [chat_model(messages) for message in messages]
-except:
+chat_ress = [chat_model.predict(message) for message in messages]
+except Exception as e:
+logger.exception(f"{e}")
 chat_ress = chat_model.batch(messages)
+
 for chat_res, code in zip(chat_ress, code_list):
 try:
 res[code] = chat_res.content
```

**`muagent/connector/memory_manager.py`** (+21 −13)

```diff
@@ -590,19 +590,23 @@ def append_tools(self, tool_information: dict, chat_index: str, nodeid: str, use
 }

 for k, v in tool_map.items():
-message = Message(
-chat_index=chat_index,
-message_index= f"{nodeid}-{uuid.uuid4()}",
-user_name=user_name,
-role_name = v["role_name"], # agent 名字,
-role_type = v["role_type"], # agent 类型,默认assistant,可选observation
-## llm output
-role_content = tool_information[k], # 输入
-customed_kargs = {
-**{kk: vv for kk, vv in tool_information.items()
-if kk in v.get("customed_keys", [])}
-} # 存储docs、tool等信息
-)
+try:
+message = Message(
+chat_index=chat_index,
+#message_index= f"{nodeid}-{uuid.uuid4()}",
+message_index= f"{nodeid}-{k}",
+user_name=user_name,
+role_name = v["role_name"], # agent 名字,
+role_type = v["role_type"], # agent 类型,默认assistant,可选observation
+## llm output
+role_content = tool_information[k], # 输入
+customed_kargs = {
+**{kk: vv for kk, vv in tool_information.items()
+if kk in v.get("customed_keys", [])}
+} # 存储docs、tool等信息
+)
+except:
+pass
 self.append(message)

 def get_memory_pool(self, chat_index: str = "") -> Memory:

@@ -802,12 +806,16 @@ def tbasedoc2Memory(self, r_docs) -> Memory:
 for doc in r_docs.docs:
 tbase_message = {}
 for k, v in doc.__dict__.items():
+if k in ["role_content", "input_query"]:
+tbase_message[k] = v
+continue
 try:
 v = json.loads(v)
 except:
 pass

 tbase_message[k] = v
+
 message = Message(**tbase_message)
 memory.append(message)
```

**`muagent/service/cb_api.py`** (+4 −2)

```diff
@@ -56,6 +56,7 @@ async def create_cb(zip_file,
 temperature: bool = Body(..., examples=["samples"]),
 model_device: bool = Body(..., examples=["samples"]),
 embed_config: EmbedConfig = None,
+local_graph_path: str = '',
 ) -> BaseResponse:
 logger.info('cb_name={}, zip_path={}, do_interpret={}'.format(cb_name, code_path, do_interpret))

@@ -74,7 +75,7 @@ async def create_cb(zip_file,
 try:
 logger.info('start build code base')
-cbh = CodeBaseHandler(cb_name, code_path, embed_config=embed_config, llm_config=llm_config)
+cbh = CodeBaseHandler(cb_name, code_path, embed_config=embed_config, llm_config=llm_config, local_graph_path=local_graph_path)
 vertices_num, edge_num, file_num = cbh.import_code(zip_file=zip_file, do_interpret=do_interpret)
 logger.info('build code base done')

@@ -100,6 +101,7 @@ async def delete_cb(
 temperature: bool = Body(..., examples=["samples"]),
 model_device: bool = Body(..., examples=["samples"]),
 embed_config: EmbedConfig = None,
+local_graph_path: str="",
 ) -> BaseResponse:
 logger.info('cb_name={}'.format(cb_name))
 embed_config: EmbedConfig = EmbedConfig(**locals()) if embed_config is None else embed_config

@@ -119,7 +121,7 @@ async def delete_cb(
 shutil.rmtree(CB_ROOT_PATH + os.sep + cb_name)

 # delete from codebase
-cbh = CodeBaseHandler(cb_name, embed_config=embed_config, llm_config=llm_config)
+cbh = CodeBaseHandler(cb_name, embed_config=embed_config, llm_config=llm_config, local_graph_path=local_graph_path)
 cbh.delete_codebase(codebase_name=cb_name)

 except Exception as e:
```

**`setup.py`** (+1 −1)

```diff
@@ -5,7 +5,7 @@

 setuptools.setup(
 name="codefuse-muagent",
-version="0.0.2",
+version="0.0.4",
 author="shanshi",
 author_email="[email protected]",
 description="A multi-agent framework that facilitates the rapid construction of collaborative teams of agents.",
```

Comments: 0