Skip to content

Commit e737787

Browse files
committed
When near the end of the allowed turns, force the LLM to wrap up with the final-answer guidelines
1 parent 9349803 commit e737787

File tree

3 files changed

+12
-2
lines changed

3 files changed

+12
-2
lines changed

openai_server/autogen_2agent_backend.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,7 @@ def run_autogen_2agent(query=None,
120120
autogen_code_restrictions_level,
121121
agent_venv_dir,
122122
temp_dir,
123+
max_turns=autogen_max_turns,
123124
agent_tools_usage_hard_limits=agent_tools_usage_hard_limits,
124125
agent_tools_usage_soft_limits=agent_tools_usage_soft_limits,
125126
)
@@ -167,6 +168,7 @@ def code_writer_terminate_func(msg):
167168
human_input_mode="NEVER",
168169
is_termination_msg=code_writer_terminate_func,
169170
max_consecutive_auto_reply=autogen_max_consecutive_auto_reply,
171+
max_turns=autogen_max_turns,
170172
)
171173

172174
code_writer_agent = H2OConversableAgent("code_writer_agent", **code_writer_kwargs)
@@ -177,7 +179,7 @@ def code_writer_terminate_func(msg):
177179
# setup planning agents
178180
code_writer_kwargs_planning = code_writer_kwargs.copy()
179181
# terminate immediately
180-
update_dict = dict(max_consecutive_auto_reply=1)
182+
update_dict = dict(max_consecutive_auto_reply=1, max_turns=None)
181183
# is_termination_msg=lambda x: True
182184
code_writer_kwargs_planning.update(update_dict)
183185
code_writer_agent = H2OConversableAgent("code_writer_agent", **code_writer_kwargs_planning)

openai_server/autogen_utils.py

+8
Original file line numberDiff line numberDiff line change
@@ -669,7 +669,10 @@ def __init__(
669669
default_auto_reply: Union[str, Dict] = "",
670670
description: Optional[str] = None,
671671
chat_messages: Optional[Dict[Agent, List[Dict]]] = None,
672+
max_turns: Optional[int] = None,
672673
):
674+
self.max_turns = max_turns
675+
self.turns = 0
673676
code_execution_config = (
674677
code_execution_config.copy() if hasattr(code_execution_config, "copy") else code_execution_config
675678
)
@@ -831,6 +834,7 @@ def _generate_code_execution_reply_using_executor(
831834
iostream = IOStream.get_default()
832835
iostream.print(delta)
833836
output += delta
837+
self.turns += 1
834838
return valid, output
835839

836840
def __generate_code_execution_reply_using_executor(
@@ -887,6 +891,10 @@ def __generate_code_execution_reply_using_executor(
887891
# force immediate termination regardless of what LLM generates
888892
self._is_termination_msg = lambda x: True
889893
return True, self.final_answer_guidelines()
894+
if self.max_turns is not None and self.turns >= self.max_turns - 1:
895+
# one before final allowed turn, force LLM to stop
896+
self._is_termination_msg = lambda x: True
897+
return True, self.final_answer_guidelines()
890898

891899
num_code_blocks = len(code_blocks)
892900
if num_code_blocks == 1:

src/version.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "5ea362c96da92eff13e1c2252e8030515d71c085"
1+
__version__ = "9349803e8bfd5b3d42ab1e5a2bfadcb649774891"

0 commit comments

Comments (0)