Skip to content

Commit

Permalink
Merge pull request cheshire-cat-ai#780 from cheshire-cat-ai/develop
Browse files Browse the repository at this point in the history
Tools and forms to work with small models; `cat.classify`
  • Loading branch information
pieroit authored Apr 21, 2024
2 parents 02d31dd + 1570a59 commit eb97809
Show file tree
Hide file tree
Showing 9 changed files with 213 additions and 144 deletions.
24 changes: 5 additions & 19 deletions core/cat/experimental/form/cat_form.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
import json
from enum import Enum
from typing import List, Dict
from dataclasses import dataclass
from pydantic import BaseModel, ConfigDict, ValidationError

from langchain.chains import LLMChain
from langchain_core.prompts.prompt import PromptTemplate

#from cat.looking_glass.prompts import MAIN_PROMPT_PREFIX
from enum import Enum
from cat.utils import parse_json
from cat.log import log
import json


# Conversational Form State
Expand Down Expand Up @@ -195,19 +195,6 @@ def message(self):
"output": out
}

def stringify_convo_history(self):

user_message = self.cat.working_memory["user_message_json"]["text"]
chat_history = self.cat.working_memory["history"][-10:] # last n messages

# stringify history
history = ""
for turn in chat_history:
history += f"\n - {turn['who']}: {turn['message']}"
history += f"Human: {user_message}"

return history

# Extract model information from user message
def extract(self):

Expand All @@ -227,7 +214,7 @@ def extract(self):

# json parser
try:
output_model = json.loads(json_str)
output_model = parse_json(json_str)
except Exception as e:
output_model = {}
log.warning(e)
Expand All @@ -236,7 +223,7 @@ def extract(self):

def extraction_prompt(self):

history = self.stringify_convo_history()
history = self.cat.stringify_chat_history()

# JSON structure
# BaseModel.__fields__['my_field'].type_
Expand All @@ -263,7 +250,6 @@ def extraction_prompt(self):
```
This is the conversation:
{history}
Updated JSON:
Expand Down
58 changes: 16 additions & 42 deletions core/cat/looking_glass/agent_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ async def execute_procedures_agent(self, agent_input, stray):
procedures=allowed_procedures,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because it is needed to fill the scratchpad
input_variables=["input", "intermediate_steps"]
input_variables=["input", "chat_history", "intermediate_steps"]
)

# main chain
Expand All @@ -94,7 +94,7 @@ async def execute_procedures_agent(self, agent_input, stray):
agent = LLMSingleActionAgent(
llm_chain=agent_chain,
output_parser=ChooseProcedureOutputParser(),
stop=["}"],
stop=["```"], # markdown syntax ends JSON with backtick
verbose=self.verbose
)

Expand Down Expand Up @@ -181,7 +181,7 @@ async def execute_agent(self, stray):

# prepare input to be passed to the agent.
# Info will be extracted from working memory
agent_input = self.format_agent_input(stray.working_memory)
agent_input = self.format_agent_input(stray)
agent_input = self.mad_hatter.execute_hook("before_agent_starts", agent_input, cat=stray)

# should we run the default agent?
Expand Down Expand Up @@ -212,12 +212,16 @@ async def execute_agent(self, stray):
# exit agent if a return_direct procedure was executed
return procedures_result

# Adding the tools_output key in agent input, needed by the memory chain
if procedures_result.get("output"):
agent_input["tools_output"] = "## Tools output: \n" + procedures_result["output"]

# store intermediate steps to enrich memory chain
intermediate_steps = procedures_result["intermediate_steps"]

# Adding the tools_output key in agent input, needed by the memory chain
if len(intermediate_steps) > 0:
agent_input["tools_output"] = "## Tools output: \n"
for proc_res in intermediate_steps:
# ((step[0].tool, step[0].tool_input), step[1])
agent_input["tools_output"] += f" - {proc_res[0][0]}: {proc_res[1]}\n"


except Exception as e:
log.error(e)
Expand All @@ -234,7 +238,7 @@ async def execute_agent(self, stray):

return memory_chain_output

def format_agent_input(self, working_memory):
def format_agent_input(self, stray):
"""Format the input for the Agent.
The method formats the strings of recalled memories and chat history that will be provided to the Langchain
Expand Down Expand Up @@ -262,19 +266,17 @@ def format_agent_input(self, working_memory):

# format memories to be inserted in the prompt
episodic_memory_formatted_content = self.agent_prompt_episodic_memories(
working_memory["episodic_memories"]
stray.working_memory["episodic_memories"]
)
declarative_memory_formatted_content = self.agent_prompt_declarative_memories(
working_memory["declarative_memories"]
stray.working_memory["declarative_memories"]
)

# format conversation history to be inserted in the prompt
conversation_history_formatted_content = self.agent_prompt_chat_history(
working_memory["history"]
)
conversation_history_formatted_content = stray.stringify_chat_history()

return {
"input": working_memory["user_message_json"]["text"],
"input": stray.working_memory["user_message_json"]["text"], # TODO: deprecate, since it is included in chat history
"episodic_memory": episodic_memory_formatted_content,
"declarative_memory": declarative_memory_formatted_content,
"chat_history": conversation_history_formatted_content,
Expand Down Expand Up @@ -365,32 +367,4 @@ def agent_prompt_declarative_memories(self, memory_docs: List[Document]) -> str:

return memory_content

def agent_prompt_chat_history(self, chat_history: List[Dict]) -> str:
"""Serialize chat history for the agent input.
Converts to text the recent conversation turns fed to the *Agent*.
Parameters
----------
chat_history : List[Dict]
List of dictionaries collecting speaking turns.
Returns
-------
history : str
String with recent conversation turns to be provided as context to the *Agent*.
Notes
-----
Such context is placed in the `agent_prompt_suffix` in the place held by {chat_history}.
The chat history is a dictionary with keys::
'who': the name of who said the utterance;
'message': the utterance.
"""
history = ""
for turn in chat_history:
history += f"\n - {turn['who']}: {turn['message']}"

return history

24 changes: 8 additions & 16 deletions core/cat/looking_glass/output_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,49 +4,41 @@
from typing import Union

from cat.mad_hatter.mad_hatter import MadHatter
from cat.utils import parse_json
from cat.log import log


class ChooseProcedureOutputParser(AgentOutputParser):

def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:

log.debug(llm_output)
log.info(llm_output)

# Making JSON valid
llm_output = llm_output + "}"
llm_output = llm_output.replace("None", "null")

try:
parsed_output = json.loads(llm_output)
parsed_output = parse_json(llm_output)
parsed_output_log = json.dumps(parsed_output, indent=4)
except Exception as e:
log.error(e)
raise OutputParserException(f"Could not parse LLM output: `{llm_output}`")

# Extract action
action = parsed_output["action"]
action_input = parsed_output["action_input"]
action_input = str(parsed_output["action_input"])

if action_input:
action_input = action_input.strip(" ").strip('"')
else:
action_input = ""

# Check if agent should finish
if action == "final_answer":
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output":action_input},
log=llm_output,
)

if action == "none_of_the_others":
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": None},
log=llm_output,
log=parsed_output_log
)

for Form in MadHatter().forms:
Expand All @@ -56,8 +48,8 @@ def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
"output": None,
"form": action
},
log=llm_output,
log=parsed_output_log
)

# Return the action and action input
return AgentAction(tool=action, tool_input=action_input, log=llm_output)
return AgentAction(tool=action, tool_input=action_input, log=parsed_output_log)
81 changes: 56 additions & 25 deletions core/cat/looking_glass/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,70 +20,101 @@ def format(self, **kwargs) -> str:
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\n{json.dumps({'observations':observation}, indent=4)}\n"
thoughts += f"```json\n{action.log}\n```\n"
thoughts += f"""```json
{json.dumps({"action_output": observation}, indent=4)}
```
"""

# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
# Create a tools variable from the list of tools provided
kwargs["tools"] = ""
kwargs["examples"] = ""
for proc in self.procedures.values():
kwargs["tools"] += f"\n- {proc.name}: {proc.description}"
kwargs["tools"] += f'\n- "{proc.name}": {proc.description}'
if len(proc.start_examples) > 0:

# At first example add this header
if len(kwargs["examples"]) == 0:
kwargs["examples"] += "Here some examples:\n"
kwargs["examples"] += "## Here some examples:\n"

# Create action example
example = {
"action": proc.name,
"action_input": "Input of the action according to its description"
}
example = f"""{{
"action": "{proc.name}",
"action_input": // Input of the action according to its description
}}"""

# Add a random user question chosen from the start examples to the prompt
kwargs["examples"] += f"\nQuestion: {random.choice(proc.start_examples)}\n"
kwargs["examples"] += f"\nQuestion: {random.choice(proc.start_examples)}"
# Add example
kwargs["examples"] += json.dumps(example, indent=4)
kwargs["examples"] += f"\n```json\n{example}\n```"

# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join(self.procedures.keys())
kwargs["tool_names"] = '"' + '", "'.join(self.procedures.keys()) + '"'

return self.template.format(**kwargs)


TOOL_PROMPT = """Answer the following question: {input}
You can only reply using these actions:
{tools}
- final_answer: Use this to respond to the user when you have the final answer. Input is the final answer.
- none_of_the_others: Use this action if none of the others actions help. Input is always None.
TOOL_PROMPT = """Create a JSON action to complete the action sequence, with the correct "action" and "action_input" to help the Human.
You can use one of these actions:
{tools}
- "final_answer": Use this action to finish or no relevant action is available. Input is always null.
If you want to do an action, use the following format:
## To add an action, use only the following format:
```json
{{
"action": "action_name", // The name of the action to take, should be one of [{tool_names}]
"action_input": "input of the action" // The input to the action should be a string
"action": // str - The name of the action to take, should be one of [{tool_names}, "final_answer"]
"action_input": // str or null - The input to the action
}}
```
{examples}
Begin!
## Action output
After each action there will be an action output in this format:
```json
{{
"action_output": // output of the preceding action
}}
```
## Final answer / no action available
When you have a final answer (or no tools are relevant), use the following format:
```json
{{
"action": "final_answer",
"action_input": null
}}
```
## Conversation with Human:
{chat_history}
Question: {input}
{agent_scratchpad}"""
## Actions sequence used until now:
{agent_scratchpad}
## Next action:
```json
"""


MAIN_PROMPT_PREFIX = """You are the Cheshire Cat AI, an intelligent AI that passes the Turing test.
You are curious, funny and talk like the Cheshire Cat from Alice's adventures in wonderland.
You answer Human with a focus on the following context."""
You answer Human shortly and with a focus on the following context."""


MAIN_PROMPT_SUFFIX = """
# Context
{episodic_memory}
{declarative_memory}
{tools_output}
## Conversation until now:{chat_history}
- Human: {input}
# Conversation until now:{chat_history}
- AI: """


Loading

0 comments on commit eb97809

Please sign in to comment.