The "schema" variable in the def construct_schema(
structured_schema: Dict[str, Any],
include_types: List[str],
exclude_types: List[str],
) -> str:
"""Filter the schema based on included or excluded types"""
def filter_func(x: str) -> bool:
return x in include_types if include_types else x not in exclude_types
filtered_schema: Dict[str, Any] = {
"node_props": {
k: v
for k, v in structured_schema.get("node_props", {}).items()
if filter_func(k)
},
"rel_props": {
k: v
for k, v in structured_schema.get("rel_props", {}).items()
if filter_func(k)
},
"relationships": [
r
for r in structured_schema.get("relationships", [])
if all(filter_func(r[t]) for t in ["start", "end", "type"])
],
}
# Format node properties
formatted_node_props = []
for label, properties in filtered_schema["node_props"].items():
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in properties]
)
formatted_node_props.append(f"{label} {{{props_str}}}")
# Format relationship properties
formatted_rel_props = []
for rel_type, properties in filtered_schema["rel_props"].items():
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in properties]
)
formatted_rel_props.append(f"{rel_type} {{{props_str}}}")
# Format relationships
formatted_rels = [
f"(:{el['start']})-[:{el['type']}]->(:{el['end']})"
for el in filtered_schema["relationships"]
]
return "\n".join(
[
"Node properties are the following:",
",".join(formatted_node_props),
"Relationship properties are the following:",
",".join(formatted_rel_props),
"The relationships are the following:",
",".join(formatted_rels),
]
) To print out the finished prompt for debugging purposes, you can modify the def test_graph_cypher_qa_chain() -> None:
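For reference, here is a minimal sketch of what `construct_schema` returns; the `toy_schema` dict below is made up purely for illustration:

```python
# Hypothetical schema with two node labels and one relationship type.
toy_schema = {
    "node_props": {
        "Movie": [{"property": "title", "type": "STRING"}],
        "Actor": [{"property": "name", "type": "STRING"}],
    },
    "rel_props": {},
    "relationships": [
        {"start": "Actor", "type": "ACTED_IN", "end": "Movie"},
    ],
}

# Empty include/exclude lists mean nothing is filtered out.
print(construct_schema(toy_schema, include_types=[], exclude_types=[]))
# Node properties are the following:
# Movie {title: STRING},Actor {name: STRING}
# Relationship properties are the following:
#
# The relationships are the following:
# (:Actor)-[:ACTED_IN]->(:Movie)
```

This rendered string is exactly what lands in the `{schema}` slot of the prompt, so a large graph translates directly into a large (and expensive) prompt.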
template = """You are a nice chatbot having a conversation with a human.
Schema:
{schema}
Previous conversation:
{chat_history}
New human question: {question}
Response:"""
prompt = PromptTemplate(
input_variables=["schema", "question", "chat_history"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
prompt1 = (
"You are a nice chatbot having a conversation with a human.\n\n "
"Schema:\n Node properties are the following:\n\nRelationship "
"properties are the following:\n\nThe relationships are the "
"following:\n\n\n "
"Previous conversation:\n \n\n New human question: "
"Test question\n Response:"
)
prompt2 = (
"You are a nice chatbot having a conversation with a human.\n\n "
"Schema:\n Node properties are the following:\n\nRelationship "
"properties are the following:\n\nThe relationships are the "
"following:\n\n\n "
"Previous conversation:\n Human: Test question\nAI: foo\n\n "
"New human question: Test new question\n Response:"
)
llm = FakeLLM(queries={prompt1: "answer1", prompt2: "answer2"})
chain = GraphCypherQAChain.from_llm(
cypher_llm=llm,
qa_llm=FakeLLM(),
graph=FakeGraphStore(),
verbose=True,
return_intermediate_steps=False,
cypher_llm_kwargs={"prompt": prompt, "memory": readonlymemory},
memory=memory,
)
# Run the chain and print the prompts for debugging
print("Running first question...")
chain.run("Test question")
print(f"Prompt after first question: {prompt1}")
print("Running second question...")
chain.run("Test new question")
print(f"Prompt after second question: {prompt2}")
# If we get here without a key error, that means memory
# was used properly to create prompts.
assert True This will print the prompts used in the |
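If you just want to see the finished prompt without writing a test, a lighter-weight option is LangChain's global debug flag, which logs every step of a chain, including the fully rendered prompt sent to the LLM. A minimal sketch, assuming a `chain` built as above:

```python
from langchain.globals import set_debug

set_debug(True)  # log every chain/LLM call, including the final rendered prompt

chain.run("Test question")  # the prompt, schema included, is echoed to the console

set_debug(False)  # turn the logging back off afterwards
```

And if the schema is what inflates the prompt, `GraphCypherQAChain.from_llm` also accepts `include_types`/`exclude_types`, which are passed straight to `construct_schema` above and can cut unneeded labels and relationships out of the prompt.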
Description
@DosuBot
How is the "question" and in-particular the "schema" variables injected into the prompt, as input variables they are declared but I don't understand how they are initialise. Obviously the invoke method initialises the question but what retrieves the schema. It seems this is done underneath the hood, ultimately i like to print out the finished prompt to check what has been constructed ? The reason being I suspect I'm generating large prompts because of the schema which is making expensive API calls. The verbose parameter does not print the prompt hence I would like to debug what is being created under the hood
System Info
langchain==0.2.5
langchain-community==0.2.5
langchain-core==0.2.7
langchain-experimental==0.0.61
langchain-google-vertexai==1.0.5
langchain-openai==0.1.8
langchain-text-splitters==0.2.1