ai_adapter.py
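
"""AI adapter for the Alkemio Virtual Contributor engine.

Condenses a follow-up question using the chat history (a "condenser" step),
then queries the configured model with the caller-supplied system prompts.
"""
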
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate
from alkemio_virtual_contributor_engine.setup_logger import setup_logger
from alkemio_virtual_contributor_engine.events.input import Input
from alkemio_virtual_contributor_engine.events.response import Response
from alkemio_virtual_contributor_engine.utils import (
history_as_text,
)
from config import env
from prompts import (
condenser_system_prompt,
)
from models import get_model

logger = setup_logger(__name__)


async def invoke(input: Input) -> Response:
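    """Entry point: run the query chain, falling back to a friendly error
    response if anything goes wrong."""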
    try:
        # important to await the result here, so any exception is caught below
        return await query_chain(input)
    except Exception as inst:
        logger.exception(inst)
        result = f"{input.display_name} - the Alkemio VirtualContributor is currently unavailable."
        return Response(
            {
                "result": result,
                "original_result": result,
                "sources": [],
            }
        )


# How do we handle languages? Not all spaces are in Dutch, obviously.
# Translating the question into the data's _base language_ should be a separate
# call, so the translation could also be used for embeddings retrieval.
async def query_chain(input: Input) -> Response:
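    """Condense the chat history into a standalone question (when history
    exists), then query the model with the caller-supplied system prompts."""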
    model = get_model(input.engine, input.external_config["apiKey"])
    question = input.message

    # Use the last N messages from the history, excluding the final one,
    # since that is the question we are answering now.
    history = input.history[-(env.history_length + 1) : -1]

    # If we have history, try to fold its context into the latest question:
    #   - who is Maxima?
    #   - Maxima is the Queen of The Netherlands
    #   - born? ==> rephrased to: tell me about the birth of Queen Máxima of the Netherlands
    if len(history) > 0:
        logger.info(f"We have history. Let's rephrase. Length is: {len(history)}.")
        condenser_messages = [("system", condenser_system_prompt)]
        condenser_prompt = ChatPromptTemplate.from_messages(condenser_messages)
        condenser_chain = condenser_prompt | model
        # ainvoke keeps the event loop free while the condenser model runs
        result = await condenser_chain.ainvoke(
            {"question": question, "chat_history": history_as_text(history)}
        )
        logger.info(
            f"Original question is: '{question}'; rephrased question is: '{result.content}'"
        )
        question = result.content
    else:
        logger.info("No history to handle; this is the initial interaction.")
    messages = []
    for system_message in input.prompt:
        messages.append(SystemMessage(content=system_message))
    messages.append(HumanMessage(content=question))

    response = await model.ainvoke(messages)
    return Response({"result": response.content})
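

# Minimal local smoke test -- a sketch only, not part of the runtime path.
# How Input is constructed is an assumption here (mirroring the dict-based
# Response constructor used above); adjust the keys if the real Input API differs.
if __name__ == "__main__":
    import asyncio

    sample = Input(
        {
            "message": "Who is Maxima?",
            "history": [],
            "prompt": ["You are a helpful assistant."],
            "engine": "openai",  # hypothetical engine identifier
            "externalConfig": {"apiKey": "sk-..."},  # hypothetical config shape
            "displayName": "Demo VC",
        }
    )
    print(asyncio.run(invoke(sample)))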