# query_data_AI.py
# Retrieve stored contexts for a query and ask a local LLM to explain them.
import json
import subprocess

from query import get_contexts

# Default model served by the local Ollama instance.
modelllm = 'qwen2:1.5b'
def send_prompt(query_text: str, model=modelllm):
    # Retrieve the stored contexts relevant to this query.
    contexts = get_contexts(query_text)

    # Combine the contexts into a single block for the prompt.
    context_text = "\n\n---\n\n".join(contexts)
    print(context_text)

    prompt = f"{context_text}\n\nExplain what the user was doing at the time this was opened: {query_text}"

    # Use curl to send the request to the local Ollama API.
    command = [
        'curl',
        '-X', 'POST',
        'http://localhost:11434/api/generate',
        '-d', json.dumps({"model": model, "prompt": prompt, "stream": False}),
        '-H', 'Content-Type: application/json'
    ]

    try:
        response = subprocess.run(command, capture_output=True, text=True, check=True)
        response_json = json.loads(response.stdout)
        response_text = response_json.get('response', 'No response from the model')
        print(f"Response: {response_text}")
    except subprocess.CalledProcessError as e:
        print(f"Error occurred: {e}")
        # With capture_output=True, curl's diagnostics land in e.stderr.
        print(f"Command output: {e.stderr or e.output}")


if __name__ == "__main__":
    query_text = input("Enter your question: ")
    model = input(f"Enter the model you want to use (default is '{modelllm}'): ") or modelllm
    send_prompt(query_text, model)
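
# Note: a lighter-weight alternative to shelling out to curl is to POST the
# same payload with the `requests` package (an assumption: it must be
# installed separately; it is not part of the standard library). A minimal
# sketch, using the same Ollama endpoint and payload as above:
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:11434/api/generate",
#       json={"model": model, "prompt": prompt, "stream": False},
#       timeout=120,
#   )
#   resp.raise_for_status()
#   print(resp.json().get("response", "No response from the model"))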