run.py
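# Sample chat loop for the 04-DynamicRag sample: it wires two Azure OpenAI chat
# services, a Math plugin, and an Intent plugin into the proposed Kernel, then
# runs a persona-driven Chat semantic function for each user message.
# Configuration comes from a .env file (gpt35turbo service) and from the
# AZURE_OPENAI_* environment variables (gpt4 service).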
import asyncio
import os
import sys

from semantic_kernel.utils.settings import azure_openai_settings_from_dot_env_as_dict

# add the repo root to sys.path to allow the unusual layout and the import of the new pieces
sys.path.append(os.getcwd())
from python.src.connectors import (
    AddAssistantMessageToHistoryHook,
    AzureChatCompletion,
    StreamingResultToStdOutHook,
)
from python.src.kernel import newKernel as Kernel
from python.src.plugins import SemanticFunction, SKPlugin

# make the sample's plugin directory importable for the native Math plugin
sys.path.append(os.getcwd() + "/python/samples/04-DynamicRag/Plugins")
from MathPlugin.math import Math


async def runner():
    # create services and chat
    gpt35turbo = AzureChatCompletion(
        **azure_openai_settings_from_dot_env_as_dict(include_api_version=True),
    )
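    # second service pointed at a GPT-4 deployment, configured from individual environment variables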
    gpt4 = AzureChatCompletion(
        deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME_GPT4"),
        api_key=os.getenv("AZURE_OPENAI_API_KEY"),
        endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
        api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    )
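    # native Math plugin, extended below with a semantic function that generates math problems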
    math_plugin = SKPlugin.from_class(
        "Math",
        Math(),
    )
    math_plugin.add_function(
        SemanticFunction.from_path(
            path=os.getcwd()
            + "/python/samples/04-DynamicRag/Plugins/MathPlugin/GenerateMathProblem.prompt.yaml"
        )
    )
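    # plugin built purely from a semantic function (GetNextStep) that determines the next step of the conversation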
    intent_plugin = SKPlugin(
        "Intent",
        functions=[
            SemanticFunction.from_path(
                path=os.getcwd()
                + "/python/samples/04-DynamicRag/Plugins/IntentPlugin/GetNextStep.prompt.yaml"
            )
        ],
    )
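    # top-level Chat semantic function that the kernel runs for every user turn in the loop below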
    chat_function = SemanticFunction.from_path(
        path=os.getcwd()
        + "/python/samples/04-DynamicRag/Plugins/ChatPlugin/Chat.prompt.yaml"
    )
    # create kernel; the hooks stream results to stdout as they arrive and add the assistant reply to the chat history
    kernel = Kernel(
        ai_services=[gpt35turbo, gpt4],
        plugins=[math_plugin, intent_plugin],
        hooks=[StreamingResultToStdOutHook(), AddAssistantMessageToHistoryHook()],
    )
    # create chat_history
    chat_history = gpt4.create_new_chat()

    # loop with input
    while True:
        user_input = input("User:> ")
        if user_input == "exit":
            break
        chat_history.add_user_message(user_input)
        # get response
        await kernel.run_async(
            chat_function,
            variables={
                "persona": "You are a snarky (yet helpful) teenage assistant. Make sure to use hip slang in every response.",
                "messages": chat_history,
            },
            kernel=kernel,
        )
        print(f"Total messages in chat_history: {len(chat_history.messages)}")


def __main__():
    asyncio.run(runner())


if __name__ == "__main__":
    __main__()