-
Notifications
You must be signed in to change notification settings - Fork 38
/
start-2.py
60 lines (45 loc) · 1.32 KB
/
start-2.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
"""Tutorial script exercising the main endpoints of the ollama Python client.

Walks through list, chat (plain and streaming), generate, show, create,
and delete.  Each section mirrors the corresponding Ollama REST API
endpoint; the commented-out ``print`` calls can be uncommented to
inspect each response.  Requires a running Ollama server with the
``llama3.2`` model pulled.
"""
import ollama

# == List locally available models ==
response = ollama.list()
# print(response)

# == Chat example ==
res = ollama.chat(
    model="llama3.2",
    messages=[
        {"role": "user", "content": "why is the sky blue?"},
    ],
)
# print(res["message"]["content"])

# == Chat example streaming ==
# With stream=True the call yields a generator of incremental chunks
# instead of a single response object.
res = ollama.chat(
    model="llama3.2",
    messages=[
        {
            "role": "user",
            "content": "why is the ocean so salty?",
        },
    ],
    stream=True,
)
# for chunk in res:
#     print(chunk["message"]["content"], end="", flush=True)

# ==================================================================================
# ==== The Ollama Python library's API is designed around the Ollama REST API ====
# ==================================================================================

# == Generate example ==
res = ollama.generate(
    model="llama3.2",
    prompt="why is the sky blue?",
)

# show
# print(ollama.show("llama3.2"))

# == Create a new model ==
# NOTE: ollama-python 0.4+ removed the ``modelfile=`` keyword from
# ``create()``.  The Modelfile directives are now passed as explicit
# keyword arguments: FROM -> from_, SYSTEM -> system,
# PARAMETER temperature -> parameters={"temperature": ...}.
ollama.create(
    model="knowitall",
    from_="llama3.2",
    system=(
        "You are very smart assistant who knows everything about oceans. "
        "You are very succinct and informative."
    ),
    parameters={"temperature": 0.1},
)
res = ollama.generate(model="knowitall", prompt="why is the ocean so salty?")
print(res["response"])

# delete model
ollama.delete("knowitall")