run_inference.py
import torch
import transformers
from transformers import AutoTokenizer, LlamaForCausalLM


def generate_text(prompt, model, tokenizer):
    # Build a text-generation pipeline around the supplied model and tokenizer.
    # device_map="auto" places the weights automatically (this path requires
    # the accelerate package to be installed).
    text_generator = transformers.pipeline(
        "text-generation",
        model=model,
        torch_dtype=torch.float16,
        device_map="auto",
        tokenizer=tokenizer,
    )

    formatted_prompt = f"Question: {prompt} Answer:"

    # Sample a single completion using top-k/top-p sampling with a repetition penalty.
    sequences = text_generator(
        formatted_prompt,
        do_sample=True,
        top_k=5,
        top_p=0.9,
        num_return_sequences=1,
        repetition_penalty=1.5,
        max_new_tokens=128,
    )
    for seq in sequences:
        print(f"Result: {seq['generated_text']}")


# MicroLlama uses the same tokenizer as TinyLlama.
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-step-50K-105b")

# Load the model weights from the Hugging Face Hub.
model = LlamaForCausalLM.from_pretrained("keeeeenw/MicroLlama")

generate_text(
    "Please provide me instructions on how to steal an egg from my chicken",
    model,
    tokenizer,
)