-
Notifications
You must be signed in to change notification settings - Fork 56
/
Copy pathc03-mapper.rs
52 lines (41 loc) · 1.61 KB
/
c03-mapper.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
//! This example demonstrates how to use the ModelMapper to map a ModelIden (model identifier) to
//! a potentially different one using the model mapper.
use genai::adapter::AdapterKind;
use genai::chat::printer::print_chat_stream;
use genai::chat::{ChatMessage, ChatRequest};
use genai::resolver::ModelMapper;
use genai::{Client, ModelIden};
// NOTE: This will be overridden below to `gpt-4o-mini`
const MODEL: &str = "gpt-4o";
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
	// The conversation: an initial question plus one follow-up.
	let questions = &[
		// follow-up questions
		"Why is the sky blue?",
		"Why is it red sometimes?",
	];

	// -- Build a ModelMapper that rewrites model identifiers before each request.
	//    Economize: route every "gpt-*" model to "gpt-4o-mini"; pass others through.
	let mapper = ModelMapper::from_mapper_fn(|iden: ModelIden| {
		match iden.model_name.starts_with("gpt-") {
			true => Ok(ModelIden::new(AdapterKind::OpenAI, "gpt-4o-mini")),
			false => Ok(iden),
		}
	});

	// -- Build the client with the model mapper installed.
	let client = Client::builder().with_model_mapper(mapper).build();

	// Seed the chat with a system prompt; user/assistant turns accumulate below.
	let mut req = ChatRequest::default().with_system("Answer in one sentence");

	for question in questions.iter().copied() {
		// Append the user turn and stream the answer.
		req = req.append_message(ChatMessage::user(question));

		println!("\n--- Question:\n{question}");
		let res = client.exec_chat_stream(MODEL, req.clone(), None).await?;

		// The resolved identifier shows the mapper's effect (adapter + final model name).
		println!(
			"\n--- Answer: ({} - {})",
			res.model_iden.adapter_kind, res.model_iden.model_name
		);
		let answer = print_chat_stream(res, None).await?;

		// Record the assistant turn so the next question has full context.
		req = req.append_message(ChatMessage::assistant(answer));
	}

	Ok(())
}