From 95f469a5928871ecd2d3ba85c0fdacebff11178f Mon Sep 17 00:00:00 2001
From: Mike Dallas
Date: Fri, 10 Nov 2023 01:04:26 +0000
Subject: [PATCH] use the new GPT-4-turbo

---
 src/gpt.rs  | 2 +-
 src/util.rs | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/gpt.rs b/src/gpt.rs
index 7673d16..1b173e3 100644
--- a/src/gpt.rs
+++ b/src/gpt.rs
@@ -4,7 +4,7 @@ use anyhow::Result;
 use crate::util::Summary;
 
 const OPENAI_API_URL: &str = "https://api.openai.com/v1/chat/completions";
-const OPENAI_MODEL: &str = "gpt-3.5-turbo";
+const OPENAI_MODEL: &str = "gpt-4-1106-preview";
 
 #[derive(Serialize, Deserialize, Debug, Clone)]
diff --git a/src/util.rs b/src/util.rs
index 160844c..8796547 100644
--- a/src/util.rs
+++ b/src/util.rs
@@ -26,7 +26,7 @@ pub async fn split_file(path: PathBuf, tmp_dir: &TempDir) -> Result
         .stderr(Stdio::null())
         .spawn()?;
 
-    let status = command.wait().await?;
+    let _status = command.wait().await?;
 
     let mut filenames: Vec = tmp_dir.path().read_dir()?.map(|entry| {
@@ -50,7 +50,7 @@ pub struct Summary {
 }
 
-const WORDS_PER_CHUNK: usize = 2500;
+const WORDS_PER_CHUNK: usize = 2500 * 16; // 8k tokens is around 2500 words. The new GPT-4-turbo model has a 128k context
 
 pub fn split_text_to_chunks(text: String) -> Vec {
     let mut chunks = Vec::new();
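
Note on the WORDS_PER_CHUNK change: 8k tokens is roughly 2500 words, and
GPT-4-turbo's 128k-token context is 16 times larger, so the chunk size grows
to 2500 * 16 = 40,000 words. The patch shows only the first line of
split_text_to_chunks; the sketch below is a minimal illustration of
word-based chunking under that constant. The function body here is an
assumption for illustration, not the repository's actual implementation.

// Hypothetical sketch of word-based chunking: group whitespace-separated
// words into chunks of at most WORDS_PER_CHUNK words each. Only the names
// mirror the patch; the body is assumed.
const WORDS_PER_CHUNK: usize = 2500 * 16;

pub fn split_text_to_chunks(text: String) -> Vec<String> {
    // Collect the words, slice them into fixed-size groups, and rejoin
    // each group into one chunk string.
    text.split_whitespace()
        .collect::<Vec<&str>>()
        .chunks(WORDS_PER_CHUNK)
        .map(|words| words.join(" "))
        .collect()
}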