From 5e65870e2921442a6b27d9d2b2b030654dbe479b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daphn=C3=A9=20Popin?=
Date: Fri, 10 Nov 2023 11:59:02 +0100
Subject: [PATCH] GPT4 turbo for free plan (#2479)

---
 core/src/providers/azure_openai.rs       |  3 +++
 core/src/providers/openai.rs             |  3 +++
 front/lib/api/assistant/global_agents.ts | 24 +++++++++++++++++++-----
 front/lib/assistant.ts                   | 10 ++++++++++
 4 files changed, 35 insertions(+), 5 deletions(-)

diff --git a/core/src/providers/azure_openai.rs b/core/src/providers/azure_openai.rs
index ea17601f79cb..08a221f78013 100644
--- a/core/src/providers/azure_openai.rs
+++ b/core/src/providers/azure_openai.rs
@@ -251,6 +251,9 @@ impl LLM for AzureOpenAILLM {
         if model_id.starts_with("gpt-4-32k") {
             return 32768;
         }
+        if model_id.starts_with("gpt-4-1106-preview") {
+            return 128000;
+        }
         if model_id.starts_with("gpt-4") {
             return 8192;
         }
diff --git a/core/src/providers/openai.rs b/core/src/providers/openai.rs
index 8f8d31812a87..55b7b15d34e1 100644
--- a/core/src/providers/openai.rs
+++ b/core/src/providers/openai.rs
@@ -1150,6 +1150,9 @@ impl LLM for OpenAILLM {
         if self.id.starts_with("gpt-4-32k") {
             return 32768;
         }
+        if self.id.starts_with("gpt-4-1106-preview") {
+            return 128000;
+        }
         if self.id.starts_with("gpt-4") {
             return 8192;
         }
diff --git a/front/lib/api/assistant/global_agents.ts b/front/lib/api/assistant/global_agents.ts
index 452895289c2d..d882f5d17dda 100644
--- a/front/lib/api/assistant/global_agents.ts
+++ b/front/lib/api/assistant/global_agents.ts
@@ -9,6 +9,7 @@ import {
   CLAUDE_INSTANT_DEFAULT_MODEL_CONFIG,
   GPT_3_5_TURBO_16K_MODEL_CONFIG,
   GPT_4_32K_MODEL_CONFIG,
+  GPT_4_TURBO_MODEL_CONFIG,
   MISTRAL_7B_DEFAULT_MODEL_CONFIG,
 } from "@app/lib/assistant";
 import { GLOBAL_AGENTS_SID } from "@app/lib/assistant";
@@ -16,11 +17,13 @@ import { Authenticator, prodAPICredentialsForOwner } from "@app/lib/auth";
 import { ConnectorProvider } from "@app/lib/connectors_api";
 import { DustAPI } from "@app/lib/dust_api";
 import { GlobalAgentSettings } from "@app/lib/models/assistant/agent";
+import { FREE_TEST_PLAN_CODE } from "@app/lib/plans/plan_codes";
 import logger from "@app/logger/logger";
 import {
   AgentConfigurationType,
   GlobalAgentStatus,
 } from "@app/types/assistant/agent";
+import { PlanType } from "@app/types/plan";
 
 class HelperAssistantPrompt {
   private static instance: HelperAssistantPrompt;
@@ -133,13 +136,20 @@ async function _getGPT35TurboGlobalAgent({
   };
 }
 
-async function _getGPT4GlobalAgent(): Promise<AgentConfigurationType> {
+async function _getGPT4GlobalAgent({
+  plan,
+}: {
+  plan: PlanType;
+}): Promise<AgentConfigurationType> {
+  const isFreePlan = plan.code === FREE_TEST_PLAN_CODE;
   return {
     id: -1,
     sId: GLOBAL_AGENTS_SID.GPT4,
     version: 0,
     name: "gpt4",
-    description: "OpenAI's most powerful model (32k context).",
+    description: isFreePlan
+      ? "OpenAI's cost-effective and high throughput model (128K context)."
+      : "OpenAI's most powerful model (32k context).",
     pictureUrl: "https://dust.tt/static/systemavatar/gpt4_avatar_full.png",
     status: "active",
     scope: "global",
@@ -147,8 +157,12 @@ async function _getGPT4GlobalAgent(): Promise<AgentConfigurationType> {
       id: -1,
       prompt: "",
       model: {
-        providerId: GPT_4_32K_MODEL_CONFIG.providerId,
-        modelId: GPT_4_32K_MODEL_CONFIG.modelId,
+        providerId: isFreePlan
+          ? GPT_4_TURBO_MODEL_CONFIG.providerId
+          : GPT_4_32K_MODEL_CONFIG.providerId,
+        modelId: isFreePlan
+          ? GPT_4_TURBO_MODEL_CONFIG.modelId
+          : GPT_4_32K_MODEL_CONFIG.modelId,
       },
       temperature: 0.7,
     },
@@ -548,7 +562,7 @@ export async function getGlobalAgent(
       agentConfiguration = await _getGPT35TurboGlobalAgent({ settings });
       break;
     case GLOBAL_AGENTS_SID.GPT4:
-      agentConfiguration = await _getGPT4GlobalAgent();
+      agentConfiguration = await _getGPT4GlobalAgent({ plan });
       break;
     case GLOBAL_AGENTS_SID.CLAUDE_INSTANT:
       agentConfiguration = await _getClaudeInstantGlobalAgent({ settings });
diff --git a/front/lib/assistant.ts b/front/lib/assistant.ts
index 4f906173908b..b449ec9fcc0f 100644
--- a/front/lib/assistant.ts
+++ b/front/lib/assistant.ts
@@ -7,6 +7,7 @@ import { AgentConfigurationType } from "@app/types/assistant/agent";
 
 export const GPT_4_32K_MODEL_ID = "gpt-4-32k" as const;
 export const GPT_4_MODEL_ID = "gpt-4" as const;
+export const GPT_4_TURBO_MODEL_ID = "gpt-4-1106-preview" as const;
 
 export const GPT_4_32K_MODEL_CONFIG = {
   providerId: "openai",
@@ -24,6 +25,14 @@ export const GPT_4_MODEL_CONFIG = {
   recommendedTopK: 16,
 };
 
+export const GPT_4_TURBO_MODEL_CONFIG = {
+  providerId: "openai",
+  modelId: GPT_4_TURBO_MODEL_ID,
+  displayName: "GPT 4 Turbo",
+  contextSize: 128000,
+  recommendedTopK: 32,
+} as const;
+
 export const GPT_3_5_TURBO_16K_MODEL_CONFIG = {
   providerId: "openai",
   modelId: "gpt-3.5-turbo-16k",
@@ -69,6 +78,7 @@ export const SUPPORTED_MODEL_CONFIGS = [
   GPT_3_5_TURBO_MODEL_CONFIG,
   GPT_4_32K_MODEL_CONFIG,
   GPT_4_MODEL_CONFIG,
+  GPT_4_TURBO_MODEL_CONFIG,
   CLAUDE_DEFAULT_MODEL_CONFIG,
   CLAUDE_INSTANT_DEFAULT_MODEL_CONFIG,
   MISTRAL_7B_DEFAULT_MODEL_CONFIG,