From fcb1a657e333980495b691dc1f61b5bc08f55097 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=91=E4=BA=91=E7=99=BD=E5=9C=9F?=
Date: Wed, 17 Apr 2024 16:24:11 +0800
Subject: [PATCH 1/5] Update constant.ts

---
 app/constant.ts | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/app/constant.ts b/app/constant.ts
index aaa33bdcf24..8b6549566d7 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -99,7 +99,6 @@ export const Azure = {
 export const Google = {
   ExampleEndpoint: "https://generativelanguage.googleapis.com/",
   ChatPath: (modelName: string) => `v1beta/models/${modelName}:generateContent`,
-  VisionChatPath: (modelName: string) => `v1beta/models/${modelName}:generateContent`,
 };
 
 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
@@ -128,8 +127,6 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "gpt-4-turbo": "2023-12",
   "gpt-4-turbo-2024-04-09": "2023-12",
   "gpt-4-turbo-preview": "2023-12",
-  "gpt-4-1106-preview": "2023-04",
-  "gpt-4-0125-preview": "2023-12",
   "gpt-4-vision-preview": "2023-04",
   // After improvements,
   // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
@@ -139,19 +136,11 @@ export const KnowledgeCutOffDate: Record<string, string> = {
 
 const openaiModels = [
   "gpt-3.5-turbo",
-  "gpt-3.5-turbo-0301",
-  "gpt-3.5-turbo-0613",
   "gpt-3.5-turbo-1106",
   "gpt-3.5-turbo-0125",
-  "gpt-3.5-turbo-16k",
-  "gpt-3.5-turbo-16k-0613",
   "gpt-4",
-  "gpt-4-0314",
   "gpt-4-0613",
-  "gpt-4-1106-preview",
-  "gpt-4-0125-preview",
   "gpt-4-32k",
-  "gpt-4-32k-0314",
   "gpt-4-32k-0613",
   "gpt-4-turbo",
   "gpt-4-turbo-preview",

From b7aab3c10272e076bd84b7a871de02f528283abc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=91=E4=BA=91=E7=99=BD=E5=9C=9F?=
Date: Wed, 17 Apr 2024 17:16:31 +0800
Subject: [PATCH 2/5] Update google.ts

---
 app/client/platforms/google.ts | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index 1ab36db25e0..a786f5275f4 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -21,11 +21,10 @@ export class GeminiProApi implements LLMApi {
   }
   async chat(options: ChatOptions): Promise<void> {
     // const apiClient = this;
-    const visionModel = isVisionModel(options.config.model);
     let multimodal = false;
     const messages = options.messages.map((v) => {
       let parts: any[] = [{ text: getMessageTextContent(v) }];
-      if (visionModel) {
+      if (isVisionModel(options.config.model)) {
         const images = getMessageImages(v);
         if (images.length > 0) {
           multimodal = true;
@@ -117,17 +116,12 @@ export class GeminiProApi implements LLMApi {
     const controller = new AbortController();
     options.onController?.(controller);
     try {
-      let googleChatPath = visionModel
-        ? Google.VisionChatPath(modelConfig.model)
-        : Google.ChatPath(modelConfig.model);
-      let chatPath = this.path(googleChatPath);
-
       // let baseUrl = accessStore.googleUrl;
       if (!baseUrl) {
         baseUrl = isApp
-          ? DEFAULT_API_HOST + "/api/proxy/google/" + googleChatPath
-          : chatPath;
+          ? DEFAULT_API_HOST + "/api/proxy/google/" + Google.ChatPath(modelConfig.model)
+          : this.path(Google.ChatPath(modelConfig.model));
       }
 
       if (isApp) {
@@ -145,6 +139,7 @@ export class GeminiProApi implements LLMApi {
         () => controller.abort(),
         REQUEST_TIMEOUT_MS,
       );
+
       if (shouldStream) {
         let responseText = "";
         let remainText = "";

From c96e4b79667cc3335bf5ee225914f43b5918c62f Mon Sep 17 00:00:00 2001
From: Wayland Zhan
Date: Fri, 19 Apr 2024 06:57:15 +0000
Subject: [PATCH 3/5] feat: support defining the default model for new chats
 via the DEFAULT_MODEL env var.

---
 app/api/config/route.ts |  1 +
 app/components/chat.tsx | 29 +++++++++++++++++++------
 app/config/server.ts    |  4 ++++
 app/store/access.ts     |  9 ++++++++
 app/utils/hooks.ts      |  5 +++--
 app/utils/model.ts      | 48 +++++++++++++++++++++++++++++++++++------
 6 files changed, 81 insertions(+), 15 deletions(-)

diff --git a/app/api/config/route.ts b/app/api/config/route.ts
index db84fba175a..b0d9da03103 100644
--- a/app/api/config/route.ts
+++ b/app/api/config/route.ts
@@ -13,6 +13,7 @@ const DANGER_CONFIG = {
   hideBalanceQuery: serverConfig.hideBalanceQuery,
   disableFastLink: serverConfig.disableFastLink,
   customModels: serverConfig.customModels,
+  defaultModel: serverConfig.defaultModel,
 };
 
 declare global {
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index b9750f2851d..85df5b9a82c 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -448,10 +448,20 @@ export function ChatActions(props: {
   // switch model
   const currentModel = chatStore.currentSession().mask.modelConfig.model;
   const allModels = useAllModels();
-  const models = useMemo(
-    () => allModels.filter((m) => m.available),
-    [allModels],
-  );
+  const models = useMemo(() => {
+    const filteredModels = allModels.filter((m) => m.available);
+    const defaultModel = filteredModels.find((m) => m.isDefault);
+
+    if (defaultModel) {
+      const arr = [
+        defaultModel,
+        ...filteredModels.filter((m) => m !== defaultModel),
+      ];
+      return arr;
+    } else {
+      return filteredModels;
+    }
+  }, [allModels]);
   const [showModelSelector, setShowModelSelector] = useState(false);
   const [showUploadImage, setShowUploadImage] = useState(false);
 
@@ -467,7 +477,10 @@ export function ChatActions(props: {
     // switch to first available model
     const isUnavaliableModel = !models.some((m) => m.name === currentModel);
     if (isUnavaliableModel && models.length > 0) {
-      const nextModel = models[0].name as ModelType;
+      // prefer the default model, if one exists
+      let nextModel: ModelType = (
+        models.find((model) => model.isDefault) || models[0]
+      ).name;
       chatStore.updateCurrentSession(
         (session) => (session.mask.modelConfig.model = nextModel),
       );
@@ -1102,11 +1115,13 @@ function _Chat() {
     };
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);
-
+
   const handlePaste = useCallback(
     async (event: React.ClipboardEvent<HTMLTextAreaElement>) => {
       const currentModel = chatStore.currentSession().mask.modelConfig.model;
-      if(!isVisionModel(currentModel)){return;}
+      if (!isVisionModel(currentModel)) {
+        return;
+      }
       const items = (event.clipboardData || window.clipboardData).items;
       for (const item of items) {
         if (item.kind === "file" && item.type.startsWith("image/")) {
diff --git a/app/config/server.ts b/app/config/server.ts
index c27ef5e4440..618112172ab 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -21,6 +21,7 @@ declare global {
       ENABLE_BALANCE_QUERY?: string; // allow user to query balance or not
       DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
       CUSTOM_MODELS?: string; // to control custom models
+      DEFAULT_MODEL?: string; // to control the default model in every new chat window
 
       // azure only
       AZURE_URL?: string; // https://{azure-url}/openai/deployments/{deploy-name}
@@ -59,12 +60,14 @@ export const getServerSideConfig = () => {
   const disableGPT4 = !!process.env.DISABLE_GPT4;
 
   let customModels = process.env.CUSTOM_MODELS ?? "";
+  let defaultModel = process.env.DEFAULT_MODEL ?? "";
 
   if (disableGPT4) {
     if (customModels) customModels += ",";
     customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
       .map((m) => "-" + m.name)
       .join(",");
+    if (defaultModel.startsWith("gpt-4")) defaultModel = "";
   }
 
   const isAzure = !!process.env.AZURE_URL;
@@ -116,6 +119,7 @@ export const getServerSideConfig = () => {
     hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY,
     disableFastLink: !!process.env.DISABLE_FAST_LINK,
     customModels,
+    defaultModel,
     whiteWebDevEndpoints,
   };
 };
diff --git a/app/store/access.ts b/app/store/access.ts
index 16366640257..64909609e05 100644
--- a/app/store/access.ts
+++ b/app/store/access.ts
@@ -8,6 +8,7 @@ import { getHeaders } from "../client/api";
 import { getClientConfig } from "../config/client";
 import { createPersistStore } from "../utils/store";
 import { ensure } from "../utils/clone";
+import { DEFAULT_CONFIG } from "./config";
 
 let fetchState = 0; // 0 not fetch, 1 fetching, 2 done
 
@@ -48,6 +49,7 @@ const DEFAULT_ACCESS_STATE = {
   disableGPT4: false,
   disableFastLink: false,
   customModels: "",
+  defaultModel: "",
 };
 
 export const useAccessStore = createPersistStore(
@@ -100,6 +102,13 @@ export const useAccessStore = createPersistStore(
       },
     })
       .then((res) => res.json())
+      .then((res) => {
+        // apply the default model returned by the server config request
+        let defaultModel = res.defaultModel ?? "";
+        DEFAULT_CONFIG.modelConfig.model =
+          defaultModel !== "" ? defaultModel : "gpt-3.5-turbo";
+        return res;
+      })
       .then((res: DangerConfig) => {
         console.log("[Config] got config from server", res);
         set(() => ({ ...res }));
diff --git a/app/utils/hooks.ts b/app/utils/hooks.ts
index 35d1f53a4c9..55d5d4fca7d 100644
--- a/app/utils/hooks.ts
+++ b/app/utils/hooks.ts
@@ -1,14 +1,15 @@
 import { useMemo } from "react";
 import { useAccessStore, useAppConfig } from "../store";
-import { collectModels } from "./model";
+import { collectModels, collectModelsWithDefaultModel } from "./model";
 
 export function useAllModels() {
   const accessStore = useAccessStore();
   const configStore = useAppConfig();
   const models = useMemo(() => {
-    return collectModels(
+    return collectModelsWithDefaultModel(
       configStore.models,
       [configStore.customModels, accessStore.customModels].join(","),
+      accessStore.defaultModel,
     );
   }, [accessStore.customModels, configStore.customModels, configStore.models]);
 
diff --git a/app/utils/model.ts b/app/utils/model.ts
index 378fc498e5f..6477640aad1 100644
--- a/app/utils/model.ts
+++ b/app/utils/model.ts
@@ -1,5 +1,11 @@
 import { LLMModel } from "../client/api";
 
+const customProvider = (modelName: string) => ({
+  id: modelName,
+  providerName: "",
+  providerType: "custom",
+});
+
 export function collectModelTable(
   models: readonly LLMModel[],
   customModels: string,
@@ -11,6 +17,7 @@ export function collectModelTable(
       name: string;
       displayName: string;
       provider?: LLMModel["provider"]; // Marked as optional
+      isDefault?: boolean;
     }
   > = {};
 
@@ -22,12 +29,6 @@ export function collectModelTable(
     };
   });
 
-  const customProvider = (modelName: string) => ({
-    id: modelName,
-    providerName: "",
-    providerType: "custom",
-  });
-
   // server custom models
   customModels
     .split(",")
@@ -52,6 +53,27 @@ export function collectModelTable(
       };
     }
   });
+
+  return modelTable;
+}
+
+export function collectModelTableWithDefaultModel(
+  models: readonly LLMModel[],
+  customModels: string,
+  defaultModel: string,
+) {
+  let modelTable = collectModelTable(models, customModels);
+  if (defaultModel && defaultModel !== "") {
+    delete modelTable[defaultModel];
+    modelTable[defaultModel] = {
+      name: defaultModel,
+      displayName: defaultModel,
+      available: true,
+      provider:
+        modelTable[defaultModel]?.provider ?? customProvider(defaultModel),
+      isDefault: true,
+    };
+  }
   return modelTable;
 }
 
 export function collectModels(
   models: readonly LLMModel[],
   customModels: string,
 ) {
   const modelTable = collectModelTable(models, customModels);
   const allModels = Object.values(modelTable);
 
   return allModels;
 }
+
+export function collectModelsWithDefaultModel(
+  models: readonly LLMModel[],
+  customModels: string,
+  defaultModel: string,
+) {
+  const modelTable = collectModelTableWithDefaultModel(
+    models,
+    customModels,
+    defaultModel,
+  );
+  const allModels = Object.values(modelTable);
+  return allModels;
+}

From 1cd0beb231d98bc14ff660d98bc78b1ba2df43b3 Mon Sep 17 00:00:00 2001
From: Roy
Date: Tue, 23 Apr 2024 11:48:54 +0800
Subject: [PATCH 4/5] chore: No outline when element is in `:focus-visible`
 state

---
 app/styles/globals.scss | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/app/styles/globals.scss b/app/styles/globals.scss
index aa22b7d4fd6..20792cda526 100644
--- a/app/styles/globals.scss
+++ b/app/styles/globals.scss
@@ -86,6 +86,7 @@
     @include dark;
   }
 }
+
 html {
   height: var(--full-height);
 
@@ -110,6 +111,10 @@ body {
   @media only screen and (max-width: 600px) {
     background-color: var(--second);
   }
+
+  *:focus-visible {
+    outline: none;
+  }
 }
 
 ::-webkit-scrollbar {

From dd4648ed9a803568b839e2510ca01cf7f1c6f740 Mon Sep 17 00:00:00 2001
From: "l.tingting"
Date: Wed, 24 Apr 2024 22:59:14 +0800
Subject: [PATCH 5/5] remove max_tokens from the official version of
 gpt-4-turbo

---
 app/client/platforms/openai.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index ca8bc2ebe6f..f3599263023 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -129,7 +129,7 @@ export class ChatGPTApi implements LLMApi {
     };
 
     // add max_tokens to vision model
-    if (visionModel) {
+    if (visionModel && modelConfig.model.includes("preview")) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
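
Editor's note on PATCH 3/5 (a usage sketch, not part of the patch series): the new
DEFAULT_MODEL environment variable flows from app/config/server.ts through the
/api/config route into the access store, and collectModelsWithDefaultModel marks the
chosen model as isDefault so the chat UI lists and preselects it first. The sketch below
is hedged: the ".env.local" filename assumes the standard Next.js env loading this
project already relies on, and the import paths are illustrative rather than copied
from the repository.

    # .env.local -- pin the model preselected in every new chat window
    DEFAULT_MODEL=gpt-4-turbo

    // model-default-sketch.ts -- illustrative paths; adjust to your checkout layout.
    // Demonstrates the behavior added in app/utils/model.ts: the default model is
    // forced available and flagged isDefault, falling back to a "custom" provider
    // entry when it is not in the built-in list.
    import { collectModelsWithDefaultModel } from "./app/utils/model";
    import { DEFAULT_MODELS } from "./app/constant";

    const models = collectModelsWithDefaultModel(
      DEFAULT_MODELS, // built-in model list
      "",             // no CUSTOM_MODELS configured
      "gpt-4-turbo",  // value of DEFAULT_MODEL
    );
    console.log(models.find((m) => m.isDefault)?.name); // "gpt-4-turbo"

Per the app/config/server.ts hunk above, note that DEFAULT_MODEL is cleared whenever
DISABLE_GPT4 is set and the configured default starts with "gpt-4".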