diff --git a/Dockerfile b/Dockerfile
index 2de1ba30a6ed..2bb9652e8e3b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -184,6 +184,8 @@ ENV \
PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
# Qwen
QWEN_API_KEY="" QWEN_MODEL_LIST="" \
+ # SenseNova
+ SENSENOVA_ACCESS_KEY_ID="" SENSENOVA_ACCESS_KEY_SECRET="" SENSENOVA_MODEL_LIST="" \
# SiliconCloud
SILICONCLOUD_API_KEY="" SILICONCLOUD_MODEL_LIST="" SILICONCLOUD_PROXY_URL="" \
# Spark
diff --git a/Dockerfile.database b/Dockerfile.database
index 07ba216b3bb4..6db815efaf52 100644
--- a/Dockerfile.database
+++ b/Dockerfile.database
@@ -219,6 +219,8 @@ ENV \
PERPLEXITY_API_KEY="" PERPLEXITY_MODEL_LIST="" PERPLEXITY_PROXY_URL="" \
# Qwen
QWEN_API_KEY="" QWEN_MODEL_LIST="" \
+ # SenseNova
+ SENSENOVA_ACCESS_KEY_ID="" SENSENOVA_ACCESS_KEY_SECRET="" SENSENOVA_MODEL_LIST="" \
# SiliconCloud
SILICONCLOUD_API_KEY="" SILICONCLOUD_MODEL_LIST="" SILICONCLOUD_PROXY_URL="" \
# Spark
diff --git a/locales/ar/modelProvider.json b/locales/ar/modelProvider.json
index 9018902e1bf4..c642d740fde6 100644
--- a/locales/ar/modelProvider.json
+++ b/locales/ar/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "أدخل معرف مفتاح الوصول لـ SenseNova",
+ "placeholder": "معرف مفتاح الوصول لـ SenseNova",
+ "title": "معرف مفتاح الوصول"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "أدخل سر مفتاح الوصول لـ SenseNova",
+ "placeholder": "سر مفتاح الوصول لـ SenseNova",
+ "title": "سر مفتاح الوصول"
+ },
+ "unlock": {
+ "description": "أدخل معرف مفتاح الوصول / سر مفتاح الوصول لبدء الجلسة. التطبيق لن يسجل إعدادات المصادقة الخاصة بك",
+ "title": "استخدم معلومات مصادقة SenseNova المخصصة"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "أدخل مفتاح الوصول من منصة بايدو تشيانفان",
diff --git a/locales/ar/models.json b/locales/ar/models.json
index d01d8767747f..633d2f61d407 100644
--- a/locales/ar/models.json
+++ b/locales/ar/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math يركز على حل المشكلات في مجال الرياضيات، ويقدم إجابات احترافية للأسئلة الصعبة."
},
+ "SenseChat": {
+ "description": "نموذج الإصدار الأساسي (V4)، بطول سياق 4K، يمتلك قدرات قوية وعامة."
+ },
+ "SenseChat-128K": {
+ "description": "نموذج الإصدار الأساسي (V4)، بطول سياق 128K، يتفوق في مهام فهم وتوليد النصوص الطويلة."
+ },
+ "SenseChat-32K": {
+ "description": "نموذج الإصدار الأساسي (V4)، بطول سياق 32K، يمكن استخدامه بمرونة في مختلف السيناريوهات."
+ },
+ "SenseChat-5": {
+ "description": "أحدث إصدار من النموذج (V5.5)، بطول سياق 128K، مع تحسينات ملحوظة في القدرة على الاستدلال الرياضي، المحادثات باللغة الإنجليزية، اتباع التعليمات وفهم النصوص الطويلة، مما يجعله في مستوى GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "بطول سياق 32K، يتفوق في فهم المحادثات باللغة الكانتونية مقارنة بـ GPT-4، ويضاهي GPT-4 Turbo في مجالات المعرفة، الاستدلال، الرياضيات وكتابة الأكواد."
+ },
+ "SenseChat-Character": {
+ "description": "نموذج النسخة القياسية، بطول سياق 8K، بسرعة استجابة عالية."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "نموذج النسخة المتقدمة، بطول سياق 32K، مع تحسين شامل في القدرات، يدعم المحادثات باللغة الصينية والإنجليزية."
+ },
+ "SenseChat-Turbo": {
+ "description": "مناسب للأسئلة السريعة، وسيناريوهات ضبط النموذج."
+ },
+ "SenseChat-Vision": {
+ "description": "أحدث إصدار من النموذج (V5.5)، بطول سياق 16K، يدعم إدخال صور متعددة، ويحقق تحسينات شاملة في القدرات الأساسية للنموذج، مع تحسينات كبيرة في التعرف على خصائص الكائنات، العلاقات المكانية، التعرف على أحداث الحركة، فهم المشاهد، التعرف على المشاعر، الاستدلال المنطقي وفهم النصوص وتوليدها."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B هو إصدار مفتوح المصدر، يوفر تجربة حوار محسنة لتطبيقات الحوار."
},
diff --git a/locales/ar/providers.json b/locales/ar/providers.json
index 0711b1722790..007eb1a67da9 100644
--- a/locales/ar/providers.json
+++ b/locales/ar/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Qwen هو نموذج لغة ضخم تم تطويره ذاتيًا بواسطة Alibaba Cloud، يتمتع بقدرات قوية في فهم وتوليد اللغة الطبيعية. يمكنه الإجابة على مجموعة متنوعة من الأسئلة، وكتابة المحتوى، والتعبير عن الآراء، وكتابة الشيفرات، ويؤدي دورًا في مجالات متعددة."
},
+ "sensenova": {},
"siliconcloud": {
"description": "تسعى SiliconFlow إلى تسريع الذكاء الاصطناعي العام (AGI) لفائدة البشرية، من خلال تحسين كفاءة الذكاء الاصطناعي على نطاق واسع باستخدام حزمة GenAI سهلة الاستخدام وذات التكلفة المنخفضة."
},
diff --git a/locales/bg-BG/modelProvider.json b/locales/bg-BG/modelProvider.json
index db90b9fa13cb..39dfe28d9e4b 100644
--- a/locales/bg-BG/modelProvider.json
+++ b/locales/bg-BG/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Въведете SenseNova Access Key ID",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Въведете SenseNova Access Key Secret",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Въведете вашия Access Key ID / Access Key Secret, за да започнете сесия. Приложението няма да записва вашите конфигурации за удостоверяване",
+ "title": "Използвайте персонализирана информация за удостоверяване на SenseNova"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Въведете Access Key от платформата Baidu Qianfan",
diff --git a/locales/bg-BG/models.json b/locales/bg-BG/models.json
index 53cced287cab..2c49c576bf9b 100644
--- a/locales/bg-BG/models.json
+++ b/locales/bg-BG/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math се фокусира върху решаването на математически проблеми, предоставяйки професионални отговори на трудни задачи."
},
+ "SenseChat": {
+ "description": "Основна версия на модела (V4), с контекстна дължина 4K, с мощни общи способности."
+ },
+ "SenseChat-128K": {
+ "description": "Основна версия на модела (V4), с контекстна дължина 128K, показваща отлични резултати в задачи за разбиране и генериране на дълги текстове."
+ },
+ "SenseChat-32K": {
+ "description": "Основна версия на модела (V4), с контекстна дължина 32K, гъвкаво приложима в различни сцени."
+ },
+ "SenseChat-5": {
+ "description": "Най-новата версия на модела (V5.5), с контекстна дължина 128K, значително подобрена способност в области като математическо разсъждение, английски разговори, следване на инструкции и разбиране на дълги текстове, сравнима с GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "С контекстна дължина 32K, надминава GPT-4 в разбирането на разговори на кантонски, сравним с GPT-4 Turbo в множество области като знания, разсъждение, математика и писане на код."
+ },
+ "SenseChat-Character": {
+ "description": "Стандартна версия на модела, с контекстна дължина 8K, с висока скорост на отговор."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Премиум версия на модела, с контекстна дължина 32K, с напълно подобрени способности, поддържаща разговори на китайски/английски."
+ },
+ "SenseChat-Turbo": {
+ "description": "Подходящ за бързи въпроси и отговори, сцени на фино настройване на модела."
+ },
+ "SenseChat-Vision": {
+ "description": "Най-новата версия на модела (V5.5), с контекстна дължина 16K, поддържа вход с множество изображения, напълно реализирана оптимизация на основните способности на модела, с голямо подобрение в разпознаването на свойства на обекти, пространствени отношения, разпознаване на действия, разбиране на сцени, разпознаване на емоции, логическо разсъждение и генериране на текст."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B е отворен код версия, предоставяща оптимизирано изживяване в разговорните приложения."
},
diff --git a/locales/bg-BG/providers.json b/locales/bg-BG/providers.json
index 304348e79979..b962fb5a9975 100644
--- a/locales/bg-BG/providers.json
+++ b/locales/bg-BG/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Qwen е самостоятелно разработен свръхголям езиков модел на Alibaba Cloud, с мощни способности за разбиране и генериране на естествен език. Може да отговаря на различни въпроси, да създава текстово съдържание, да изразява мнения и да пише код, играейки роля в множество области."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow се стреми да ускори AGI, за да бъде от полза за човечеството, повишавайки ефективността на мащабния AI чрез лесен за използване и икономически изгоден GenAI стек."
},
diff --git a/locales/de-DE/modelProvider.json b/locales/de-DE/modelProvider.json
index cd36803e1940..77b0aaf02bf4 100644
--- a/locales/de-DE/modelProvider.json
+++ b/locales/de-DE/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Geben Sie die SenseNova Access Key ID ein",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Geben Sie den SenseNova Access Key Secret ein",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Geben Sie Ihre Access Key ID / Access Key Secret ein, um die Sitzung zu starten. Die Anwendung speichert Ihre Authentifizierungsinformationen nicht",
+ "title": "Verwenden Sie benutzerdefinierte SenseNova Authentifizierungsinformationen"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Geben Sie den Access Key der Baidu Qianfan-Plattform ein",
diff --git a/locales/de-DE/models.json b/locales/de-DE/models.json
index 40a6b0f485e3..39664eb9b2c3 100644
--- a/locales/de-DE/models.json
+++ b/locales/de-DE/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math konzentriert sich auf die Problemlösung im Bereich Mathematik und bietet professionelle Lösungen für schwierige Aufgaben."
},
+ "SenseChat": {
+ "description": "Basisversion des Modells (V4) mit 4K Kontextlänge, die über starke allgemeine Fähigkeiten verfügt."
+ },
+ "SenseChat-128K": {
+ "description": "Basisversion des Modells (V4) mit 128K Kontextlänge, das in Aufgaben des Verständnisses und der Generierung langer Texte hervorragende Leistungen zeigt."
+ },
+ "SenseChat-32K": {
+ "description": "Basisversion des Modells (V4) mit 32K Kontextlänge, flexibel einsetzbar in verschiedenen Szenarien."
+ },
+ "SenseChat-5": {
+ "description": "Die neueste Modellversion (V5.5) mit 128K Kontextlänge hat signifikante Verbesserungen in den Bereichen mathematische Schlussfolgerungen, englische Konversation, Befolgen von Anweisungen und Verständnis langer Texte, vergleichbar mit GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Mit 32K Kontextlänge übertrifft es GPT-4 im Verständnis von Konversationen auf Kantonesisch und kann in mehreren Bereichen wie Wissen, Schlussfolgerungen, Mathematik und Programmierung mit GPT-4 Turbo konkurrieren."
+ },
+ "SenseChat-Character": {
+ "description": "Standardmodell mit 8K Kontextlänge und hoher Reaktionsgeschwindigkeit."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Premium-Modell mit 32K Kontextlänge, das umfassende Verbesserungen in den Fähigkeiten bietet und sowohl chinesische als auch englische Konversationen unterstützt."
+ },
+ "SenseChat-Turbo": {
+ "description": "Geeignet für schnelle Fragen und Antworten sowie Szenarien zur Feinabstimmung des Modells."
+ },
+ "SenseChat-Vision": {
+ "description": "Die neueste Modellversion (V5.5) mit 16K Kontextlänge unterstützt die Eingabe mehrerer Bilder und optimiert umfassend die grundlegenden Fähigkeiten des Modells. Es hat erhebliche Fortschritte in der Erkennung von Objektattributen, räumlichen Beziehungen, Erkennung von Handlungsereignissen, Szenenverständnis, Emotionserkennung, logischem Wissen und Textverständnis und -generierung erzielt."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B ist die Open-Source-Version, die ein optimiertes Dialogerlebnis für Konversationsanwendungen bietet."
},
diff --git a/locales/de-DE/providers.json b/locales/de-DE/providers.json
index 16b7e5c01515..7e52af98e29a 100644
--- a/locales/de-DE/providers.json
+++ b/locales/de-DE/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Tongyi Qianwen ist ein von Alibaba Cloud selbst entwickeltes, groß angelegtes Sprachmodell mit starken Fähigkeiten zur Verarbeitung und Generierung natürlicher Sprache. Es kann eine Vielzahl von Fragen beantworten, Texte erstellen, Meinungen äußern und Code schreiben und spielt in mehreren Bereichen eine Rolle."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow hat sich zum Ziel gesetzt, AGI zu beschleunigen, um der Menschheit zu dienen, und die Effizienz großangelegter KI durch eine benutzerfreundliche und kostengünstige GenAI-Stack zu steigern."
},
diff --git a/locales/en-US/modelProvider.json b/locales/en-US/modelProvider.json
index 9b0c1f0f3823..642f34f9f050 100644
--- a/locales/en-US/modelProvider.json
+++ b/locales/en-US/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Enter your SenseNova Access Key ID",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Enter your SenseNova Access Key Secret",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Input your Access Key ID / Access Key Secret to start the session. The application will not record your authentication configuration.",
+ "title": "Use Custom SenseNova Authentication Information"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Enter the Access Key from the Baidu Qianfan platform",
diff --git a/locales/en-US/models.json b/locales/en-US/models.json
index cb7a02cec8f2..e249fac19501 100644
--- a/locales/en-US/models.json
+++ b/locales/en-US/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math focuses on problem-solving in the field of mathematics, providing expert solutions for challenging problems."
},
+ "SenseChat": {
+ "description": "Basic version model (V4) with a context length of 4K, featuring strong general capabilities."
+ },
+ "SenseChat-128K": {
+ "description": "Basic version model (V4) with a context length of 128K, excelling in long text comprehension and generation tasks."
+ },
+ "SenseChat-32K": {
+ "description": "Basic version model (V4) with a context length of 32K, flexibly applicable to various scenarios."
+ },
+ "SenseChat-5": {
+ "description": "The latest version model (V5.5) with a context length of 128K shows significant improvements in mathematical reasoning, English conversation, instruction following, and long text comprehension, comparable to GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "With a context length of 32K, it surpasses GPT-4 in Cantonese conversation comprehension and is competitive with GPT-4 Turbo in knowledge, reasoning, mathematics, and code writing across multiple domains."
+ },
+ "SenseChat-Character": {
+ "description": "Standard version model with an 8K context length and high response speed."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Advanced version model with a context length of 32K, offering comprehensive capability enhancements and supporting both Chinese and English conversations."
+ },
+ "SenseChat-Turbo": {
+ "description": "Suitable for fast question answering and model fine-tuning scenarios."
+ },
+ "SenseChat-Vision": {
+ "description": "The latest version model (V5.5) with a context length of 16K supports multi-image input and fully optimizes the model's foundational capabilities, achieving substantial improvements in object attribute recognition, spatial relationships, action event recognition, scene understanding, emotion recognition, logical reasoning, and text comprehension and generation."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B is an open-source version that provides an optimized conversational experience for chat applications."
},
diff --git a/locales/en-US/providers.json b/locales/en-US/providers.json
index 7f9386d40251..9ec316518b57 100644
--- a/locales/en-US/providers.json
+++ b/locales/en-US/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Tongyi Qianwen is a large-scale language model independently developed by Alibaba Cloud, featuring strong natural language understanding and generation capabilities. It can answer various questions, create written content, express opinions, and write code, playing a role in multiple fields."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow is dedicated to accelerating AGI for the benefit of humanity, enhancing large-scale AI efficiency through an easy-to-use and cost-effective GenAI stack."
},
diff --git a/locales/es-ES/modelProvider.json b/locales/es-ES/modelProvider.json
index 5dc34101077d..163598936e25 100644
--- a/locales/es-ES/modelProvider.json
+++ b/locales/es-ES/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Introduce el ID de clave de acceso de SenseNova",
+ "placeholder": "ID de clave de acceso de SenseNova",
+ "title": "ID de clave de acceso"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Introduce la clave secreta de acceso de SenseNova",
+ "placeholder": "Clave secreta de acceso de SenseNova",
+ "title": "Clave secreta de acceso"
+ },
+ "unlock": {
+ "description": "Introduce tu ID de clave de acceso / clave secreta de acceso para comenzar la sesión. La aplicación no registrará tu configuración de autenticación",
+ "title": "Usar información de autenticación personalizada de SenseNova"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Introduce la Access Key de la plataforma Qianfan de Baidu",
diff --git a/locales/es-ES/models.json b/locales/es-ES/models.json
index d022db855154..4bea1b8c4ecf 100644
--- a/locales/es-ES/models.json
+++ b/locales/es-ES/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math se centra en la resolución de problemas en el ámbito de las matemáticas, proporcionando respuestas profesionales a preguntas de alta dificultad."
},
+ "SenseChat": {
+ "description": "Modelo de versión básica (V4), longitud de contexto de 4K, con potentes capacidades generales."
+ },
+ "SenseChat-128K": {
+ "description": "Modelo de versión básica (V4), longitud de contexto de 128K, se destaca en tareas de comprensión y generación de textos largos."
+ },
+ "SenseChat-32K": {
+ "description": "Modelo de versión básica (V4), longitud de contexto de 32K, aplicable de manera flexible en diversos escenarios."
+ },
+ "SenseChat-5": {
+ "description": "Modelo de última versión (V5.5), longitud de contexto de 128K, con capacidades significativamente mejoradas en razonamiento matemático, diálogos en inglés, seguimiento de instrucciones y comprensión de textos largos, comparable a GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Longitud de contexto de 32K, supera a GPT-4 en la comprensión de diálogos en cantonés, siendo comparable a GPT-4 Turbo en múltiples áreas como conocimiento, razonamiento, matemáticas y programación."
+ },
+ "SenseChat-Character": {
+ "description": "Modelo estándar, longitud de contexto de 8K, alta velocidad de respuesta."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Modelo de versión avanzada, longitud de contexto de 32K, con capacidades completamente mejoradas, admite diálogos en chino/inglés."
+ },
+ "SenseChat-Turbo": {
+ "description": "Adecuado para preguntas rápidas y escenarios de ajuste fino del modelo."
+ },
+ "SenseChat-Vision": {
+ "description": "Modelo de última versión (V5.5), longitud de contexto de 16K, admite entradas de múltiples imágenes, optimizando las capacidades básicas del modelo, logrando mejoras significativas en el reconocimiento de propiedades de objetos, relaciones espaciales, identificación de eventos de acción, comprensión de escenas, reconocimiento de emociones, razonamiento lógico y generación de comprensión de texto."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B es una versión de código abierto, que proporciona una experiencia de conversación optimizada para aplicaciones de diálogo."
},
diff --git a/locales/es-ES/providers.json b/locales/es-ES/providers.json
index d85b7556df6a..757d7f58d6d4 100644
--- a/locales/es-ES/providers.json
+++ b/locales/es-ES/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Tongyi Qianwen es un modelo de lenguaje de gran escala desarrollado de forma independiente por Alibaba Cloud, con potentes capacidades de comprensión y generación de lenguaje natural. Puede responder a diversas preguntas, crear contenido escrito, expresar opiniones y redactar código, desempeñando un papel en múltiples campos."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow se dedica a acelerar la AGI para beneficiar a la humanidad, mejorando la eficiencia de la IA a gran escala a través de un stack GenAI fácil de usar y de bajo costo."
},
diff --git a/locales/fr-FR/modelProvider.json b/locales/fr-FR/modelProvider.json
index 2d762da3595f..11b2e0d0182e 100644
--- a/locales/fr-FR/modelProvider.json
+++ b/locales/fr-FR/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Entrez l'ID de clé d'accès SenseNova",
+ "placeholder": "ID de clé d'accès SenseNova",
+ "title": "ID de clé d'accès"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Entrez le secret de clé d'accès SenseNova",
+ "placeholder": "Secret de clé d'accès SenseNova",
+ "title": "Secret de clé d'accès"
+ },
+ "unlock": {
+ "description": "Entrez votre ID de clé d'accès / secret de clé d'accès pour commencer la session. L'application ne conservera pas vos configurations d'authentification",
+ "title": "Utiliser des informations d'authentification SenseNova personnalisées"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Entrez la clé d'accès de la plateforme Qianfan de Baidu",
diff --git a/locales/fr-FR/models.json b/locales/fr-FR/models.json
index c448ad68bf2f..45dc1ff89c28 100644
--- a/locales/fr-FR/models.json
+++ b/locales/fr-FR/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math se concentre sur la résolution de problèmes dans le domaine des mathématiques, fournissant des réponses professionnelles pour des questions de haute difficulté."
},
+ "SenseChat": {
+ "description": "Modèle de version de base (V4), longueur de contexte de 4K, avec de puissantes capacités générales."
+ },
+ "SenseChat-128K": {
+ "description": "Modèle de version de base (V4), longueur de contexte de 128K, excellent dans les tâches de compréhension et de génération de longs textes."
+ },
+ "SenseChat-32K": {
+ "description": "Modèle de version de base (V4), longueur de contexte de 32K, appliqué de manière flexible à divers scénarios."
+ },
+ "SenseChat-5": {
+ "description": "Modèle de dernière version (V5.5), longueur de contexte de 128K, avec des capacités significativement améliorées dans le raisonnement mathématique, les dialogues en anglais, le suivi d'instructions et la compréhension de longs textes, rivalisant avec GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Longueur de contexte de 32K, surpassant GPT-4 dans la compréhension des dialogues en cantonais, rivalisant avec GPT-4 Turbo dans plusieurs domaines tels que les connaissances, le raisonnement, les mathématiques et la rédaction de code."
+ },
+ "SenseChat-Character": {
+ "description": "Modèle standard, longueur de contexte de 8K, avec une grande rapidité de réponse."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Modèle avancé, longueur de contexte de 32K, avec des capacités globalement améliorées, prenant en charge les dialogues en chinois et en anglais."
+ },
+ "SenseChat-Turbo": {
+ "description": "Conçu pour des questions-réponses rapides et des scénarios de micro-ajustement du modèle."
+ },
+ "SenseChat-Vision": {
+ "description": "Modèle de dernière version (V5.5), longueur de contexte de 16K, prenant en charge l'entrée de plusieurs images, optimisant les capacités fondamentales du modèle, avec des améliorations significatives dans la reconnaissance des attributs d'objets, les relations spatiales, la reconnaissance d'événements d'action, la compréhension de scènes, la reconnaissance des émotions, le raisonnement logique et la génération de texte."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B est une version open source, offrant une expérience de dialogue optimisée pour les applications de conversation."
},
diff --git a/locales/fr-FR/providers.json b/locales/fr-FR/providers.json
index 4631271ea047..1bb13c0d7387 100644
--- a/locales/fr-FR/providers.json
+++ b/locales/fr-FR/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Tongyi Qianwen est un modèle de langage à grande échelle développé de manière autonome par Alibaba Cloud, doté de puissantes capacités de compréhension et de génération du langage naturel. Il peut répondre à diverses questions, créer du contenu écrit, exprimer des opinions, rédiger du code, etc., jouant un rôle dans plusieurs domaines."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow s'engage à accélérer l'AGI pour le bénéfice de l'humanité, en améliorant l'efficacité de l'IA à grande échelle grâce à une pile GenAI facile à utiliser et à faible coût."
},
diff --git a/locales/it-IT/modelProvider.json b/locales/it-IT/modelProvider.json
index 7d39f4224321..097531c5f245 100644
--- a/locales/it-IT/modelProvider.json
+++ b/locales/it-IT/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Scarica il modello Ollama specificato"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Inserisci l'ID chiave di accesso SenseNova",
+ "placeholder": "ID chiave di accesso SenseNova",
+ "title": "ID chiave di accesso"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Inserisci il segreto della chiave di accesso SenseNova",
+ "placeholder": "Segreto della chiave di accesso SenseNova",
+ "title": "Segreto della chiave di accesso"
+ },
+ "unlock": {
+ "description": "Inserisci il tuo ID chiave di accesso / segreto della chiave di accesso per iniziare la sessione. L'app non registrerà la tua configurazione di autenticazione",
+ "title": "Utilizza informazioni di autenticazione personalizzate di SenseNova"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Inserisci l'Access Key della piattaforma Qianfan di Baidu",
diff --git a/locales/it-IT/models.json b/locales/it-IT/models.json
index 7cd99e9f1a9a..e2aae32414e1 100644
--- a/locales/it-IT/models.json
+++ b/locales/it-IT/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math si concentra sulla risoluzione di problemi nel campo della matematica, fornendo risposte professionali a domande di alta difficoltà."
},
+ "SenseChat": {
+ "description": "Modello di base (V4), lunghezza del contesto di 4K, con potenti capacità generali."
+ },
+ "SenseChat-128K": {
+ "description": "Modello di base (V4), lunghezza del contesto di 128K, si distingue in compiti di comprensione e generazione di testi lunghi."
+ },
+ "SenseChat-32K": {
+ "description": "Modello di base (V4), lunghezza del contesto di 32K, applicabile in vari scenari."
+ },
+ "SenseChat-5": {
+ "description": "Modello dell'ultima versione (V5.5), lunghezza del contesto di 128K, con capacità significativamente migliorate in ragionamento matematico, conversazioni in inglese, seguire istruzioni e comprensione di testi lunghi, paragonabile a GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Lunghezza del contesto di 32K, supera GPT-4 nella comprensione delle conversazioni in cantonese, paragonabile a GPT-4 Turbo in vari ambiti come conoscenza, ragionamento, matematica e scrittura di codice."
+ },
+ "SenseChat-Character": {
+ "description": "Modello standard, lunghezza del contesto di 8K, alta velocità di risposta."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Modello avanzato, lunghezza del contesto di 32K, capacità complessivamente migliorate, supporta conversazioni in cinese/inglese."
+ },
+ "SenseChat-Turbo": {
+ "description": "Adatto per domande e risposte rapide, scenari di micro-ottimizzazione del modello."
+ },
+ "SenseChat-Vision": {
+ "description": "Modello dell'ultima versione (V5.5), lunghezza del contesto di 16K, supporta input multipli di immagini, ottimizzazione completa delle capacità di base del modello, con notevoli miglioramenti nel riconoscimento delle proprietà degli oggetti, relazioni spaziali, riconoscimento di eventi, comprensione delle scene, riconoscimento delle emozioni, ragionamento logico e generazione di testi."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B è una versione open source, progettata per fornire un'esperienza di dialogo ottimizzata per applicazioni conversazionali."
},
diff --git a/locales/it-IT/providers.json b/locales/it-IT/providers.json
index f466ba355562..5906275951cf 100644
--- a/locales/it-IT/providers.json
+++ b/locales/it-IT/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Qwen è un modello di linguaggio di grande scala sviluppato autonomamente da Alibaba Cloud, con potenti capacità di comprensione e generazione del linguaggio naturale. Può rispondere a varie domande, creare contenuti testuali, esprimere opinioni e scrivere codice, svolgendo un ruolo in vari settori."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow si impegna ad accelerare l'AGI per il bene dell'umanità, migliorando l'efficienza dell'AI su larga scala attraverso stack GenAI facili da usare e a basso costo."
},
diff --git a/locales/ja-JP/modelProvider.json b/locales/ja-JP/modelProvider.json
index 1bf563e51ad0..7d61b7e570a9 100644
--- a/locales/ja-JP/modelProvider.json
+++ b/locales/ja-JP/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "SenseNova アクセスキー ID を入力してください",
+ "placeholder": "SenseNova アクセスキー ID",
+ "title": "アクセスキー ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "SenseNova アクセスキーシークレットを入力してください",
+ "placeholder": "SenseNova アクセスキーシークレット",
+ "title": "アクセスキーシークレット"
+ },
+ "unlock": {
+ "description": "あなたのアクセスキー ID / アクセスキーシークレットを入力すると、セッションが開始されます。アプリはあなたの認証設定を記録しません",
+ "title": "カスタム SenseNova 認証情報を使用"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "百度千帆プラットフォームのAccess Keyを入力してください",
diff --git a/locales/ja-JP/models.json b/locales/ja-JP/models.json
index 44b969ad07f0..0b52234851d1 100644
--- a/locales/ja-JP/models.json
+++ b/locales/ja-JP/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Mathは、数学分野の問題解決に特化しており、高難度の問題に対して専門的な解答を提供します。"
},
+ "SenseChat": {
+ "description": "基本バージョンのモデル (V4)、4Kのコンテキスト長で、汎用能力が強力です。"
+ },
+ "SenseChat-128K": {
+ "description": "基本バージョンのモデル (V4)、128Kのコンテキスト長で、長文理解や生成などのタスクで優れたパフォーマンスを発揮します。"
+ },
+ "SenseChat-32K": {
+ "description": "基本バージョンのモデル (V4)、32Kのコンテキスト長で、さまざまなシーンに柔軟に適用できます。"
+ },
+ "SenseChat-5": {
+ "description": "最新バージョンのモデル (V5.5)、128Kのコンテキスト長で、数学的推論、英語の対話、指示のフォロー、長文理解などの分野での能力が大幅に向上し、GPT-4oに匹敵します。"
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "32Kのコンテキスト長で、広東語の対話理解においてGPT-4を超え、知識、推論、数学、コード作成などの複数の分野でGPT-4 Turboに匹敵します。"
+ },
+ "SenseChat-Character": {
+ "description": "スタンダード版モデル、8Kのコンテキスト長で、高速な応答速度を持っています。"
+ },
+ "SenseChat-Character-Pro": {
+ "description": "ハイエンド版モデル、32Kのコンテキスト長で、能力が全面的に向上し、中国語/英語の対話をサポートしています。"
+ },
+ "SenseChat-Turbo": {
+ "description": "迅速な質問応答やモデルの微調整シーンに適しています。"
+ },
+ "SenseChat-Vision": {
+ "description": "最新バージョンのモデル (V5.5)、16Kのコンテキスト長で、複数の画像入力をサポートし、モデルの基本能力を全面的に最適化しています。物体の属性認識、空間関係、動作イベントの認識、シーン理解、感情認識、論理的常識推論、テキスト理解生成において大幅な向上を実現しました。"
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9Bはオープンソース版で、会話アプリケーションに最適化された対話体験を提供します。"
},
diff --git a/locales/ja-JP/providers.json b/locales/ja-JP/providers.json
index 732ff1daaac3..e95b6b9b6a0b 100644
--- a/locales/ja-JP/providers.json
+++ b/locales/ja-JP/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "通義千問は、アリババクラウドが独自に開発した超大規模言語モデルであり、強力な自然言語理解と生成能力を持っています。さまざまな質問に答えたり、文章を創作したり、意見を表現したり、コードを執筆したりすることができ、さまざまな分野で活躍しています。"
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlowは、AGIを加速させ、人類に利益をもたらすことを目指し、使いやすくコスト効率の高いGenAIスタックを通じて大規模AIの効率を向上させることに取り組んでいます。"
},
diff --git a/locales/ko-KR/modelProvider.json b/locales/ko-KR/modelProvider.json
index 13ef7ac1085d..b1f15789e388 100644
--- a/locales/ko-KR/modelProvider.json
+++ b/locales/ko-KR/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "SenseNova Access Key ID를 입력하세요",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "SenseNova Access Key Secret를 입력하세요",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Access Key ID / Access Key Secret를 입력하면 대화를 시작할 수 있습니다. 애플리케이션은 인증 구성을 기록하지 않습니다.",
+ "title": "사용자 정의 SenseNova 인증 정보 사용"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "바이두 천범 플랫폼의 Access Key를 입력하세요",
diff --git a/locales/ko-KR/models.json b/locales/ko-KR/models.json
index 8ba1447230b5..73fb9642f1b9 100644
--- a/locales/ko-KR/models.json
+++ b/locales/ko-KR/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math는 수학 분야의 문제 해결에 중점을 두고 있으며, 고난이도 문제에 대한 전문적인 해답을 제공합니다."
},
+ "SenseChat": {
+ "description": "기본 버전 모델(V4), 4K 컨텍스트 길이, 일반적인 능력이 강력합니다."
+ },
+ "SenseChat-128K": {
+ "description": "기본 버전 모델(V4), 128K 컨텍스트 길이, 긴 텍스트 이해 및 생성 작업에서 뛰어난 성능을 발휘합니다."
+ },
+ "SenseChat-32K": {
+ "description": "기본 버전 모델(V4), 32K 컨텍스트 길이, 다양한 시나리오에 유연하게 적용됩니다."
+ },
+ "SenseChat-5": {
+ "description": "최신 버전 모델(V5.5), 128K 컨텍스트 길이, 수학적 추론, 영어 대화, 지시 따르기 및 긴 텍스트 이해 등 분야에서 능력이 크게 향상되어 GPT-4o와 견줄 수 있습니다."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "32K 컨텍스트 길이, 광둥어 대화 이해에서 GPT-4를 초월하며, 지식, 추론, 수학 및 코드 작성 등 여러 분야에서 GPT-4 Turbo와 견줄 수 있습니다."
+ },
+ "SenseChat-Character": {
+ "description": "표준 버전 모델, 8K 컨텍스트 길이, 높은 응답 속도를 자랑합니다."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "고급 버전 모델, 32K 컨텍스트 길이, 능력이 전반적으로 향상되었으며, 중/영어 대화를 지원합니다."
+ },
+ "SenseChat-Turbo": {
+ "description": "빠른 질문 응답 및 모델 미세 조정 시나리오에 적합합니다."
+ },
+ "SenseChat-Vision": {
+ "description": "최신 버전 모델(V5.5), 16K 컨텍스트 길이, 다중 이미지 입력을 지원하며, 모델의 기본 능력 최적화를 전면적으로 구현하여 객체 속성 인식, 공간 관계, 동작 사건 인식, 장면 이해, 감정 인식, 논리 상식 추론 및 텍스트 이해 생성에서 큰 향상을 이루었습니다."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B 오픈 소스 버전으로, 대화 응용을 위한 최적화된 대화 경험을 제공합니다."
},
diff --git a/locales/ko-KR/providers.json b/locales/ko-KR/providers.json
index 22461e1befb3..7c0bac693eff 100644
--- a/locales/ko-KR/providers.json
+++ b/locales/ko-KR/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "통의천문은 알리바바 클라우드가 자주 개발한 초대형 언어 모델로, 강력한 자연어 이해 및 생성 능력을 갖추고 있습니다. 다양한 질문에 답변하고, 텍스트 콘텐츠를 창작하며, 의견을 표현하고, 코드를 작성하는 등 여러 분야에서 활용됩니다."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow는 AGI를 가속화하여 인류에 혜택을 주기 위해 사용하기 쉽고 비용이 저렴한 GenAI 스택을 통해 대규모 AI 효율성을 향상시키는 데 전념하고 있습니다."
},
diff --git a/locales/nl-NL/modelProvider.json b/locales/nl-NL/modelProvider.json
index 4bbc14f1373f..52fa264e7588 100644
--- a/locales/nl-NL/modelProvider.json
+++ b/locales/nl-NL/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Vul je SenseNova Access Key ID in",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Vul je SenseNova Access Key Secret in",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Voer je Access Key ID / Access Key Secret in om de sessie te starten. De applicatie registreert je authenticatie-instellingen niet",
+ "title": "Gebruik aangepaste SenseNova authenticatie-informatie"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Vul de Access Key van het Baidu Qianfan-platform in",
diff --git a/locales/nl-NL/models.json b/locales/nl-NL/models.json
index 5ec11c0a87ac..11729c46b503 100644
--- a/locales/nl-NL/models.json
+++ b/locales/nl-NL/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math richt zich op het oplossen van wiskundige vraagstukken en biedt professionele antwoorden op moeilijke vragen."
},
+ "SenseChat": {
+ "description": "Basisversie van het model (V4), met een contextlengte van 4K, heeft sterke algemene capaciteiten."
+ },
+ "SenseChat-128K": {
+ "description": "Basisversie van het model (V4), met een contextlengte van 128K, presteert uitstekend in taken van begrip en generatie van lange teksten."
+ },
+ "SenseChat-32K": {
+ "description": "Basisversie van het model (V4), met een contextlengte van 32K, flexibel toepasbaar in verschillende scenario's."
+ },
+ "SenseChat-5": {
+ "description": "De nieuwste versie van het model (V5.5), met een contextlengte van 128K, heeft aanzienlijke verbeteringen in wiskundig redeneren, Engelse conversatie, instructievolging en begrip van lange teksten, en kan zich meten met GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Met een contextlengte van 32K overtreft het de conversatiebegrip in het Kantonees van GPT-4 en kan het zich in verschillende domeinen zoals kennis, redeneren, wiskunde en coderen meten met GPT-4 Turbo."
+ },
+ "SenseChat-Character": {
+ "description": "Standaardversie van het model, met een contextlengte van 8K, hoge responsnelheid."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Geavanceerde versie van het model, met een contextlengte van 32K, met uitgebreide verbeteringen in capaciteiten, ondersteunt zowel Chinese als Engelse conversaties."
+ },
+ "SenseChat-Turbo": {
+ "description": "Geschikt voor snelle vraag-en-antwoord en modelafstemming."
+ },
+ "SenseChat-Vision": {
+ "description": "De nieuwste versie van het model (V5.5), met een contextlengte van 16K, ondersteunt meerdere afbeeldingsinvoeren en heeft de basiscapaciteiten van het model geoptimaliseerd, met aanzienlijke verbeteringen in objecteigenschappenherkenning, ruimtelijke relaties, actie-evenementherkenning, scènebegrip, emotieherkenning, logische redenering en tekstbegrip en -generatie."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B is de open-source versie die een geoptimaliseerde gesprekservaring biedt voor gespreksapplicaties."
},
diff --git a/locales/nl-NL/providers.json b/locales/nl-NL/providers.json
index 1ecd5b48920d..ee00928c0ec1 100644
--- a/locales/nl-NL/providers.json
+++ b/locales/nl-NL/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Tongyi Qianwen is een door Alibaba Cloud zelf ontwikkeld grootschalig taalmodel met krachtige mogelijkheden voor natuurlijke taalbegrip en -generatie. Het kan verschillende vragen beantwoorden, tekstinhoud creëren, meningen uiten, code schrijven, en speelt een rol in verschillende domeinen."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow streeft ernaar AGI te versnellen ten behoeve van de mensheid, door de efficiëntie van grootschalige AI te verbeteren met een gebruiksvriendelijke en kosteneffectieve GenAI-stack."
},
diff --git a/locales/pl-PL/modelProvider.json b/locales/pl-PL/modelProvider.json
index dff460a7dc94..706293b052fd 100644
--- a/locales/pl-PL/modelProvider.json
+++ b/locales/pl-PL/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Wprowadź SenseNova Access Key ID",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Wprowadź SenseNova Access Key Secret",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Wprowadź swój Access Key ID / Access Key Secret, aby rozpocząć sesję. Aplikacja nie zapisze twojej konfiguracji autoryzacji",
+ "title": "Użyj niestandardowych informacji autoryzacyjnych SenseNova"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Wprowadź Access Key z platformy Baidu Qianfan",
diff --git a/locales/pl-PL/models.json b/locales/pl-PL/models.json
index f34e23368901..f6086f171962 100644
--- a/locales/pl-PL/models.json
+++ b/locales/pl-PL/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math koncentruje się na rozwiązywaniu problemów w dziedzinie matematyki, oferując profesjonalne odpowiedzi na trudne pytania."
},
+ "SenseChat": {
+ "description": "Podstawowa wersja modelu (V4), długość kontekstu 4K, silne zdolności ogólne."
+ },
+ "SenseChat-128K": {
+ "description": "Podstawowa wersja modelu (V4), długość kontekstu 128K, doskonałe wyniki w zadaniach związanych z rozumieniem i generowaniem długich tekstów."
+ },
+ "SenseChat-32K": {
+ "description": "Podstawowa wersja modelu (V4), długość kontekstu 32K, elastycznie stosowana w różnych scenariuszach."
+ },
+ "SenseChat-5": {
+ "description": "Najnowsza wersja modelu (V5.5), długość kontekstu 128K, znacznie poprawione zdolności w zakresie rozumowania matematycznego, rozmów w języku angielskim, podążania za instrukcjami oraz rozumienia długich tekstów, dorównująca GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Długość kontekstu 32K, w rozumieniu rozmów w języku kantońskim przewyższa GPT-4, w wielu dziedzinach, takich jak wiedza, rozumowanie, matematyka i programowanie, dorównuje GPT-4 Turbo."
+ },
+ "SenseChat-Character": {
+ "description": "Standardowa wersja modelu, długość kontekstu 8K, wysoka szybkość reakcji."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Zaawansowana wersja modelu, długość kontekstu 32K, znacznie poprawione zdolności, obsługuje rozmowy w języku chińskim i angielskim."
+ },
+ "SenseChat-Turbo": {
+ "description": "Idealny do szybkich odpowiedzi i scenariuszy dostosowywania modelu."
+ },
+ "SenseChat-Vision": {
+ "description": "Najnowsza wersja modelu (V5.5), długość kontekstu 16K, obsługuje wiele obrazów jako wejście, w pełni realizuje optymalizację podstawowych zdolności modelu, osiągając znaczne postępy w rozpoznawaniu atrybutów obiektów, relacjach przestrzennych, rozpoznawaniu zdarzeń, rozumieniu scen, rozpoznawaniu emocji, rozumowaniu logicznym oraz generowaniu i rozumieniu tekstu."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B to otwarta wersja, oferująca zoptymalizowane doświadczenie dialogowe dla aplikacji konwersacyjnych."
},
diff --git a/locales/pl-PL/providers.json b/locales/pl-PL/providers.json
index afb5d085dd8f..42007e5b4b26 100644
--- a/locales/pl-PL/providers.json
+++ b/locales/pl-PL/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Tongyi Qianwen to samodzielnie opracowany przez Alibaba Cloud model językowy o dużej skali, charakteryzujący się silnymi zdolnościami rozumienia i generowania języka naturalnego. Może odpowiadać na różnorodne pytania, tworzyć treści pisemne, wyrażać opinie, pisać kod i działać w wielu dziedzinach."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow dąży do przyspieszenia AGI, aby przynieść korzyści ludzkości, poprawiając wydajność dużych modeli AI dzięki łatwemu w użyciu i niskokosztowemu stosowi GenAI."
},
diff --git a/locales/pt-BR/modelProvider.json b/locales/pt-BR/modelProvider.json
index 1beed30cfb3e..117dd10b6859 100644
--- a/locales/pt-BR/modelProvider.json
+++ b/locales/pt-BR/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Insira o SenseNova Access Key ID",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Insira o SenseNova Access Key Secret",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Digite seu Access Key ID / Access Key Secret para iniciar a sessão. O aplicativo não registrará suas configurações de autenticação",
+ "title": "Usar informações de autenticação personalizadas do SenseNova"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Insira a Access Key da plataforma Qianfan do Baidu",
diff --git a/locales/pt-BR/models.json b/locales/pt-BR/models.json
index 76572b2169e9..2a9542f0932d 100644
--- a/locales/pt-BR/models.json
+++ b/locales/pt-BR/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math foca na resolução de problemas na área de matemática, oferecendo respostas especializadas para questões de alta dificuldade."
},
+ "SenseChat": {
+ "description": "Modelo da versão básica (V4), com comprimento de contexto de 4K, com capacidades gerais poderosas."
+ },
+ "SenseChat-128K": {
+ "description": "Modelo da versão básica (V4), com comprimento de contexto de 128K, se destaca em tarefas de compreensão e geração de textos longos."
+ },
+ "SenseChat-32K": {
+ "description": "Modelo da versão básica (V4), com comprimento de contexto de 32K, aplicável de forma flexível em diversos cenários."
+ },
+ "SenseChat-5": {
+ "description": "Modelo da versão mais recente (V5.5), com comprimento de contexto de 128K, com capacidades significativamente aprimoradas em raciocínio matemático, diálogos em inglês, seguimento de instruções e compreensão de textos longos, rivalizando com o GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Comprimento de contexto de 32K, superando o GPT-4 na compreensão de diálogos em cantonês, competindo com o GPT-4 Turbo em várias áreas, incluindo conhecimento, raciocínio, matemática e programação."
+ },
+ "SenseChat-Character": {
+ "description": "Modelo padrão, com comprimento de contexto de 8K, alta velocidade de resposta."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Modelo avançado, com comprimento de contexto de 32K, com capacidades amplamente aprimoradas, suportando diálogos em chinês e inglês."
+ },
+ "SenseChat-Turbo": {
+ "description": "Adequado para perguntas rápidas e cenários de ajuste fino do modelo."
+ },
+ "SenseChat-Vision": {
+ "description": "Modelo da versão mais recente (V5.5), com comprimento de contexto de 16K, suporta entrada de múltiplas imagens, implementa otimizações abrangentes nas capacidades básicas do modelo, com grandes melhorias em reconhecimento de propriedades de objetos, relações espaciais, reconhecimento de eventos de ação, compreensão de cenas, reconhecimento de emoções, raciocínio lógico e compreensão e geração de texto."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B é uma versão de código aberto, oferecendo uma experiência de diálogo otimizada para aplicações de conversa."
},
diff --git a/locales/pt-BR/providers.json b/locales/pt-BR/providers.json
index 4eb32432aed1..57ea48fab41b 100644
--- a/locales/pt-BR/providers.json
+++ b/locales/pt-BR/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Qwen é um modelo de linguagem de grande escala desenvolvido pela Alibaba Cloud, com forte capacidade de compreensão e geração de linguagem natural. Ele pode responder a várias perguntas, criar conteúdo escrito, expressar opiniões e escrever código, atuando em vários campos."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow se dedica a acelerar a AGI para beneficiar a humanidade, melhorando a eficiência da IA em larga escala por meio de uma pilha GenAI fácil de usar e de baixo custo."
},
diff --git a/locales/ru-RU/modelProvider.json b/locales/ru-RU/modelProvider.json
index cb0cf1ecfa96..47c763ad262b 100644
--- a/locales/ru-RU/modelProvider.json
+++ b/locales/ru-RU/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Download specified Ollama model"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Введите SenseNova Access Key ID",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Введите SenseNova Access Key Secret",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Введите ваш Access Key ID / Access Key Secret, чтобы начать сессию. Приложение не будет записывать ваши настройки аутентификации",
+ "title": "Используйте пользовательскую аутентификацию SenseNova"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Введите Access Key платформы Baidu Qianfan",
diff --git a/locales/ru-RU/models.json b/locales/ru-RU/models.json
index 10c49f0cc278..d2cdb2374955 100644
--- a/locales/ru-RU/models.json
+++ b/locales/ru-RU/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math сосредоточен на решении математических задач, предоставляя профессиональные ответы на сложные вопросы."
},
+ "SenseChat": {
+ "description": "Базовая версия модели (V4), длина контекста 4K, обладает мощными универсальными возможностями."
+ },
+ "SenseChat-128K": {
+ "description": "Базовая версия модели (V4), длина контекста 128K, демонстрирует отличные результаты в задачах понимания и генерации длинных текстов."
+ },
+ "SenseChat-32K": {
+ "description": "Базовая версия модели (V4), длина контекста 32K, гибко применяется в различных сценариях."
+ },
+ "SenseChat-5": {
+ "description": "Последняя версия модели (V5.5), длина контекста 128K, значительно улучшенные способности в математическом рассуждении, английских диалогах, следовании инструкциям и понимании длинных текстов, сопоставимые с GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Длина контекста 32K, превосходит GPT-4 в понимании диалогов на кантонском, сопоставим с GPT-4 Turbo в таких областях, как знания, рассуждение, математика и написание кода."
+ },
+ "SenseChat-Character": {
+ "description": "Стандартная версия модели, длина контекста 8K, высокая скорость отклика."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Расширенная версия модели, длина контекста 32K, всеобъемлющие улучшения возможностей, поддерживает диалоги на китайском и английском языках."
+ },
+ "SenseChat-Turbo": {
+ "description": "Подходит для быстрого ответа на вопросы и сценариев тонкой настройки модели."
+ },
+ "SenseChat-Vision": {
+ "description": "Последняя версия модели (V5.5), длина контекста 16K, поддерживает ввод нескольких изображений, полностью реализована оптимизация базовых возможностей модели, значительно улучшены распознавание свойств объектов, пространственные отношения, распознавание событий, понимание сцен, распознавание эмоций, логическое рассуждение и генерация текста."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B — это открытая версия, обеспечивающая оптимизированный диалоговый опыт для приложений."
},
diff --git a/locales/ru-RU/providers.json b/locales/ru-RU/providers.json
index 0c18a6ee33bc..a7a705c50411 100644
--- a/locales/ru-RU/providers.json
+++ b/locales/ru-RU/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Qwen — это сверхбольшая языковая модель, разработанная Alibaba Cloud, обладающая мощными возможностями понимания и генерации естественного языка. Она может отвечать на различные вопросы, создавать текстовый контент, выражать мнения и писать код, играя важную роль в различных областях."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow стремится ускорить AGI, чтобы принести пользу человечеству, повышая эффективность масштабного AI с помощью простого и экономичного стека GenAI."
},
diff --git a/locales/tr-TR/modelProvider.json b/locales/tr-TR/modelProvider.json
index e9d7c5f5c9d5..586005014022 100644
--- a/locales/tr-TR/modelProvider.json
+++ b/locales/tr-TR/modelProvider.json
@@ -119,6 +119,22 @@
"title": "下载指定的 Ollama 模型"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "SenseNova Erişim Anahtar Kimliği girin",
+ "placeholder": "SenseNova Erişim Anahtar Kimliği",
+ "title": "Erişim Anahtar Kimliği"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "SenseNova Erişim Anahtar Gizli Anahtarını girin",
+ "placeholder": "SenseNova Erişim Anahtar Gizli Anahtarı",
+ "title": "Erişim Anahtar Gizli Anahtarı"
+ },
+ "unlock": {
+ "description": "Oturuma başlamak için Erişim Anahtar Kimliğinizi / Erişim Anahtar Gizli Anahtarınızı girin. Uygulama kimlik doğrulama yapılandırmanızı kaydetmeyecek",
+ "title": "Özel SenseNova kimlik doğrulama bilgilerini kullan"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Baidu Qianfan platform için Access Key girin",
diff --git a/locales/tr-TR/models.json b/locales/tr-TR/models.json
index bc8deff67c19..21e7aaf30ace 100644
--- a/locales/tr-TR/models.json
+++ b/locales/tr-TR/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math, matematik alanındaki sorunları çözmeye odaklanır ve yüksek zorlukta sorulara profesyonel yanıtlar sunar."
},
+ "SenseChat": {
+ "description": "Temel sürüm model (V4), 4K bağlam uzunluğu ile genel yetenekleri güçlüdür."
+ },
+ "SenseChat-128K": {
+ "description": "Temel sürüm model (V4), 128K bağlam uzunluğu ile uzun metin anlama ve üretme görevlerinde mükemmel performans sergilemektedir."
+ },
+ "SenseChat-32K": {
+ "description": "Temel sürüm model (V4), 32K bağlam uzunluğu ile çeşitli senaryolarda esnek bir şekilde uygulanabilir."
+ },
+ "SenseChat-5": {
+ "description": "En son sürüm model (V5.5), 128K bağlam uzunluğu, matematiksel akıl yürütme, İngilizce diyalog, talimat takibi ve uzun metin anlama gibi alanlarda önemli gelişmeler göstermektedir ve GPT-4o ile karşılaştırılabilir."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "32K bağlam uzunluğu ile, Kantonca diyalog anlama konusunda GPT-4'ü aşmakta, bilgi, akıl yürütme, matematik ve kod yazma gibi birçok alanda GPT-4 Turbo ile rekabet edebilmektedir."
+ },
+ "SenseChat-Character": {
+ "description": "Standart sürüm model, 8K bağlam uzunluğu ile yüksek yanıt hızı sunmaktadır."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Gelişmiş sürüm model, 32K bağlam uzunluğu ile yetenekleri tamamen geliştirilmiş, Çince/İngilizce diyalogları desteklemektedir."
+ },
+ "SenseChat-Turbo": {
+ "description": "Hızlı soru-cevap ve model ince ayar senaryoları için uygundur."
+ },
+ "SenseChat-Vision": {
+ "description": "En son sürüm model (V5.5), 16K bağlam uzunluğu, çoklu görüntü girişini destekler, modelin temel yetenek optimizasyonunu tam olarak gerçekleştirir ve nesne özellik tanıma, mekansal ilişkiler, eylem olayı tanıma, sahne anlama, duygu tanıma, mantıksal bilgi akıl yürütme ve metin anlama üretiminde önemli iyileştirmeler sağlamıştır."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B açık kaynak versiyonu, diyalog uygulamaları için optimize edilmiş bir diyalog deneyimi sunar."
},
diff --git a/locales/tr-TR/providers.json b/locales/tr-TR/providers.json
index 2703d8b3bd19..20846d7837dd 100644
--- a/locales/tr-TR/providers.json
+++ b/locales/tr-TR/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Tongyi Qianwen, Alibaba Cloud tarafından geliştirilen büyük ölçekli bir dil modelidir ve güçlü doğal dil anlama ve üretme yeteneklerine sahiptir. Çeşitli soruları yanıtlayabilir, metin içeriği oluşturabilir, görüşlerini ifade edebilir ve kod yazabilir. Birçok alanda etkili bir şekilde kullanılmaktadır."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow, insanlığa fayda sağlamak amacıyla AGI'yi hızlandırmaya odaklanmakta ve kullanıcı dostu ve maliyet etkin GenAI yığınları ile büyük ölçekli yapay zeka verimliliğini artırmayı hedeflemektedir."
},
diff --git a/locales/vi-VN/modelProvider.json b/locales/vi-VN/modelProvider.json
index f5b18cbbb819..55cdbe74caa1 100644
--- a/locales/vi-VN/modelProvider.json
+++ b/locales/vi-VN/modelProvider.json
@@ -119,6 +119,22 @@
"title": "Tải xuống mô hình Ollama đã chỉ định"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "Nhập SenseNova Access Key ID",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "Nhập SenseNova Access Key Secret",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "Nhập Access Key ID / Access Key Secret của bạn để bắt đầu phiên. Ứng dụng sẽ không ghi lại cấu hình xác thực của bạn",
+ "title": "Sử dụng thông tin xác thực SenseNova tùy chỉnh"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "Nhập Access Key từ nền tảng Qianfan của Baidu",
diff --git a/locales/vi-VN/models.json b/locales/vi-VN/models.json
index ba1889f80aa4..11582f02856e 100644
--- a/locales/vi-VN/models.json
+++ b/locales/vi-VN/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math tập trung vào việc giải quyết các vấn đề trong lĩnh vực toán học, cung cấp giải pháp chuyên nghiệp cho các bài toán khó."
},
+ "SenseChat": {
+ "description": "Mô hình phiên bản cơ bản (V4), độ dài ngữ cảnh 4K, khả năng tổng quát mạnh mẽ."
+ },
+ "SenseChat-128K": {
+ "description": "Mô hình phiên bản cơ bản (V4), độ dài ngữ cảnh 128K, thể hiện xuất sắc trong các nhiệm vụ hiểu và sinh văn bản dài."
+ },
+ "SenseChat-32K": {
+ "description": "Mô hình phiên bản cơ bản (V4), độ dài ngữ cảnh 32K, linh hoạt áp dụng trong nhiều tình huống."
+ },
+ "SenseChat-5": {
+ "description": "Phiên bản mô hình mới nhất (V5.5), độ dài ngữ cảnh 128K, khả năng cải thiện đáng kể trong suy luận toán học, đối thoại tiếng Anh, theo dõi chỉ dẫn và hiểu biết văn bản dài, ngang tầm với GPT-4o."
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "Độ dài ngữ cảnh 32K, vượt qua GPT-4 trong hiểu biết đối thoại tiếng Quảng Đông, có thể so sánh với GPT-4 Turbo trong nhiều lĩnh vực như kiến thức, suy luận, toán học và lập trình mã."
+ },
+ "SenseChat-Character": {
+ "description": "Mô hình phiên bản tiêu chuẩn, độ dài ngữ cảnh 8K, tốc độ phản hồi cao."
+ },
+ "SenseChat-Character-Pro": {
+ "description": "Mô hình phiên bản cao cấp, độ dài ngữ cảnh 32K, khả năng được cải thiện toàn diện, hỗ trợ đối thoại tiếng Trung/tiếng Anh."
+ },
+ "SenseChat-Turbo": {
+ "description": "Phù hợp cho các tình huống hỏi đáp nhanh và tinh chỉnh mô hình."
+ },
+ "SenseChat-Vision": {
+ "description": "Phiên bản mô hình mới nhất (V5.5), độ dài ngữ cảnh 16K, hỗ trợ đầu vào nhiều hình ảnh, hoàn thiện khả năng cơ bản của mô hình, đạt được sự cải thiện lớn trong nhận diện thuộc tính đối tượng, mối quan hệ không gian, nhận diện sự kiện hành động, hiểu biết cảnh, nhận diện cảm xúc, suy luận kiến thức logic và hiểu biết sinh văn bản."
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B là phiên bản mã nguồn mở, cung cấp trải nghiệm đối thoại tối ưu cho các ứng dụng hội thoại."
},
diff --git a/locales/vi-VN/providers.json b/locales/vi-VN/providers.json
index d608f5d1545c..54fb1f1341c5 100644
--- a/locales/vi-VN/providers.json
+++ b/locales/vi-VN/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "Qwen là mô hình ngôn ngữ quy mô lớn tự phát triển của Alibaba Cloud, có khả năng hiểu và tạo ngôn ngữ tự nhiên mạnh mẽ. Nó có thể trả lời nhiều câu hỏi, sáng tác nội dung văn bản, bày tỏ quan điểm, viết mã, v.v., hoạt động trong nhiều lĩnh vực."
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow cam kết tăng tốc AGI để mang lại lợi ích cho nhân loại, nâng cao hiệu quả AI quy mô lớn thông qua một ngăn xếp GenAI dễ sử dụng và chi phí thấp."
},
diff --git a/locales/zh-CN/modelProvider.json b/locales/zh-CN/modelProvider.json
index 37f8d83077df..2474379c7c90 100644
--- a/locales/zh-CN/modelProvider.json
+++ b/locales/zh-CN/modelProvider.json
@@ -119,6 +119,22 @@
"title": "下载指定的 Ollama 模型"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "填入 SenseNova Access Key ID",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "填入 SenseNova Access Key Secret",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "输入你的 Access Key ID / Access Key Secret 即可开始会话。应用不会记录你的鉴权配置",
+ "title": "使用自定义 SenseNova 鉴权信息"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "填入百度千帆平台的 Access Key",
diff --git a/locales/zh-CN/models.json b/locales/zh-CN/models.json
index 52e17ed606c7..c77085f48d8f 100644
--- a/locales/zh-CN/models.json
+++ b/locales/zh-CN/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math 专注于数学领域的问题求解,为高难度题提供专业解答。"
},
+ "SenseChat": {
+ "description": "基础版本模型 (V4),4K上下文长度,通用能力强大"
+ },
+ "SenseChat-128K": {
+ "description": "基础版本模型 (V4),128K上下文长度,在长文本理解及生成等任务中表现出色"
+ },
+ "SenseChat-32K": {
+ "description": "基础版本模型 (V4),32K上下文长度,灵活应用于各类场景"
+ },
+ "SenseChat-5": {
+ "description": "最新版本模型 (V5.5),128K上下文长度,在数学推理、英文对话、指令跟随以及长文本理解等领域能力显著提升,比肩GPT-4o"
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "32K上下文长度,在粤语的对话理解上超越了GPT-4,在知识、推理、数学及代码编写等多个领域均能与GPT-4 Turbo相媲美"
+ },
+ "SenseChat-Character": {
+ "description": "标准版模型,8K上下文长度,高响应速度"
+ },
+ "SenseChat-Character-Pro": {
+ "description": "高级版模型,32K上下文长度,能力全面提升,支持中/英文对话"
+ },
+ "SenseChat-Turbo": {
+ "description": "适用于快速问答、模型微调场景"
+ },
+ "SenseChat-Vision": {
+ "description": "最新版本模型 (V5.5),16K上下文长度,支持多图的输入,全面实现模型基础能力优化,在对象属性识别、空间关系、动作事件识别、场景理解、情感识别、逻辑常识推理和文本理解生成上都实现了较大提升。"
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B 开放源码版本,为会话应用提供优化后的对话体验。"
},
diff --git a/locales/zh-CN/providers.json b/locales/zh-CN/providers.json
index 5c51c8c61d2b..746240e210be 100644
--- a/locales/zh-CN/providers.json
+++ b/locales/zh-CN/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "通义千问是阿里云自主研发的超大规模语言模型,具有强大的自然语言理解和生成能力。它可以回答各种问题、创作文字内容、表达观点看法、撰写代码等,在多个领域发挥作用。"
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconCloud,基于优秀开源基础模型的高性价比 GenAI 云服务"
},
diff --git a/locales/zh-TW/modelProvider.json b/locales/zh-TW/modelProvider.json
index 52d865aa6059..1e464750d9d6 100644
--- a/locales/zh-TW/modelProvider.json
+++ b/locales/zh-TW/modelProvider.json
@@ -119,6 +119,22 @@
"title": "下載指定的 Ollama 模型"
}
},
+ "sensenova": {
+ "sensenovaAccessKeyID": {
+ "desc": "填入 SenseNova Access Key ID",
+ "placeholder": "SenseNova Access Key ID",
+ "title": "Access Key ID"
+ },
+ "sensenovaAccessKeySecret": {
+ "desc": "填入 SenseNova Access Key Secret",
+ "placeholder": "SenseNova Access Key Secret",
+ "title": "Access Key Secret"
+ },
+ "unlock": {
+ "description": "輸入你的 Access Key ID / Access Key Secret 即可開始會話。應用不會記錄你的鑑權配置",
+ "title": "使用自訂的 SenseNova 鑑權資訊"
+ }
+ },
"wenxin": {
"accessKey": {
"desc": "填入百度千帆平台的 Access Key",
diff --git a/locales/zh-TW/models.json b/locales/zh-TW/models.json
index e5d2e4f7ca66..f4275722d938 100644
--- a/locales/zh-TW/models.json
+++ b/locales/zh-TW/models.json
@@ -155,6 +155,33 @@
"Qwen/Qwen2.5-Math-72B-Instruct": {
"description": "Qwen2.5-Math專注於數學領域的問題求解,為高難度題提供專業解答。"
},
+ "SenseChat": {
+ "description": "基礎版本模型 (V4),4K上下文長度,通用能力強大"
+ },
+ "SenseChat-128K": {
+ "description": "基礎版本模型 (V4),128K上下文長度,在長文本理解及生成等任務中表現出色"
+ },
+ "SenseChat-32K": {
+ "description": "基礎版本模型 (V4),32K上下文長度,靈活應用於各類場景"
+ },
+ "SenseChat-5": {
+ "description": "最新版本模型 (V5.5),128K上下文長度,在數學推理、英文對話、指令跟隨以及長文本理解等領域能力顯著提升,比肩GPT-4o"
+ },
+ "SenseChat-5-Cantonese": {
+ "description": "32K上下文長度,在粵語的對話理解上超越了GPT-4,在知識、推理、數學及程式編寫等多個領域均能與GPT-4 Turbo相媲美"
+ },
+ "SenseChat-Character": {
+ "description": "標準版模型,8K上下文長度,高響應速度"
+ },
+ "SenseChat-Character-Pro": {
+ "description": "高級版模型,32K上下文長度,能力全面提升,支持中/英文對話"
+ },
+ "SenseChat-Turbo": {
+ "description": "適用於快速問答、模型微調場景"
+ },
+ "SenseChat-Vision": {
+ "description": "最新版本模型 (V5.5),16K上下文長度,支持多圖的輸入,全面實現模型基礎能力優化,在物件屬性識別、空間關係、動作事件識別、場景理解、情感識別、邏輯常識推理和文本理解生成上都實現了較大提升。"
+ },
"THUDM/glm-4-9b-chat": {
"description": "GLM-4 9B 開放源碼版本,為會話應用提供優化後的對話體驗。"
},
diff --git a/locales/zh-TW/providers.json b/locales/zh-TW/providers.json
index 2d04a8d63e82..d4eed5540c7f 100644
--- a/locales/zh-TW/providers.json
+++ b/locales/zh-TW/providers.json
@@ -63,6 +63,7 @@
"qwen": {
"description": "通義千問是阿里雲自主研發的超大規模語言模型,具有強大的自然語言理解和生成能力。它可以回答各種問題、創作文字內容、表達觀點看法、撰寫代碼等,在多個領域發揮作用。"
},
+ "sensenova": {},
"siliconcloud": {
"description": "SiliconFlow 致力於加速 AGI,以惠及人類,通過易用與成本低的 GenAI 堆疊提升大規模 AI 效率。"
},
diff --git a/src/app/(main)/settings/llm/ProviderList/SenseNova/index.tsx b/src/app/(main)/settings/llm/ProviderList/SenseNova/index.tsx
new file mode 100644
index 000000000000..c109d5c4ee7c
--- /dev/null
+++ b/src/app/(main)/settings/llm/ProviderList/SenseNova/index.tsx
@@ -0,0 +1,44 @@
+'use client';
+
+import { Input } from 'antd';
+import { useTranslation } from 'react-i18next';
+
+import { SenseNovaProviderCard } from '@/config/modelProviders';
+import { GlobalLLMProviderKey } from '@/types/user/settings';
+
+import { KeyVaultsConfigKey } from '../../const';
+import { ProviderItem } from '../../type';
+
+const providerKey: GlobalLLMProviderKey = 'sensenova';
+
+export const useSenseNovaProvider = (): ProviderItem => {
+ const { t } = useTranslation('modelProvider');
+
+ return {
+ ...SenseNovaProviderCard,
+    apiKeyItems: [
+      {
+        children: (
+          <Input.Password
+            autoComplete={'new-password'}
+            placeholder={t(`${providerKey}.sensenovaAccessKeyID.placeholder`)}
+          />
+        ),
+        desc: t(`${providerKey}.sensenovaAccessKeyID.desc`),
+        label: t(`${providerKey}.sensenovaAccessKeyID.title`),
+        name: [KeyVaultsConfigKey, providerKey, 'sensenovaAccessKeyID'],
+      },
+      {
+        children: (
+          <Input.Password
+            autoComplete={'new-password'}
+            placeholder={t(`${providerKey}.sensenovaAccessKeySecret.placeholder`)}
+          />
+        ),
+        desc: t(`${providerKey}.sensenovaAccessKeySecret.desc`),
+        label: t(`${providerKey}.sensenovaAccessKeySecret.title`),
+        name: [KeyVaultsConfigKey, providerKey, 'sensenovaAccessKeySecret'],
+      },
+    ],
+ };
+};
diff --git a/src/app/(main)/settings/llm/ProviderList/providers.tsx b/src/app/(main)/settings/llm/ProviderList/providers.tsx
index 3025c6b5fa39..a2e24524f98e 100644
--- a/src/app/(main)/settings/llm/ProviderList/providers.tsx
+++ b/src/app/(main)/settings/llm/ProviderList/providers.tsx
@@ -35,6 +35,7 @@ import { useHuggingFaceProvider } from './HuggingFace';
import { useOllamaProvider } from './Ollama';
import { useOpenAIProvider } from './OpenAI';
+import { useSenseNovaProvider } from './SenseNova';
 import { useWenxinProvider } from './Wenxin';
export const useProviderList = (): ProviderItem[] => {
const AzureProvider = useAzureProvider();
@@ -44,6 +45,7 @@ export const useProviderList = (): ProviderItem[] => {
const GithubProvider = useGithubProvider();
const HuggingFaceProvider = useHuggingFaceProvider();
const WenxinProvider = useWenxinProvider();
+ const SenseNovaProvider = useSenseNovaProvider();
return useMemo(
() => [
@@ -71,6 +73,7 @@ export const useProviderList = (): ProviderItem[] => {
SparkProviderCard,
ZhiPuProviderCard,
ZeroOneProviderCard,
+ SenseNovaProvider,
StepfunProviderCard,
MoonshotProviderCard,
BaichuanProviderCard,
@@ -87,6 +90,7 @@ export const useProviderList = (): ProviderItem[] => {
GithubProvider,
WenxinProvider,
HuggingFaceProvider,
+ SenseNovaProvider,
],
);
};
diff --git a/src/config/llm.ts b/src/config/llm.ts
index 03dc70c2eeca..8060a708198f 100644
--- a/src/config/llm.ts
+++ b/src/config/llm.ts
@@ -144,6 +144,11 @@ export const getLLMConfig = () => {
HUGGINGFACE_API_KEY: z.string().optional(),
HUGGINGFACE_PROXY_URL: z.string().optional(),
HUGGINGFACE_MODEL_LIST: z.string().optional(),
+
+ ENABLED_SENSENOVA: z.boolean(),
+ SENSENOVA_ACCESS_KEY_ID: z.string().optional(),
+ SENSENOVA_ACCESS_KEY_SECRET: z.string().optional(),
+ SENSENOVA_MODEL_LIST: z.string().optional(),
},
runtimeEnv: {
API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -285,6 +290,11 @@ export const getLLMConfig = () => {
HUGGINGFACE_API_KEY: process.env.HUGGINGFACE_API_KEY,
HUGGINGFACE_PROXY_URL: process.env.HUGGINGFACE_PROXY_URL,
HUGGINGFACE_MODEL_LIST: process.env.HUGGINGFACE_MODEL_LIST,
+
+ ENABLED_SENSENOVA: !!process.env.SENSENOVA_ACCESS_KEY_ID && !!process.env.SENSENOVA_ACCESS_KEY_SECRET,
+ SENSENOVA_ACCESS_KEY_ID: process.env.SENSENOVA_ACCESS_KEY_ID,
+ SENSENOVA_ACCESS_KEY_SECRET: process.env.SENSENOVA_ACCESS_KEY_SECRET,
+ SENSENOVA_MODEL_LIST: process.env.SENSENOVA_MODEL_LIST,
},
});
};
diff --git a/src/config/modelProviders/index.ts b/src/config/modelProviders/index.ts
index 71cd88027e9d..2237ef877b7c 100644
--- a/src/config/modelProviders/index.ts
+++ b/src/config/modelProviders/index.ts
@@ -22,6 +22,7 @@ import OpenAIProvider from './openai';
import OpenRouterProvider from './openrouter';
import PerplexityProvider from './perplexity';
import QwenProvider from './qwen';
+import SenseNovaProvider from './sensenova';
import SiliconCloudProvider from './siliconcloud';
import SparkProvider from './spark';
import StepfunProvider from './stepfun';
@@ -63,6 +64,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
Ai21Provider.chatModels,
HunyuanProvider.chatModels,
WenxinProvider.chatModels,
+ SenseNovaProvider.chatModels,
].flat();
export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -90,6 +92,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
SparkProvider,
ZhiPuProvider,
ZeroOneProvider,
+ SenseNovaProvider,
StepfunProvider,
MoonshotProvider,
BaichuanProvider,
@@ -130,6 +133,7 @@ export { default as OpenAIProviderCard } from './openai';
export { default as OpenRouterProviderCard } from './openrouter';
export { default as PerplexityProviderCard } from './perplexity';
export { default as QwenProviderCard } from './qwen';
+export { default as SenseNovaProviderCard } from './sensenova';
export { default as SiliconCloudProviderCard } from './siliconcloud';
export { default as SparkProviderCard } from './spark';
export { default as StepfunProviderCard } from './stepfun';
diff --git a/src/config/modelProviders/sensenova.ts b/src/config/modelProviders/sensenova.ts
new file mode 100644
index 000000000000..fe4965a6897b
--- /dev/null
+++ b/src/config/modelProviders/sensenova.ts
@@ -0,0 +1,124 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref https://platform.sensenova.cn/pricing
+// ref https://platform.sensenova.cn/release?path=/release-202409.md
+const SenseNova: ModelProviderCard = {
+ chatModels: [
+ {
+ description: '最新版本模型 (V5.5),128K上下文长度,在数学推理、英文对话、指令跟随以及长文本理解等领域能力显著提升,比肩GPT-4o',
+ displayName: 'SenseChat 5.5',
+ enabled: true,
+ functionCall: true,
+ id: 'SenseChat-5',
+ pricing: {
+ currency: 'CNY',
+ input: 40,
+ output: 100,
+ },
+ tokens: 131_072,
+ },
+ {
+ description: '最新版本模型 (V5.5),16K上下文长度,支持多图的输入,全面实现模型基础能力优化,在对象属性识别、空间关系、动作事件识别、场景理解、情感识别、逻辑常识推理和文本理解生成上都实现了较大提升。',
+ displayName: 'SenseChat 5.5 Vision',
+ enabled: true,
+ id: 'SenseChat-Vision',
+ pricing: {
+ currency: 'CNY',
+ input: 100,
+ output: 100,
+ },
+ tokens: 16_384,
+ vision: true,
+ },
+ {
+ description: '适用于快速问答、模型微调场景',
+ displayName: 'SenseChat 5.0 Turbo',
+ enabled: true,
+ id: 'SenseChat-Turbo',
+ pricing: {
+ currency: 'CNY',
+ input: 2,
+ output: 5,
+ },
+ tokens: 32_768,
+ },
+ {
+ description: '32K上下文长度,在粤语的对话理解上超越了GPT-4,在知识、推理、数学及代码编写等多个领域均能与GPT-4 Turbo相媲美',
+ displayName: 'SenseChat 5.0 Cantonese',
+ id: 'SenseChat-5-Cantonese',
+ pricing: {
+ currency: 'CNY',
+ input: 27,
+ output: 27,
+ },
+ tokens: 32_768,
+ },
+ {
+ description: '基础版本模型 (V4),128K上下文长度,在长文本理解及生成等任务中表现出色',
+ displayName: 'SenseChat 4.0 128K',
+ enabled: true,
+ id: 'SenseChat-128K',
+ pricing: {
+ currency: 'CNY',
+ input: 60,
+ output: 60,
+ },
+ tokens: 131_072,
+ },
+ {
+ description: '基础版本模型 (V4),32K上下文长度,灵活应用于各类场景',
+ displayName: 'SenseChat 4.0 32K',
+ enabled: true,
+ id: 'SenseChat-32K',
+ pricing: {
+ currency: 'CNY',
+ input: 36,
+ output: 36,
+ },
+ tokens: 32_768,
+ },
+ {
+ description: '基础版本模型 (V4),4K上下文长度,通用能力强大',
+ displayName: 'SenseChat 4.0 4K',
+ enabled: true,
+ id: 'SenseChat',
+ pricing: {
+ currency: 'CNY',
+ input: 12,
+ output: 12,
+ },
+ tokens: 4096,
+ },
+ {
+ description: '标准版模型,8K上下文长度,高响应速度',
+ displayName: 'SenseChat Character',
+ id: 'SenseChat-Character',
+ pricing: {
+ currency: 'CNY',
+ input: 12,
+ output: 12,
+ },
+ tokens: 8192,
+ },
+ {
+ description: '高级版模型,32K上下文长度,能力全面提升,支持中/英文对话',
+ displayName: 'SenseChat Character Pro',
+ id: 'SenseChat-Character-Pro',
+ pricing: {
+ currency: 'CNY',
+ input: 15,
+ output: 15,
+ },
+ tokens: 32_768,
+ },
+ ],
+ checkModel: 'SenseChat-Turbo',
+ disableBrowserRequest: true,
+ id: 'sensenova',
+ modelList: { showModelFetcher: true },
+ modelsUrl: 'https://platform.sensenova.cn/pricing',
+ name: 'SenseNova',
+ url: 'https://platform.sensenova.cn/home',
+};
+
+export default SenseNova;
diff --git a/src/const/auth.ts b/src/const/auth.ts
index 643ee56ceefe..33c0180fc766 100644
--- a/src/const/auth.ts
+++ b/src/const/auth.ts
@@ -40,6 +40,9 @@ export interface JWTPayload {
wenxinAccessKey?: string;
wenxinSecretKey?: string;
+ sensenovaAccessKeyID?: string;
+ sensenovaAccessKeySecret?: string;
+
/**
* user id
* in client db mode it's a uuid
diff --git a/src/const/settings/llm.ts b/src/const/settings/llm.ts
index 276acbef243d..9c478db2e95b 100644
--- a/src/const/settings/llm.ts
+++ b/src/const/settings/llm.ts
@@ -20,6 +20,7 @@ import {
OpenRouterProviderCard,
PerplexityProviderCard,
QwenProviderCard,
+ SenseNovaProviderCard,
SiliconCloudProviderCard,
SparkProviderCard,
StepfunProviderCard,
@@ -123,6 +124,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
enabled: false,
enabledModels: filterEnabledModels(QwenProviderCard),
},
+ sensenova: {
+ enabled: false,
+ enabledModels: filterEnabledModels(SenseNovaProviderCard),
+ },
siliconcloud: {
enabled: false,
enabledModels: filterEnabledModels(SiliconCloudProviderCard),
diff --git a/src/features/Conversation/Error/APIKeyForm/SenseNova.tsx b/src/features/Conversation/Error/APIKeyForm/SenseNova.tsx
new file mode 100644
index 000000000000..dbf970b1c6d6
--- /dev/null
+++ b/src/features/Conversation/Error/APIKeyForm/SenseNova.tsx
@@ -0,0 +1,49 @@
+import { SenseNova } from '@lobehub/icons';
+import { Input } from 'antd';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+
+import { ModelProvider } from '@/libs/agent-runtime';
+import { useUserStore } from '@/store/user';
+import { keyVaultsConfigSelectors } from '@/store/user/selectors';
+
+import { FormAction } from '../style';
+
+const SenseNovaForm = memo(() => {
+ const { t } = useTranslation('modelProvider');
+
+ const [sensenovaAccessKeyID, sensenovaAccessKeySecret, setConfig] = useUserStore((s) => [
+ keyVaultsConfigSelectors.sensenovaConfig(s).sensenovaAccessKeyID,
+ keyVaultsConfigSelectors.sensenovaConfig(s).sensenovaAccessKeySecret,
+ s.updateKeyVaultConfig,
+ ]);
+
+  return (
+    <FormAction
+      avatar={<SenseNova color={SenseNova.colorPrimary} size={56} />}
+      description={t('sensenova.unlock.description')}
+      title={t('sensenova.unlock.title')}
+    >
+      <Input.Password
+        autoComplete={'new-password'}
+        onChange={(e) => {
+          setConfig(ModelProvider.SenseNova, { sensenovaAccessKeyID: e.target.value });
+        }}
+        placeholder={t('sensenova.sensenovaAccessKeyID.placeholder')}
+        type={'block'}
+        value={sensenovaAccessKeyID}
+      />
+      <Input.Password
+        autoComplete={'new-password'}
+        onChange={(e) => {
+          setConfig(ModelProvider.SenseNova, { sensenovaAccessKeySecret: e.target.value });
+        }}
+        placeholder={t('sensenova.sensenovaAccessKeySecret.placeholder')}
+        type={'block'}
+        value={sensenovaAccessKeySecret}
+      />
+    </FormAction>
+  );
+});
+
+export default SenseNovaForm;
diff --git a/src/features/Conversation/Error/APIKeyForm/index.tsx b/src/features/Conversation/Error/APIKeyForm/index.tsx
index 5ba78f4f0ba3..7b53b69d8945 100644
--- a/src/features/Conversation/Error/APIKeyForm/index.tsx
+++ b/src/features/Conversation/Error/APIKeyForm/index.tsx
@@ -10,6 +10,7 @@ import { GlobalLLMProviderKey } from '@/types/user/settings';
import BedrockForm from './Bedrock';
import ProviderApiKeyForm from './ProviderApiKeyForm';
+import SenseNovaForm from './SenseNova';
import WenxinForm from './Wenxin';
interface APIKeyFormProps {
@@ -66,6 +67,8 @@ const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
       {provider === ModelProvider.Bedrock ? (
         <BedrockForm />
+      ) : provider === ModelProvider.SenseNova ? (
+        <SenseNovaForm />
       ) : provider === ModelProvider.Wenxin ? (
         <WenxinForm />
       ) : (
diff --git a/src/libs/agent-runtime/AgentRuntime.ts b/src/libs/agent-runtime/AgentRuntime.ts
index a55a811e56b0..ed6bc9ae16d3 100644
--- a/src/libs/agent-runtime/AgentRuntime.ts
+++ b/src/libs/agent-runtime/AgentRuntime.ts
@@ -25,6 +25,7 @@ import { LobeOpenAI } from './openai';
import { LobeOpenRouterAI } from './openrouter';
import { LobePerplexityAI } from './perplexity';
import { LobeQwenAI } from './qwen';
+import { LobeSenseNovaAI } from './sensenova';
import { LobeSiliconCloudAI } from './siliconcloud';
import { LobeSparkAI } from './spark';
import { LobeStepfunAI } from './stepfun';
@@ -146,6 +147,7 @@ class AgentRuntime {
      openrouter: Partial<ClientOptions>;
      perplexity: Partial<ClientOptions>;
      qwen: Partial<ClientOptions>;
+     sensenova: Partial<ClientOptions>;
      siliconcloud: Partial<ClientOptions>;
      spark: Partial<ClientOptions>;
      stepfun: Partial<ClientOptions>;
@@ -314,6 +316,11 @@ class AgentRuntime {
runtimeModel = new LobeHunyuanAI(params.hunyuan);
break;
}
+
+ case ModelProvider.SenseNova: {
+ runtimeModel = await LobeSenseNovaAI.fromAPIKey(params.sensenova);
+ break;
+ }
}
return new AgentRuntime(runtimeModel);
diff --git a/src/libs/agent-runtime/index.ts b/src/libs/agent-runtime/index.ts
index 308cd40ca452..5776b9451e2c 100644
--- a/src/libs/agent-runtime/index.ts
+++ b/src/libs/agent-runtime/index.ts
@@ -15,6 +15,7 @@ export { LobeOpenAI } from './openai';
export { LobeOpenRouterAI } from './openrouter';
export { LobePerplexityAI } from './perplexity';
export { LobeQwenAI } from './qwen';
+export { LobeSenseNovaAI } from './sensenova';
export { LobeTogetherAI } from './togetherai';
export * from './types';
export { AgentRuntimeError } from './utils/createError';
diff --git a/src/libs/agent-runtime/sensenova/authToken.test.ts b/src/libs/agent-runtime/sensenova/authToken.test.ts
new file mode 100644
index 000000000000..1539d5017b65
--- /dev/null
+++ b/src/libs/agent-runtime/sensenova/authToken.test.ts
@@ -0,0 +1,18 @@
+// @vitest-environment node
+import { generateApiToken } from './authToken';
+
+describe('generateApiToken', () => {
+ it('should throw an error if no apiKey is provided', async () => {
+ await expect(generateApiToken()).rejects.toThrow('Invalid apiKey');
+ });
+
+ it('should throw an error if apiKey is invalid', async () => {
+ await expect(generateApiToken('invalid')).rejects.toThrow('Invalid apiKey');
+ });
+
+ it('should return a token if a valid apiKey is provided', async () => {
+ const apiKey = 'id:secret';
+ const token = await generateApiToken(apiKey);
+ expect(token).toBeDefined();
+ });
+});
diff --git a/src/libs/agent-runtime/sensenova/authToken.ts b/src/libs/agent-runtime/sensenova/authToken.ts
new file mode 100644
index 000000000000..74bb32d9e7e7
--- /dev/null
+++ b/src/libs/agent-runtime/sensenova/authToken.ts
@@ -0,0 +1,27 @@
+import { SignJWT } from 'jose';
+
+// https://console.sensecore.cn/help/docs/model-as-a-service/nova/overview/Authorization
+export const generateApiToken = async (apiKey?: string): Promise<string> => {
+ if (!apiKey) {
+ throw new Error('Invalid apiKey');
+ }
+
+ const [id, secret] = apiKey.split(':');
+ if (!id || !secret) {
+ throw new Error('Invalid apiKey');
+ }
+
+ const currentTime = Math.floor(Date.now() / 1000);
+
+ const payload = {
+ exp: currentTime + 1800,
+ iss: id,
+ nbf: currentTime - 5,
+ };
+
+ const jwt = await new SignJWT(payload)
+ .setProtectedHeader({ alg: 'HS256', typ: 'JWT' })
+ .sign(new TextEncoder().encode(secret));
+
+ return jwt;
+};
diff --git a/src/libs/agent-runtime/sensenova/index.test.ts b/src/libs/agent-runtime/sensenova/index.test.ts
new file mode 100644
index 000000000000..08760c369373
--- /dev/null
+++ b/src/libs/agent-runtime/sensenova/index.test.ts
@@ -0,0 +1,321 @@
+// @vitest-environment node
+import { OpenAI } from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { ChatStreamCallbacks } from '@/libs/agent-runtime';
+import * as debugStreamModule from '@/libs/agent-runtime/utils/debugStream';
+
+import * as authTokenModule from './authToken';
+import { LobeSenseNovaAI } from './index';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock相关依赖
+vi.mock('./authToken');
+
+describe('LobeSenseNovaAI', () => {
+ beforeEach(() => {
+ // Mock generateApiToken
+ vi.spyOn(authTokenModule, 'generateApiToken').mockResolvedValue('mocked_token');
+ });
+
+ afterEach(() => {
+ vi.restoreAllMocks();
+ });
+
+ describe('fromAPIKey', () => {
+ it('should correctly initialize with an API key', async () => {
+ const lobeSenseNovaAI = await LobeSenseNovaAI.fromAPIKey({ apiKey: 'test_api_key' });
+ expect(lobeSenseNovaAI).toBeInstanceOf(LobeSenseNovaAI);
+ expect(lobeSenseNovaAI.baseURL).toEqual('https://api.sensenova.cn/compatible-mode/v1');
+ });
+
+ it('should throw an error if API key is invalid', async () => {
+ vi.spyOn(authTokenModule, 'generateApiToken').mockRejectedValue(new Error('Invalid API Key'));
+ try {
+ await LobeSenseNovaAI.fromAPIKey({ apiKey: 'asd' });
+ } catch (e) {
+ expect(e).toEqual({ errorType: invalidErrorType });
+ }
+ });
+ });
+
+ describe('chat', () => {
+ let instance: LobeSenseNovaAI;
+
+ beforeEach(async () => {
+ instance = await LobeSenseNovaAI.fromAPIKey({
+ apiKey: 'test_api_key',
+ });
+
+ // Mock chat.completions.create
+ vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+ new ReadableStream() as any,
+ );
+ });
+
+ it('should return a StreamingTextResponse on successful API call', async () => {
+ const result = await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'SenseChat',
+ temperature: 0,
+ });
+ expect(result).toBeInstanceOf(Response);
+ });
+
+ it('should handle callback and headers correctly', async () => {
+ // 模拟 chat.completions.create 方法返回一个可读流
+ const mockCreateMethod = vi
+ .spyOn(instance['client'].chat.completions, 'create')
+ .mockResolvedValue(
+ new ReadableStream({
+ start(controller) {
+ controller.enqueue({
+ id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
+ object: 'chat.completion.chunk',
+ created: 1709125675,
+ model: 'gpt-3.5-turbo-0125',
+ system_fingerprint: 'fp_86156a94a0',
+ choices: [
+ { index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
+ ],
+ });
+ controller.close();
+ },
+ }) as any,
+ );
+
+ // 准备 callback 和 headers
+ const mockCallback: ChatStreamCallbacks = {
+ onStart: vi.fn(),
+ onToken: vi.fn(),
+ };
+ const mockHeaders = { 'Custom-Header': 'TestValue' };
+
+ // 执行测试
+ const result = await instance.chat(
+ {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'SenseChat',
+ temperature: 0,
+ },
+ { callback: mockCallback, headers: mockHeaders },
+ );
+
+ // 验证 callback 被调用
+ await result.text(); // 确保流被消费
+
+ // 验证 headers 被正确传递
+ expect(result.headers.get('Custom-Header')).toEqual('TestValue');
+
+ // 清理
+ mockCreateMethod.mockRestore();
+ });
+
+ it('should transform messages correctly', async () => {
+ const spyOn = vi.spyOn(instance['client'].chat.completions, 'create');
+
+ await instance.chat({
+ frequency_penalty: 0,
+ messages: [
+ { content: 'Hello', role: 'user' },
+ { content: [{ type: 'text', text: 'Hello again' }], role: 'user' },
+ ],
+ model: 'SenseChat',
+ temperature: 0,
+ top_p: 1,
+ });
+
+ const calledWithParams = spyOn.mock.calls[0][0];
+
+ expect(calledWithParams.frequency_penalty).toBeUndefined(); // frequency_penalty 0 should be undefined
+ expect(calledWithParams.messages[1].content).toEqual([{ type: 'text', text: 'Hello again' }]);
+ expect(calledWithParams.temperature).toBeUndefined(); // temperature 0 should be undefined
+ expect(calledWithParams.top_p).toBeUndefined(); // top_p 1 should be undefined
+ });
+
+ describe('Error', () => {
+ it('should return SenseNovaAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+ // Arrange
+ const apiError = new OpenAI.APIError(
+ 400,
+ {
+ status: 400,
+ error: {
+ message: 'Bad Request',
+ },
+ },
+ 'Error message',
+ {},
+ );
+
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+ // Act
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'SenseChat',
+ temperature: 0,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: 'https://api.sensenova.cn/compatible-mode/v1',
+ error: {
+ error: { message: 'Bad Request' },
+ status: 400,
+ },
+ errorType: bizErrorType,
+ provider: 'sensenova',
+ });
+ }
+ });
+
+ it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+ try {
+ await LobeSenseNovaAI.fromAPIKey({ apiKey: '' });
+ } catch (e) {
+ expect(e).toEqual({ errorType: invalidErrorType });
+ }
+ });
+
+ it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+ // Arrange
+ const errorInfo = {
+ stack: 'abc',
+ cause: {
+ message: 'api is undefined',
+ },
+ };
+ const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+ // Act
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'SenseChat',
+ temperature: 0.2,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: 'https://api.sensenova.cn/compatible-mode/v1',
+ error: {
+ cause: { message: 'api is undefined' },
+ stack: 'abc',
+ },
+ errorType: bizErrorType,
+ provider: 'sensenova',
+ });
+ }
+ });
+
+ it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+ // Arrange
+ const errorInfo = {
+ stack: 'abc',
+ cause: { message: 'api is undefined' },
+ };
+ const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+ instance = await LobeSenseNovaAI.fromAPIKey({
+ apiKey: 'test',
+
+ baseURL: 'https://abc.com/v2',
+ });
+
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+ // Act
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'gpt-3.5-turbo',
+ temperature: 0,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: 'https://***.com/v2',
+ error: {
+ cause: { message: 'api is undefined' },
+ stack: 'abc',
+ },
+ errorType: bizErrorType,
+ provider: 'sensenova',
+ });
+ }
+ });
+
+ it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+ // Arrange
+ const genericError = new Error('Generic Error');
+
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+ // Act
+ try {
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'SenseChat',
+ temperature: 0,
+ });
+ } catch (e) {
+ expect(e).toEqual({
+ endpoint: 'https://api.sensenova.cn/compatible-mode/v1',
+ errorType: 'AgentRuntimeError',
+ provider: 'sensenova',
+ error: {
+ name: genericError.name,
+ cause: genericError.cause,
+ message: genericError.message,
+ stack: genericError.stack,
+ },
+ });
+ }
+ });
+ });
+
+ describe('DEBUG', () => {
+ it('should call debugStream and return StreamingTextResponse when DEBUG_OPENAI_CHAT_COMPLETION is 1', async () => {
+ // Arrange
+ const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
+ const mockDebugStream = new ReadableStream({
+ start(controller) {
+ controller.enqueue('Debug stream content');
+ controller.close();
+ },
+ }) as any;
+ mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
+
+ // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
+ (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+ tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+ });
+
+ // 保存原始环境变量值
+ const originalDebugValue = process.env.DEBUG_SENSENOVA_CHAT_COMPLETION;
+
+ // 模拟环境变量
+ process.env.DEBUG_SENSENOVA_CHAT_COMPLETION = '1';
+ vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+ // 执行测试
+ // 运行你的测试函数,确保它会在条件满足时调用 debugStream
+ // 假设的测试函数调用,你可能需要根据实际情况调整
+ await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'SenseChat',
+ temperature: 0,
+ });
+
+ // 验证 debugStream 被调用
+ expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+ // 恢复原始环境变量值
+ process.env.DEBUG_SENSENOVA_CHAT_COMPLETION = originalDebugValue;
+ });
+ });
+ });
+});
diff --git a/src/libs/agent-runtime/sensenova/index.ts b/src/libs/agent-runtime/sensenova/index.ts
new file mode 100644
index 000000000000..59e1f592b772
--- /dev/null
+++ b/src/libs/agent-runtime/sensenova/index.ts
@@ -0,0 +1,98 @@
+import OpenAI, { ClientOptions } from 'openai';
+
+import { LobeRuntimeAI } from '../BaseAI';
+import { AgentRuntimeErrorType } from '../error';
+import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
+import { AgentRuntimeError } from '../utils/createError';
+import { debugStream } from '../utils/debugStream';
+import { desensitizeUrl } from '../utils/desensitizeUrl';
+import { handleOpenAIError } from '../utils/handleOpenAIError';
+import { convertOpenAIMessages } from '../utils/openaiHelpers';
+import { StreamingResponse } from '../utils/response';
+import { OpenAIStream } from '../utils/streams';
+import { generateApiToken } from './authToken';
+
+const DEFAULT_BASE_URL = 'https://api.sensenova.cn/compatible-mode/v1';
+
+export class LobeSenseNovaAI implements LobeRuntimeAI {
+ private client: OpenAI;
+
+ baseURL: string;
+
+ constructor(oai: OpenAI) {
+ this.client = oai;
+ this.baseURL = this.client.baseURL;
+ }
+
+ static async fromAPIKey({ apiKey, baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions = {}) {
+ const invalidSenseNovaAPIKey = AgentRuntimeError.createError(
+ AgentRuntimeErrorType.InvalidProviderAPIKey,
+ );
+
+ if (!apiKey) throw invalidSenseNovaAPIKey;
+
+ let token: string;
+
+ try {
+ token = await generateApiToken(apiKey);
+ } catch {
+ throw invalidSenseNovaAPIKey;
+ }
+
+ const header = { Authorization: `Bearer ${token}` };
+
+ const llm = new OpenAI({ apiKey, baseURL, defaultHeaders: header, ...res });
+
+ return new LobeSenseNovaAI(llm);
+ }
+
+ async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
+ try {
+ const params = await this.buildCompletionsParams(payload);
+
+ const response = await this.client.chat.completions.create(
+ params as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
+ );
+
+ const [prod, debug] = response.tee();
+
+ if (process.env.DEBUG_SENSENOVA_CHAT_COMPLETION === '1') {
+ debugStream(debug.toReadableStream()).catch(console.error);
+ }
+
+ return StreamingResponse(OpenAIStream(prod), {
+ headers: options?.headers,
+ });
+ } catch (error) {
+ const { errorResult, RuntimeError } = handleOpenAIError(error);
+
+ const errorType = RuntimeError || AgentRuntimeErrorType.ProviderBizError;
+ let desensitizedEndpoint = this.baseURL;
+
+ if (this.baseURL !== DEFAULT_BASE_URL) {
+ desensitizedEndpoint = desensitizeUrl(this.baseURL);
+ }
+ throw AgentRuntimeError.chat({
+ endpoint: desensitizedEndpoint,
+ error: errorResult,
+ errorType,
+ provider: ModelProvider.SenseNova,
+ });
+ }
+ }
+
+ private async buildCompletionsParams(payload: ChatStreamPayload) {
+ const { frequency_penalty, messages, temperature, top_p, ...params } = payload;
+
+ return {
+ messages: await convertOpenAIMessages(messages as any),
+ ...params,
+ frequency_penalty: (frequency_penalty !== undefined && frequency_penalty > 0 && frequency_penalty <= 2) ? frequency_penalty : undefined,
+ stream: true,
+ temperature: (temperature !== undefined && temperature > 0 && temperature <= 2) ? temperature : undefined,
+ top_p: (top_p !== undefined && top_p > 0 && top_p < 1) ? top_p : undefined,
+ };
+ }
+}
+
+export default LobeSenseNovaAI;
diff --git a/src/libs/agent-runtime/types/type.ts b/src/libs/agent-runtime/types/type.ts
index c870a9c462a8..db64c94f23fb 100644
--- a/src/libs/agent-runtime/types/type.ts
+++ b/src/libs/agent-runtime/types/type.ts
@@ -44,6 +44,7 @@ export enum ModelProvider {
OpenRouter = 'openrouter',
Perplexity = 'perplexity',
Qwen = 'qwen',
+ SenseNova = 'sensenova',
SiliconCloud = 'siliconcloud',
Spark = 'spark',
Stepfun = 'stepfun',
diff --git a/src/locales/default/modelProvider.ts b/src/locales/default/modelProvider.ts
index 30e0a78ca161..f3ba99530b4a 100644
--- a/src/locales/default/modelProvider.ts
+++ b/src/locales/default/modelProvider.ts
@@ -122,6 +122,23 @@ export default {
title: '下载指定的 Ollama 模型',
},
},
+ sensenova: {
+ sensenovaAccessKeyID: {
+ desc: '填入 SenseNova Access Key ID',
+ placeholder: 'SenseNova Access Key ID',
+ title: 'Access Key ID',
+ },
+ sensenovaAccessKeySecret: {
+ desc: '填入 SenseNova Access Key Secret',
+ placeholder: 'SenseNova Access Key Secret',
+ title: 'Access Key Secret',
+ },
+ unlock: {
+ description:
+ '输入你的 Access Key ID / Access Key Secret 即可开始会话。应用不会记录你的鉴权配置',
+ title: '使用自定义 SenseNova 鉴权信息',
+ },
+ },
wenxin: {
accessKey: {
desc: '填入百度千帆平台的 Access Key',
diff --git a/src/server/globalConfig/index.ts b/src/server/globalConfig/index.ts
index 5705435c8c57..09b458389310 100644
--- a/src/server/globalConfig/index.ts
+++ b/src/server/globalConfig/index.ts
@@ -25,6 +25,7 @@ import {
OpenRouterProviderCard,
PerplexityProviderCard,
QwenProviderCard,
+ SenseNovaProviderCard,
SiliconCloudProviderCard,
SparkProviderCard,
StepfunProviderCard,
@@ -106,6 +107,9 @@ export const getServerGlobalConfig = () => {
ENABLED_AI360,
AI360_MODEL_LIST,
+ ENABLED_SENSENOVA,
+ SENSENOVA_MODEL_LIST,
+
ENABLED_SILICONCLOUD,
SILICONCLOUD_MODEL_LIST,
@@ -327,6 +331,14 @@ export const getServerGlobalConfig = () => {
modelString: QWEN_MODEL_LIST,
}),
},
+ sensenova: {
+ enabled: ENABLED_SENSENOVA,
+ enabledModels: extractEnabledModels(SENSENOVA_MODEL_LIST),
+ serverModelCards: transformToChatModelCards({
+ defaultChatModels: SenseNovaProviderCard.chatModels,
+ modelString: SENSENOVA_MODEL_LIST,
+ }),
+ },
siliconcloud: {
enabled: ENABLED_SILICONCLOUD,
enabledModels: extractEnabledModels(SILICONCLOUD_MODEL_LIST),
diff --git a/src/server/modules/AgentRuntime/index.ts b/src/server/modules/AgentRuntime/index.ts
index 69998915c6d4..2043e97423fe 100644
--- a/src/server/modules/AgentRuntime/index.ts
+++ b/src/server/modules/AgentRuntime/index.ts
@@ -261,6 +261,16 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
const apiKey = apiKeyManager.pick(payload?.apiKey || HUNYUAN_API_KEY);
+ return { apiKey };
+ }
+ case ModelProvider.SenseNova: {
+ const { SENSENOVA_ACCESS_KEY_ID, SENSENOVA_ACCESS_KEY_SECRET } = getLLMConfig();
+
+ const sensenovaAccessKeyID = apiKeyManager.pick(payload?.sensenovaAccessKeyID || SENSENOVA_ACCESS_KEY_ID);
+ const sensenovaAccessKeySecret = apiKeyManager.pick(payload?.sensenovaAccessKeySecret || SENSENOVA_ACCESS_KEY_SECRET);
+
+ const apiKey = sensenovaAccessKeyID + ':' + sensenovaAccessKeySecret;
+
return { apiKey };
}
}
diff --git a/src/services/_auth.ts b/src/services/_auth.ts
index 7ecfa4f64c13..7795290e7791 100644
--- a/src/services/_auth.ts
+++ b/src/services/_auth.ts
@@ -25,6 +25,20 @@ export const getProviderAuthPayload = (provider: string) => {
};
}
+ case ModelProvider.SenseNova: {
+ const { sensenovaAccessKeyID, sensenovaAccessKeySecret } = keyVaultsConfigSelectors.sensenovaConfig(
+ useUserStore.getState(),
+ );
+
+    const apiKey = (sensenovaAccessKeyID || '') + ':' + (sensenovaAccessKeySecret || '');
+
+ return {
+ apiKey,
+ sensenovaAccessKeyID: sensenovaAccessKeyID,
+ sensenovaAccessKeySecret: sensenovaAccessKeySecret,
+ };
+ }
+
case ModelProvider.Wenxin: {
const { secretKey, accessKey } = keyVaultsConfigSelectors.wenxinConfig(
useUserStore.getState(),
diff --git a/src/store/user/slices/modelList/selectors/keyVaults.ts b/src/store/user/slices/modelList/selectors/keyVaults.ts
index 8a038f8d4919..6f5047d55481 100644
--- a/src/store/user/slices/modelList/selectors/keyVaults.ts
+++ b/src/store/user/slices/modelList/selectors/keyVaults.ts
@@ -16,6 +16,7 @@ const openAIConfig = (s: UserStore) => keyVaultsSettings(s).openai || {};
const bedrockConfig = (s: UserStore) => keyVaultsSettings(s).bedrock || {};
const wenxinConfig = (s: UserStore) => keyVaultsSettings(s).wenxin || {};
const ollamaConfig = (s: UserStore) => keyVaultsSettings(s).ollama || {};
+const sensenovaConfig = (s: UserStore) => keyVaultsSettings(s).sensenova || {};
const azureConfig = (s: UserStore) => keyVaultsSettings(s).azure || {};
const getVaultByProvider = (provider: GlobalLLMProviderKey) => (s: UserStore) =>
(keyVaultsSettings(s)[provider] || {}) as OpenAICompatibleKeyVault &
@@ -43,5 +44,6 @@ export const keyVaultsConfigSelectors = {
ollamaConfig,
openAIConfig,
password,
+ sensenovaConfig,
wenxinConfig,
};
diff --git a/src/store/user/slices/modelList/selectors/modelConfig.ts b/src/store/user/slices/modelList/selectors/modelConfig.ts
index 8e2acb1421ca..a41e14a9ab8c 100644
--- a/src/store/user/slices/modelList/selectors/modelConfig.ts
+++ b/src/store/user/slices/modelList/selectors/modelConfig.ts
@@ -69,6 +69,7 @@ const openAIConfig = (s: UserStore) => currentLLMSettings(s).openai;
const bedrockConfig = (s: UserStore) => currentLLMSettings(s).bedrock;
const ollamaConfig = (s: UserStore) => currentLLMSettings(s).ollama;
const azureConfig = (s: UserStore) => currentLLMSettings(s).azure;
+const sensenovaConfig = (s: UserStore) => currentLLMSettings(s).sensenova;
const isAzureEnabled = (s: UserStore) => currentLLMSettings(s).azure.enabled;
@@ -86,4 +87,5 @@ export const modelConfigSelectors = {
ollamaConfig,
openAIConfig,
+ sensenovaConfig,
};
diff --git a/src/types/user/settings/keyVaults.ts b/src/types/user/settings/keyVaults.ts
index 518bf7bafdb7..8ff980fa055f 100644
--- a/src/types/user/settings/keyVaults.ts
+++ b/src/types/user/settings/keyVaults.ts
@@ -16,6 +16,11 @@ export interface AWSBedrockKeyVault {
sessionToken?: string;
}
+export interface SenseNovaKeyVault {
+ sensenovaAccessKeyID?: string;
+ sensenovaAccessKeySecret?: string;
+}
+
export interface WenxinKeyVault {
accessKey?: string;
secretKey?: string;
@@ -46,6 +51,7 @@ export interface UserKeyVaults {
password?: string;
perplexity?: OpenAICompatibleKeyVault;
qwen?: OpenAICompatibleKeyVault;
+ sensenova?: SenseNovaKeyVault;
siliconcloud?: OpenAICompatibleKeyVault;
spark?: OpenAICompatibleKeyVault;
stepfun?: OpenAICompatibleKeyVault;