From cf02ebaf6d87724646ca15eb20f1cb13630ba233 Mon Sep 17 00:00:00 2001
From: thepetk
Date: Tue, 20 Aug 2024 09:48:01 +0100
Subject: [PATCH] Update SUPPORT_LLM default to false

---
 scripts/envs/audio-to-text    | 1 -
 scripts/envs/base             | 2 +-
 scripts/envs/chatbot          | 3 +++
 scripts/envs/codegen          | 3 +++
 scripts/envs/object-detection | 1 -
 5 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/scripts/envs/audio-to-text b/scripts/envs/audio-to-text
index 70739c29..3cfb1ef3 100644
--- a/scripts/envs/audio-to-text
+++ b/scripts/envs/audio-to-text
@@ -7,7 +7,6 @@ export INIT_CONTAINER="quay.io/redhat-ai-dev/whisper-small:latest"
 export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/whispercpp:latest"
 
 # model configurations
-export SUPPORT_LLM=false
 export SUPPORT_ASR=true
 
 # for gitlab case, since gitlab does not have pipeline webhook pre-set to trigger the initial build
diff --git a/scripts/envs/base b/scripts/envs/base
index 5a8d7a5c..eacffddb 100644
--- a/scripts/envs/base
+++ b/scripts/envs/base
@@ -6,6 +6,6 @@ export MODEL_PATH="/model/model.file"
 export MODEL_SERVICE_PORT=8001
 
 # model configurations
-export SUPPORT_LLM=true
+export SUPPORT_LLM=false
 export SUPPORT_ASR=false
 export SUPPORT_DETR=false
\ No newline at end of file
diff --git a/scripts/envs/chatbot b/scripts/envs/chatbot
index 2a83a266..13fc35e3 100755
--- a/scripts/envs/chatbot
+++ b/scripts/envs/chatbot
@@ -10,5 +10,8 @@ export VLLM_CONTAINER="quay.io/rh-aiservices-bu/vllm-openai-ubi9:0.4.2"
 export VLLM_MODEL_NAME="instructlab/granite-7b-lab"
 export VLLM_MAX_MODEL_LEN=4096
 
+# model configurations
+export SUPPORT_LLM=true
+
 # for gitlab case, since gitlab does not have pipeline webhook pre-set to trigger the initial build
 export APP_INTERFACE_CONTAINER="quay.io/redhat-ai-dev/chatbot:latest"
diff --git a/scripts/envs/codegen b/scripts/envs/codegen
index 06a3f0a6..7f0683b5 100755
--- a/scripts/envs/codegen
+++ b/scripts/envs/codegen
@@ -10,5 +10,8 @@ export VLLM_CONTAINER="quay.io/rh-aiservices-bu/vllm-openai-ubi9:0.4.2"
 export VLLM_MODEL_NAME="Nondzu/Mistral-7B-code-16k-qlora"
 export VLLM_MAX_MODEL_LEN=6144
 
+# model configurations
+export SUPPORT_LLM=true
+
 # for gitlab case, since gitlab does not have pipeline webhook pre-set to trigger the initial build
 export APP_INTERFACE_CONTAINER="quay.io/redhat-ai-dev/codegen:latest"
diff --git a/scripts/envs/object-detection b/scripts/envs/object-detection
index 89c60d49..ea89e7fe 100755
--- a/scripts/envs/object-detection
+++ b/scripts/envs/object-detection
@@ -10,7 +10,6 @@ export MODEL_SERVICE_CONTAINER="quay.io/redhat-ai-dev/object_detection_python:la
 export MODEL_SERVICE_PORT=8000
 
 # model configurations
-export SUPPORT_LLM=false
 export SUPPORT_DETR=true
 
 # for gitlab case, since gitlab does not have pipeline webhook pre-set to trigger the initial build