diff --git a/DocSum/docker_compose/amd/gpu/rocm/README.md b/DocSum/docker_compose/amd/gpu/rocm/README.md
index b45a496755..4d41a5cd31 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/README.md
+++ b/DocSum/docker_compose/amd/gpu/rocm/README.md
@@ -81,7 +81,7 @@ export DOCSUM_HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export DOCSUM_LLM_SERVER_PORT="8008"
 export DOCSUM_BACKEND_SERVER_PORT="8888"
 export DOCSUM_FRONTEND_PORT="5173"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
 ```
 
 Note: Please replace with `host_ip` with your external IP address, do not use localhost.
diff --git a/DocSum/docker_compose/set_env.sh b/DocSum/docker_compose/set_env.sh
index 3307955cc8..f116a99c3a 100644
--- a/DocSum/docker_compose/set_env.sh
+++ b/DocSum/docker_compose/set_env.sh
@@ -20,4 +20,4 @@ export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/docsum"
 export LLM_ENDPOINT_PORT=8008
 export DOCSUM_PORT=9000
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh
index 10e4d0c9fa..db0977b040 100644
--- a/DocSum/tests/test_compose_on_gaudi.sh
+++ b/DocSum/tests/test_compose_on_gaudi.sh
@@ -26,7 +26,7 @@ export no_proxy="${no_proxy},${host_ip}"
 export LLM_ENDPOINT_PORT=8008
 export DOCSUM_PORT=9000
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
 export LOGFLAG=True
 
 WORKPATH=$(dirname "$PWD")
diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
index dc0baa26cb..54935f2b78 100644
--- a/DocSum/tests/test_compose_on_rocm.sh
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -32,7 +32,7 @@ export ASR_SERVICE_HOST_IP=${host_ip}
 export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/docsum"
 export DOCSUM_CARD_ID="card1"
 export DOCSUM_RENDER_ID="renderD136"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
 export LOGFLAG=True
 
 function build_docker_images() {
diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh
index d353fcefdb..13036fc0db 100644
--- a/DocSum/tests/test_compose_on_xeon.sh
+++ b/DocSum/tests/test_compose_on_xeon.sh
@@ -26,7 +26,7 @@ export no_proxy="${no_proxy},${host_ip}"
 export LLM_ENDPOINT_PORT=8008
 export DOCSUM_PORT=9000
 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
-export DocSum_COMPONENT_NAME="OPEADocSum_TGI"
+export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
 export LOGFLAG=True
 
 WORKPATH=$(dirname "$PWD")
diff --git a/FaqGen/docker_compose/amd/gpu/rocm/README.md b/FaqGen/docker_compose/amd/gpu/rocm/README.md
index b677d78354..68d8e2212f 100644
--- a/FaqGen/docker_compose/amd/gpu/rocm/README.md
+++ b/FaqGen/docker_compose/amd/gpu/rocm/README.md
@@ -34,7 +34,7 @@ export FAQGEN_HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export FAQGEN_BACKEND_SERVER_PORT=8888
 export FAGGEN_UI_PORT=5173
 export LLM_ENDPOINT="http://${HOST_IP}:${FAQGEN_TGI_SERVICE_PORT}"
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 ```
 
 Note: Please replace with `host_ip` with your external IP address, do not use localhost.
diff --git a/FaqGen/docker_compose/intel/cpu/xeon/README.md b/FaqGen/docker_compose/intel/cpu/xeon/README.md
index db1d8db5a1..a961a6aa98 100644
--- a/FaqGen/docker_compose/intel/cpu/xeon/README.md
+++ b/FaqGen/docker_compose/intel/cpu/xeon/README.md
@@ -77,7 +77,7 @@ export https_proxy=${your_http_proxy}
 export host_ip=${your_host_ip}
 export LLM_ENDPOINT_PORT=8008
 export LLM_SERVICE_PORT=9000
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export MEGA_SERVICE_HOST_IP=${host_ip}
diff --git a/FaqGen/docker_compose/intel/hpu/gaudi/README.md b/FaqGen/docker_compose/intel/hpu/gaudi/README.md
index 69a2f2bd01..7364e92387 100644
--- a/FaqGen/docker_compose/intel/hpu/gaudi/README.md
+++ b/FaqGen/docker_compose/intel/hpu/gaudi/README.md
@@ -157,7 +157,7 @@ export https_proxy=${your_http_proxy}
 export host_ip=${your_host_ip}
 export LLM_ENDPOINT_PORT=8008
 export LLM_SERVICE_PORT=9000
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct"
 export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
 export MEGA_SERVICE_HOST_IP=${host_ip}
diff --git a/FaqGen/tests/test_compose_on_gaudi.sh b/FaqGen/tests/test_compose_on_gaudi.sh
index ba3a4a1605..95ed2950a0 100644
--- a/FaqGen/tests/test_compose_on_gaudi.sh
+++ b/FaqGen/tests/test_compose_on_gaudi.sh
@@ -31,7 +31,7 @@ function start_services() {
 
     export host_ip=${ip_address}
     export LLM_ENDPOINT_PORT=8008
-    export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+    export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
     export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
     export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
     export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
diff --git a/FaqGen/tests/test_compose_on_rocm.sh b/FaqGen/tests/test_compose_on_rocm.sh
index 4b9d940ae7..726c83461b 100644
--- a/FaqGen/tests/test_compose_on_rocm.sh
+++ b/FaqGen/tests/test_compose_on_rocm.sh
@@ -28,7 +28,7 @@ export MEGA_SERVICE_HOST_IP=${ip_address}
 export LLM_SERVICE_HOST_IP=${ip_address}
 export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/faqgen"
 export PATH="~/miniconda3/bin:$PATH"
-export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
 export LOGFLAG=True
 
 function build_docker_images() {
diff --git a/FaqGen/tests/test_compose_on_xeon.sh b/FaqGen/tests/test_compose_on_xeon.sh
index f2ed53480e..e4409358d2 100755
--- a/FaqGen/tests/test_compose_on_xeon.sh
+++ b/FaqGen/tests/test_compose_on_xeon.sh
@@ -31,7 +31,7 @@ function start_services() {
 
     export host_ip=${ip_address}
     export LLM_ENDPOINT_PORT=8008
-    export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+    export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
     export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
     export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
     export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
diff --git a/ProductivitySuite/tests/test_compose_on_xeon.sh b/ProductivitySuite/tests/test_compose_on_xeon.sh
index f415923905..fa0b6e2a4a 100755
--- a/ProductivitySuite/tests/test_compose_on_xeon.sh
+++ b/ProductivitySuite/tests/test_compose_on_xeon.sh
@@ -80,7 +80,7 @@ function start_services() {
     export LLM_SERVER_PORT=9009
     export PROMPT_COLLECTION_NAME="prompt"
     export host_ip=${ip_address}
-    export FAQGen_COMPONENT_NAME="OPEAFAQGen_TGI"
+    export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
     export LOGFLAG=True
 
     # Start Docker Containers
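The hunks above rename the component identifiers from `OPEADocSum_TGI`/`OPEAFAQGen_TGI` to `OpeaDocSumTgi`/`OpeaFaqGenTgi` in the docs, `set_env.sh`, and the test scripts. A minimal sanity check, assuming the compose files still read these variables by the same names, is to export the new values and confirm they appear in the rendered compose config before starting the containers:

```bash
# Hedged sketch: verify the renamed component names reach docker compose.
# Variable names and values are taken from the diff above; run this from the
# deployment's docker_compose directory of your choice.
export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi"
docker compose config | grep -E "DocSum_COMPONENT_NAME|FAQGen_COMPONENT_NAME"
```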