diff --git a/.github/workflows/_comps-workflow.yml b/.github/workflows/_comps-workflow.yml
index 31116bd46..944da7c5a 100644
--- a/.github/workflows/_comps-workflow.yml
+++ b/.github/workflows/_comps-workflow.yml
@@ -71,7 +71,6 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
             git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
-            # sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
           fi
       - name: Get build list
         id: get-build-list
diff --git a/.github/workflows/manual-comps-test.yml b/.github/workflows/manual-comps-test.yml
index b0d9e57b8..07b66b975 100644
--- a/.github/workflows/manual-comps-test.yml
+++ b/.github/workflows/manual-comps-test.yml
@@ -7,7 +7,7 @@ on:
     inputs:
       services:
         default: "asr"
-        description: "List of services to test [agent,asr,chathistory,dataprep,embeddings,feedback_management,finetuning,guardrails,knowledgegraphs,llms,lvms,nginx,prompt_registry,ragas,rerankings,retrievers,tts,web_retrievers]"
+        description: "List of services to test [agent,asr,chathistory,animation,dataprep,embeddings,feedback_management,finetuning,guardrails,image2image,image2video,intent_detection,llms,lvms,prompt_registry,ragas,rerankings,retrievers,text2image,text2sql,third_parties,tts,vectorstores,web_retrievers]"
         required: true
         type: string
       build:
diff --git a/.github/workflows/manual-docker-publish.yml b/.github/workflows/manual-docker-publish.yml
index b7e770ded..aae3d3ca8 100644
--- a/.github/workflows/manual-docker-publish.yml
+++ b/.github/workflows/manual-docker-publish.yml
@@ -7,7 +7,7 @@ on:
     inputs:
       services:
         default: ""
-        description: "List of services to test [agent,asr,chathistory,dataprep,embeddings,feedback_management,finetuning,guardrails,knowledgegraphs,llms,lvms,nginx,prompt_registry,ragas,rerankings,retrievers,tts,web_retrievers]"
+        description: "List of services to test [agent,asr,chathistory,animation,dataprep,embeddings,feedback_management,finetuning,guardrails,image2image,image2video,intent_detection,llms,lvms,prompt_registry,ragas,rerankings,retrievers,text2image,text2sql,third_parties,tts,vectorstores,web_retrievers]"
         required: false
         type: string
       images:
diff --git a/.github/workflows/push-image-build.yml b/.github/workflows/push-image-build.yml
index fda152806..67389a3cd 100644
--- a/.github/workflows/push-image-build.yml
+++ b/.github/workflows/push-image-build.yml
@@ -96,7 +96,6 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
             git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
-            sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
           fi
 
       - name: Build Image
diff --git a/comps/cores/mega/orchestrator.py b/comps/cores/mega/orchestrator.py
index 97ee2a76b..2d1957b1b 100644
--- a/comps/cores/mega/orchestrator.py
+++ b/comps/cores/mega/orchestrator.py
@@ -28,15 +28,13 @@
 
 
 class OrchestratorMetrics:
-    # Need an instance ID for metric prefix because:
-    # - Orchestror instances are not named
-    # - CI creates several orchestrator instances
+    # Need a static class-level ID for metric prefix because:
     # - Prometheus requires metrics (their names) to be unique
     _instance_id = 0
 
     def __init__(self) -> None:
-        self._instance_id += 1
-        if self._instance_id > 1:
+        OrchestratorMetrics._instance_id += 1
+        if OrchestratorMetrics._instance_id > 1:
             self._prefix = f"megaservice{self._instance_id}"
         else:
             self._prefix = "megaservice"
diff --git a/comps/llms/src/doc-summarization/requirements.txt b/comps/llms/src/doc-summarization/requirements.txt
index 169461863..6bc1bb1e5 100644
--- a/comps/llms/src/doc-summarization/requirements.txt
+++ b/comps/llms/src/doc-summarization/requirements.txt
@@ -1,11 +1,11 @@
 docarray[full]
 fastapi
 httpx==0.27.2
-huggingface_hub
-langchain #==0.1.12
+huggingface_hub==0.27.1
+langchain==0.3.14
 langchain-huggingface
 langchain-openai
-langchain_community
+langchain_community==0.3.14
 langchainhub
 opentelemetry-api
 opentelemetry-exporter-otlp
diff --git a/comps/llms/src/faq-generation/requirements.txt b/comps/llms/src/faq-generation/requirements.txt
index 36257d393..037079294 100644
--- a/comps/llms/src/faq-generation/requirements.txt
+++ b/comps/llms/src/faq-generation/requirements.txt
@@ -1,10 +1,10 @@
 docarray[full]
 fastapi
-huggingface_hub
-langchain
+huggingface_hub==0.27.1
+langchain==0.3.14
 langchain-huggingface
 langchain-openai
-langchain_community
+langchain_community==0.3.14
 langchainhub
 opentelemetry-api
 opentelemetry-exporter-otlp
diff --git a/comps/third_parties/vllm/src/build_docker_vllm.sh b/comps/third_parties/vllm/src/build_docker_vllm.sh
index bd8df2e70..bec3a0c8f 100644
--- a/comps/third_parties/vllm/src/build_docker_vllm.sh
+++ b/comps/third_parties/vllm/src/build_docker_vllm.sh
@@ -38,7 +38,6 @@ if [ "$hw_mode" = "hpu" ]; then
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd ./vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd ..
     rm -rf vllm-fork
diff --git a/tests/agent/test_agent_langchain_on_intel_hpu.sh b/tests/agent/test_agent_langchain_on_intel_hpu.sh
index 090d1ed33..2c1235472 100644
--- a/tests/agent/test_agent_langchain_on_intel_hpu.sh
+++ b/tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -57,7 +57,6 @@ function build_vllm_docker_images() {
     fi
     cd ./vllm-fork
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi:comps failed"
diff --git a/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh b/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
index 92b29827f..d040f954a 100644
--- a/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
+++ b/tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh
@@ -13,7 +13,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh b/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
index d9552e9a0..a6096bd30 100644
--- a/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh
@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh b/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
index 5d489b250..8607f2c55 100644
--- a/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh
@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh b/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
index 7c32a8977..ea8c9ee6c 100644
--- a/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh
@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
        echo "opea/vllm-gaudi built fail"
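
Note on the comps/cores/mega/orchestrator.py hunk: incrementing self._instance_id only reads the class attribute and then writes a per-instance attribute that shadows it, so every orchestrator instance observed a count of 1 and registered its metrics under the same "megaservice" prefix, which Prometheus rejects as duplicate metric names. Incrementing through the class keeps one shared, growing counter. Below is a minimal standalone sketch of the two behaviors; the BuggyCounter/FixedCounter names are illustrative only and are not part of the patch.

# Demonstration of why the orchestrator fix increments the counter through
# the class instead of through self (hypothetical names, plain Python 3).

class BuggyCounter:
    count = 0  # class-level counter

    def __init__(self):
        # Reads BuggyCounter.count (0), adds 1, then stores the result as an
        # *instance* attribute; the class attribute never changes, so every
        # instance ends up with count == 1.
        self.count += 1


class FixedCounter:
    count = 0  # class-level counter

    def __init__(self):
        # Incrementing via the class mutates the shared counter, so each new
        # instance observes a strictly increasing value.
        FixedCounter.count += 1


a, b = BuggyCounter(), BuggyCounter()
print(a.count, b.count, BuggyCounter.count)  # -> 1 1 0
x, y = FixedCounter(), FixedCounter()
print(x.count, y.count, FixedCounter.count)  # -> 2 2 2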