Skip to content

Commit

Permalink
Merge branch 'main' into refactor_comps
Browse files Browse the repository at this point in the history
  • Loading branch information
letonghan authored Dec 27, 2024
2 parents 64c547e + 1040875 commit 9d9c919
Show file tree
Hide file tree
Showing 77 changed files with 2,340 additions and 1,344 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/docker/compose/animation-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,13 @@
services:
animation:
build:
dockerfile: comps/animation/wav2lip/Dockerfile
dockerfile: comps/animation/src/Dockerfile
image: ${REGISTRY:-opea}/animation:${TAG:-latest}
wav2lip:
build:
dockerfile: comps/animation/wav2lip/dependency/Dockerfile
dockerfile: comps/animation/src/integration/dependency/Dockerfile
image: ${REGISTRY:-opea}/wav2lip:${TAG:-latest}
wav2lip-gaudi:
build:
dockerfile: comps/animation/wav2lip/dependency/Dockerfile.intel_hpu
dockerfile: comps/animation/src/integration/dependency/Dockerfile.intel_hpu
image: ${REGISTRY:-opea}/wav2lip-gaudi:${TAG:-latest}
16 changes: 4 additions & 12 deletions .github/workflows/docker/compose/dataprep-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -55,19 +55,11 @@ services:
build:
dockerfile: comps/dataprep/neo4j/llama_index/Dockerfile
image: ${REGISTRY:-opea}/dataprep-neo4j-llamaindex:${TAG:-latest}
dataprep-multimedia2text:
build:
dockerfile: comps/dataprep/multimedia2text/Dockerfile
image: ${REGISTRY:-opea}/dataprep-multimedia2text:${TAG:-latest}
dataprep-video2audio:
build:
dockerfile: comps/dataprep/multimedia2text/video2audio/Dockerfile
image: ${REGISTRY:-opea}/dataprep-video2audio:${TAG:-latest}
dataprep-audio2text:
build:
dockerfile: comps/dataprep/multimedia2text/audio2text/Dockerfile
image: ${REGISTRY:-opea}/dataprep-audio2text:${TAG:-latest}
dataprep-elasticsearch:
build:
dockerfile: comps/dataprep/elasticsearch/langchain/Dockerfile
image: ${REGISTRY:-opea}/dataprep-elasticsearch:${TAG:-latest}
dataprep-opensearch:
build:
dockerfile: comps/dataprep/opensearch/langchain/Dockerfile
image: ${REGISTRY:-opea}/dataprep-opensearch:${TAG:-latest}
4 changes: 4 additions & 0 deletions .github/workflows/docker/compose/retrievers-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,3 +51,7 @@ services:
build:
dockerfile: comps/retrievers/elasticsearch/langchain/Dockerfile
image: ${REGISTRY:-opea}/retriever-elasticsearch:${TAG:-latest}
retriever-opensearch:
build:
dockerfile: comps/retrievers/opensearch/langchain/Dockerfile
image: ${REGISTRY:-opea}/retriever-opensearch:${TAG:-latest}
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,10 @@ ARG ARCH=cpu
COPY comps /home/user/comps

RUN pip install --no-cache-dir --upgrade pip && \
pip install --no-cache-dir -r /home/user/comps/animation/wav2lip/requirements.txt ;
pip install --no-cache-dir -r /home/user/comps/animation/src/requirements.txt ;

ENV PYTHONPATH=$PYTHONPATH:/home/user

WORKDIR /home/user/comps/animation/wav2lip
WORKDIR /home/user/comps/animation/src

ENTRYPOINT ["python3", "animation.py"]
ENTRYPOINT ["python3", "opea_animation_microservice.py"]
20 changes: 10 additions & 10 deletions comps/animation/wav2lip/README.md → comps/animation/src/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,19 +16,19 @@ cd GenAIComps
- Xeon CPU

```bash
docker build -t opea/wav2lip:latest -f comps/animation/wav2lip/dependency/Dockerfile .
docker build -t opea/wav2lip:latest -f comps/animation/src/integration/dependency/Dockerfile .
```

- Gaudi2 HPU

```bash
docker build -t opea/wav2lip-gaudi:latest -f comps/animation/wav2lip/dependency/Dockerfile.intel_hpu .
docker build -t opea/wav2lip-gaudi:latest -f comps/animation/src/integration/dependency/Dockerfile.intel_hpu .
```

### 1.1.2 Animation server image

```bash
docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/wav2lip/Dockerfile .
docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/src/Dockerfile .
```

## 1.2. Set environment variables
Expand Down Expand Up @@ -78,13 +78,13 @@ export FPS=10
- Xeon CPU

```bash
docker run --privileged -d --name "wav2lip-service" -p 7860:7860 --ipc=host -w /home/user/comps/animation/wav2lip -e PYTHON=/usr/bin/python3.11 -v $(pwd)/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip:latest
docker run --privileged -d --name "wav2lip-service" -p 7860:7860 --ipc=host -w /home/user/comps/animation/src -e PYTHON=/usr/bin/python3.11 -v $(pwd)/comps/animation/src/assets:/home/user/comps/animation/src/assets -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip:latest
```

- Gaudi2 HPU

```bash
docker run --privileged -d --name "wav2lip-gaudi-service" -p 7860:7860 --runtime=habana --cap-add=sys_nice --ipc=host -w /home/user/comps/animation/wav2lip -v $(pwd)/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PYTHON=/usr/bin/python3.10 -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip-gaudi:latest
docker run --privileged -d --name "wav2lip-gaudi-service" -p 7860:7860 --runtime=habana --cap-add=sys_nice --ipc=host -w /home/user/comps/animation/src -v $(pwd)/comps/animation/src/assets:/home/user/comps/animation/src/assets -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PYTHON=/usr/bin/python3.10 -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip-gaudi:latest
```

## 2.2 Run Animation Microservice
Expand All @@ -101,28 +101,28 @@ Once microservice starts, user can use below script to validate the running micr

```bash
cd GenAIComps
python3 comps/animation/wav2lip/dependency/check_wav2lip_server.py
python3 comps/animation/src/integration/dependency/check_wav2lip_server.py
```

## 3.2 Validate Animation service

```bash
cd GenAIComps
export ip_address=$(hostname -I | awk '{print $1}')
curl http://${ip_address}:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/wav2lip/assets/audio/sample_question.json
curl http://${ip_address}:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/src/assets/audio/sample_question.json
```

or

```bash
cd GenAIComps
python3 comps/animation/wav2lip/dependency/check_animation_server.py
python3 comps/animation/src/integration/dependency/check_animation_server.py
```

The expected output will be a message similar to the following:

```bash
{'wav2lip_result': '....../GenAIComps/comps/animation/wav2lip/assets/outputs/result.mp4'}
{'wav2lip_result': '....../GenAIComps/comps/animation/src/assets/outputs/result.mp4'}
```

Please find "comps/animation/wav2lip/assets/outputs/result.mp4" as a reference generated video.
Please find "comps/animation/src/assets/outputs/result.mp4" as a reference generated video.
File renamed without changes.
File renamed without changes
File renamed without changes
File renamed without changes
File renamed without changes
File renamed without changes
File renamed without changes
File renamed without changes
File renamed without changes
File renamed without changes
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
outfile = os.environ.get("OUTFILE")

# Read the JSON file
with open("comps/animation/wav2lip/assets/audio/sample_question.json", "r") as file:
with open("comps/animation/src/assets/audio/sample_question.json", "r") as file:
data = json.load(file)

response = requests.post(url=endpoint, json=data, headers={"Content-Type": "application/json"}, proxies={"http": None})
Expand Down
File renamed without changes.
2 changes: 2 additions & 0 deletions comps/animation/src/integration/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,11 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missin
# Install GenAIComps
RUN mkdir -p /home/user/comps
COPY comps /home/user/comps
COPY comps/animation/wav2lip/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh
COPY comps/animation/src/integration/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh

# Install ffmpeg with x264 software codec
RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/wav2lip/FFmpeg
WORKDIR /home/user/comps/animation/wav2lip/FFmpeg
RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/src/FFmpeg
WORKDIR /home/user/comps/animation/src/FFmpeg
RUN ./configure --enable-gpl --enable-libx264 --enable-cross-compile && \
make -j$(($(nproc)-1)) && \
make install && \
Expand All @@ -53,7 +53,7 @@ ENV PYTHONPATH="$PYTHONPATH:/usr/local/lib/python3.11/site-packages/gfpgan"
WORKDIR /usr/local/lib/python3.11/site-packages

# Install pip dependencies
RUN pip install -r /home/user/comps/animation/wav2lip/requirements.txt
RUN pip install -r /home/user/comps/animation/src/requirements.txt

# Custom patches
# Modify the degradations.py file to import rgb_to_grayscale from torchvision.transforms.functional
Expand All @@ -66,7 +66,7 @@ RUN sed -i "s/if 'cpu' not in device and 'cuda' not in device:/if 'cpu' not in d
RUN sed -i 's/hp.sample_rate, hp.n_fft/sr=hp.sample_rate, n_fft=hp.n_fft/' /usr/local/lib/python3.11/site-packages/Wav2Lip/audio.py

# Set the working directory
WORKDIR /home/user/comps/animation/wav2lip/
WORKDIR /home/user/comps/animation/src/

# Define the command to run when the container starts
RUN chmod +x /usr/local/bin/entrypoint.sh
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ RUN rm -rf /var/lib/apt/lists/*
# Install GenAIComps
RUN mkdir -p /home/user/comps
COPY comps /home/user/comps
COPY comps/animation/wav2lip/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh
COPY comps/animation/src/integration/dependency/entrypoint.sh /usr/local/bin/entrypoint.sh

# Install ffmpeg with x264 software codec
RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/FFmpeg
Expand Down Expand Up @@ -47,7 +47,7 @@ ENV PYTHONPATH="$PYTHONPATH:/usr/local/lib/python3.10/dist-packages/gfpgan"
WORKDIR /usr/local/lib/python3.10/dist-packages

# Install pip dependencies
RUN pip install -r /home/user/comps/animation/wav2lip/requirements.txt
RUN pip install -r /home/user/comps/animation/src/requirements.txt

# Custom patches
# Modify the degradations.py file to import rgb_to_grayscale from torchvision.transforms.functional
Expand All @@ -60,7 +60,7 @@ RUN sed -i "s/if 'cpu' not in device and 'cuda' not in device:/if 'cpu' not in d
RUN sed -i 's/hp.sample_rate, hp.n_fft/sr=hp.sample_rate, n_fft=hp.n_fft/' /usr/local/lib/python3.10/dist-packages/Wav2Lip/audio.py

# Set the working directory
WORKDIR /home/user/comps/animation/wav2lip
WORKDIR /home/user/comps/animation/src

# Define the command to run when the container starts
RUN chmod +x /usr/local/bin/entrypoint.sh
Expand Down
2 changes: 2 additions & 0 deletions comps/animation/src/integration/dependency/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
outfile = os.environ.get("OUTFILE")

# Read the JSON file
with open("comps/animation/wav2lip/assets/audio/sample_whoareyou.json", "r") as file:
with open("comps/animation/src/assets/audio/sample_whoareyou.json", "r") as file:
data = json.load(file)

inputs = {"audio": data["byte_str"], "max_tokens": 64}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ export PT_HPU_LAZY_MODE=0
export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=1

# Wav2Lip, GFPGAN
cd /home/user/comps/animation/wav2lip/ || exit
cd /home/user/comps/animation/src/integration/ || exit
python3 dependency/wav2lip_server.py \
--device $DEVICE \
--port $((WAV2LIP_PORT)) \
Expand Down
File renamed without changes.
50 changes: 50 additions & 0 deletions comps/animation/src/integration/opea.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import json
import os

import requests

from comps import CustomLogger, OpeaComponent, ServiceType

logger = CustomLogger("opea_animation")
logflag = os.getenv("LOGFLAG", False)


class OpeaAnimation(OpeaComponent):
    """A specialized animation component derived from OpeaComponent.

    Wraps a remote wav2lip server (located via the WAV2LIP_ENDPOINT
    environment variable) behind the OpeaComponent interface.
    """

    def __init__(self, name: str, description: str, config: dict = None):
        super().__init__(name, ServiceType.ANIMATION.name.lower(), description, config)
        # Base URL of the backing wav2lip server.
        self.base_url = os.getenv("WAV2LIP_ENDPOINT", "http://localhost:7860")

    def invoke(self, input: str):
        """Invoke the wav2lip animation service on the given audio input.

        Args:
            input: audio payload (byte string) forwarded to the server.

        Returns:
            The value the server reports under the "wav2lip_result" key
            (per the README, a path to the generated video file).
        """
        payload = json.dumps({"audio": input})
        reply = requests.post(url=f"{self.base_url}/v1/wav2lip", data=payload, proxies={"http": None})
        return reply.json()["wav2lip_result"]

    def check_health(self) -> bool:
        """Checks the health of the animation service.

        Returns:
            bool: True if the service is reachable and healthy, False otherwise.
        """
        try:
            status = requests.get(f"{self.base_url}/v1/health").status_code
        except Exception as e:
            # Handle connection errors, timeouts, etc.
            logger.error(f"Health check failed: {e}")
            return False
        # A 200 response is the only status treated as healthy.
        return status == 200
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,11 @@
import os
import time

import requests

# GenAIComps
from comps import CustomLogger
from comps import CustomLogger, OpeaComponentController
from comps.animation.src.integration.opea import OpeaAnimation

logger = CustomLogger("animation")
logger = CustomLogger("opea_animation")
logflag = os.getenv("LOGFLAG", False)
from comps import (
Base64ByteStrDoc,
Expand All @@ -25,6 +24,23 @@
statistics_dict,
)

# Initialize OpeaComponentController
controller = OpeaComponentController()

# Register components
try:
# Instantiate Animation component and register it to controller
opea_animation = OpeaAnimation(
name="OpeaAnimation",
description="OPEA Animation Service",
)
controller.register(opea_animation)

# Discover and activate a healthy component
controller.discover_and_activate()
except Exception as e:
logger.error(f"Failed to initialize components: {e}")


# Register the microservice
@register_microservice(
Expand All @@ -37,26 +53,17 @@
output_datatype=VideoPath,
)
@register_statistics(names=["opea_service@animation"])
async def animate(audio: Base64ByteStrDoc):
def animate(audio: Base64ByteStrDoc):
start = time.time()

byte_str = audio.byte_str
inputs = {"audio": byte_str}
if logflag:
logger.info(inputs)

response = requests.post(url=f"{wav2lip_endpoint}/v1/wav2lip", data=json.dumps(inputs), proxies={"http": None})

outfile = response.json()["wav2lip_result"]
outfile = opea_animation.invoke(audio.byte_str)
if logflag:
logger.info(response)
logger.info(f"Video generated successfully, check {outfile} for the result.")

statistics_dict["opea_service@animation"].append_latency(time.time() - start, None)
return VideoPath(video_path=outfile)


if __name__ == "__main__":
wav2lip_endpoint = os.getenv("WAV2LIP_ENDPOINT", "http://localhost:7860")
logger.info("[animation - router] Animation initialized.")
opea_microservices["opea_service@animation"].start()
File renamed without changes.
30 changes: 0 additions & 30 deletions comps/dataprep/multimedia2text/Dockerfile

This file was deleted.

Loading

0 comments on commit 9d9c919

Please sign in to comment.