docker: ComfyUI model downloading/caching #284

Merged (5 commits) on Nov 22, 2024
28 changes: 24 additions & 4 deletions runner/dl_checkpoints.sh
@@ -77,18 +77,23 @@ function download_all_models() {
# Custom pipeline models.
huggingface-cli download facebook/sam2-hiera-large --include "*.pt" "*.yaml" --cache-dir models

# Download live-video-to-video models.
download_live_models
}

# Download models only for the live-video-to-video pipeline.
function download_live_models() {
huggingface-cli download KBlueLeaf/kohaku-v2.1 --include "*.safetensors" "*.json" "*.txt" --exclude ".onnx" ".onnx_data" --cache-dir models
huggingface-cli download stabilityai/sd-turbo --include "*.safetensors" "*.json" "*.txt" --exclude ".onnx" ".onnx_data" --cache-dir models
huggingface-cli download warmshao/FasterLivePortrait --local-dir models/FasterLivePortrait--checkpoints
huggingface-cli download yuvraj108c/Depth-Anything-Onnx --include depth_anything_vitl14.onnx --local-dir models/ComfyUI--models/Depth-Anything-Onnx
}

function build_tensorrt_models() {
download_all_models
download_live_models

printf "\nBuilding TensorRT models...\n"

# Matrix of models and timesteps to compile StreamDiffusion TensorRT engines for.
# StreamDiffusion (compile a matrix of models and timesteps)
MODELS="stabilityai/sd-turbo KBlueLeaf/kohaku-v2.1"
TIMESTEPS="3 4" # This is basically the supported sizes for the t_index_list
docker run --rm -it -v ./models:/models --gpus all \
@@ -100,6 +105,7 @@ function build_tensorrt_models() {
done
done"

# FasterLivePortrait
docker run --rm -it -v ./models:/models --gpus all \
livepeer/ai-runner:live-app-liveportrait \
bash -c "cd /app/app/live/FasterLivePortrait && \
@@ -115,6 +121,14 @@ function build_tensorrt_models() {
else
echo 'Animal LivePortrait TensorRT engines already exist, skipping build'
fi"

# ComfyUI (only DepthAnything for now)
docker run --rm -it -v ./models:/models --gpus all \
livepeer/ai-runner:live-app-comfyui \
bash -c "cd /comfyui/models/Depth-Anything-Onnx && \
python /comfyui/custom_nodes/ComfyUI-Depth-Anything-Tensorrt/export_trt.py && \
mkdir -p /comfyui/models/tensorrt/depth-anything && \
mv *.engine /comfyui/models/tensorrt/depth-anything"
}

# Download models with a restrictive license.
@@ -148,6 +162,10 @@ do
MODE="restricted"
shift
;;
--live)
MODE="live"
shift
;;
--tensorrt)
MODE="tensorrt"
shift
@@ -165,7 +183,7 @@ done
echo "Starting livepeer AI subnet model downloader..."
echo "Creating 'models' directory in the current working directory..."
mkdir -p models
mkdir -p models/StreamDiffusion--engines models/FasterLivePortrait--checkpoints
mkdir -p models/StreamDiffusion--engines models/FasterLivePortrait--checkpoints models/ComfyUI--models

# Ensure 'huggingface-cli' is installed.
echo "Checking if 'huggingface-cli' is installed..."
@@ -178,6 +196,8 @@ if [ "$MODE" = "beta" ]; then
download_beta_models
elif [ "$MODE" = "restricted" ]; then
download_restricted_models
elif [ "$MODE" = "live" ]; then
download_live_models
elif [ "$MODE" = "tensorrt" ]; then
build_tensorrt_models
else
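For reference, a minimal sketch of how the updated script might be invoked; the working directory and the assumption that the livepeer/ai-runner:live-app-* images are already built are mine, not part of this diff:

```bash
# Sketch only: assumes this is run from the runner/ directory and that the
# live-app images used by build_tensorrt_models (StreamDiffusion,
# FasterLivePortrait, ComfyUI) have already been built.

# New --live mode: download only the live-video-to-video models.
./dl_checkpoints.sh --live

# --tensorrt mode: download the needed models, then compile the TensorRT
# engines, now including the ComfyUI DepthAnything engine.
./dl_checkpoints.sh --tensorrt
```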
5 changes: 3 additions & 2 deletions runner/docker/Dockerfile.live-base-comfyui
@@ -22,7 +22,7 @@ RUN cd /comfyui/custom_nodes && \
pip install -r requirements.txt

# Upgrade TensorRT to 10.6.0
RUN pip uninstall -y tensorrt && \
pip install tensorrt==10.6.0

RUN pip install torch==2.5.1 torchvision torchaudio tqdm
@@ -37,4 +37,5 @@ RUN git clone https://github.com/yondonfu/comfystream.git && \

# Set up ComfyUI workspace
ENV COMFY_UI_WORKSPACE="/comfyui"
RUN ln -s /comfyui/models /models
RUN ln -s /models/ComfyUI--models /comfyui/models
# TODO: Consider linking the custom nodes directory as well and set those up in the host, similar to the models directory
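The symlink direction is reversed here so that ComfyUI's model directory resolves to the host-mounted models volume, which is what lets dl_checkpoints.sh cache and reuse ComfyUI models across container runs. A rough illustration; the exact run command below is an assumption, not part of the diff:

```bash
# dl_checkpoints.sh creates this directory on the host and downloads the
# ComfyUI models (e.g. Depth-Anything-Onnx) into it.
ls ./models/ComfyUI--models

# Inside the image, /comfyui/models -> /models/ComfyUI--models, so mounting
# the host ./models directory at /models makes the cached models (and any
# compiled TensorRT engines) visible to ComfyUI.
docker run --rm -it --gpus all -v ./models:/models \
  livepeer/ai-runner:live-app-comfyui \
  ls -l /comfyui/models
```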
9 changes: 3 additions & 6 deletions runner/docker/README.md
@@ -47,15 +47,12 @@ docker build -t livepeer/ai-runner:live-app-${PIPELINE} -f docker/Dockerfile.liv

2. Download Depth Anything model
```
mkdir models
wget https://huggingface.co/yuvraj108c/Depth-Anything-Onnx/resolve/main/depth_anything_vitl14.onnx -P models
./dl_checkpoints --live
```

3. Build Depth Anything Engine
```
docker run -it --rm --name video-to-video --gpus all -v ./models:/models livepeer/ai-runner:live-app-comfyui /bin/bash -c "cd /models; python /comfyui/custom_nodes/ComfyUI-Depth-Anything-Tensorrt/export_trt.py"
mkdir -p ./models/tensorrt/depth-anything
mv ./models/*.engine ./models/tensorrt/depth-anything
./dl_checkpoints --tensorrt
```

4. Start Docker container
@@ -76,4 +73,4 @@ docker build -t livepeer/ai-runner:live-app-${PIPELINE} -f docker/Dockerfile.liv
2. Start Docker container
```
docker run -it --rm --name video-to-video -p 8000:8000 -e PIPELINE=live-video-to-video -e MODEL_ID=noop livepeer/ai-runner:live-app-noop
```
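With the simplified README steps above, the compiled DepthAnything engine should land under the host ComfyUI--models directory via the volume mount. A quick sanity check might look like this (engine filenames depend on export_trt.py and are not specified in the diff):

```bash
# build_tensorrt_models moves the exported *.engine files into
# /comfyui/models/tensorrt/depth-anything inside the container, which maps
# to this directory on the host:
ls ./models/ComfyUI--models/tensorrt/depth-anything/
```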