diff --git a/Dockerfile b/Dockerfile
index c357c30..0f81c73 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,48 +1,29 @@
-FROM pytorch/pytorch:2.3.1-cuda12.1-cudnn8-runtime
+FROM ghcr.io/moriyalab/docker-ffmpeg:latest
 
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    git \
-    vim \
-    curl \
-    wget \
-    xz-utils \
-    libavutil-dev \
-    libavcodec-dev \
-    libavformat-dev \
-    libswscale-dev \
-    pkg-config \
-    build-essential \
-    libffi-dev
-RUN pip install --upgrade pip setuptools
-
-RUN wget https://johnvansickle.com/ffmpeg/builds/ffmpeg-git-amd64-static.tar.xz \
-    && tar xvf ./ffmpeg-git-amd64-static.tar.xz \
-    && cp ./ffmpeg*amd64-static/ffmpeg /usr/local/bin/
+WORKDIR /workspace
+RUN apt-get update && apt-get install -y --no-install-recommends git vim curl python3 python3-pip libgl1-mesa-glx && rm -rf /var/lib/apt/lists/*
+RUN pip install --no-cache-dir -U pip && \
+    pip install --no-cache-dir torch==2.5.1 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124 && \
+    pip install --no-cache-dir \
+        matplotlib==3.7 \
+        setuptools==75.6.0 \
+        tikzplotlib \
+        jpeg4py \
+        opencv-python \
+        lmdb \
+        pandas \
+        scipy \
+        loguru \
+        flake8 \
+        hydra-core \
+        iopath \
+        ultralytics==8.2.63 \
+        gradio==4.44.0 \
+        ffmpeg-python==0.2.0 \
+        gdown==5.2 \
+        lapx==0.5.10
 
 RUN git clone -b master --single-branch --depth=1 https://github.com/moriyalab/samurai.git
-RUN cd /workspace/samurai/sam2 && \
-    pip install -e .
-
-RUN cd /workspace/samurai/sam2 && \
-    pip install -e ".[notebooks]" && \
-    pip install \
-        matplotlib==3.7 \
-        tikzplotlib \
-        jpeg4py \
-        opencv-python \
-        lmdb \
-        pandas \
-        scipy \
-        loguru \
-        flake8 \
-        ultralytics==8.2.63 \
-        gradio==4.44.0 \
-        ffmpeg-python==0.2.0 \
-        gdown==5.2 \
-        lapx==0.5.10
-
 RUN cd /workspace/samurai/sam2/checkpoints && \
     ./download_ckpts.sh && \
     cd ..
-
-RUN mkdir -p /workspace/horus_inference_server
diff --git a/run_dev_env.sh b/run_dev_env.sh
index 0d17b07..2606890 100755
--- a/run_dev_env.sh
+++ b/run_dev_env.sh
@@ -32,7 +32,7 @@ PLATFORM="$(uname -m)"
 if [ $PLATFORM = "x86_64" ]; then
     echo "x86"
     docker pull ghcr.io/moriyalab/horus_inference_server:latest
-    docker run -it --rm --gpus all --runtime nvidia --shm-size=32G -v $ROOT:/workspace/horus_inference_server -w /workspace/horus_inference_server --network host ghcr.io/moriyalab/horus_inference_server:latest
+    docker run -it --rm --gpus all --runtime nvidia -u $(id -u):$(id -g) --shm-size=32G -v $ROOT:/workspace/horus_inference_server -w /workspace/horus_inference_server --network host ghcr.io/moriyalab/horus_inference_server:latest
 else
     echo "Not Support Platform. Only support x86."
 fi
\ No newline at end of file