From da7af7aaf911c98a27088bb9101cd4e7bbddce37 Mon Sep 17 00:00:00 2001
From: Julian Weber
Date: Wed, 13 Dec 2023 13:24:17 +0100
Subject: [PATCH] Add cpu ci (#15)

* Add CI

* readme

* Update readme

* Fix readme

---------

Co-authored-by: Joshua Meyer
---
 .github/workflows/build-and-push-to-ghcr.yml | 44 +++++++++++
 README.md                                    | 81 +++++++++++---------
 2 files changed, 89 insertions(+), 36 deletions(-)

diff --git a/.github/workflows/build-and-push-to-ghcr.yml b/.github/workflows/build-and-push-to-ghcr.yml
index 3425cbb..3e6267f 100644
--- a/.github/workflows/build-and-push-to-ghcr.yml
+++ b/.github/workflows/build-and-push-to-ghcr.yml
@@ -93,3 +93,47 @@ jobs:
           cache-to: type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-latest-cuda121
           tags: ghcr.io/coqui-ai/xtts-streaming-server:latest-cuda121, ghcr.io/coqui-ai/xtts-streaming-server:main-cuda121-${{ github.sha }}
           #build-args:
+  build-and-push-to-ghcr-cpu:
+    runs-on: ubuntu-22.04
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: 'Login to GitHub Container Registry'
+        run: |
+          set -xe
+          docker login --username ${{ github.actor }} --password ${{ secrets.GITHUB_TOKEN }} ghcr.io
+
+      - name: 'Remove cache'
+        run: |
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf "/usr/local/share/boost"
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+
+      - name: Build only for PR CPU
+        if: github.ref != 'refs/heads/main'
+        uses: docker/build-push-action@v5
+        with:
+          context: "{{defaultContext}}:server"
+          file: Dockerfile.cpu
+          push: false # Do not push image for PR
+          cache-from: type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-latest-cpu; type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-pr-cpu-${{ github.event.number }}
+          cache-to: type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-pr-cpu-${{ github.event.number }}
+
+      - name: Build and Push image CPU
+        if: github.ref == 'refs/heads/main'
+        uses: docker/build-push-action@v5
+        with:
+          context: "{{defaultContext}}:server"
+          file: Dockerfile.cpu
+          push: true # Push if merged
+          cache-from: type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-latest-cpu
+          cache-to: type=registry,ref=ghcr.io/coqui-ai/xtts-streaming-server:cache-latest-cpu
+          tags: ghcr.io/coqui-ai/xtts-streaming-server:latest-cpu, ghcr.io/coqui-ai/xtts-streaming-server:main-cpu-${{ github.sha }}
+          #build-args:
diff --git a/README.md b/README.md
index f4805cf..564b4ae 100644
--- a/README.md
+++ b/README.md
@@ -1,70 +1,79 @@
 # XTTS streaming server
 
-## Running the server
+## 1) Run the server
 
-To run a pre-built container (CUDA 11.8):
+### Use a pre-built image
+
+CUDA 12.1:
+
+```bash
+$ docker run --gpus=all -e COQUI_TOS_AGREED=1 --rm -p 8000:80 ghcr.io/coqui-ai/xtts-streaming-server:latest-cuda121
+```
+
+CUDA 11.8 (for older cards):
 
 ```bash
 $ docker run --gpus=all -e COQUI_TOS_AGREED=1 --rm -p 8000:80 ghcr.io/coqui-ai/xtts-streaming-server:latest
 ```
 
-CUDA 12.1 version (for newer cards)
+CPU (not recommended):
+
 ```bash
-$ docker run --gpus=all -e COQUI_TOS_AGREED=1 --rm -p 8000:80 ghcr.io/coqui-ai/xtts-streaming-server:latest-cuda121
+$ docker run -e COQUI_TOS_AGREED=1 --rm -p 8000:80 ghcr.io/coqui-ai/xtts-streaming-server:latest-cpu
 ```
 
-Run with a custom XTTS v2 model (FT or previous versions):
+Run with a fine-tuned model:
+
+Make sure the model folder `/path/to/model/folder` contains the following files:
+- `config.json`
+- `model.pth`
+- `vocab.json`
+
 ```bash
 $ docker run -v /path/to/model/folder:/app/tts_models --gpus=all -e COQUI_TOS_AGREED=1 --rm -p 8000:80 ghcr.io/coqui-ai/xtts-streaming-server:latest`
 ```
 
 Setting the `COQUI_TOS_AGREED` environment variable to `1` indicates you have read and agreed to
-the terms of the [CPML license](https://coqui.ai/cpml).
+the terms of the [CPML license](https://coqui.ai/cpml). (Fine-tuned XTTS models are also under the [CPML license](https://coqui.ai/cpml))
 
-(Fine-tuned XTTS models also are under the [CPML license](https://coqui.ai/cpml))
+### Build the image yourself
 
-## Testing the server
+To build the Docker container (Pytorch 2.1, CUDA 11.8 by default):
 
-### Using the gradio demo
+`DOCKERFILE` may be `Dockerfile`, `Dockerfile.cpu`, `Dockerfile.cuda121`, or your own custom Dockerfile.
 
 ```bash
-$ python -m pip install -r test/requirements.txt
-$ python demo.py
+$ git clone git@github.com:coqui-ai/xtts-streaming-server.git
+$ cd xtts-streaming-server/server
+$ docker build -t xtts-stream . -f DOCKERFILE
+$ docker run --gpus all -e COQUI_TOS_AGREED=1 --rm -p 8000:80 xtts-stream
 ```
 
-### Using the test script
+Setting the `COQUI_TOS_AGREED` environment variable to `1` indicates you have read and agreed to
+the terms of the [CPML license](https://coqui.ai/cpml). (Fine-tuned XTTS models are also under the [CPML license](https://coqui.ai/cpml))
 
-```bash
-$ cd test
-$ python -m pip install -r requirements.txt
-$ python test_streaming.py
-```
+## 2) Test the running server
 
-## Building the container
+Once your Docker container is running, you can test that it's working properly. Run the following from a fresh terminal.
 
-1. To build the Docker container Pytorch 2.1 and CUDA 11.8 :
+### Clone `xtts-streaming-server` if you haven't already
 
 ```bash
-$ cd server
-$ docker build -t xtts-stream .
+$ git clone git@github.com:coqui-ai/xtts-streaming-server.git
 ```
-For Pytorch 2.1 and CUDA 12.1 :
-```bash
-$ cd server
-docker build -t xtts-stream . -f Dockerfile.cuda121
-```
-2. Run the server container:
+
+### Using the gradio demo
 
 ```bash
-$ docker run --gpus all -e COQUI_TOS_AGREED=1 --rm -p 8000:80 xtts-stream
+$ cd xtts-streaming-server
+$ python -m pip install -r test/requirements.txt
+$ python demo.py
 ```
 
-Setting the `COQUI_TOS_AGREED` environment variable to `1` indicates you have read and agreed to
-the terms of the [CPML license](https://coqui.ai/cpml).
-
-
-Make sure the model folder contains the following files:
-- `config.json`
-- `model.pth`
-- `vocab.json`
+### Using the test script
+```bash
+$ cd xtts-streaming-server/test
+$ python -m pip install -r requirements.txt
+$ python test_streaming.py
+```
\ No newline at end of file
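
For anyone who wants to exercise this change before CI does, here is a minimal local smoke test. It assumes the patch is saved as `0001-add-cpu-ci.patch` (a hypothetical filename) and is run from a clone of `xtts-streaming-server`; `Dockerfile.cpu` and `COQUI_TOS_AGREED` come from the patch itself, while the local image tag is illustrative.

```bash
# Apply the patch to the current branch.
git am 0001-add-cpu-ci.patch

# Build the CPU image the same way the new CI job does
# (build context "server", file Dockerfile.cpu).
cd server
docker build -t xtts-stream-cpu . -f Dockerfile.cpu

# Run it. Note there is no --gpus flag for the CPU image, and
# COQUI_TOS_AGREED=1 indicates agreement to the CPML license.
docker run -e COQUI_TOS_AGREED=1 --rm -p 8000:80 xtts-stream-cpu
```

Once the container is up, the test steps from the updated README (the gradio demo or `test/test_streaming.py`) apply unchanged.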