# docker-compose.yml
# Voice-chat stack: nginx frontend plus three GPU inference backends
# (LLM text generation, TTS speech synthesis, ASR speech recognition).
# NOTE(review): '3.8' (was '3') — x- extension fields need file format >= 3.4,
# and GPU device reservations need a recent Compose anyway. Modern Compose v2
# treats `version` as obsolete and ignores it, so this stays backward-compatible.
version: '3.8'

# Shared single-GPU reservation, reused by every inference service below
# via YAML anchor/alias instead of three copy-pasted stanzas.
x-gpu-deploy: &gpu-deploy
  resources:
    reservations:
      devices:
        - driver: nvidia
          count: 1
          capabilities: [gpu]

services:
  # Serves the built web app and (presumably, per nginx.conf — not visible
  # here) fronts the three backend services.
  nginx:
    image: nginx:alpine
    ports:
      - "5000:80"  # host 5000 -> container 80
    volumes:
      - ./web-app/build:/usr/share/nginx/html
      - ./nginx.conf:/etc/nginx/nginx.conf:ro  # config mounted read-only
    depends_on:
      - llm
      - tts
      - asr

  # Text generation: Hugging Face TGI serving an AWQ-quantized OpenChat 3.5.
  llm:
    image: ghcr.io/huggingface/text-generation-inference:1.1.0
    ports:
      - "8080:8080"
    environment:
      - MODEL_ID=TheBloke/openchat_3.5-AWQ
      - PORT=8080
      - QUANTIZE=awq
      - MAX_INPUT_LEN=3696
      - MAX_TOTAL_TOKENS=4096
      - MAX_BATCH_PREFILL_TOKENS=4096
      # Caps TGI's VRAM use — presumably to leave headroom for the TTS/ASR
      # containers sharing the same GPU; confirm against actual memory usage.
      - CUDA_MEMORY_FRACTION=0.6
    deploy: *gpu-deploy

  # Speech synthesis: Coqui XTTS streaming server (CUDA 12.1 build, pinned SHA).
  tts:
    image: ghcr.io/coqui-ai/xtts-streaming-server:main-cuda121-818a108b41be2dd43dada04bd319fdfcdabc5c6a
    ports:
      - "8000:80"
    # Uncomment the following lines to use your own models
    # volumes:
    #   - /media/julian/Workdisk/models/ai_voice_chat:/app/tts_models
    environment:
      - COQUI_TOS_AGREED=1
    deploy: *gpu-deploy

  # Speech recognition: Whisper ASR webservice running faster-whisper large-v3.
  asr:
    image: onerahmet/openai-whisper-asr-webservice:v1.2.4-gpu
    ports:
      - "9000:9000"
    environment:
      - ASR_ENGINE=faster_whisper
      - ASR_MODEL=large-v3
    deploy: *gpu-deploy