# pyproject.toml
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
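
# With this [build-system] table, a PEP 517 frontend such as `pip install .` or
# `python -m build` provisions setuptools>=61.0 in an isolated build environment,
# independent of the runtime pins declared under [project] below.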

[project]
name = "llama-omni"
version = "1.0.0"
description = "Towards a GPT-4o-like large speech-language model."
readme = "README.md"
requires-python = ">=3.10"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
]
dependencies = [
"torch==2.1.2", "torchvision==0.16.2", "torchaudio==2.1.2",
"transformers==4.43.4", "tokenizers==0.19.1", "sentencepiece==0.1.99", "shortuuid",
"accelerate==0.33.0", "peft==0.11.1", "bitsandbytes==0.43.1",
"pydantic", "markdown2[all]", "numpy", "scikit-learn==1.2.2",
"gradio==4.43.0", "gradio_client==1.3.0",
"requests", "httpx==0.27.2", "uvicorn", "fastapi", "soundfile",
"einops==0.6.1", "einops-exts==0.0.4", "timm==0.6.13",
"openai-whisper", "setuptools==59.5.0", "omegaconf==2.0.6",
]
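
# A minimal install sketch for the pinned stack above (assumes a checkout of
# this repository as the working directory; editable mode is one common choice):
#   pip install -e .
# NOTE: the runtime pin setuptools==59.5.0 does not conflict with the
# setuptools>=61.0 build requirement, since PEP 517 builds run in an
# isolated environment.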

[project.optional-dependencies]
train = ["deepspeed==0.12.6", "ninja", "wandb", "tensorboardX"]
build = ["build", "twine"]
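
# A usage sketch for the extras defined above:
#   pip install -e ".[train]"   # deepspeed, ninja, wandb, tensorboardX for training
#   pip install -e ".[build]"   # the `build` and `twine` packaging tools
# With the build extra installed, wheels and sdists can be produced via:
#   python -m build
# and uploaded with:
#   twine upload dist/*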

[tool.setuptools.packages.find]
exclude = ["data", "checkpoints", "logs", "models", "fairseq", "flash-attention"]

[tool.wheel]
exclude = ["data", "checkpoints", "logs", "models", "fairseq", "flash-attention"]
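
# To preview which packages the exclude list keeps, one quick check (a sketch;
# the exclude patterns are copied verbatim from this file) is:
#   python -c "from setuptools import find_packages; print(find_packages(exclude=['data', 'checkpoints', 'logs', 'models', 'fairseq', 'flash-attention']))"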