update gradio app to be easier to use
lpscr committed Oct 23, 2024
1 parent 74b9ff9 commit f2f9396
Showing 2 changed files with 205 additions and 55 deletions.
finetune-cli.py (35 changes: 25 additions & 10 deletions)
@@ -14,26 +14,35 @@

 # -------------------------- Argument Parsing --------------------------- #
 def parse_args():
+    # batch_size_per_gpu = 1000 setting for gpu 8GB
+    # batch_size_per_gpu = 1600 setting for gpu 12GB
+    # batch_size_per_gpu = 2000 setting for gpu 16GB
+    # batch_size_per_gpu = 3200 setting for gpu 24GB
+
+    # num_warmup_updates = 500 for 10000 samples
+
+    # change save_per_updates and last_per_steps as needed
+
     parser = argparse.ArgumentParser(description="Train CFM Model")

     parser.add_argument(
         "--exp_name", type=str, default="F5TTS_Base", choices=["F5TTS_Base", "E2TTS_Base"], help="Experiment name"
     )
     parser.add_argument("--dataset_name", type=str, default="Emilia_ZH_EN", help="Name of the dataset to use")
-    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate for training")
-    parser.add_argument("--batch_size_per_gpu", type=int, default=256, help="Batch size per GPU")
+    parser.add_argument("--learning_rate", type=float, default=1e-5, help="Learning rate for training")
+    parser.add_argument("--batch_size_per_gpu", type=int, default=3200, help="Batch size per GPU")
     parser.add_argument(
         "--batch_size_type", type=str, default="frame", choices=["frame", "sample"], help="Batch size type"
     )
-    parser.add_argument("--max_samples", type=int, default=16, help="Max sequences per batch")
+    parser.add_argument("--max_samples", type=int, default=64, help="Max sequences per batch")
     parser.add_argument("--grad_accumulation_steps", type=int, default=1, help="Gradient accumulation steps")
     parser.add_argument("--max_grad_norm", type=float, default=1.0, help="Max gradient norm for clipping")
     parser.add_argument("--epochs", type=int, default=10, help="Number of training epochs")
-    parser.add_argument("--num_warmup_updates", type=int, default=5, help="Warmup steps")
-    parser.add_argument("--save_per_updates", type=int, default=10, help="Save checkpoint every X steps")
-    parser.add_argument("--last_per_steps", type=int, default=10, help="Save last checkpoint every X steps")
+    parser.add_argument("--num_warmup_updates", type=int, default=500, help="Warmup steps")
+    parser.add_argument("--save_per_updates", type=int, default=10000, help="Save checkpoint every X steps")
+    parser.add_argument("--last_per_steps", type=int, default=20000, help="Save last checkpoint every X steps")
     parser.add_argument("--finetune", type=bool, default=True, help="Use Finetune")

+    parser.add_argument("--pretrain", type=str, default=None, help="Use pretrain model for finetune")
     parser.add_argument(
         "--tokenizer", type=str, default="pinyin", choices=["pinyin", "char", "custom"], help="Tokenizer type"
     )
@@ -59,14 +68,19 @@ def main():
         model_cls = DiT
         model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
         if args.finetune:
-            ckpt_path = str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.pt"))
+            if args.pretrain == "":
+                ckpt_path = str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.pt"))
+            else:
+                ckpt_path = args.pretrain
     elif args.exp_name == "E2TTS_Base":
         wandb_resume_id = None
         model_cls = UNetT
         model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
         if args.finetune:
-            ckpt_path = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.pt"))
+
+            if args.pretrain == "":
+                ckpt_path = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.pt"))
+            else:
+                ckpt_path = args.pretrain

     if args.finetune:
         path_ckpt = os.path.join("ckpts", args.dataset_name)
         if not os.path.isdir(path_ckpt):
@@ -117,6 +131,7 @@ def main():
     )

     train_dataset = load_dataset(args.dataset_name, tokenizer, mel_spec_kwargs=mel_spec_kwargs)
+
     trainer.train(
         train_dataset,
         resumable_with_seed=666,  # seed for shuffling dataset
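The substantive change in the second hunk is how the fine-tuning checkpoint is resolved: an empty --pretrain falls back to the stock Hugging Face checkpoint for the chosen experiment, and any other value is used as-is. A minimal sketch of that control flow as a standalone helper (the function and dict names are ours, not the repo's); note the argparse default for --pretrain is None while the diff compares against "", so this sketch treats any falsy value as "use the default":

from cached_path import cached_path  # same helper the diff uses for hf:// URLs

# Stock checkpoints per experiment, as hard-coded in the diff above.
DEFAULT_CKPTS = {
    "F5TTS_Base": "hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.pt",
    "E2TTS_Base": "hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.pt",
}

def resolve_ckpt_path(exp_name, pretrain):
    """Return the checkpoint to fine-tune from: the user-supplied
    --pretrain path if given, else the stock checkpoint for exp_name."""
    if not pretrain:  # covers both None (the argparse default) and ""
        return str(cached_path(DEFAULT_CKPTS[exp_name]))
    return pretrain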
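The new comments at the top of parse_args pair GPU VRAM sizes with suggested values for batch_size_per_gpu. Encoded as data, that guidance can be queried programmatically; this helper is purely illustrative and does not exist in the repo:

# Hypothetical helper: encodes the VRAM guidance from the comments above.
VRAM_GB_TO_BATCH = {8: 1000, 12: 1600, 16: 2000, 24: 3200}

def suggest_batch_size_per_gpu(vram_gb):
    """Largest commented preset that fits the given amount of VRAM."""
    fitting = [batch for gb, batch in VRAM_GB_TO_BATCH.items() if gb <= vram_gb]
    if not fitting:
        raise ValueError(f"No preset for GPUs under 8 GB (got {vram_gb} GB)")
    return max(fitting)

print(suggest_batch_size_per_gpu(16))  # -> 2000

For cards between two presets (say 20 GB), this picks the lower, safer value, 2000 in that case.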
