Commit

Update aydao config comments
Hans committed Nov 16, 2020
1 parent 52bcfef commit 24bbc1a
Showing 1 changed file with 10 additions and 2 deletions.
train.py: 12 changes (10 additions & 2 deletions)
@@ -213,16 +213,24 @@ def setup_training_options(
     args.G_args.conv_clamp = args.D_args.conv_clamp = 256 # clamp activations to avoid float16 overflow
 
     if cfg == 'aydao':
+        # disable path length and style mixing regularization
         args.loss_args.pl_weight = 0
         args.G_args.style_mixing_prob = None
+
+        # double generator capacity
         args.G_args.fmap_base = 32 << 10
         args.G_args.fmap_max = 1024
+
+        # enable top k training
         args.loss_args.G_top_k = True
+        # args.loss_args.G_top_k_gamma = 0.99 # takes ~70% of full training from scratch to decay to 0.5
+        # args.loss_args.G_top_k_gamma = 0.9862 # takes 12500 kimg to decay to 0.5 (~1/2 of total_kimg when training from scratch)
         args.loss_args.G_top_k_gamma = 0.9726 # takes 6250 kimg to decay to 0.5 (~1/4 of total_kimg when training from scratch)
         args.loss_args.G_top_k_frac = 0.5
-        args.minibatch_gpu = 2 # probably will need to set this pretty low with such a large G, higher values work better for top-k training though
-        # args.G_args.num_fp16_res = 6 # making more layers fp16 can help as well
 
+        # reduce in-memory size, you need a BIG GPU for this model
+        args.minibatch_gpu = 4 # probably will need to set this pretty low with such a large G, higher values work better for top-k training though
+        args.G_args.num_fp16_res = 6 # making more layers fp16 can help as well
 
     if cfg == 'cifar' or cfg.split('-')[-1] == 'complex':
         args.loss_args.pl_weight = 0 # disable path length regularization
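Note on the "double generator capacity" lines: fmap_base is raised to 32 << 10 and fmap_max to 1024, roughly double the NVlabs high-resolution defaults (16 << 10 and 512). The sketch below is illustrative, not code from this repo; it assumes per-resolution channel counts follow the StyleGAN2-style nf() clipping rule with fmap_decay=1.0 and fmap_min=1.

import numpy as np

# Illustrative: how fmap_base / fmap_max translate into per-resolution channel
# counts, assuming the StyleGAN2-style nf(stage) clipping rule (an assumption,
# not copied from this repo's networks code).
def nf(stage, fmap_base, fmap_max, fmap_decay=1.0, fmap_min=1):
    return int(np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max))

for res_log2 in range(2, 11):                       # 4x4 ... 1024x1024
    res = 2 ** res_log2
    default = nf(res_log2 - 1, fmap_base=16 << 10, fmap_max=512)
    aydao   = nf(res_log2 - 1, fmap_base=32 << 10, fmap_max=1024)
    print(f"{res:>4}x{res:<4} default={default:>4} aydao={aydao:>4}")

Doubling both knobs doubles the channel count at every resolution (e.g. 4x4 goes from 512 to 1024 feature maps, 1024x1024 from 32 to 64), which is why the config also moves more layers to fp16 and keeps minibatch_gpu small.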
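Note on the G_top_k_gamma comments: the quoted horizons are consistent with a kept fraction of the form max(G_top_k_frac, G_top_k_gamma ** t), where t advances once per 250 kimg; that step size is inferred from the numbers in the comments, not read from the loss code. A minimal sketch of the arithmetic:

import math

# Sketch only: reproduce the "decay to 0.5" horizons quoted in the comments,
# assuming frac(t) = max(G_top_k_frac, gamma ** t) with one decay step per
# 250 kimg (an inference from the quoted numbers, not the repo's loss code).
def kimg_to_reach(frac_target, gamma, kimg_per_step=250):
    steps = math.log(frac_target) / math.log(gamma)   # steps until gamma**t hits frac_target
    return steps * kimg_per_step

for gamma in (0.99, 0.9862, 0.9726):
    print(f"gamma={gamma}: ~{kimg_to_reach(0.5, gamma):.0f} kimg to decay to 0.5")

This prints roughly 17.2k, 12.5k and 6.2k kimg, matching the ~70% (of the default total_kimg = 25000), 12500 kimg, and 6250 kimg figures in the comments.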
