Skip to content

Commit

Permalink
[optim] Allow warmup used in conjunction with previous stepwise lr de…
Browse files Browse the repository at this point in the history
…cay.
  • Loading branch information
ruotianluo committed Jun 6, 2020
1 parent 3e4d52d commit 6c0af5b
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 0 deletions.
3 changes: 3 additions & 0 deletions captioning/utils/opts.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,9 @@ def parse_opt():
help='')


parser.add_argument('--use_warmup', action='store_true',
help='warm up the learing rate?')

parser.add_argument('--scheduled_sampling_start', type=int, default=-1,
help='at what iteration to start decay gt probability')
parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5,
Expand Down
3 changes: 3 additions & 0 deletions tools/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,9 @@ def train(opt):
epoch_done = False

start = time.time()
if opt.use_warmup and (iteration < opt.noamopt_warmup):
opt.current_lr = opt.learning_rate * (iteration+1) / opt.noamopt_warmup
utils.set_lr(optimizer, opt.current_lr)
# Load data from train split (0)
data = loader.get_batch('train')
print('Read data:', time.time() - start)
Expand Down

0 comments on commit 6c0af5b

Please sign in to comment.