diff --git a/trainers/baseline_trainer.py b/trainers/baseline_trainer.py
index d3a63d2..93164f3 100644
--- a/trainers/baseline_trainer.py
+++ b/trainers/baseline_trainer.py
@@ -113,6 +113,7 @@ def train_baselines():
         logger = Logger('../log/{}'.format(name), name)
 
         net = model()
+        optimizer = get_optimizer(net, lr=.01, pretrained=True, resnet=True if 'resnet' in name else False)
         net = nn.DataParallel(net.cuda())
 
         train_data.batch_size = batch
@@ -135,7 +136,7 @@ def train_baselines():
         t = time.time()
         for epoch in range(num_epoches):  # loop over the dataset multiple times
 
-            optimizer = get_optimizer(net, lr=.01, pretrained=True, resnet=True if 'resnet' in name else False)
+
             # train loss averaged every epoch
             total_epoch_loss = 0.0
diff --git a/util.py b/util.py
index cbf6740..0877db4 100644
--- a/util.py
+++ b/util.py
@@ -127,7 +127,7 @@ def evaluate(net, test_loader):
 
 
 def get_learning_rate(optimizer):
     lr=[]
     for param_group in optimizer.param_groups:
-        lr +=[ param_group['lr'] ]
+        lr +=[param_group['lr']]
     return lr
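
Note on the baseline_trainer.py hunk: constructing the optimizer inside the epoch loop (the removed line) rebuilt it from scratch every epoch, which discards SGD momentum buffers and resets the learning rate to its initial value; hoisting the call above the loop lets that state persist for the whole run, which is presumably the motivation for the move. A minimal sketch of the fixed shape, using torch.optim.SGD as a stand-in for the repo's get_optimizer helper (the model and data below are hypothetical):

import torch
import torch.nn as nn

net = nn.Linear(10, 2)  # hypothetical stand-in model
# Built once, before the epoch loop, so momentum state accumulates
# across epochs instead of being wiped at each epoch boundary.
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

for epoch in range(3):  # num_epoches in the real trainer
    for x, y in [(torch.randn(4, 10), torch.randint(0, 2, (4,)))]:
        optimizer.zero_grad()
        loss = nn.functional.cross_entropy(net(x), y)
        loss.backward()
        optimizer.step()

# Reading the current learning rate(s) back out, mirroring
# util.get_learning_rate: one entry per parameter group.
print([group['lr'] for group in optimizer.param_groups])  # [0.01]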