From 9f12851b2f322d5d7460eaeb335b656db749ae58 Mon Sep 17 00:00:00 2001
From: jxu7
Date: Fri, 9 Jun 2017 11:28:05 -0400
Subject: [PATCH] train baseline models

---
 datasets.py                      |  17 +++-
 planet_models/densenet_planet.py |  18 +++-
 trainers/baseline_trainer.py     | 167 +++++++++++++++++++++++++++++++
 trainers/train_densenet.py       |   6 +-
 trainers/train_pynet.py          |   3 +-
 util.py                          |  15 ++-
 6 files changed, 211 insertions(+), 15 deletions(-)
 create mode 100644 trainers/baseline_trainer.py

diff --git a/datasets.py b/datasets.py
index eac06fd..a7e257d 100644
--- a/datasets.py
+++ b/datasets.py
@@ -14,18 +14,27 @@ std = [0.16730586, 0.14391145, 0.13747531]
 
 
-class RandomVerticalFLip(object):
+class RandomVerticalFlip(object):
     def __call__(self, img):
         if random.random() < 0.5:
             img = img.transpose(Image.FLIP_TOP_BOTTOM)
         return img
 
 
+class RandomTranspose(object):
+    def __call__(self, img):
+        if random.random() < 0.5:
+            img = np.array(img)
+            img = img.transpose(1, 0, 2)
+            img = Image.fromarray(img)
+        return img
+
+
 class RandomRotate(object):
     def __call__(self, img):
         if random.random() < 0.2:
             img = np.array(img)
-            angle = np.random.randint(1, 90)
+            angle = np.random.randint(-45, 45)
             height, width = img.shape[0:2]
             mat = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1.0)
             img = cv2.warpAffine(img, mat, (height, width), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
@@ -197,7 +206,7 @@ def train_jpg_loader(batch_size=64, transform=ToTensor()):
         mode='Train',
         input_transform=transform
     )
-    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, )
+    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=3)
 
 
 def validation_jpg_loader(batch_size=64, transform=ToTensor()):
@@ -207,7 +216,7 @@ def validation_jpg_loader(batch_size=64, transform=ToTensor()):
         mode='Validation',
         input_transform=transform
     )
-    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)
+    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, num_workers=3)
 
 
 def test_jpg_loader(batch_size=128, transform=ToTensor()):
diff --git a/planet_models/densenet_planet.py b/planet_models/densenet_planet.py
index 16e3af3..230ed16 100644
--- a/planet_models/densenet_planet.py
+++ b/planet_models/densenet_planet.py
@@ -17,9 +17,21 @@ def densenet121(num_classes=17, pretrained=False):
     return model
 
 
-def densenet169(num_classes=17, pretrained=False, dropout=0.0):
-    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), num_classes=num_classes,
-                     drop_rate=dropout)
+def densenet169(num_classes=17, pretrained=False):
+    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), num_classes=num_classes)
     if pretrained:
         # load model dictionary
         model_dict = model.state_dict()
         # load pretrained model
         pretrained_dict = model_zoo.load_url(model_urls['densenet169'])
         # update model dictionary using pretrained model without classifier layer
         model_dict.update({key: pretrained_dict[key] for key in pretrained_dict.keys() if 'classifier' not in key})
         model.load_state_dict(model_dict)
     return model
 
 
+def densenet161(num_classes=17, pretrained=False):
+    model = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24), num_classes=num_classes)
+    if pretrained:
+        # load model dictionary
+        model_dict = model.state_dict()
diff --git a/trainers/baseline_trainer.py b/trainers/baseline_trainer.py
new file mode 100644
index 0000000..cdae476
--- /dev/null
+++ b/trainers/baseline_trainer.py
@@ -0,0 +1,167 @@
+import torch.nn as nn
+from torch.nn import functional as F
+from torch import optim
+from trainers.train_simplenet import evaluate
+from torchvision.transforms import RandomHorizontalFlip, RandomCrop, Compose, Scale, ToTensor, Normalize
+from planet_models.densenet_planet import densenet169, densenet161, densenet121
+from planet_models.resnet_planet import resnet18_planet, resnet34_planet, resnet50_planet, resnet152_planet
+from torch.autograd import Variable
+from datasets import RandomRotate, RandomVerticalFlip, RandomTranspose, train_jpg_loader, validation_jpg_loader, mean, std
+from util import Logger, f2_score
+import numpy as np
+import torch
+import time
+"""
+A baseline trainer that trains the following models:
+1. ResNet: 18, 34, 50, and 152 (from scratch)
+2. DenseNet: 169, 161, and 121 (from scratch)
+
+-------parameters---------
+    epochs: 80
+
+    batch size: 128, 128, 128, 72, 72, 72, 72
+
+    optimizer: SGD with momentum 0.9 (no Nesterov)
+
+    weight decay: 5e-4
+
+    learning rate: epochs  0-10: 0.1
+                   epochs 10-25: 0.01
+                   epochs 25-35: 0.005
+                   epochs 35-40: 0.001
+                   epochs 40-80: 0.0001
+
+    transformations: Rotate, Transpose, VerticalFlip, HorizontalFlip, RandomCrop
+"""
+
+
+models = [resnet18_planet, resnet34_planet, resnet50_planet, resnet152_planet, densenet169, densenet121, densenet161]
+batch_size = [128, 128, 128, 72, 72, 72, 72]  # one entry per model in models
+
+
+def lr_schedule(epoch, optimizer):
+    if 0 <= epoch < 10:
+        lr = 1e-1
+    elif 10 <= epoch < 25:
+        lr = 0.01
+    elif 25 <= epoch < 35:
+        lr = 0.005
+    elif 35 <= epoch < 40:
+        lr = 0.001
+    else:
+        lr = 0.0001
+
+    for para_group in optimizer.param_groups:
+        para_group['lr'] = lr
+
+
+def evaluate_train(model, val_data, criterion):
+    # evaluating
+    val_loss = 0.0
+    model.eval()
+    preds = []
+    targets = []
+    for batch_index, (val_x, val_y) in enumerate(val_data):
+        if torch.cuda.is_available():
+            val_y = val_y.cuda()
+        val_y = Variable(val_y, volatile=True)
+        val_output = evaluate(model, val_x)
+        val_loss += criterion(val_output, val_y)
+        val_output = F.sigmoid(val_output)
+        binary_y = val_output.data.cpu().numpy()
+        binary_y[binary_y > 0.2] = 1
+        binary_y[binary_y <= 0.2] = 0
+        preds.append(binary_y)
+        targets.append(val_y.data.cpu().numpy())
+    targets = np.concatenate(targets)
+    preds = np.concatenate(preds)
+    f2_scores = f2_score(targets, preds)
+    val_loss = val_loss.data[0] / (batch_index + 1)  # average over the number of batches
+    return val_loss, f2_scores
+
+
+def train_baselines(epoch):
+    transformations = Compose(
+        [
+            RandomHorizontalFlip(),
+            RandomVerticalFlip(),
+            RandomTranspose(),
+            RandomRotate(),
+            RandomCrop(224),
+            ToTensor(),
+            Normalize(mean=mean, std=std)
+        ]
+    )
+
+    criterion = nn.MultiLabelSoftMarginLoss()
+
+    for model, batch in zip(models, batch_size):
+        name = str(model).split()[1]
+        print('[!]Training %s' % name)
+        print('[!]Batch size %s' % batch)
+        logger = Logger(name=name, save_dir='../log/%s' % name)
+        model = nn.DataParallel(model().cuda())
+        optimizer = optim.SGD(momentum=0.9, lr=0.1, params=model.parameters(), weight_decay=5e-4)
+
+        train_data = train_jpg_loader(batch, transform=transformations)
+        val_data = validation_jpg_loader(batch, transform=Compose(
+            [
+                Scale(224),
+                ToTensor(),
+                Normalize(mean=mean, std=std)
+            ]
+        ))
+
+        # start training
+        best_loss = np.inf
+        patience = 0
+        start_time = time.time()
+        for i in range(epoch):
+            # training
+            training_loss = 0.0
+            # adjust learning rate for the current epoch i
+            lr_schedule(i, optimizer)
+            for batch_index, (target_x, target_y) in enumerate(train_data):
+                if torch.cuda.is_available():
+                    target_x, target_y = target_x.cuda(), target_y.cuda()
+                model.train()
+                target_x, target_y = Variable(target_x), Variable(target_y)
+                optimizer.zero_grad()
+                output = model(target_x)
+                loss = criterion(output, target_y)
+                training_loss += loss.data[0]
+                loss.backward()
+                optimizer.step()
+                if batch_index % 50 == 0:
+                    print('Training loss is {}'.format(loss.data[0]))
+            print('Finished epoch {}'.format(i))
+            training_loss /= (batch_index + 1)  # average over the number of batches
+
+            # evaluating
+            val_loss, f2_scores = evaluate_train(model, val_data, criterion)
+
+            if best_loss > val_loss:
+                print('Saving model...')
+                best_loss = val_loss
+                torch.save(model.state_dict(), '../models/{}.pth'.format(name))
+                patience = 0
+            else:
+                patience += 1
+                print('Patience: {}'.format(patience))
+                print('Best loss {}, current loss {}'.format(best_loss, val_loss))
+
+            print('Evaluation loss is {}, Training loss is {}'.format(val_loss, training_loss))
+            print('F2 Score is %s' % f2_scores)
+
+            logger.add_record('train_loss', training_loss)
+            logger.add_record('evaluation_loss', val_loss)
+            logger.add_record('f2_score', f2_scores)
+
+            # save for every epoch
+            logger.save()
+            logger.save_plot()
+
+        logger.save_time(start_time, time.time())
+if __name__ == '__main__':
+    train_baselines(80)
diff --git a/trainers/train_densenet.py b/trainers/train_densenet.py
index f184b82..a211cc9 100644
--- a/trainers/train_densenet.py
+++ b/trainers/train_densenet.py
@@ -15,11 +15,11 @@ def __call__(self, img):
 
 
 def lr_scheduler(epoch, optimizer):
-    if epoch <= 20:
+    if epoch <= 10:
         lr = 1e-1
-    elif 20 < epoch <= 40:
+    elif 10 < epoch <= 30:
         lr = 1e-2
-    elif 40 < epoch <= 80:
+    elif 30 < epoch <= 45:
         lr = 5e-3
     else:
         lr = 1e-3
diff --git a/trainers/train_pynet.py b/trainers/train_pynet.py
index ea6fb5f..16fd20d 100644
--- a/trainers/train_pynet.py
+++ b/trainers/train_pynet.py
@@ -43,10 +43,9 @@ def train(epoch):
             Scale(78),
             RandomHorizontalFlip(),
-            RandomVerticalFLip(),
+            RandomVerticalFlip(),
             RandomRotate(),
             RandomCrop(72),
-
             ToTensor(),
             Normalize(mean, std)
         ]
     ))
diff --git a/util.py b/util.py
index 010b8c8..cdfe29e 100644
--- a/util.py
+++ b/util.py
@@ -101,12 +101,21 @@ def save_plot(self):
         plt.plot(np.arange(len(train_loss)), train_loss, color='red', label='train_loss')
         plt.plot(np.arange(len(eval_loss)), eval_loss, color='blue', label='eval_loss')
         plt.legend(loc='best')
-
-        plt.savefig('../log/%s_losses.jpg' % self.name)
+        # fig_save_dir = os.path.join(self.save_dir, '%s.jpg' % self.name)
+        plt.savefig(os.path.join(self.save_dir, 'loss.jpg'))
         plt.figure()
         plt.plot(np.arange(len(f2_scores)), f2_scores)
-        plt.savefig('../log/%s_fcscore.jpg' % self.name)
+        plt.savefig(os.path.join(self.save_dir, 'f2_score.jpg'))
         plt.close('all')
 
+    def save_time(self, start_time, end_time):
+        with open(os.path.join(self.save_dir, 'time.txt'), 'w') as f:
+            f.write('start time, end time, duration\n')
+            f.write('{}, {}, {}'.format(start_time, end_time, end_time - start_time))
+
+if __name__ == '__main__':
+    from planet_models.resnet_planet import resnet14_planet
+    a = resnet14_planet()
+    print(str(resnet14_planet).split(' ')[1])
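
Usage note: the checkpoints that baseline_trainer.py writes to ../models/{name}.pth are state dicts of models wrapped in nn.DataParallel, so every key carries a 'module.' prefix. A minimal loading sketch for later evaluation (resnet34_planet and the literal path are only examples mirroring the save call above):

    import torch
    from torch import nn
    from planet_models.resnet_planet import resnet34_planet

    # option 1: rebuild the same DataParallel wrapper and load the state dict as saved
    model = nn.DataParallel(resnet34_planet().cuda())
    model.load_state_dict(torch.load('../models/resnet34_planet.pth'))
    model.eval()

    # option 2: strip the 'module.' prefix to load into a bare (non-DataParallel) model
    state = torch.load('../models/resnet34_planet.pth')
    bare = resnet34_planet()
    bare.load_state_dict({k.replace('module.', '', 1): v for k, v in state.items()})
    bare.eval()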