Commit: fpnet62_wd_1e-4_sgd
Junhong Xu committed Jun 7, 2017
1 parent c417d47 commit 5a49c2a
Showing 2 changed files with 29 additions and 9 deletions.
9 changes: 5 additions & 4 deletions planet_models/fpn.py
@@ -66,8 +66,9 @@ def forward(self, x):
 
 class FPNet(nn.Module):
 
-    def __init__(self, block, layers, input_channels=3, num_classes=17):
+    def __init__(self, block, layers, input_channels=3, num_classes=17, dropout_rate=0.2):
         self.inplanes = 32
+        self.dropout_rate = dropout_rate
         super(FPNet, self).__init__()
         self.conv1 = nn.Sequential(
             _make_conv_bn_elu(in_channels=input_channels, out_channels=32, kernel_size=3, stride=2, padding=0)
@@ -96,9 +96,9 @@ def __init__(self, block, layers, input_channels=3, num_classes=17):
         self.pool3 = nn.AdaptiveAvgPool2d(1)
 
         # classifier
-        self.cls_1 = nn.Sequential(_make_linear_bn_elu(256, 512))
-        self.cls_2 = nn.Sequential(_make_linear_bn_elu(256, 512))
-        self.cls_3 = nn.Sequential(_make_linear_bn_elu(256, 512))
+        self.cls_1 = nn.Sequential(_make_linear_bn_elu(256, 512, dropout_rate=self.dropout_rate))
+        self.cls_2 = nn.Sequential(_make_linear_bn_elu(256, 512, dropout_rate=self.dropout_rate))
+        self.cls_3 = nn.Sequential(_make_linear_bn_elu(256, 512, dropout_rate=self.dropout_rate))
 
         # final prediction
         self.fc = nn.Linear(512*3, num_classes)
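The body of _make_linear_bn_elu is not shown in this diff, only its call sites. A minimal sketch of a helper compatible with the new dropout_rate keyword, assuming a Linear -> BatchNorm -> ELU -> Dropout ordering inferred from the name (the ordering and the inplace flag are assumptions, not taken from the commit):

import torch.nn as nn

def _make_linear_bn_elu(in_features, out_features, dropout_rate=0.2):
    # Fully connected block: affine layer, batch norm over features,
    # ELU activation, then dropout at the configured rate.
    # NOTE: layer ordering is assumed; only the helper's name and the new
    # dropout_rate argument appear in the diff.
    return nn.Sequential(
        nn.Linear(in_features, out_features),
        nn.BatchNorm1d(out_features),
        nn.ELU(inplace=True),
        nn.Dropout(p=dropout_rate),
    )

Returning an nn.Sequential keeps the call sites above valid, since nn.Sequential(_make_linear_bn_elu(...)) simply nests one Sequential inside another.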
29 changes: 24 additions & 5 deletions trainers/train_pynet.py
@@ -4,7 +4,7 @@
 from torchvision.transforms import *
 from planet_models.fpn import FPNet, Bottleneck
 
-NAME = 'fpnet33_wd_5e-4'
+NAME = 'fpnet62_wd_1e-4_sgd'
 
 
 class RandomVerticalFLip(object):
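RandomVerticalFLip appears here only as unchanged context; its body lies outside the hunk. A plausible minimal implementation of such a transform for PIL images, assuming the usual 0.5 flip probability (both the probability and the method body are assumptions):

import random
from PIL import Image

class RandomVerticalFLip(object):
    """Vertically flip a PIL image with probability 0.5 (assumed)."""
    def __call__(self, img):
        if random.random() < 0.5:
            return img.transpose(Image.FLIP_TOP_BOTTOM)
        return img

A custom transform like this mirrors torchvision's RandomHorizontalFlip, which at the time of this commit had no vertical counterpart.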
@@ -23,11 +23,26 @@ def get_optimizer(model, pretrained=True, lr=5e-5, weight_decay=5e-5):
     return optim.Adam(params=params, lr=lr, weight_decay=weight_decay)
 
 
+def lr_schedule(epoch, optimizer):
+    if epoch < 10:
+        lr = 1e-1
+    elif 10 <= epoch <= 20:
+        lr = 5e-2
+    elif 20 < epoch <= 45:
+        lr = 1e-3
+    else:
+        lr = 1e-4
+
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = lr
+
+
 def train(epoch):
     criterion = MultiLabelSoftMarginLoss()
-    net = FPNet(Bottleneck, [2, 4, 15, 2])
+    net = FPNet(Bottleneck, [2, 4, 10, 2], dropout_rate=0.4)
     logger = Logger('../log/', NAME)
-    optimizer = get_optimizer(net, False, 1e-4, 5e-4)
+    # optimizer = get_optimizer(net, False, 1e-4, 5e-4)
+    optimizer = optim.SGD(net.parameters(), lr=1e-1, weight_decay=1e-4, momentum=0.9)
     net.cuda()
     net = torch.nn.DataParallel(net, device_ids=[0, 1])
     train_data_set = train_jpg_loader(128, transform=Compose(
@@ -53,6 +68,8 @@ def train(epoch):
     for i in range(epoch):
         # training
         training_loss = 0.0
+        # adjust the learning rate for the current epoch
+        lr_schedule(i, optimizer)
         for batch_index, (target_x, target_y) in enumerate(train_data_set):
             if torch.cuda.is_available():
                 target_x, target_y = target_x.cuda(), target_y.cuda()
@@ -105,8 +122,10 @@ def train(epoch):
         logger.add_record('train_loss', training_loss)
         logger.add_record('evaluation_loss', val_loss)
         logger.add_record('f2_score', f2_scores)
-    logger.save()
-    logger.save_plot()
+
+        # save for every epoch
+        logger.save()
+        logger.save_plot()
 
 
 if __name__ == '__main__':
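The new lr_schedule implements a plain step decay: 1e-1 for the first 10 epochs, 5e-2 through epoch 20, 1e-3 through epoch 45, and 1e-4 afterwards. The same schedule can be expressed with torch.optim.lr_scheduler.LambdaLR; the scheduler API shipped in PyTorch releases later than this June 2017 commit, so this is an equivalent sketch rather than what the commit uses (net and n_epochs stand in for the surrounding training code):

import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

# Multipliers relative to the base lr of 1e-1, mirroring lr_schedule():
# epochs 0-9 -> 1e-1, 10-20 -> 5e-2, 21-45 -> 1e-3, 46+ -> 1e-4.
def step_decay(epoch):
    if epoch < 10:
        return 1.0
    elif epoch <= 20:
        return 0.5
    elif epoch <= 45:
        return 0.01
    return 0.001

optimizer = optim.SGD(net.parameters(), lr=1e-1, weight_decay=1e-4, momentum=0.9)
scheduler = LambdaLR(optimizer, lr_lambda=step_decay)

for i in range(n_epochs):
    # ... run one epoch of training and validation ...
    scheduler.step()  # advance the schedule after each epoch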
