Commit 2002932: fpn
jxu7 committed Jun 8, 2017
1 parent dcb34de

Showing 12 changed files with 66 additions and 1,156 deletions.
8 changes: 7 additions & 1 deletion datasets.py
@@ -8,6 +8,7 @@
 import numpy as np
 from skimage import io
 from sklearn.preprocessing import MinMaxScaler
+import cv2

 mean = [0.31151703, 0.34061992, 0.29885209]
 std = [0.16730586, 0.14391145, 0.13747531]
@@ -23,7 +24,12 @@ def __call__(self, img):
 class RandomRotate(object):
     def __call__(self, img):
         if random.random() < 0.2:
-            img = img.rotate(45)
+            img = np.array(img)
+            angle = np.random.randint(1, 90)
+            height, width = img.shape[0:2]
+            mat = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1.0)
+            img = cv2.warpAffine(img, mat, (height, width), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
+            img = Image.fromarray(img)
         return img


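The new RandomRotate swaps PIL's fixed rotate(45) for a random-angle OpenCV warp with reflection padding. A minimal usage sketch follows; the file name and the transform order are assumptions borrowed from the trainers in this commit. Note that cv2.warpAffine takes its output size as (width, height), so passing (height, width) is only harmless for the square crops used here.

# Hedged sketch: exercise datasets.RandomRotate in a 2017-era torchvision
# pipeline. 'train_10029.jpg' is a hypothetical input file.
from PIL import Image
from torchvision.transforms import Compose, Scale, RandomCrop, RandomHorizontalFlip, ToTensor
from datasets import RandomRotate

transform = Compose([
    Scale(256),               # resize so the shorter side is 256
    RandomHorizontalFlip(),
    RandomCrop(224),          # square crop keeps the warpAffine size swap harmless
    RandomRotate(),           # 20% chance of a random 1-89 degree rotation
    ToTensor(),
])
img = Image.open('train_10029.jpg')
x = transform(img)            # 3x224x224 FloatTensor
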
511 changes: 0 additions & 511 deletions log/resnet-50.csv

This file was deleted.

Binary file removed log/resnet-50_fcscore.jpg

Binary file removed log/resnet-50_losses.jpg

601 changes: 0 additions & 601 deletions log/simplenet.csv

This file was deleted.

Binary file removed log/simplenet_fcscore.jpg

Binary file removed log/simplenet_losses.jpg
3 changes: 2 additions & 1 deletion planet_models/densenet_planet.py
@@ -16,7 +16,8 @@ def densenet121(num_classes=17, pretrained=False):
         model.load_state_dict(model_dict)
     return model

-def densenet169(num_classes=17, pretrained=False, dropout=0.2):
+
+def densenet169(num_classes=17, pretrained=False, dropout=0.0):
     model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), num_classes=num_classes,
                      drop_rate=dropout)
     if pretrained:
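
The default dropout drops from 0.2 to 0.0. A quick instantiation check, as a hedged sketch: the 224 input matches the RandomCrop in train_densenet.py, and the Variable wrapper reflects the pre-0.4 PyTorch API this repo targets.

# Sketch, not part of the commit: build the updated densenet169 and run a
# dummy forward pass.
import torch
from torch.autograd import Variable  # pre-0.4 PyTorch API
from planet_models.densenet_planet import densenet169

net = densenet169(num_classes=17, pretrained=False, dropout=0.0)
x = Variable(torch.randn(2, 3, 224, 224))
logits = net(x)  # shape (2, 17), one logit per Planet label
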
43 changes: 26 additions & 17 deletions planet_models/fpn.py
@@ -80,29 +80,32 @@ def __init__(self, block, layers, input_channels=3, num_classes=17, dropout_rate
         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)  # 8*8*256
         self.layer3 = self._make_layer(block, 256, layers[2], stride=2)  # 4*4*512
         self.layer4 = self._make_layer(block, 512, layers[3], stride=2)  # 2*2*1024
+        # downsampling prediction
+        self.d_pool = nn.AdaptiveAvgPool2d(1)
+        self.cls_d = nn.Sequential(_make_linear_bn_elu(in_units=1024, output_units=512))

         # upsampling
         self.layer3_up = nn.Sequential(_make_conv_bn_elu(self.inplanes//2, self.inplanes, kernel_size=1, stride=1, padding=0))  # 4*4*1024
         self.layer2_up = nn.Sequential(_make_conv_bn_elu(self.inplanes//4, self.inplanes//2, kernel_size=1, stride=1, padding=0))  # 8*8*512
         self.layer1_up = nn.Sequential(_make_conv_bn_elu(self.inplanes//8, self.inplanes//4, kernel_size=1, stride=1, padding=0))  # 16*16*256

         # final feature generation
-        self.f1 = nn.Sequential(_make_conv_bn_elu(256, 256, kernel_size=3, stride=2))  # 8*8*256
-        self.f2 = nn.Sequential(_make_conv_bn_elu(512, 256, kernel_size=3, stride=2))  # 4*4*256
-        self.f3 = nn.Sequential(_make_conv_bn_elu(1024, 256))  # 4*4*256
+        self.f1 = nn.Sequential(_make_conv_bn_elu(256, 512, kernel_size=3, stride=2))  # 8*8*512
+        self.f2 = nn.Sequential(_make_conv_bn_elu(512, 512, kernel_size=3, stride=2))  # 4*4*512
+        self.f3 = nn.Sequential(_make_conv_bn_elu(1024, 512))  # 4*4*512

         # reduce dimensionality before classifier 1*1*256
         self.pool1 = nn.AdaptiveAvgPool2d(1)
         self.pool2 = nn.AdaptiveAvgPool2d(1)
         self.pool3 = nn.AdaptiveAvgPool2d(1)

         # classifier
-        self.cls_1 = nn.Sequential(_make_linear_bn_elu(256, 512, dropout_rate=self.dropout_rate))
-        self.cls_2 = nn.Sequential(_make_linear_bn_elu(256, 512, dropout_rate=self.dropout_rate))
-        self.cls_3 = nn.Sequential(_make_linear_bn_elu(256, 512, dropout_rate=self.dropout_rate))
+        self.cls_1 = nn.Sequential(_make_linear_bn_elu(512, 512, dropout_rate=self.dropout_rate))
+        self.cls_2 = nn.Sequential(_make_linear_bn_elu(512, 512, dropout_rate=self.dropout_rate))
+        self.cls_3 = nn.Sequential(_make_linear_bn_elu(512, 512, dropout_rate=self.dropout_rate))

-        # final prediction
-        self.fc = nn.Linear(512*3, num_classes)
+        # logit
+        self.fc = nn.Linear(512*4, num_classes)

         for m in self.modules():
             if isinstance(m, nn.Conv2d):
@@ -159,20 +162,26 @@ def forward(self, x):
         f3 = self.f3(m3)  # 2*2*256

         # max pool
-        f1 = self.pool1(f1)  # 256
+        f1 = self.pool1(f1)  # 512
         f1 = f1.view(f1.size(0), -1)
-        f2 = self.pool2(f2)  # 256
+        f2 = self.pool2(f2)  # 512
         f2 = f2.view(f2.size(0), -1)
-        f3 = self.pool3(f3)  # 256
+        f3 = self.pool3(f3)  # 512
         f3 = f3.view(f3.size(0), -1)
+        # downsampling classifier
+        d_out = self.d_pool(d4)
+        d_out = d_out.view(d_out.size(0), -1)
+        d_out = self.cls_d(d_out)  # 512

-        # classifier
-        cls1 = self.cls_1(f1)
-        cls2 = self.cls_2(f2)
-        cls3 = self.cls_3(f3)
-        # concatenate
-        cls = torch.cat((cls1, cls2, cls3, ), dim=1)  # 512 * 3
+        # logit
+        cls1 = self.cls_1(f1)  # 512
+        cls2 = self.cls_2(f2)  # 512
+        cls3 = self.cls_3(f3)  # 512
+
+        cls = torch.cat((cls1, cls2, cls3, d_out), dim=1)

         logit = self.fc(cls)

         prob = F.sigmoid(logit)
         return logit, prob

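The head now concatenates four 512-d vectors, the three pyramid classifiers plus the new downsampling branch cls_d, which is why fc grows from 512*3 to 512*4 inputs. A shape-only sketch of the concatenation; the tensors are dummies standing in for the pooled features.

# Dummy tensors standing in for the pooled pyramid features; only the
# shapes matter here.
import torch

batch = 8
cls1 = torch.randn(batch, 512)   # cls_1(pool1(f1))
cls2 = torch.randn(batch, 512)   # cls_2(pool2(f2))
cls3 = torch.randn(batch, 512)   # cls_3(pool3(f3))
d_out = torch.randn(batch, 512)  # cls_d(d_pool(layer4 output))
cls = torch.cat((cls1, cls2, cls3, d_out), dim=1)
print(cls.size())  # torch.Size([8, 2048]) == 512*4, the new fc input width
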
30 changes: 23 additions & 7 deletions trainers/train_densenet.py
@@ -4,7 +4,7 @@
 from torchvision.transforms import *
 from planet_models.densenet_planet import densenet121, densenet169

-NAME = 'pretrained_densenet169_wd_1e-4'
+NAME = 'pretrained_densenet169_wd_5e-4_adam'


 class RandomVerticalFLip(object):
@@ -14,6 +14,20 @@ def __call__(self, img):
         return img


+def lr_scheduler(epoch, optimizer):
+    if epoch <= 10:
+        lr = 5e-4
+    elif 10 < epoch <= 25:
+        lr = 1e-4
+    elif 25 < epoch <= 45:
+        lr = 9e-5
+    else:
+        lr = 5e-5
+
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = lr
+
+
 def get_optimizer(model, pretrained=True, lr=5e-5, weight_decay=5e-5):
     if pretrained:
         params = [
@@ -33,17 +47,19 @@ def train(epoch):
     net = densenet169(pretrained=False)
     logger = Logger('../log/', NAME)
     # optimizer = optim.Adam(lr=5e-4, params=net.parameters())
-    optimizer = get_optimizer(net, False, 1e-4, 1e-4)
+    # optimizer = get_optimizer(net, False, 1e-4, 1e-4)
+    optimizer = optim.Adam(params=net.parameters(), lr=5e-4, weight_decay=5e-4)
     net.cuda()
     net = torch.nn.DataParallel(net, device_ids=[0, 1])
     # resnet.load_state_dict(torch.load('../models/simplenet_v3.pth'))
-    train_data_set = train_jpg_loader(64, transform=Compose(
+    train_data_set = train_jpg_loader(72, transform=Compose(
         [

             Scale(256),
             RandomHorizontalFlip(),
             RandomVerticalFLip(),
             RandomCrop(224),
+            RandomRotate(),
             ToTensor(),
             Normalize(mean, std)
         ]
@@ -59,7 +75,7 @@ def train(epoch):
     patience = 0
     for i in range(epoch):
         # training
-        # lr_scheduler(optimizer, epoch)
+        lr_scheduler(epoch, optimizer)
         training_loss = 0.0
         for batch_index, (target_x, target_y) in enumerate(train_data_set):
             if torch.cuda.is_available():
@@ -110,11 +126,11 @@ def train(epoch):
         print('Evaluation loss is {}, Training loss is {}'.format(val_loss, training_loss))
         print('F2 Score is %s' % (f2_scores))

-        logger.add_record('train_loss', loss.data[0])
+        logger.add_record('train_loss', training_loss)
         logger.add_record('evaluation_loss', val_loss)
         logger.add_record('f2_score', f2_scores)
-        logger.save()
-        logger.save_plot()
+    logger.save()
+    logger.save_plot()


 if __name__ == '__main__':
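
The commit switches back to plain Adam and drives it with the newly added step schedule. A hedged sketch of how the two interact, assuming net and lr_scheduler from train_densenet.py are in scope:

# Sketch: the scheduler overwrites Adam's lr in place at each epoch.
import torch.optim as optim

optimizer = optim.Adam(params=net.parameters(), lr=5e-4, weight_decay=5e-4)
for epoch in (1, 11, 26, 46):
    lr_scheduler(epoch, optimizer)
    print(epoch, optimizer.param_groups[0]['lr'])
# 1 -> 5e-4, 11 -> 1e-4, 26 -> 9e-5, 46 -> 5e-5
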
16 changes: 8 additions & 8 deletions trainers/train_pynet.py
@@ -4,7 +4,7 @@
 from torchvision.transforms import *
 from planet_models.fpn import FPNet, Bottleneck

-NAME = 'fpnet62_wd_1e-4_adam_rotate'
+NAME = 'fpnet30_wd_5e-4_adam'


 def get_optimizer(model, pretrained=True, lr=5e-5, weight_decay=5e-5):
@@ -18,21 +18,21 @@ def get_optimizer(model, pretrained=True, lr=5e-5, weight_decay=5e-5):

 def lr_schedule(epoch, optimizer):
     if epoch < 10:
         lr = 6e-4
-    elif 10 <= epoch <= 20:
-        lr = 3e-4
-    elif 25 < epoch <= 45:
-        lr = 1e-4
+    elif 10 <= epoch <= 40:
+        lr = 9e-5
+    elif 40 < epoch <= 80:
+        lr = 6e-5
     else:
-        lr = 5e-5
+        lr = 3e-5

     for param_group in optimizer.param_groups:
         param_group['lr'] = lr


 def train(epoch):
     criterion = MultiLabelSoftMarginLoss()
-    net = FPNet(Bottleneck, [3, 4, 6, 3], dropout_rate=0.4)
+    net = FPNet(Bottleneck, [2, 2, 4, 2], dropout_rate=0.15)
     logger = Logger('../log/', NAME)
     # optimizer = get_optimizer(net, False, 1e-4, 5e-4)
     optimizer = optim.Adam(net.parameters(), lr=5e-4, weight_decay=5e-4)
@@ -44,7 +44,7 @@ def train(epoch):
             Scale(78),
             RandomHorizontalFlip(),
             RandomVerticalFLip(),
-            # RandomRotate(),
+            RandomRotate(),
             RandomCrop(72),
             ToTensor(),
             Normalize(mean, std)
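
This run shrinks the backbone from the ResNet-50-style [3, 4, 6, 3] layout to [2, 2, 4, 2] (30 bottleneck convolutions, presumably the "fpnet30" in the new run name) and cuts dropout to 0.15. A hedged instantiation sketch at the 72x72 crop size used above:

# Sketch, not from the commit: dummy forward pass through the smaller FPNet.
import torch
from torch.autograd import Variable  # pre-0.4 PyTorch API
from planet_models.fpn import FPNet, Bottleneck

net = FPNet(Bottleneck, [2, 2, 4, 2], dropout_rate=0.15)
logit, prob = net(Variable(torch.randn(4, 3, 72, 72)))
print(logit.size(), prob.size())  # both (4, 17): raw logits and sigmoid probs
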
10 changes: 0 additions & 10 deletions util.py
@@ -110,13 +110,3 @@ def save_plot(self):

         plt.close('all')

-
-if __name__ == '__main__':
-    import cv2
-    img = cv2.imread('dog.jpg')
-    img = cv2.resize(img, (256, 256))
-    height, width = img.shape[0:2]
-    mat = cv2.getRotationMatrix2D((width / 2, height / 2), 45, 1.0)
-    img = cv2.warpAffine(img, mat, (height, width), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
-    cv2.imshow('frame', img)
-    cv2.waitKey()
