train_regression.py
import torch
import torch.nn as nn
import torch.optim as optim
import configs
from data.qmul_loader import get_batch, train_people, test_people
from io_utils import parse_args_regression, get_resume_file
from methods.DKT_regression import DKT
from methods.feature_transfer_regression import FeatureTransfer
import backbone
import os
import numpy as np
# Parse CLI arguments and seed NumPy/PyTorch for reproducibility.
params = parse_args_regression('train_regression')
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# Create the checkpoint directory for this dataset, then point checkpoint_dir
# at the model/method-specific prefix used when saving.
params.checkpoint_dir = '%scheckpoints/%s/' % (configs.save_dir, params.dataset)
if not os.path.isdir(params.checkpoint_dir):
    os.makedirs(params.checkpoint_dir)
params.checkpoint_dir = '%scheckpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)
# Shared convolutional feature extractor (backbone) on the GPU.
bb = backbone.Conv3().cuda()

# Select the regression method: Deep Kernel Transfer (DKT) or plain feature transfer.
if params.method == 'DKT':
    model = DKT(bb).cuda()
elif params.method == 'transfer':
    model = FeatureTransfer(bb).cuda()
else:
    raise ValueError('Unrecognised method')

# Optimise both the method-specific head and the feature extractor with Adam.
optimizer = torch.optim.Adam([{'params': model.model.parameters(), 'lr': 0.001},
                              {'params': model.feature_extractor.parameters(), 'lr': 0.001}])
# Train for the requested number of epochs, then save the final checkpoint.
for epoch in range(params.stop_epoch):
    model.train_loop(epoch, optimizer)

model.save_checkpoint(params.checkpoint_dir)
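
# Example invocation (a sketch only: the flag names below are assumed to mirror
# the params attributes read above; the actual flags are defined in
# io_utils.parse_args_regression and may differ):
#   python train_regression.py --method DKT --dataset QMUL --seed 1 --stop_epoch 100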