train_graph_prognosis.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
import argparse
import logging
import os
from datetime import datetime

from utils.logger import setlogger
from utils.train_utils_graph import train_utils

args = None


def parse_args():
    parser = argparse.ArgumentParser(description='Train')

    # basic parameters
    parser.add_argument('--model_name', type=str, default='GAT', help='the name of the model')
    parser.add_argument('--data_name', type=str, default='CMAPSS_graph', help='the name of the dataset')
    parser.add_argument('--data_file', type=str, default='FD001', help='the file of the data')
    parser.add_argument('--data_dir', type=str, default='./data/CMAPSS/', help='the directory of the data')
    parser.add_argument('--monitor_acc', type=str, default='RUL', help='the performance score')
    parser.add_argument('--cuda_device', type=str, default='0', help='assign device')
    parser.add_argument('--checkpoint_dir', type=str, default='./checkpoint',
                        help='the directory to save the model')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size of the training process')
    parser.add_argument('--num_workers', type=int, default=0, help='the number of worker processes for data loading')

    # define the task
    parser.add_argument('--task', choices=['Graph'], type=str,
                        default='Graph', help='graph regression only')
    parser.add_argument('--pooltype', choices=['TopKPool', 'EdgePool', 'ASAPool', 'SAGPool'], type=str,
                        default='TopKPool', help='the graph pooling type for the Graph task')

    # optimization information
    parser.add_argument('--layer_num_last', type=int, default=0, help='the number of last layers to unfreeze')
    parser.add_argument('--opt', type=str, choices=['sgd', 'adam'], default='adam', help='the optimizer')
    parser.add_argument('--lr', type=float, default=0.001, help='the initial learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='the momentum for sgd')
    parser.add_argument('--weight_decay', type=float, default=1e-5, help='the weight decay')
    parser.add_argument('--lr_scheduler', type=str, choices=['step', 'exp', 'stepLR', 'fix', 'cos'], default='step',
                        help='the learning rate schedule')
    parser.add_argument('--gamma', type=float, default=0.1, help='learning rate scheduler parameter for step and exp')
    parser.add_argument('--steps', type=str, default='120, 160', help='the learning rate decay steps for step and stepLR')

    # save, load and display information
    parser.add_argument('--resume', type=str, default='', help='the directory of the model to resume training from')
    parser.add_argument('--max_model_num', type=int, default=1, help='the number of most recent models to keep')
    parser.add_argument('--max_epoch', type=int, default=180, help='max number of epochs')
    parser.add_argument('--print_step', type=int, default=50, help='the interval (in steps) for logging training information')

    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_device.strip()

    # prepare the saving path for the model
    sub_dir = args.model_name + '_' + args.pooltype + '_' + datetime.strftime(datetime.now(), '%m%d-%H%M%S')
    save_dir = os.path.join(args.checkpoint_dir, sub_dir)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # set the logger
    setlogger(os.path.join(save_dir, 'train.log'))

    # save the args
    for k, v in args.__dict__.items():
        logging.info("{}: {}".format(k, v))

    trainer = train_utils(args, save_dir)
    trainer.setup()
    trainer.train()
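
# Usage sketch (not part of the original script; the exact data layout is an
# assumption): with the CMAPSS graph data prepared under ./data/CMAPSS/, the
# trainer would typically be launched from the command line, for example:
#
#   python train_graph_prognosis.py --model_name GAT --data_file FD001 \
#       --pooltype TopKPool --batch_size 256 --max_epoch 180
#
# Checkpoints and the train.log file are then written to a timestamped
# sub-directory under --checkpoint_dir (./checkpoint by default).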