train.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # set before importing tensorflow so it takes effect

import time
import numpy as np
import tensorflow as tf
from tensorflow.keras import optimizers

from utils import *
from models import GCN, MLP
from config import args

print('tf version:', tf.__version__)
assert tf.__version__.startswith('2.')
# set random seed
seed = 123
np.random.seed(seed)
tf.random.set_seed(seed)
# load data
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(args.dataset)
print('adj:', adj.shape)
print('features:', features.shape)
print('y:', y_train.shape, y_val.shape, y_test.shape)
print('mask:', train_mask.shape, val_mask.shape, test_mask.shape)

# row-normalize the features: D^-1 @ X, returned as a (coords, values, shape) sparse tuple
features = preprocess_features(features)  # for Cora: [49216, 2], [49216], [2708, 1433]
print('features coordinates:', features[0].shape)
print('features data:', features[1].shape)
print('features shape:', features[2])
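
# NOTE: preprocess_features is defined in utils and not shown here. Below is a
# minimal dense sketch of the row normalization it presumably performs — an
# assumption based on the D^-1 @ X comment above, not the actual utils code.
def row_normalize_sketch(x):
    # x: dense [num_nodes, num_features] array; divide each row by its sum,
    # i.e. compute D^-1 @ X with D the diagonal matrix of row sums
    rowsum = x.sum(axis=1, keepdims=True)
    rowsum[rowsum == 0] = 1.  # avoid division by zero for all-zero rows
    return x / rowsum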
if args.model == 'gcn':
    # D^-0.5 A D^-0.5
    support = [preprocess_adj(adj)]
    num_supports = 1
    model_func = GCN
elif args.model == 'gcn_cheby':
    support = chebyshev_polynomials(adj, args.max_degree)
    num_supports = 1 + args.max_degree
    model_func = GCN
elif args.model == 'dense':
    support = [preprocess_adj(adj)]  # not used
    num_supports = 1
    model_func = MLP
else:
    raise ValueError('Invalid argument for model: ' + str(args.model))
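
# NOTE: preprocess_adj also lives in utils. A dense sketch of the
# renormalization trick it presumably applies — an assumption following
# Kipf & Welling's GCN, A_hat = D^-0.5 (A + I) D^-0.5 with self-loops
# added before the symmetric normalization.
def normalize_adj_sketch(adj):
    # adj: dense [num_nodes, num_nodes] adjacency matrix
    adj_self = adj + np.eye(adj.shape[0])            # add self-loops: A + I
    d_inv_sqrt = 1. / np.sqrt(adj_self.sum(axis=1))  # entries of D^-0.5
    # scale row i by d_i^-0.5 and column j by d_j^-0.5
    return adj_self * d_inv_sqrt[:, None] * d_inv_sqrt[None, :]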
# create the model via the constructor selected above
model = model_func(input_dim=features[2][1], output_dim=y_train.shape[1],
                   num_features_nonzero=features[1].shape)  # input_dim = 1433 for Cora

train_label = tf.convert_to_tensor(y_train)
train_mask = tf.convert_to_tensor(train_mask)
val_label = tf.convert_to_tensor(y_val)
val_mask = tf.convert_to_tensor(val_mask)
test_label = tf.convert_to_tensor(y_test)
test_mask = tf.convert_to_tensor(test_mask)
features = tf.SparseTensor(*features)
# convert every support matrix (one for 'gcn', max_degree + 1 for 'gcn_cheby')
support = [tf.cast(tf.SparseTensor(*s), dtype=tf.float32) for s in support]
num_features_nonzero = features.values.shape
dropout = args.dropout

optimizer = optimizers.Adam(learning_rate=1e-2)  # 'lr' is deprecated in TF2
for epoch in range(args.epochs):
    # forward pass records gradients; the model returns masked loss and accuracy
    with tf.GradientTape() as tape:
        loss, acc = model((features, train_label, train_mask, support))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    _, val_acc = model((features, val_label, val_mask, support), training=False)

    if epoch % 20 == 0:
        print(epoch, float(loss), float(acc), '\tval:', float(val_acc))

# final evaluation on the test split
test_loss, test_acc = model((features, test_label, test_mask, support), training=False)
print('\ttest:', float(test_loss), float(test_acc))
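
# NOTE: the loss/accuracy pairs returned by model(...) above are computed
# inside models.GCN, which is not shown here. A sketch of the masked softmax
# cross-entropy it presumably uses — an assumption mirroring the original GCN
# reference code: average the per-node loss over masked nodes only.
def masked_softmax_cross_entropy_sketch(logits, labels, mask):
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)  # rescale so the masked mean matches a full-batch mean
    return tf.reduce_mean(loss * mask)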