"""
Entry point for training and evaluating a POS/morphological features tagger.
This tagger uses highway BiLSTM layers with character- and word-level representations,
and biaffine classifiers to produce consistent POS and FEATS predictions.
For details, please refer to the paper:
https://nlp.stanford.edu/pubs/qi2018universal.pdf
"""
# was stanza.models.tagger
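
# Example invocations (added for illustration; the flag names are defined in
# parse_args() below, the file names are placeholders):
#
#   train a new model with pretrained embeddings and save it:
#     python tagger.py --training-data train.conllu --dev-data dev.conllu \
#         --emb-data embeddings.vec --model-save tagger.pt
#
#   tag and evaluate a test file with a previously trained model:
#     python tagger.py --test-data test.conllu --test-data-out tagged.conllu \
#         --model tagger.pt --embeddings tagger.emb.pt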
import os
import time
import argparse
import logging
import random
import numpy as np
import torch
from data import DataLoader, unsort
from trainer import Trainer
from pretrain import Pretrain
from document import Document
from evaluator import POS_KEY
logger = logging.getLogger('stanza')
logger.setLevel(logging.DEBUG)
log_handler = logging.StreamHandler()
log_formatter = logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s",
                                  datefmt='%Y-%m-%d %H:%M:%S')
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)

def set_random_seed(seed, cuda):
    """
    Set a random seed on all of the things which might need it:
    torch, np, python random, and torch.cuda
    """
    if seed is None:
        seed = random.randint(0, 1000000000)
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)
    return seed

def ensure_dir(path):
    """Create the parent directory of the given file path if it does not exist yet."""
    if path is None:
        return
    d = os.path.split(path)[0]
    # guard against an empty dirname for bare file names, which would crash os.makedirs
    if d and not os.path.exists(d):
        logger.info("Directory {} does not exist, creating it...".format(d))
        os.makedirs(d)

def get_adaptive_eval_interval(nb_train_batches, nb_dev_batches, min_interval=100, max_interval=None, multiplier=2):
    """Choose how often to evaluate on the dev set, based on the train and dev data sizes."""
    if not max_interval:
        # evaluate at least once per epoch
        max_interval = nb_train_batches
    if nb_dev_batches < 1:
        return max_interval
    if max_interval <= min_interval:
        return min_interval
    opt_interval = round(multiplier * nb_dev_batches / 100) * 100
    if opt_interval < min_interval:
        return min_interval
    elif opt_interval > max_interval:
        return max_interval
    else:
        return opt_interval
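
# Worked example (added for clarity): with 500 training batches and 40 dev
# batches, opt_interval = round(2 * 40 / 100) * 100 = 100, which lies within
# [min_interval, max_interval] = [100, 500], so the model is evaluated every
# 100 steps; with 300 dev batches, opt_interval = 600 is capped at max_interval.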

def get_adaptive_log_interval(batch_size, min_interval=10, max_interval=1000, avail_intervals=(10, 20, 50, 100, 200, 500, 1000), gpu=False):
    """Choose how often to print training logs, based on the batch size and CPU/GPU mode."""
    available_intervals = [x for x in avail_intervals
                           if x >= min_interval and x <= max_interval]
    # log 100 times less often if using a gpu
    gpu_multiplier = 100 if gpu else 1
    opt_interval = 1000 * gpu_multiplier / max(batch_size, 50)
    if opt_interval < min_interval:
        return min_interval
    elif opt_interval > max_interval:
        return max_interval
    else:
        step_interval = [x for x in available_intervals if x <= opt_interval][-1]
        return step_interval
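
# Worked example (added for clarity): with the default batch size of 5000
# tokens on a GPU, opt_interval = 1000 * 100 / 5000 = 20, so the closest
# available interval of 20 steps is chosen; on a CPU, 1000 / 5000 = 0.2 falls
# below min_interval and logging defaults to every 10 steps.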

def parse_args(args=None):
    parser = argparse.ArgumentParser()
    # data loading and saving
    parser_paths = parser.add_argument_group('File paths')
    parser_paths.add_argument("--training-data", nargs='+', type=str, default=[], help="Input training data file(s), a space-separated list of several file names can be given")
    parser_paths.add_argument("--emb-data", type=str, default=None, help="File from which to read the pretrained embeddings (supported file types: .txt, .vec, .xz, .gz)")
    parser_paths.add_argument('--emb-max-vocab', type=int, default=250000, help="Limit the pretrained embeddings to the first N entries (default: 250000)")
    parser_paths.add_argument("--dev-data", type=str, default=None, help="Input development/validation data file")
    parser_paths.add_argument("--dev-data-out", type=str, default=None, help="Output file for annotated development/validation data")
    parser_paths.add_argument("--test-data", type=str, default=None, help="Input test data file")
    parser_paths.add_argument("--test-data-out", type=str, default=None, help="Output file for annotated test data")
    parser_paths.add_argument("--scores-out", default=None, help="TSV file in which training scores and statistics are saved (default: None)")
    parser_paths.add_argument("--model", default=None, help="Binary file (.pt) containing the parameters of a trained model")
    parser_paths.add_argument("--model-save", default=None, help="Binary file (.pt) in which the parameters of a trained model are saved")
    parser_paths.add_argument("--embeddings", default=None, help="Binary file (.pt) containing the parameters of the pretrained embeddings")
    parser_paths.add_argument("--embeddings-save", default=None, help="Binary file (.pt) in which the parameters of the pretrained embeddings are saved")
    # data formatting (default options are fine for UD-formatted files)
    parser_data = parser.add_argument_group('Data formatting and evaluation')
    parser_data.add_argument("--number-index", type=int, help="Field in which the word numbers are stored (default: 0)", default=0)
    parser_data.add_argument("--number-index-out", type=int, help="Field in which the word numbers are saved in the output file (default: 0). Use negative value to skip word numbers.", default=0)
    parser_data.add_argument("--c-token-index", type=int, help="Field in which the tokens used for the character embeddings are stored (default: 1). Use negative value if character embeddings should be disabled.", default=1)
    parser_data.add_argument("--c-token-index-out", type=int, help="Field in which the character embedding tokens are saved in the output file (default: 1). Use negative value to skip tokens.", default=1)
    parser_data.add_argument("--w-token-index", type=int, help="Field in which the tokens used for the word embeddings are stored (default: 1). Use negative value if word embeddings should be disabled.", default=1)
    parser_data.add_argument("--w-token-index-out", type=int, help="Field in which the tokens used for the word embeddings are saved in the output file (default: -1). Use negative value to skip tokens.", default=-1)
    parser_data.add_argument("--w-token-min-freq", type=int, help="Minimum frequency starting from which word embeddings will be considered (default: 7)", default=7)
    parser_data.add_argument("--pos-index", type=int, help="Field in which the main POS is stored (default [UPOS tags]: 3)", default=3)
    parser_data.add_argument("--pos-index-out", type=int, help="Field in which the main POS is saved in the output file (default: 3)", default=3)
    parser_data.add_argument("--morph-index", type=int, help="Field in which the morphology features are stored (default: 5). Use negative value if morphology features should not be considered.", default=5)
    parser_data.add_argument("--morph-index-out", type=int, help="Field in which the morphology features are saved in the output file (default: 5). Use negative value to skip features.", default=5)
    parser_data.add_argument("--oov-index-out", type=int, default=-1, help="Field in which OOV information is saved in the output file (default: not written)")
    parser_data.add_argument("--no-eval-feats", nargs='+', default=[], help="Space-separated list of morphological features that should be ignored during evaluation. Typically used for additional tasks in multitask settings.")
    parser_data.add_argument("--mask-other-fields", dest="copy_untouched", action="store_false", help="Replaces fields in input that are not used by the tagger (e.g. lemmas, dependencies) with '_' instead of copying them.", default=True)
    parser_data.add_argument('--augment-nopunct', nargs='?', type=float, const=0.1, default=None, help='Augment the training data by copying some amount of punct-ending sentences as non-punct (default: 0.1, corresponding to 10%%)')
    parser_data.add_argument('--punct-tag', type=str, default='PUNCT', help="POS tag of sentence-final punctuation used for augmentation (default: PUNCT)")
    parser_data.add_argument('--sample-train', type=float, default=1.0, help='Subsample training data to proportion of N (default: 1.0)')
    parser_data.add_argument('--cut-dev', type=int, default=-1, help='Cut dev data to first N sentences (default: keep all)')
    parser_data.add_argument("--debug", action="store_true", help="Debug mode. This is a shortcut for '--sample-train 0.05 --cut-dev 100 --batch-size -1'")
    parser_net = parser.add_argument_group('Network architecture')
    parser_net.add_argument('--word-emb-dim', type=int, default=75, help="Size of word embedding layer (default: 75). Use negative value to turn off word embeddings")
    parser_net.add_argument('--char-emb-dim', type=int, default=100, help="Size of character embedding layer (default: 100). Use negative value to turn off character embeddings")
    parser_net.add_argument('--transformed-emb-dim', dest="transformed_dim", type=int, default=125, help="Size of transformed output layer of character embeddings and pretrained embeddings (default: 125)")
    parser_net.add_argument('--pos-emb-dim', type=int, default=50, help="Size of POS embeddings that are fed to predict the morphology features (default: 50). Use negative value to use shared, i.e. non-hierarchical, representations for POS and morphology")
    parser_net.add_argument('--char-hidden-dim', type=int, default=400, help="Size of character LSTM hidden layers (default: 400)")
    parser_net.add_argument('--char-num-layers', type=int, default=1, help="Number of character LSTM layers (default: 1). Use 0 to disable character LSTM")
    parser_net.add_argument('--char-unidir', dest='char_bidir', action='store_false', help="Uses a unidirectional LSTM for the character embeddings (default: bidirectional)")
    parser_net.add_argument('--tag-hidden-dim', type=int, default=200, help="Size of tagger LSTM hidden layers (default: 200)")
    parser_net.add_argument('--tag-num-layers', type=int, default=2, help="Number of tagger LSTM layers (default: 2)")
    # TODO: merge two lines below, improve help
    parser_net.add_argument('--deep-biaff-hidden-dim', type=int, default=400, help="Size of biaffine hidden layers (default: 400)")
    parser_net.add_argument('--composite-deep-biaff-hidden-dim', type=int, default=100, help="Size of composite biaffine hidden layers (default: 100)")
    parser_net.add_argument('--dropout', type=float, default=0.5, help="Input dropout (default: 0.5)")
    parser_net.add_argument('--char-rec-dropout', type=float, default=0, help="Recurrent dropout for the character LSTM (default: 0). Should only be used with more than one layer")
    parser_net.add_argument('--tag-rec-dropout', type=float, default=0, help="Recurrent dropout for the tagger LSTM (default: 0). Should only be used with more than one layer")
    parser_net.add_argument('--word-dropout', type=float, default=0.33, help="Word dropout (default: 0.33)")
    parser_train = parser.add_argument_group('Training and optimization')
    parser_train.add_argument('--batch-size', type=int, default=5000, help='Batch size in tokens (default: 5000). Use negative value to use single sentences')
    parser_train.add_argument('--max-steps', type=int, default=50000, help="Maximum training steps (default: 50000)")
    parser_train.add_argument('--max-steps-before-stop', type=int, default=3000, help='Changes learning method or early terminates after N steps if the dev scores are not improving (default: 3000). Use negative value to disable early stopping')
    parser_train.add_argument('--log-interval', type=int, default=None, help='Print log every N steps. The default value is determined on the basis of batch size and CPU/GPU mode.')
    parser_train.add_argument('--eval-interval', type=int, default=None, help="Evaluate on dev set every N steps. The default value is determined on the basis of the training and dev data sizes.")
    parser_train.add_argument('--learning-rate', dest="lr", type=float, default=3e-3, help='Learning rate (default: 3e-3)')
    parser_train.add_argument('--optimizer', dest="optim", type=str, choices=['sgd', 'adagrad', 'adam', 'adamax'], default='adam', help='Optimization algorithm (default: adam)')
    parser_train.add_argument('--beta2', type=float, default=0.95, help="Beta2 value required for the adam optimizer (default: 0.95)")
    parser_train.add_argument('--max-grad-norm', type=float, default=1.0, help='Gradient clipping (default: 1.0)')
    # Other arguments
    parser.add_argument('--seed', type=int, default=None, help="Set the random seed")
    parser.add_argument('--cpu', action='store_true', help='Force CPU even if GPU is available')
    # TODO:
    # parser.add_argument("--add-probs", action="store_true", help="Write prediction probabilities to output files")
    # - default directory + default output names
    args = parser.parse_args(args=args)
    return args

def validate_args(args):
    if args.c_token_index < 0 or args.char_num_layers < 1 or args.char_emb_dim < 1:
        logger.info("Disable character embeddings")
        args.c_token_index = -1
        args.c_token_index_out = -1
        args.char_num_layers = 0
        args.char_emb_dim = 0
    if args.w_token_index < 0:
        logger.info("Disable word embeddings")
        args.w_token_index = -1
        args.w_token_index_out = -1
        args.word_emb_dim = 0
        logger.info("Disable pretrained embeddings")
        args.emb_data = None
        args.embeddings = None
        args.embeddings_save = None
    elif args.emb_data is None and args.embeddings is None:
        if args.word_emb_dim < 1:
            logger.info("Disable word embeddings")
            args.w_token_index = -1
            args.w_token_index_out = -1
            logger.info("Disable pretrained embeddings")
            args.emb_data = None
            args.embeddings = None
            args.embeddings_save = None
    if args.c_token_index < 0 and args.w_token_index < 0:
        raise RuntimeError("Cannot use tagger without any token information.")
    if args.training_data and args.pos_index < 0:
        raise RuntimeError("POS tag field is required for training the tagger.")
    if args.debug:
        logger.info("Debug mode: reduce train and dev set")
        args.sample_train = 0.05
        args.cut_dev = 100
        args.batch_size = -1
    if args.training_data and args.model:
        logger.info("Retraining existing model {} with new training data {}".format(args.model, ",".join(args.training_data)))
        logger.info("Parameters of the 'Network architecture' section will be ignored")
    if args.model and args.emb_data:
        raise RuntimeError("Cannot load embeddings in text format with --emb-data together with an existing model. Load the embeddings in binary (.pt) format with --embeddings.")
    if not args.dev_data:
        logger.info("Disable early stopping")
        args.max_steps_before_stop = -1
    if args.model_save and args.emb_data and (args.embeddings_save is None):
        logger.warning("Pre-trained embeddings must be saved as a .pt file!")
        args.embeddings_save = args.model_save.replace(".pt", ".emb.pt")
        logger.warning("Saving them as {}".format(args.embeddings_save))
    ensure_dir(args.model_save)
    ensure_dir(args.embeddings_save)
    ensure_dir(args.dev_data_out)
    ensure_dir(args.test_data_out)
    ensure_dir(args.scores_out)

def get_read_format_args(args):
    return {"id": args.number_index,
            "cform": args.c_token_index, "wform": args.w_token_index,
            "pos": args.pos_index, "feats": args.morph_index}


def get_write_format_args(args):
    return {"id": args.number_index_out, "unk": args.oov_index_out,
            "cform": args.c_token_index_out, "wform": args.w_token_index_out,
            "pos": args.pos_index_out, "feats": args.morph_index_out}

def main(args=None):
    args = parse_args(args=args)
    validate_args(args)
    use_cuda = (not args.cpu) and torch.cuda.is_available()
    logger.info("Selected device: {}".format("gpu" if use_cuda else "cpu"))
    seed = set_random_seed(args.seed, use_cuda)
    logger.info("Random seed: {}".format(seed))
    trainer, pretrained = None, None
    if args.training_data:
        logger.info("Running tagger in training mode...")
        trainer, pretrained = train(args, use_cuda)
    if args.test_data:
        logger.info("Running tagger in prediction mode...")
        predict(args, trainer, pretrained, use_cuda)

def display_results(doc, no_eval_feats, per_feature=False, report_oov=True):
    feats_eval, oov_eval, exact_eval = doc.evaluate()
    if feats_eval.instance_count == 0:
        return 0.0
    s = "Evaluation:"
    if report_oov:
        s += "\nOOV rate: {:.2f}%".format(100*oov_eval.instance_count / feats_eval.instance_count)
        s += "\n All MicroF1 OOV MicroF1"
        s += "\nPOS+FEATS {:.2f}% {:.2f}%".format(
            100*feats_eval.micro_f1(excl=no_eval_feats),
            100*oov_eval.micro_f1(excl=no_eval_feats)
        )
        s += "\nPOS {:.2f}% {:.2f}%".format(
            100*feats_eval.acc(att=POS_KEY),
            100*oov_eval.acc(att=POS_KEY)
        )
        s += "\nFEATS {:.2f}% {:.2f}%".format(
            100*feats_eval.micro_f1(excl=[POS_KEY]+no_eval_feats),
            100*oov_eval.micro_f1(excl=[POS_KEY]+no_eval_feats)
        )
        s += "\nUFEATS {:.2f}% (exact match)".format(100*exact_eval.acc())
    else:
        s += "\n All MicroF1"
        s += "\nPOS+FEATS {:.2f}%".format(
            100*feats_eval.micro_f1(excl=no_eval_feats))
        s += "\nPOS {:.2f}%".format(
            100*feats_eval.acc(att=POS_KEY))
        s += "\nFEATS {:.2f}%".format(
            100*feats_eval.micro_f1(excl=[POS_KEY]+no_eval_feats))
        s += "\nUFEATS {:.2f}% (exact match)".format(100*exact_eval.acc())
    if per_feature:
        maxfeatlen = max([len(x) for x in feats_eval.keys()])
        if report_oov:
            s += "\n\n{feat: <{fill}} All MicroF1 OOV MicroF1".format(feat="Feature", fill=maxfeatlen)
        else:
            s += "\n\n{feat: <{fill}} All MicroF1".format(feat="Feature", fill=maxfeatlen)
        for key in sorted(feats_eval.keys()):
            if key == POS_KEY:
                continue
            if report_oov:
                s += "\n{feat: <{fill}} {all:.2f}% {oov:.2f}%".format(
                    feat=key, fill=maxfeatlen,
                    all=100*feats_eval.acc(att=key),
                    oov=100*oov_eval.acc(att=key)
                )
            else:
                s += "\n{feat: <{fill}} {all:.2f}%".format(
                    feat=key, fill=maxfeatlen,
                    all=100*feats_eval.acc(att=key)
                )
    logger.info(s)
    return feats_eval.micro_f1(excl=no_eval_feats)
    # return exact_eval.acc() # for backwards compatibility
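
# The resulting log block looks roughly like this (scores are placeholders):
#   Evaluation:
#   OOV rate: 8.41%
#    All MicroF1 OOV MicroF1
#   POS+FEATS 96.12% 88.03%
#   POS 97.45% 90.11%
#   FEATS 95.80% 87.50%
#   UFEATS 94.02% (exact match)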

def train(args, use_cuda=False):
    logger.info("Loading training data...")
    train_doc = Document(from_file=args.training_data, read_positions=get_read_format_args(args), sample_ratio=args.sample_train)
    if args.augment_nopunct:
        train_doc.augment_punct(args.augment_nopunct, args.punct_tag)
    # continue training existing model
    if args.model:
        pretrained = None
        if args.embeddings:
            pretrained = Pretrain(from_pt=args.embeddings)
            if args.embeddings_save:
                pretrained.save_to_pt(args.embeddings_save)
        logger.info("Loading model from {}".format(args.model))
        trainer = Trainer(model_file=args.model, pretrain=pretrained, args=vars(args), use_cuda=use_cuda)
        train_data = DataLoader(train_doc, args.batch_size, vocab=trainer.vocab, pretrain=pretrained, evaluation=False)
    # create new model from scratch and start training
    else:
        pretrained = None
        if args.embeddings:
            pretrained = Pretrain(from_pt=args.embeddings)
        elif args.emb_data:
            pretrained = Pretrain(from_text=args.emb_data, max_vocab=args.emb_max_vocab)
        if pretrained and args.embeddings_save:
            pretrained.save_to_pt(args.embeddings_save)
        logger.info("Creating new model...")
        train_data = DataLoader(train_doc, args.batch_size, vocab=None, pretrain=pretrained, evaluation=False, word_cutoff=args.w_token_min_freq)
        trainer = Trainer(vocab=train_data.vocab, pretrain=pretrained, args=vars(args), use_cuda=use_cuda)
    if len(train_data) == 0:
        raise RuntimeError("Cannot start training because no training data is available")
    if args.dev_data:
        logger.info("Loading development data...")
        dev_doc = Document(from_file=args.dev_data, read_positions=get_read_format_args(args), write_positions=get_write_format_args(args), copy_untouched=args.copy_untouched, cut_first=args.cut_dev)
        dev_data = DataLoader(dev_doc, args.batch_size, vocab=trainer.vocab, pretrain=pretrained, evaluation=True)
    else:
        dev_doc = None
        dev_data = []
    if not args.eval_interval:
        args.eval_interval = get_adaptive_eval_interval(len(train_data), len(dev_data))
    if len(dev_data) > 0:
        logger.info("Evaluating the model every {} steps".format(args.eval_interval))
    else:
        logger.info("No dev data given, not evaluating the model")
    if not args.log_interval:
        args.log_interval = get_adaptive_log_interval(args.batch_size, max_interval=args.eval_interval, gpu=use_cuda)
    logger.info("Showing log every {} steps".format(args.log_interval))
    if args.scores_out:
        scores_file = open(args.scores_out, "w")
        scores_file.write("Step\tEpoch\tTrainLoss\tDevLoss\tDevScore\tNewBest\n")
        scores_file.flush()
    else:
        scores_file = None
    global_step = 0
    epoch = 0
    dev_score_history = []
    last_best_step = 0
    max_steps = args.max_steps
    current_lr = args.lr
    global_start_time = time.time()
    format_str = 'Finished step {}/{}, loss = {:.6f}, {:.3f} sec/batch, lr: {:.6f}'
    # start training
    logger.info("Start training...")
    using_amsgrad = False
    train_loss = 0
    while True:
        epoch += 1
        epoch_start_time = time.time()
        do_break = False
        for batch in train_data:
            start_time = time.time()
            global_step += 1
            loss = trainer.update(batch, eval=False)  # update step
            train_loss += loss
            if global_step % args.log_interval == 0:
                duration = time.time() - start_time
                logger.info(format_str.format(global_step, max_steps, loss, duration, current_lr))
            if global_step % args.eval_interval == 0:
                new_best = ""
                dev_loss = 0.0
                dev_score = 0.0
                if len(dev_data) > 0:
                    logger.info("Evaluating on dev set...")
                    dev_preds = []
                    dev_loss = 0.0
                    for dev_batch in dev_data:
                        preds, loss = trainer.predict(dev_batch)
                        dev_preds += preds
                        dev_loss += float(loss)
                    dev_preds = unsort(dev_preds, dev_data.data_orig_idx)
                    dev_loss = dev_loss / len(dev_data)
                    dev_doc.add_predictions(dev_preds)
                    dev_doc.write_to_file(args.dev_data_out)
                    dev_score = display_results(dev_doc, args.no_eval_feats, report_oov=args.w_token_index >= 0)
                    # save best model
                    if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
                        logger.info("New best model found")
                        new_best = "*"
                        last_best_step = global_step
                        if args.model_save:
                            trainer.save(args.model_save)
                    dev_score_history += [dev_score]
                train_loss = train_loss / args.eval_interval  # avg loss per batch
                logger.info("Step {}/{}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_score = {:.4f}".format(global_step, max_steps, train_loss, dev_loss, dev_score))
                if scores_file:
                    scores_file.write("{}\t{}\t{:.6f}\t{:.6f}\t{:.4f}\t{}\n".format(global_step, epoch, train_loss, dev_loss, dev_score, new_best))
                    scores_file.flush()
                train_loss = 0
                if args.max_steps_before_stop > 0 and global_step - last_best_step >= args.max_steps_before_stop:
                    if args.optim == 'adam' and not using_amsgrad:
                        logger.info("Switching to AMSGrad")
                        last_best_step = global_step
                        using_amsgrad = True
                        trainer.set_optimizer('amsgrad', lr=args.lr, betas=(.9, args.beta2), eps=1e-6)
                    else:
                        logger.info("Early stopping: dev_score has not improved in {} steps".format(args.max_steps_before_stop))
                        do_break = True
                        break
            if global_step >= args.max_steps:
                do_break = True
                break
        if do_break:
            break
        epoch_duration = time.time() - epoch_start_time
        logger.info("Finished epoch {} after step {}, {:.3f} sec/epoch".format(epoch, global_step, epoch_duration))
        train_data.reshuffle()
logger.info("Training ended with {} steps in epoch {}".format(global_step, epoch))
if len(dev_score_history) > 0:
best_score, best_step = max(dev_score_history), np.argmax(dev_score_history)+1
logger.info("Best dev score = {:.2f} at step {}".format(best_score*100, best_step * args.eval_interval))
elif args.model_save:
logger.info("Dev set never evaluated, saving final model")
trainer.save(args.model_save)
return trainer, pretrained

def predict(args, trainer=None, pretrained=None, use_cuda=False):
    # load pretrained embeddings and model
    if not trainer:
        # load pretrained embeddings
        pretrained = Pretrain(from_pt=args.embeddings)
        # load model
        logger.info("Loading model from {}".format(args.model))
        trainer = Trainer(model_file=args.model, pretrain=pretrained, use_cuda=use_cuda)
    # load data
    logger.info("Loading prediction data...")
    doc = Document(from_file=args.test_data, read_positions=get_read_format_args(args), write_positions=get_write_format_args(args), copy_untouched=args.copy_untouched)
    data = DataLoader(doc, args.batch_size, vocab=trainer.vocab, pretrain=pretrained, evaluation=True)
    if len(data) == 0:
        raise RuntimeError("Cannot start prediction because no data is available")
    logger.info("Start prediction...")
    preds = []
    for batch in data:
        preds += trainer.predict(batch)[0]  # don't keep the loss
    preds = unsort(preds, data.data_orig_idx)
    # write to file and score
    doc.add_predictions(preds)
    doc.write_to_file(args.test_data_out)
    display_results(doc, args.no_eval_feats, per_feature=True, report_oov=args.w_token_index >= 0)

if __name__ == '__main__':
    main()
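
# The entry point can also be driven programmatically with an argv-style list
# (a sketch with placeholder paths, not part of the original file):
#
#   from tagger import main
#   main(["--training-data", "train.conllu",
#         "--dev-data", "dev.conllu",
#         "--model-save", "tagger.pt"])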