test.py
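# Evaluation entry point for the arXiv title-generation model: loads a trained
# checkpoint, generates titles for the test split with beam search, and logs
# ROUGE scores plus sample generations to MLflow.
#
# Example invocation (a sketch; the checkpoint path is a placeholder, the
# arguments are inferred from the argparse setup in __main__ below):
#
#   python test.py my_experiment --checkpoint_path checkpoints/model.pt \
#       --batch_size 8 --num_beams 4
#
# Expects the preprocessed data at data/preprocessed.pkl and a CUDA device.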
import argparse
import mlflow
import numpy as np
import torch
from datasets import load_metric
import pickle
from pathlib import Path
import tempfile

from models import (
    arXivModel,
    Summarizer,
    StyleEncoder,
)
import utils


def evaluate_gen(num_samples=20):
    """Generate titles for the test set, log ROUGE scores and a sample file to MLflow."""
    # `experiment_id` keeps the metric's cache files distinct per MLflow run.
    metric = load_metric("rouge", experiment_id=run.info.run_id)

    samples = []
    for batch_raw in data_test:
        generated = model.generate(batch_raw, num_beams=args.num_beams)
        metric.add_batch(predictions=generated, references=batch_raw['title'])

        # Keep the first few abstract/title pairs for qualitative inspection.
        if len(samples) < num_samples:
            for i in range(len(batch_raw['abstract'])):
                samples.append({
                    'abstract': batch_raw['abstract'][i],
                    'title_actual': batch_raw['title'][i],
                    'title_generated': generated[i],
                })

    # Per-example scores (no bootstrap aggregation); `use_agregator` is the
    # parameter's spelling in the `datasets` ROUGE metric.
    scores_all = metric.compute(use_agregator=False, rouge_types=['rouge1', 'rouge2', 'rouge3'])
    scores_f1 = []
    for i in range(len(scores_all['rouge1'])):
        scores_f1.append((
            scores_all['rouge1'][i].fmeasure,
            scores_all['rouge2'][i].fmeasure,
            scores_all['rouge3'][i].fmeasure,
        ))
    scores_f1 = np.array(scores_f1) * 100.

    mlflow.log_metrics({
        'rouge1': scores_f1[:, 0].mean(axis=0),
        'rouge2': scores_f1[:, 1].mean(axis=0),
        'rouge3': scores_f1[:, 2].mean(axis=0),
    })

    # Write the collected samples with their per-example F1 scores and attach
    # the file to the MLflow run as an artifact.
    with tempfile.TemporaryDirectory() as tempdir:
        path = Path(tempdir, 'samples.txt')
        with open(path, 'w') as f:
            for sample, score in zip(samples, scores_f1):
                f.write(f"<Abstract>\n{utils.wrap_text(sample['abstract'], 80)}\n")
                f.write(f"<Actual title>\n{utils.wrap_text(sample['title_actual'], 80)}\n")
                f.write(f"<Generated title>\n{utils.wrap_text(sample['title_generated'], 80)}\n")
                f.write(f"Score: {score[0]:.2f}/{score[1]:.2f}/{score[2]:.2f}\n\n")
        mlflow.log_artifact(str(path))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('exp_name', type=str)
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--num_beams', type=int, default=4)
    args = parser.parse_args()

    # Load the preprocessed arXiv data and keep only the test split.
    with open('data/preprocessed.pkl', 'rb') as f:
        data = pickle.load(f)
    _, _, data_test = utils.split_data(data)
    data_test = utils.arXivDataLoader(data_test, args.batch_size)
    print("data loaded", flush=True)

    model = arXivModel.from_checkpoint(args.checkpoint_path, device='cuda')
    print("model loaded", flush=True)

    mlflow.set_experiment(args.exp_name)
    run = mlflow.start_run()
    mlflow.log_params({
        'checkpoint_path': args.checkpoint_path,
        'batch_size': args.batch_size,
        'num_beams': args.num_beams,
    })

    print("test start", flush=True)
    evaluate_gen()
    # Mark the MLflow run as finished.
    mlflow.end_run()