# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import paddle
from paddle import nn
from rouge import Rouge

from paddlenlp.metrics import BLEU
from paddlenlp.trainer import Seq2SeqTrainer
from paddlenlp.utils.log import logger


def convert_example(example, text_column, summary_column, tokenizer, max_source_length, max_target_length):
    """
    Convert an example into the features the model needs.
    """
    inputs = example[text_column]
    targets = example[summary_column]
    model_inputs = tokenizer(
        inputs, max_length=max_source_length, padding=False, truncation=True, return_attention_mask=True
    )
    labels = tokenizer(targets, max_length=max_target_length, padding=False, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
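
# Usage sketch (illustrative only; the column names, tokenizer, and dataset
# below are assumptions, not part of this module):
#
#     from functools import partial
#     trans_func = partial(
#         convert_example,
#         text_column="content",
#         summary_column="title",
#         tokenizer=tokenizer,
#         max_source_length=128,
#         max_target_length=64,
#     )
#     train_dataset = train_dataset.map(trans_func)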


def compute_metrics(preds, targets):
    assert len(preds) == len(targets), (
        "The length of pred_responses should be equal to the length of "
        "target_responses. But received {} and {}.".format(len(preds), len(targets))
    )
    rouge = Rouge()
    bleu4 = BLEU(n_size=4)
    scores = []
    for pred, target in zip(preds, targets):
        try:
            # Rouge expects whitespace-separated strings, so token lists are
            # joined here; it raises ValueError on empty hypotheses, which are
            # scored as 0.
            score = rouge.get_scores(" ".join(pred), " ".join(target))
            scores.append([score[0]["rouge-1"]["f"], score[0]["rouge-2"]["f"], score[0]["rouge-l"]["f"]])
        except ValueError:
            scores.append([0, 0, 0])
        bleu4.add_inst(pred, [target])
    rouge1 = np.mean([i[0] for i in scores])
    rouge2 = np.mean([i[1] for i in scores])
    rougel = np.mean([i[2] for i in scores])
    print("\n" + "*" * 15)
    print("The auto evaluation result is:")
    print("rouge-1:", round(rouge1, 4))
    print("rouge-2:", round(rouge2, 4))
    print("rouge-L:", round(rougel, 4))
    print("BLEU-4:", round(bleu4.score(), 4))
    return rougel
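
# Usage sketch (illustrative; `preds` and `targets` are assumed to be lists of
# token lists, e.g. produced by `tokenizer.convert_ids_to_tokens`):
#
#     preds = [["the", "cat", "sat"]]
#     targets = [["the", "cat", "sat", "down"]]
#     rougel = compute_metrics(preds, targets)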


@contextlib.contextmanager
def main_process_first(desc="work"):
    """
    A context manager that runs the wrapped block on the main process first,
    while the other replicas wait at a barrier, then releases them.
    """
    if paddle.distributed.get_world_size() > 1:
        rank = paddle.distributed.get_rank()
        is_main_process = rank == 0
        main_process_desc = "main local process"

        try:
            if not is_main_process:
                # tell all replicas to wait
                logger.debug(f"{rank}: waiting for the {main_process_desc} to perform {desc}")
                paddle.distributed.barrier()
            yield
        finally:
            if is_main_process:
                # the wait is over
                logger.debug(f"{rank}: {main_process_desc} completed {desc}, releasing all replicas")
                paddle.distributed.barrier()
    else:
        yield
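
# Usage sketch (illustrative): serialize dataset preprocessing so the cache is
# written once by rank 0 before the other replicas read it:
#
#     with main_process_first(desc="dataset map pre-processing"):
#         train_dataset = train_dataset.map(trans_func)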


class PegasusTrainer(Seq2SeqTrainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.
        Subclass and override for custom behavior.
        """
        if self.criterion is not None:
            if "labels" in inputs:
                labels = inputs.pop("labels")
            elif "start_positions" in inputs and "end_positions" in inputs:
                labels = (inputs.pop("start_positions"), inputs.pop("end_positions"))
            elif self.args.label_names is not None:
                labels = []
                for label in self.label_names:
                    labels.append(inputs.pop(label))
                labels = tuple(labels)
            elif "generator_labels" in inputs:
                labels = inputs["generator_labels"]
        else:
            labels = None

        outputs = model(**inputs)

        if self.criterion is not None:
            loss = self.criterion(outputs, labels)
            outputs = (loss, outputs)

        # Save past state if it exists
        # TODO: this needs to be fixed and made cleaner later.
        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]

        # We don't use .loss here since the model may return tuples instead of ModelOutput.
        if isinstance(outputs, dict):
            loss = outputs["loss"]
        elif self.criterion is not None:
            # `outputs` was wrapped as (loss, raw_outputs) above, so the loss
            # sits at index 0 here, not index 2.
            loss = outputs[0]
        else:
            # Pegasus returns (lm_logits, new_cache, masked_lm_loss) when
            # labels are passed, so the loss sits at index 2.
            loss = outputs[2]

        return (loss, outputs) if return_outputs else loss
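
    # Illustrative sketch (hypothetical, not part of the original module): a
    # criterion compatible with the branch above could look like
    #
    #     class CrossEntropyCriterion(paddle.nn.Layer):
    #         def forward(self, outputs, labels):
    #             lm_logits = outputs[0]  # raw model outputs, loss not yet computed
    #             return paddle.nn.functional.cross_entropy(
    #                 lm_logits.reshape([-1, lm_logits.shape[-1]]),
    #                 labels.reshape([-1]),
    #             )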

    def prediction_step(
        self,
        model: nn.Layer,
        inputs: Dict[str, Union[paddle.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[paddle.Tensor], Optional[paddle.Tensor]]:
        """
        Perform an evaluation step on `model` using `inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Layer`):
                The model to evaluate.
            inputs (`Dict[str, Union[paddle.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (`bool`):
                Whether or not to return the loss only.

        Return:
            Tuple[Optional[float], Optional[paddle.Tensor], Optional[paddle.Tensor]]: A tuple with the loss, logits and
            labels (each being optional).
        """
        if not self.args.predict_with_generate or prediction_loss_only:
            return super().prediction_step(
                model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
            )

        has_labels = "labels" in inputs
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = self._gen_kwargs.copy()
        if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
            gen_kwargs["max_length"] = self.model.config.max_length
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.model.config.num_beams
        )

        if "attention_mask" in inputs:
            gen_kwargs["attention_mask"] = inputs.get("attention_mask", None)
        if "global_attention_mask" in inputs:
            gen_kwargs["global_attention_mask"] = inputs.get("global_attention_mask", None)

        # Prepare generation inputs. Some encoder-decoder models have an
        # encoder whose main input name differs from the model's, so pick the
        # matching input tensor.
        if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name:
            generation_inputs = inputs[self.model.encoder.main_input_name]
        else:
            generation_inputs = inputs[self.model.main_input_name]

        generated_tokens = self.model.generate(
            generation_inputs,
            **gen_kwargs,
        )
        # Unlike the Hugging Face return value, Paddle's `generate` returns a
        # tuple of two elements, ids and scores; keep only the ids.
        if isinstance(generated_tokens, tuple):
            generated_tokens = generated_tokens[0]

        # in case the batch is shorter than max length, the output should be padded
        if gen_kwargs.get("max_length") is not None and generated_tokens.shape[-1] < gen_kwargs["max_length"]:
            generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
        elif gen_kwargs.get("max_new_tokens") is not None and generated_tokens.shape[-1] < (
            gen_kwargs["max_new_tokens"] + 1
        ):
            generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_new_tokens"] + 1)

        with paddle.no_grad():
            if has_labels:
                with self.autocast_smart_context_manager():
                    outputs = model(**inputs)
                if self.label_smoother is not None:
                    loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
                else:
                    # Pegasus output is (lm_logits, new_cache, masked_lm_loss).
                    loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[2]).mean().detach()
            else:
                loss = None

        if self.args.prediction_loss_only:
            return (loss, None, None)

        if has_labels:
            labels = inputs["labels"]
            if gen_kwargs.get("max_length") is not None and labels.shape[-1] < gen_kwargs["max_length"]:
                labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
            elif gen_kwargs.get("max_new_tokens") is not None and labels.shape[-1] < (
                gen_kwargs["max_new_tokens"] + 1
            ):
                labels = self._pad_tensors_to_max_len(labels, (gen_kwargs["max_new_tokens"] + 1))
        else:
            labels = None

        return (loss, generated_tokens, labels)
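
# End-to-end usage sketch (illustrative; the checkpoint name and surrounding
# variables are assumptions, not prescribed by this module):
#
#     from paddlenlp.transformers import (
#         PegasusChineseTokenizer,
#         PegasusForConditionalGeneration,
#     )
#
#     tokenizer = PegasusChineseTokenizer.from_pretrained(model_name)
#     model = PegasusForConditionalGeneration.from_pretrained(model_name)
#     trainer = PegasusTrainer(
#         model=model,
#         args=training_args,          # a Seq2SeqTrainingArguments instance
#         train_dataset=train_dataset,
#         eval_dataset=dev_dataset,
#         tokenizer=tokenizer,
#     )
#     trainer.train()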