This repository has been archived by the owner on Oct 25, 2024. It is now read-only.

Commit 91cf74c
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Jun 17, 2024
1 parent 773bd1b commit 91cf74c
Showing 4 changed files with 6 additions and 6 deletions.
2 changes: 1 addition & 1 deletion intel_extension_for_transformers/transformers/trainer.py
@@ -387,7 +387,7 @@ def distill(
assert False, "Please provide teacher model for DistillationConfig."
self._eval_func = self.builtin_eval_func if eval_func is None else eval_func
self._train_func = self.builtin_train_func if train_func is None else train_func

compression_manager = prepare_compression(self.model, distillation_config)
self.compression_manager = compression_manager
self.compression_manager.callbacks.on_train_begin()
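For context, the unchanged lines in this hunk show how the trainer wires Intel Neural Compressor's distillation into its training loop: a DistillationConfig is turned into a compression manager whose callbacks bracket training. A minimal sketch of that flow, using only the calls visible in the hunk plus an assumed import path for prepare_compression; the tiny student/teacher models are placeholders:

import torch
from neural_compressor.config import DistillationConfig, KnowledgeDistillationLossConfig
from neural_compressor.training import prepare_compression  # import path assumed

# Placeholder student/teacher models; the trainer uses its own model here.
student_model = torch.nn.Linear(4, 2)
teacher_model = torch.nn.Linear(4, 2)

criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"])
distillation_config = DistillationConfig(teacher_model=teacher_model, criterion=criterion)

# prepare_compression wraps the student model and returns a manager whose
# callbacks are invoked around the training loop, as in the lines above.
compression_manager = prepare_compression(student_model, distillation_config)
compression_manager.callbacks.on_train_begin()
# ... the usual training loop over student_model goes here ...
compression_manager.callbacks.on_train_end()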
4 changes: 2 additions & 2 deletions tests/Nightly/test_distillation.py
@@ -28,7 +28,7 @@
from neural_compressor.config import (
DistillationConfig,
KnowledgeDistillationLossConfig,
-)
+)
from intel_extension_for_transformers.transformers.trainer import NLPTrainer
from transformers import (
AutoModelForSequenceClassification,
@@ -76,7 +76,7 @@ def compute_metrics(p):
preds = np.argmax(preds, axis=1)
return metric.compute(predictions=preds, references=p.label_ids)
origin_weight = copy.deepcopy(self.model.classifier.weight)

self.trainer = NLPTrainer(
model=copy.deepcopy(self.model),
train_dataset=self.dataset,
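The surrounding test builds an NLPTrainer around a deep copy of the model and then runs distillation through it. A rough sketch of that pattern, assuming the distill() entry point from the trainer.py hunk above (its keyword names are assumptions) and placeholder checkpoints and datasets:

import copy
from intel_extension_for_transformers.transformers.trainer import NLPTrainer
from neural_compressor.config import DistillationConfig, KnowledgeDistillationLossConfig
from transformers import AutoModelForSequenceClassification

# Placeholder checkpoints; the test uses its own small models.
model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
teacher_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

trainer = NLPTrainer(
    model=copy.deepcopy(model),
    train_dataset=train_dataset,  # a tokenized dataset prepared elsewhere (placeholder)
    eval_dataset=eval_dataset,    # likewise a placeholder
)

criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"])
distillation_config = DistillationConfig(teacher_model=teacher_model, criterion=criterion)
trainer.distill(distillation_config=distillation_config, teacher_model=teacher_model)  # signature assumed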
4 changes: 2 additions & 2 deletions tests/Nightly/test_orchestrate_optimization.py
@@ -85,8 +85,8 @@ def compute_metrics(p):
name="eval_accuracy", is_relative=True, criterion=0.5
)
self.trainer.metrics = tune_metric
-pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}],
-                                   target_sparsity=0.64,
+pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}],
+                                   target_sparsity=0.64,
pruning_scope="local")
distillation_criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"])
distillation_conf = DistillationConfig(teacher_model=self.teacher_model, criterion=distillation_criterion)
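For reference, the pruning and distillation configs in this hunk are the two pieces that get orchestrated together. A sketch of how they are constructed, using the same arguments as the hunk; the teacher model is a placeholder and the trainer call that ultimately consumes both configs is not shown:

import torch
from neural_compressor.config import (
    DistillationConfig,
    KnowledgeDistillationLossConfig,
    WeightPruningConfig,
)

teacher_model = torch.nn.Linear(4, 2)  # placeholder teacher

pruning_conf = WeightPruningConfig(
    [{"start_step": 0, "end_step": 2}],
    target_sparsity=0.64,
    pruning_scope="local",
)
distillation_criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"])
distillation_conf = DistillationConfig(
    teacher_model=teacher_model, criterion=distillation_criterion
)

# Both configs are then handed to the trainer together so that pruning and
# distillation run in a single orchestrated optimization pass.
confs = [pruning_conf, distillation_conf]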
2 changes: 1 addition & 1 deletion workflows/compression_aware_training/src/itrex_opt.py
@@ -778,7 +778,7 @@ def _do_quantization_aware_training(self):
quantization_config = QuantizationAwareTrainingConfig(
tuning_criterion=tuning_criterion,
accuracy_criterion=accuracy_criterion
-)
+)
early_stopping_patience = 2
early_stopping_threshold = 0.001 # optional
self.trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \
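The hunk above configures quantization-aware training with a tuning criterion and an accuracy criterion, then adds early stopping. A sketch with illustrative criterion values; the import path for the config classes and the concrete numbers are assumptions, and `trainer` stands in for the script's NLPTrainer:

import transformers
from neural_compressor.config import (  # import path assumed
    AccuracyCriterion,
    QuantizationAwareTrainingConfig,
    TuningCriterion,
)

tuning_criterion = TuningCriterion(max_trials=5)             # illustrative value
accuracy_criterion = AccuracyCriterion(tolerable_loss=0.01)  # illustrative value

quantization_config = QuantizationAwareTrainingConfig(
    tuning_criterion=tuning_criterion,
    accuracy_criterion=accuracy_criterion,
)

# Early stopping, as in the hunk: halt training when the tracked metric fails
# to improve by the threshold for `early_stopping_patience` evaluations.
early_stopping_patience = 2
early_stopping_threshold = 0.001  # optional
trainer.add_callback(
    transformers.EarlyStoppingCallback(early_stopping_patience, early_stopping_threshold)
)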
