From 8ba28b80d6e99bc45abc106e32bee0e6470d2677 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 9 Jul 2024 03:28:21 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../transformers/llm/evaluation/lm_eval/evaluator.py         | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/intel_extension_for_transformers/transformers/llm/evaluation/lm_eval/evaluator.py b/intel_extension_for_transformers/transformers/llm/evaluation/lm_eval/evaluator.py
index 8ced6a9abdf..fe4709af467 100644
--- a/intel_extension_for_transformers/transformers/llm/evaluation/lm_eval/evaluator.py
+++ b/intel_extension_for_transformers/transformers/llm/evaluation/lm_eval/evaluator.py
@@ -538,7 +538,7 @@ def evaluate(
                         _higher_is_better[m] = h
                     if m in _higher_is_better and _higher_is_better[m] is not None and _higher_is_better[m] != h:
                         eval_logger.warning(
-                            f"Higher_is_better values for metric {m} in group {group} are not consistent." + 
+                            f"Higher_is_better values for metric {m} in group {group} are not consistent." +
                             f"Defaulting to None."
                         )
                         _higher_is_better[m] = None
@@ -571,7 +571,7 @@ def evaluate(
                 # TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility
                 # To use the old (likely incorrect) variance formula,
                 # comment out the above and uncomment this line:
-                # results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs, 
+                # results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs,
                 # sizes, metrics=metrics)

                 results[group]["samples"] = sum(sizes)