Skip to content
This repository was archived by the owner on Oct 25, 2024. It is now read-only.

Commit 8ba28b8

Browse files
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 429424f commit 8ba28b8

File tree

1 file changed

+2
-2
lines changed
  • intel_extension_for_transformers/transformers/llm/evaluation/lm_eval

1 file changed

+2
-2
lines changed

intel_extension_for_transformers/transformers/llm/evaluation/lm_eval/evaluator.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -538,7 +538,7 @@ def evaluate(
538538
_higher_is_better[m] = h
539539
if m in _higher_is_better and _higher_is_better[m] is not None and _higher_is_better[m] != h:
540540
eval_logger.warning(
541-
f"Higher_is_better values for metric {m} in group {group} are not consistent." +
541+
f"Higher_is_better values for metric {m} in group {group} are not consistent." +
542542
f"Defaulting to None."
543543
)
544544
_higher_is_better[m] = None
@@ -571,7 +571,7 @@ def evaluate(
571571
# TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility
572572
# To use the old (likely incorrect) variance formula,
573573
# comment out the above and uncomment this line:
574-
# results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs,
574+
# results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs,
575575
# sizes, metrics=metrics)
576576

577577
results[group]["samples"] = sum(sizes)

0 commit comments

Comments (0)