From e89a9876ada1da511df0708955a63012739222b5 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Thu, 20 Feb 2025 21:57:24 +0000
Subject: [PATCH] Fix typo

---
 src/lighteval/models/sglang/sglang_model.py | 4 ++--
 src/lighteval/models/vllm/vllm_model.py     | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/lighteval/models/sglang/sglang_model.py b/src/lighteval/models/sglang/sglang_model.py
index 08346f32..2f758be6 100644
--- a/src/lighteval/models/sglang/sglang_model.py
+++ b/src/lighteval/models/sglang/sglang_model.py
@@ -216,14 +216,14 @@ def greedy_until(
             if max_new_tokens is not None:
                 if context_size + max_new_tokens > self.max_length:
                     logger.warning(
-                        f"{context_size + max_new_tokens=} which is greather than {self.max_length=}. Truncating context to {self.max_length - max_new_tokens} tokens."
+                        f"{context_size + max_new_tokens=} which is greater than {self.max_length=}. Truncating context to {self.max_length - max_new_tokens} tokens."
                     )
                     context_size = self.max_length - max_new_tokens
                     inputs = [input[-context_size:] for input in inputs]
             else:
                 if context_size > self.max_length:
                     logger.warning(
-                        f"{context_size=} which is greather than {self.max_length=}. Truncating context to {self.max_length} tokens."
+                        f"{context_size=} which is greater than {self.max_length=}. Truncating context to {self.max_length} tokens."
                     )
                     context_size = self.max_length
                     inputs = [input[-context_size:] for input in inputs]
diff --git a/src/lighteval/models/vllm/vllm_model.py b/src/lighteval/models/vllm/vllm_model.py
index c606c04e..f9898838 100644
--- a/src/lighteval/models/vllm/vllm_model.py
+++ b/src/lighteval/models/vllm/vllm_model.py
@@ -266,7 +266,7 @@ def greedy_until(
             if max_new_tokens is not None:
                 if context_size + max_new_tokens > self.max_length:
                     logger.warning(
-                        f"{context_size + max_new_tokens=} which is greather than {self.max_length=}. Truncating context to {self.max_length - max_new_tokens} tokens."
+                        f"{context_size + max_new_tokens=} which is greater than {self.max_length=}. Truncating context to {self.max_length - max_new_tokens} tokens."
                     )
                     context_size = self.max_length - max_new_tokens
                     if context_size < 0:
@@ -278,7 +278,7 @@ def greedy_until(
             else:
                 if context_size > self.max_length:
                     logger.warning(
-                        f"{context_size=} which is greather than {self.max_length=}. Truncating context to {self.max_length} tokens."
+                        f"{context_size=} which is greater than {self.max_length=}. Truncating context to {self.max_length} tokens."
                     )
                     context_size = self.max_length
                     inputs = [input[-context_size:] for input in inputs]
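
For reference, a minimal sketch of the truncation rule that the corrected warning messages describe, written as a hypothetical standalone helper (truncate_context is not lighteval's actual API): when a generation budget is set, the prompt is clipped from the left so that prompt tokens plus new tokens still fit in the model's context window.

from typing import Optional


def truncate_context(tokens: list[int], max_length: int, max_new_tokens: Optional[int] = None) -> list[int]:
    """Keep the rightmost tokens so the request fits within max_length (sketch, not lighteval's API)."""
    if max_new_tokens is not None:
        if len(tokens) + max_new_tokens > max_length:
            # Reserve room for the generation budget and keep the most recent context.
            # (The vllm backend in the patch additionally handles the case where the
            # budget alone exceeds the window, i.e. max_length - max_new_tokens < 0.)
            return tokens[-(max_length - max_new_tokens):]
    elif len(tokens) > max_length:
        return tokens[-max_length:]
    return tokens


# Example: a 10-token window with a 4-token generation budget keeps only the last 6 prompt tokens.
assert truncate_context(list(range(12)), max_length=10, max_new_tokens=4) == list(range(6, 12))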