
Commit

Fix issue for openai evaluation.
JoelNiklaus committed Nov 25, 2024
1 parent f74afd4 commit 88a9838
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions src/lighteval/models/litellm_model.py
@@ -25,11 +25,10 @@
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

-from transformers import AutoTokenizer
-
from tqdm import tqdm
+from transformers import AutoTokenizer

-from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset
+from lighteval.data import GenerativeTaskDataset
from lighteval.logging.hierarchical_logger import hlog_warn
from lighteval.models.abstract_model import LightevalModel
from lighteval.models.endpoint_model import ModelInfo
@@ -49,10 +48,10 @@

if is_litellm_available():
    import logging

    import litellm
    from litellm.caching.caching import Cache


    logging.getLogger("litellm").setLevel(logging.ERROR)
    logging.getLogger("httpx").setLevel(logging.ERROR)

@@ -82,6 +81,7 @@ def __init__(self, config, env_config) -> None:
        self.model = config.model
        self._tokenizer = AutoTokenizer.from_pretrained("gpt2")  # Use a dummy tokenizer for compatibility
        self.pairwise_tokenization = False
+       litellm.drop_params = True

    def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, logit_bias):
        for _ in range(self.API_MAX_RETRY):
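Note on the fix: the functional change is the added litellm.drop_params = True (the import moves above are only formatting). LiteLLM exposes this module-level flag so that request parameters the selected provider does not accept (for example, logit_bias or certain sampling arguments on some OpenAI models) are silently dropped instead of raising an error, which is presumably what broke the OpenAI evaluation path. Below is a minimal sketch of the behaviour, independent of the lighteval code; the model name and parameters are illustrative assumptions, not taken from this repository.

import litellm

# With drop_params enabled, LiteLLM strips request parameters that the
# selected provider/model does not support instead of raising an error.
litellm.drop_params = True

# Hypothetical call for illustration only; model and parameters are assumptions.
response = litellm.completion(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello."}],
    logit_bias={},  # dropped if the target endpoint rejects it
    max_tokens=32,
)
print(response.choices[0].message.content)

LiteLLM also supports passing drop_params per call to litellm.completion, if a global flag is not desired.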
