diff --git a/lmms_eval/api/samplers.py b/lmms_eval/api/samplers.py
index 6683bba96..e1a3dda21 100644
--- a/lmms_eval/api/samplers.py
+++ b/lmms_eval/api/samplers.py
@@ -118,7 +118,7 @@ def add_question(self, doc, data_frame=None, index=None):
         if visual:
             self.contexts.append(visual)
         self.contexts.append(QAPairs(question))
-        self.contexts.append(self.target_delimiter)
+        # self.contexts.append(self.target_delimiter)
 
     def get_text(self, *, image_tokens="", lazy=True):
         texts = []
diff --git a/lmms_eval/api/task.py b/lmms_eval/api/task.py
index cc3599622..0631fed9d 100644
--- a/lmms_eval/api/task.py
+++ b/lmms_eval/api/task.py
@@ -791,7 +791,7 @@ def fewshot_context(self, doc_id, num_fewshot, split):
         labeled_examples = Context(self, self.config.fewshot_delimiter, self.config.target_delimiter, self.config.description)
         if num_fewshot != 0:
             labeled_examples.extend(self.sampler.get_context(doc, num_fewshot))
-        labeled_examples.add_question(doc)
+        labeled_examples.add_question(doc, self.test_docs(), doc_id)
         return labeled_examples
 
     def apply_filters(self):
diff --git a/lmms_eval/models/llava.py b/lmms_eval/models/llava.py
index ea5e7d03e..a830fd8d9 100644
--- a/lmms_eval/models/llava.py
+++ b/lmms_eval/models/llava.py
@@ -349,6 +349,7 @@ def _collate(x):
                     answer = obj.answer
                     conv.append_message(conv.roles[0], question)
                     conv.append_message(conv.roles[1], answer)
+                num_image_tokens = 0
 
                 # conv.append_message(conv.roles[0], question)
                 # conv.append_message(conv.roles[1], None)
@@ -358,14 +359,17 @@ def _collate(x):
             # The above for loop has bugs. When there is no visuals, e.g. pure text,
             # there will be no for loop execute resulting in an empty question_input (because no visuals)
             # Scenario 1 won't even be execute
-            if len(flattened_visuals) == 0:
-                for context in contexts:
-                    question = context
-                    conv = conv_templates[self.conv_template].copy()
-                    conv.append_message(conv.roles[0], question)
-                    conv.append_message(conv.roles[1], None)
-                    prompt_question = conv.get_prompt()
-                    question_input.append(prompt_question)
+            # if len(flattened_visuals) == 0:
+            #     for context in contexts:
+            #         question = context
+            #         conv = conv_templates[self.conv_template].copy()
+            #         conv.append_message(conv.roles[0], question)
+            #         conv.append_message(conv.roles[1], None)
+            #         try:
+            #             prompt_question = conv.get_prompt()
+            #         except Exception as e:
+            #             pass
+            #         question_input.append(prompt_question)
 
             # input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(self.device)
 
             # preconfigure gen_kwargs with defaults
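
Note (not part of the patch): the task.py hunk changes the call site from add_question(doc) to add_question(doc, self.test_docs(), doc_id), which lines up with the data_frame/index parameters already present in Context.add_question in samplers.py. The sketch below is a minimal, hypothetical illustration of that calling pattern only; the class bodies, the dict-based doc, and the .get("image") lookup are stand-ins, not the real lmms_eval implementation (which resolves visuals through the task's doc_to_visual).

# Minimal sketch, assuming simplified stand-in classes for Context/QAPairs.
class QAPairs:
    def __init__(self, question):
        self.question = question


class Context:
    def __init__(self):
        self.contexts = []

    def add_question(self, doc, data_frame=None, index=None):
        question = doc["question"]
        # With the patched call site, the caller supplies the doc collection and
        # index, so the visual for the current doc can be looked up and placed
        # before the question. (Stand-in lookup; the real code uses doc_to_visual.)
        visual = data_frame[index].get("image") if data_frame is not None and index is not None else None
        if visual:
            self.contexts.append(visual)
        self.contexts.append(QAPairs(question))
        # Per the samplers.py hunk, target_delimiter is no longer appended here.


# Hypothetical usage mirroring the updated fewshot_context call:
docs = [{"question": "What is shown in the image?", "image": "<image-0>"}]
ctx = Context()
ctx.add_question(docs[0], docs, 0)  # was: ctx.add_question(docs[0])
print([c if isinstance(c, str) else type(c).__name__ for c in ctx.contexts])
# -> ['<image-0>', 'QAPairs']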