From ed381736730d8fb785b4ee919fdb751734ecef25 Mon Sep 17 00:00:00 2001
From: kcz358
Date: Mon, 1 Jul 2024 06:06:38 +0000
Subject: [PATCH 1/5] Add wild vision 0617

---
 .../wild_vision_bench/_default_template_yaml |  23 +++
 lmms_eval/tasks/wild_vision_bench/utils.py   | 168 ++++++++++++++++++
 .../wild_vision_bench0617.yaml               |   9 +
 3 files changed, 200 insertions(+)
 create mode 100644 lmms_eval/tasks/wild_vision_bench/_default_template_yaml
 create mode 100644 lmms_eval/tasks/wild_vision_bench/utils.py
 create mode 100644 lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml

diff --git a/lmms_eval/tasks/wild_vision_bench/_default_template_yaml b/lmms_eval/tasks/wild_vision_bench/_default_template_yaml
new file mode 100644
index 00000000..7ce709dc
--- /dev/null
+++ b/lmms_eval/tasks/wild_vision_bench/_default_template_yaml
@@ -0,0 +1,23 @@
+dataset_path: WildVision/wildvision-arena-data
+dataset_kwargs:
+  token: True
+output_type: generate_until
+doc_to_visual: !function utils.wild_vision_doc_to_visual
+doc_to_text: !function utils.wild_vision_doc_to_text
+doc_to_target: !function utils.wild_vision_doc_to_target
+generation_kwargs:
+  max_new_tokens: 4096
+  temperature: 0
+  top_p: 1.0
+  num_beams: 1
+  do_sample: false
+# The return value of process_results will be used by metrics
+process_results: !function utils.wild_vision_process_results
+# Note that the metric name can be either a registered metric function (as is the case for GQA) or a key name returned by process_results
+metric_list:
+  - metric: gpt_eval_score
+    aggregation: !function utils.wild_vision_aggregation
+    higher_is_better: true
+metadata:
+  judge_model: gpt-4o
+  baseline_model: claude-3-sonnet-20240229

diff --git a/lmms_eval/tasks/wild_vision_bench/utils.py b/lmms_eval/tasks/wild_vision_bench/utils.py
new file mode 100644
index 00000000..747a4bb6
--- /dev/null
+++ b/lmms_eval/tasks/wild_vision_bench/utils.py
@@ -0,0 +1,168 @@
+import json
+
+import os
+import requests
+import numpy as np
+import time
+import yaml
+from pathlib import Path
+from copy import deepcopy
+from io import BytesIO
+import base64
+
+from loguru import logger as eval_logger
+
+NUM_SECONDS_TO_SLEEP = 5
+
+
+with open(Path(__file__).parent / "_default_template_yaml", "r") as f:
+    raw_data = f.readlines()
+    safe_data = []
+    for i, line in enumerate(raw_data):
+        # remove function definition since yaml load cannot handle it
+        if "!function" not in line:
+            safe_data.append(line)
+
+    config = yaml.safe_load("".join(safe_data))
+
+GPT_EVAL_MODEL_NAME = config["metadata"]["judge_model"]
+BASELINE_MODEL_NAME = config["metadata"]["baseline_model"]
+
+API_TYPE = os.getenv("API_TYPE", "openai")
+
+if API_TYPE == "openai":
+    API_URL = os.getenv("OPENAI_API_URL", "https://api.openai.com/v1/chat/completions")
+    API_KEY = os.getenv("OPENAI_API_KEY", "YOUR_API_KEY")
+    headers = {
+        "Authorization": f"Bearer {API_KEY}",
+        "Content-Type": "application/json",
+    }
+elif API_TYPE == "azure":
+    API_URL = os.getenv("AZURE_ENDPOINT", "https://api.cognitive.microsoft.com/sts/v1.0/issueToken")
+    API_KEY = os.getenv("AZURE_API_KEY", "YOUR_API_KEY")
+    headers = {
+        "api-key": API_KEY,
+        "Content-Type": "application/json",
+    }
+
+system_prompt = """\
+Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.
+
+Begin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.
+
+When evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.
+
+Then consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.
+
+Then consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.
+
+After providing your explanation, you must output only one of the following choices as your final verdict with a label:
+
+1. Assistant A is significantly better: [[A>>B]]
+2. Assistant A is slightly better: [[A>B]]
+3. Tie, relatively the same: [[A=B]]
+4. Assistant B is slightly better: [[B>A]]
+5. Assistant B is significantly better: [[B>>A]]
+
+Example output: "My final verdict is tie: [[A=B]]".\
+"""
+
+prompt_template = "<|User Prompt|>\n{question_1}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>"
+
+def get_chat_response(base64_image, prompt, max_retries=5, wait_time=10):
+    headers = {
+        "Authorization": f"Bearer {API_KEY}",
+        "Content-Type": "application/json",
+    }
+
+    payload = {
+        "model": GPT_EVAL_MODEL_NAME,
+        "messages": [
+            {"role": "system", "content": system_prompt},
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": prompt},
+                    {
+                        "type": "image_url",
+                        "image_url": f"data:image/jpeg;base64,{base64_image}",
+                    },
+                ],
+            }
+        ],
+        "max_tokens": 1024,
+        "temperature": 0.0,
+    }
+
+    for attempt in range(max_retries):
+        try:
+            response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
+            response.raise_for_status()
+            response_data = response.json()
+            return response_data["choices"][0]["message"]["content"], GPT_EVAL_MODEL_NAME
+        except requests.exceptions.RequestException as e:
+            eval_logger.warning(f"Request failed on attempt {attempt+1}: {e}")
+            time.sleep(wait_time)
+            if attempt == max_retries - 1:
+                eval_logger.error(f"Failed to get response after {max_retries} attempts")
+                return "", GPT_EVAL_MODEL_NAME
+        except Exception as e:
+            eval_logger.error(f"Error on attempt {attempt+1}: {e}")
+            return "", GPT_EVAL_MODEL_NAME
+
+
+def image_to_base64(pil_image):
+    buffered = BytesIO()
+    pil_image.save(buffered, format="PNG")
+    return base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+def get_score(judgement, pattern, pairwise=True):
+    matches = pattern.findall(judgement)
+    matches = [m for m in matches if m != ""]
+    if len(set(matches)) == 0:
+        return None, True
+    elif len(set(matches)) == 1:
+        if pairwise:
+            return matches[0].strip("\n"), False
+        return int(matches[0])
+    else:
+        return None, False
+
+def wild_vision_doc_to_visual(doc):
+    return [doc["image"].convert('RGB')]
+
+
+def wild_vision_doc_to_text(doc, model_specific_prompt_kwargs=None):
+    question = doc["instruction"].strip()
+    if "pre_prompt" in model_specific_prompt_kwargs and model_specific_prompt_kwargs["pre_prompt"] != "":
+        question = f"{model_specific_prompt_kwargs['pre_prompt']}{question}"
+    if "post_prompt" in model_specific_prompt_kwargs and model_specific_prompt_kwargs["post_prompt"] != "":
+        question = f"{question}{model_specific_prompt_kwargs['post_prompt']}"
+    return question
+
+def wild_vision_doc_to_target(doc):
+    return doc[BASELINE_MODEL_NAME]
+
+
+def wild_vision_process_results(doc, results):
+    pred = results[0]
+    user_prompt = prompt_template.format(question_1=doc["instruction"], answer_1=doc[BASELINE_MODEL_NAME], answer_2=pred)
+    base64_image = image_to_base64(doc["image"])
+    resps, gpt_name = get_chat_response(base64_image, user_prompt)
+    score, _ = get_score(resps, pattern="\[\[([AB<>=]+)\]\]")
+    try:
+        score = int(score)
+    except:
+        score = 0
+
+    return {"gpt_eval_score" : {"question" : doc["instruction"], "score" : score, "gpt_resps" : resps, "ans_1" : doc[BASELINE_MODEL_NAME], "ans_2" : pred}}
+
+
+def wild_vision_aggregation(results):
+    score = 0
+    for res in results:
+        score += res["score"]
+
+    return score / len(results)
+
+

diff --git a/lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml b/lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml
new file mode 100644
index 00000000..67ce4c78
--- /dev/null
+++ b/lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml
@@ -0,0 +1,9 @@
+task: wildvision_0617
+dataset_name: release_bench_0617_with_modeljudgement
+test_split: test500
+output_type: generate_until
+include: _default_template_yaml
+model_specific_prompt_kwargs:
+  default:
+    pre_prompt: ""
+    post_prompt: ""
\ No newline at end of file
From e2990d0a69e876721256fdf946c68ba7ae0cbdc1 Mon Sep 17 00:00:00 2001
From: kcz358
Date: Mon, 1 Jul 2024 06:06:57 +0000
Subject: [PATCH 2/5] Hardcode to keep image for wild vision

---
 lmms_eval/evaluator.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/lmms_eval/evaluator.py b/lmms_eval/evaluator.py
index 0104b01a..6788467e 100755
--- a/lmms_eval/evaluator.py
+++ b/lmms_eval/evaluator.py
@@ -325,7 +325,12 @@ def evaluate(
         # hack: remove image columns to avoid loading images and speed up postprocessing
         # reason: doc_iterator will actually load image if it's in the doc.
         docs = task.test_docs() if task.has_test_docs() else task.validation_docs()
-        if "d170" not in task_name and "dc100" not in task_name and "dc200" not in task_name and "llava_wilder" not in task_name and "livebench" not in task_name:
+        if "d170" not in task_name \
+            and "dc100" not in task_name \
+            and "dc200" not in task_name \
+            and "llava_wilder" not in task_name \
+            and "livebench" not in task_name \
+            and "wildvision" not in task_name:
             remove_cols = []
             features = docs.features
             # If it is an Image instance or a Sequence of Image instance. Remove it
From 8d963e132ac03fc0d835d480cfcfcabe72af143c Mon Sep 17 00:00:00 2001
From: kcz358
Date: Mon, 1 Jul 2024 08:24:51 +0000
Subject: [PATCH 3/5] Fixing scoring logic

---
 lmms_eval/tasks/wild_vision_bench/utils.py | 47 +++++++++++++++-------
 1 file changed, 32 insertions(+), 15 deletions(-)

diff --git a/lmms_eval/tasks/wild_vision_bench/utils.py b/lmms_eval/tasks/wild_vision_bench/utils.py
index 747a4bb6..6841d793 100644
--- a/lmms_eval/tasks/wild_vision_bench/utils.py
+++ b/lmms_eval/tasks/wild_vision_bench/utils.py
@@ -1,5 +1,5 @@
 import json
-
+import re
 import os
 import requests
 import numpy as np
@@ -78,14 +78,15 @@ def get_chat_response(base64_image, prompt, max_retries=5, wait_time=10):
     payload = {
         "model": GPT_EVAL_MODEL_NAME,
         "messages": [
-            {"role": "system", "content": system_prompt},
+            {"role": "system", "content": [{"type": "text", "text": system_prompt}]},
             {
                 "role": "user",
                 "content": [
                     {"type": "text", "text": prompt},
-                    {
-                        "type": "image_url",
-                        "image_url": f"data:image/jpeg;base64,{base64_image}",
+                    {"type": "image_url",
+                    "image_url" : {
+                        "url" : f"data:image/jpeg;base64, {base64_image}"
+                    }
                     },
                 ],
             }
@@ -101,16 +102,16 @@ def get_chat_response(base64_image, prompt, max_retries=5, wait_time=10):
             response_data = response.json()
            return response_data["choices"][0]["message"]["content"], GPT_EVAL_MODEL_NAME
         except requests.exceptions.RequestException as e:
-            eval_logger.warning(f"Request failed on attempt {attempt+1}: {e}")
-            time.sleep(wait_time)
+            print(f"Request failed on attempt {attempt+1}: {e}")
             if attempt == max_retries - 1:
-                eval_logger.error(f"Failed to get response after {max_retries} attempts")
+                print(f"Failed to get response after {max_retries} attempts")
                 return "", GPT_EVAL_MODEL_NAME
         except Exception as e:
-            eval_logger.error(f"Error on attempt {attempt+1}: {e}")
+            print(f"Error on attempt {attempt+1}: {e}")
             return "", GPT_EVAL_MODEL_NAME
 
 
+
 def image_to_base64(pil_image):
     buffered = BytesIO()
     pil_image.save(buffered, format="PNG")
@@ -149,13 +150,29 @@ def wild_vision_process_results(doc, results):
     user_prompt = prompt_template.format(question_1=doc["instruction"], answer_1=doc[BASELINE_MODEL_NAME], answer_2=pred)
     base64_image = image_to_base64(doc["image"])
     resps, gpt_name = get_chat_response(base64_image, user_prompt)
-    score, _ = get_score(resps, pattern="\[\[([AB<>=]+)\]\]")
-    try:
-        score = int(score)
-    except:
-        score = 0
+    score, _ = get_score(resps, pattern=re.compile("\[\[([AB<>=]+)\]\]"))
+
+    if "A>B" in score:
+        final_score = -1
+        judgement = "Worse" #Baseline better
+    elif "A>>B" in score:
+        final_score = -2
+        judgement = "Worse++"
+    elif "A=B" in score:
+        final_score = 0
+        judgement = "Tie"
+    elif "B>A" in score:
+        final_score = 1
+        judgement = "Better"
+    elif "B>>A" in score:
+        final_score = 2
+        judgement = "Better++"
+    else:
+        final_score = 0
+        judgement = "Unclear"
+
 
-    return {"gpt_eval_score" : {"question" : doc["instruction"], "score" : score, "gpt_resps" : resps, "ans_1" : doc[BASELINE_MODEL_NAME], "ans_2" : pred}}
+    return {"gpt_eval_score" : {"question" : doc["instruction"], "score" : final_score, "gpt_resps" : resps, "ans_1" : doc[BASELINE_MODEL_NAME], "ans_2" : pred, "filtered_resps" : score, "judgement" : judgement}}
 
 
 def wild_vision_aggregation(results):
From 725fac2781446958b905e1e6c6eb3c0a8e582e49 Mon Sep 17 00:00:00 2001
From: kcz358
Date: Mon, 1 Jul 2024 08:25:42 +0000
Subject: [PATCH 4/5] Fixing dataset name

---
 lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml b/lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml
index 67ce4c78..93576768 100644
--- a/lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml
+++ b/lmms_eval/tasks/wild_vision_bench/wild_vision_bench0617.yaml
@@ -1,9 +1,9 @@
 task: wildvision_0617
-dataset_name: release_bench_0617_with_modeljudgement
+dataset_name: release_bench_0617_with_modelresponse
 test_split: test500
 output_type: generate_until
 include: _default_template_yaml
 model_specific_prompt_kwargs:
   default:
     pre_prompt: ""
-    post_prompt: ""
\ No newline at end of file
+    post_prompt: ""
From 79514eeebcfd6f655be2a10c776037d12a7b7214 Mon Sep 17 00:00:00 2001
From: kcz358
Date: Mon, 1 Jul 2024 15:10:02 +0000
Subject: [PATCH 5/5] Fixing handling None filtered score

---
 lmms_eval/tasks/wild_vision_bench/utils.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lmms_eval/tasks/wild_vision_bench/utils.py b/lmms_eval/tasks/wild_vision_bench/utils.py
index 6841d793..bb426557 100644
--- a/lmms_eval/tasks/wild_vision_bench/utils.py
+++ b/lmms_eval/tasks/wild_vision_bench/utils.py
@@ -152,6 +152,9 @@ def wild_vision_process_results(doc, results):
     resps, gpt_name = get_chat_response(base64_image, user_prompt)
     score, _ = get_score(resps, pattern=re.compile("\[\[([AB<>=]+)\]\]"))
 
+    if score is None:
+        score = resps
+
     if "A>B" in score:
         final_score = -1
         judgement = "Worse" #Baseline better
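For readers skimming the series, the scoring scheme that patches 3 and 5 converge on can be summarised with a short, self-contained sketch. The helper below is illustrative only: the names VERDICT_TO_SCORE and map_verdict_to_score are not part of the patch, and it simplifies get_score by taking the first regex match instead of rejecting ambiguous responses. It shows how the judge's pairwise verdict against the claude-3-sonnet baseline (assistant A) is turned into a score in [-2, 2], which wild_vision_aggregation then averages over the test set.

import re
from statistics import mean

# Verdict labels emitted by the judge prompt, mapped to scores relative to the
# baseline model (assistant A = baseline, assistant B = the evaluated model).
VERDICT_TO_SCORE = {
    "A>>B": (-2, "Worse++"),
    "A>B": (-1, "Worse"),
    "A=B": (0, "Tie"),
    "B>A": (1, "Better"),
    "B>>A": (2, "Better++"),
}

VERDICT_PATTERN = re.compile(r"\[\[([AB<>=]+)\]\]")


def map_verdict_to_score(judgement):
    # Pull the bracketed verdict out of the judge's free-form explanation.
    matches = VERDICT_PATTERN.findall(judgement)
    if not matches:
        return 0, "Unclear"
    # The real get_score also flags responses with conflicting verdicts; this
    # sketch simply uses the first match.
    return VERDICT_TO_SCORE.get(matches[0], (0, "Unclear"))


if __name__ == "__main__":
    print(map_verdict_to_score("My final verdict is tie: [[A=B]]"))         # (0, 'Tie')
    print(map_verdict_to_score("Assistant B is slightly better: [[B>A]]"))  # (1, 'Better')
    # Aggregation is the arithmetic mean of per-example scores, so the task
    # metric lies in [-2, 2], with 0 meaning parity with the baseline.
    print(mean([0, 1, 2, -1]))  # 0.5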