From 6d08fe8713adefd29e1d248053d4927703ba81d7 Mon Sep 17 00:00:00 2001
From: Lewis Tunstall
Date: Wed, 10 Apr 2024 13:40:28 +0000
Subject: [PATCH] Refactor

---
 lmms_eval/constants.py       | 4 ----
 lmms_eval/models/llava_hf.py | 6 +++++-
 2 files changed, 5 insertions(+), 5 deletions(-)
 delete mode 100644 lmms_eval/constants.py

diff --git a/lmms_eval/constants.py b/lmms_eval/constants.py
deleted file mode 100644
index b7b0283e..00000000
--- a/lmms_eval/constants.py
+++ /dev/null
@@ -1,4 +0,0 @@
-DEFAULT_IMAGE_TOKEN = "<image>"
-
-# Default chat templates
-VICUNA_CHAT_TEMPLATE = "{% for message in messages %}{% if loop.index0 == 0 %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {{ message['content'] }} {% elif message['role'] == 'user' %}USER: {{ message['content'] }} {% else %} ASSISTANT: {{ message['content'] }}{{ eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}"
diff --git a/lmms_eval/models/llava_hf.py b/lmms_eval/models/llava_hf.py
index 4ec87556..e5a88da6 100644
--- a/lmms_eval/models/llava_hf.py
+++ b/lmms_eval/models/llava_hf.py
@@ -5,7 +5,6 @@
 from lmms_eval.api.instance import Instance
 from lmms_eval.api.model import lmms
 from lmms_eval.api.registry import register_model
-from lmms_eval.constants import DEFAULT_IMAGE_TOKEN, VICUNA_CHAT_TEMPLATE
 from accelerate import Accelerator, DistributedType
 from accelerate.state import AcceleratorState
 from typing import List, Optional, Union, Tuple
@@ -17,6 +16,11 @@
 
 eval_logger = logging.getLogger("lmms-eval")
 
+DEFAULT_IMAGE_TOKEN = "<image>"
+
+# Default chat for llava-hf/llava-1.5 models: https://huggingface.co/collections/llava-hf/llava-15-65f762d5b6941db5c2ba07e0
+VICUNA_CHAT_TEMPLATE = "{% for message in messages %}{% if loop.index0 == 0 %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {{ message['content'] }} {% elif message['role'] == 'user' %}USER: {{ message['content'] }} {% else %} ASSISTANT: {{ message['content'] }}{{ eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}"
+
 
 @register_model("llava_hf")
 class LlavaHf(lmms):