
Merge pull request #10 from TianyiQ/main
fix(evaluations): resolve minor naming and type issues
TianyiQ authored Oct 22, 2024
2 parents a45f2cf + eb09b25 commit 4242dd8
Showing 4 changed files with 20 additions and 17 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -5,6 +5,7 @@
logs/
**/dataset/dataset_text_sequence/C*/
**/dataset/dataset_model_sequence/C*/
+**/dataset/dataset_model_sequence/*C0*/
**/undated.json
**/nltk_data/
output/
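For context, the existing `C*/` rules only ignore directories whose names start with `C`, while the added `*C0*/` pattern also catches directories that merely contain `C0`, such as checkpoints named after the `8B-C0xx-instruct` models renamed later in this diff. A small sketch of the glob behavior (the directory names are hypothetical; `fnmatch` mirrors gitignore's `*` here because none of the names contain a path separator):

```python
from fnmatch import fnmatch

# Hypothetical directory names under dataset/dataset_model_sequence/.
dirs = ["C013-instruct", "8B-C013-instruct", "baseline"]
for d in dirs:
    print(d, "C*:", fnmatch(d, "C*"), "*C0*:", fnmatch(d, "*C0*"))
# C013-instruct    -> matched by both patterns
# 8B-C013-instruct -> matched only by the new *C0*/ pattern
# baseline         -> matched by neither
```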
4 changes: 3 additions & 1 deletion src/abstractions/backends.py
@@ -233,7 +233,9 @@ def start_inference_backend(
:return: A tuple containing the backend process and the function to process a batch of samples (type signature: List[dict] -> List[dict], with optional metadata arguments)
:rtype: Tuple[subprocess.Popen, Callable]
"""

+    if eval(os.environ.get("LOUD_BACKEND", "0")):
+        silent = False

if num_gpus is None:
num_gpus = torch.cuda.device_count()

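The added branch makes LOUD_BACKEND an environment override for the backend's `silent` flag. A minimal sketch of the same toggle, assuming `silent` defaults to True inside `start_inference_backend`; a plain string comparison stands in for `eval` on the environment value:

```python
import os

def resolve_silent(silent: bool = True) -> bool:
    """Return the effective `silent` flag, honoring a LOUD_BACKEND override."""
    # Same spirit as the added `if eval(os.environ.get("LOUD_BACKEND", "0")):` branch,
    # but without eval(): any value other than "0" or "" switches verbosity on.
    if os.environ.get("LOUD_BACKEND", "0") not in ("0", ""):
        silent = False
    return silent

# Usage: run with LOUD_BACKEND=1 in the environment to see the backend's output.
```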
28 changes: 14 additions & 14 deletions src/evaluation/test_eval_01.py
@@ -1,31 +1,31 @@
-from ..abstractions.model import Model
-from utils import generate_alpaca
+from ..abstractions import Model
+from .utils import generate_alpaca
import os, json
from multiprocessing import freeze_support
-import quantify as qt
+from . import quantify as qt

"""
generate_alpaca('mc', os.path.join('src', 'evaluation', 'raw_dataset', 'moralchoice'))
generate_alpaca('views', os.path.join('src', 'evaluation', 'raw_dataset', 'views'))
generate_alpaca('foundation', os.path.join('src', 'evaluation', 'raw_dataset', 'foundation'))
"""
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICE"] = "0,1,2,3,4,6"

freeze_support()
set_model = [
"8b-C013-instruct",
"8b-C014-instruct",
"8b-C015-instruct",
"8b-C016-instruct",
"8b-C017-instruct",
"8b-C018-instruct",
"8b-C019-instruct",
"8b-C020-instruct",
"8b-C021-instruct",
"8B-C013-instruct",
"8B-C014-instruct",
"8B-C015-instruct",
"8B-C016-instruct",
"8B-C017-instruct",
"8B-C018-instruct",
"8B-C019-instruct",
"8B-C020-instruct",
"8B-C021-instruct",
]
vec = []
for m in set_model:
-        boi = Model(m, num_gpus=4)
+        boi = Model(m)
v = boi.evaluate(method="fast")
# v = qt.calculate_model('output/evaluation_results/' + m + '_single/', m)
vec.append(v)
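Since the script now uses package-relative imports (`from .utils ...`, `from . import quantify ...`), it has to run as part of its package rather than as a standalone file. A hypothetical launcher, inferred from the file paths in this diff (note that the standard GPU-selection variable is the plural `CUDA_VISIBLE_DEVICES`):

```python
import os
import runpy

# Restrict visible GPUs before any CUDA library gets imported by the script.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,6"

# Run the evaluation script as a module (roughly `python -m src.evaluation.test_eval_01`)
# so that its relative imports resolve; running the file directly would raise
# "ImportError: attempted relative import with no known parent package".
runpy.run_module("src.evaluation.test_eval_01", run_name="__main__")
```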
4 changes: 2 additions & 2 deletions src/evaluation/utils.py
@@ -343,7 +343,7 @@ def generate_alpaca(source: str, dir: str, rearrange = True):

for key, boi in context_and_action.items():
for mapping in mappings:
-            rearranged_actions = [boi["action" + str(mapping[x])] for x in mapping]
+            rearranged_actions = [boi["action" + str(mapping[x])] for x in range(len(mapping))]
boi_ab = {
"scenario_id": boi["scenario_id"],
"question_type": "ab",
@@ -439,7 +439,7 @@ def generate_alpaca(source: str, dir: str, rearrange = True):

for key, boi in context_and_action.items():
for mapping in mappings:
-            rearranged_actions = [boi["action" + str(mapping[x])] for x in mapping]
+            rearranged_actions = [boi["action" + str(mapping[x])] for x in range(len(mapping))]
boi_ab_f = {
"scenario_id": boi["scenario_id"],
"question_type": "4c_fav",
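Both hunks in this file apply the same fix: `mapping` is a permutation of action indices, so the comprehension should iterate over positions rather than over the permutation's own values. A self-contained illustration (the action keys and permutations are hypothetical, modeled on the `boi["action" + str(mapping[x])]` lookups above):

```python
from itertools import permutations

# Hypothetical scenario entry with 1-based action keys.
boi = {"action1": "refuse", "action2": "comply", "action3": "deflect"}

for mapping in permutations((1, 2, 3)):
    # Fixed version: iterate over positions 0..len-1, applying the permutation once.
    rearranged = [boi["action" + str(mapping[x])] for x in range(len(mapping))]
    print(mapping, rearranged)

# The previous `for x in mapping` indexed the tuple with its own 1-based values
# (mapping[1], mapping[2], mapping[3]), re-applying the permutation and raising
# IndexError on mapping[3] for a length-3 tuple.
```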
