
Commit

Merge pull request #251 from EvolvingLMMs-Lab/fix/pbar
[Fix] Bring back process result pbar
Luodian authored Sep 14, 2024
2 parents b993ef9 + 8d8d692 commit 9877ddf
Showing 1 changed file with 6 additions and 0 deletions.
lmms_eval/evaluator.py (6 additions, 0 deletions)

@@ -477,6 +477,9 @@ def evaluate(
         # iterate over different filters used
         for filter_key in task.instances[0].filtered_resps.keys():
             doc_iterator = task.doc_iterator(rank=RANK, limit=limit, world_size=WORLD_SIZE)
+            doc_iterator_for_counting = itertools.islice(range(len(task.test_docs())), RANK, limit, WORLD_SIZE) if task.has_test_docs() else itertools.islice(range(len(task.validation_docs())), RANK, limit, WORLD_SIZE)
+            total_docs = sum(1 for _ in doc_iterator_for_counting)
+            pbar = tqdm(total=total_docs, desc=f"Postprocessing", disable=(RANK != 0))
             for doc_id, doc in doc_iterator:
                 requests = instances_by_doc_id[doc_id]
                 metrics = task.process_results(doc, [req.filtered_resps[filter_key] for req in requests])
@@ -514,6 +517,9 @@ def evaluate(
                     task_output.logged_samples.append(example)
                 for metric, value in metrics.items():
                     task_output.sample_metrics[(metric, filter_key)].append(value)
+                pbar.update(1)
+
+            pbar.close()

     if WORLD_SIZE > 1:
         # if multigpu, then gather data across all ranks to rank 0
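The counting logic added in the first hunk mirrors the rank/limit/world-size slicing that doc_iterator performs, but over bare document indices, so total_docs gives each rank an exact tqdm total without consuming the real iterator. A minimal standalone sketch of that slicing (not part of the commit; the document count, limit, and world size below are made-up values for illustration):

    # Standalone sketch: how islice(range(n), RANK, limit, WORLD_SIZE) splits
    # document indices across ranks, mirroring the committed counting expression.
    # num_docs, limit, and WORLD_SIZE are hypothetical values, not from the commit.
    import itertools

    num_docs = 10    # stand-in for len(task.test_docs())
    limit = None     # as in evaluate(): optional cap on documents, None means all
    WORLD_SIZE = 3   # number of distributed ranks

    for RANK in range(WORLD_SIZE):
        doc_iterator_for_counting = itertools.islice(range(num_docs), RANK, limit, WORLD_SIZE)
        total_docs = sum(1 for _ in doc_iterator_for_counting)
        print(f"rank {RANK}: {total_docs} docs")  # -> 4, 3, 3

Counting over a throwaway islice keeps doc_iterator itself untouched; disable=(RANK != 0) then ensures only rank 0 actually renders the bar.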
