Commit cb387a7

Fix collator
1 parent ae17f63 commit cb387a7

File tree

1 file changed: +2 −2 lines changed


lmms_eval/models/chat/llava_hf.py

Lines changed: 2 additions & 2 deletions

@@ -44,12 +44,12 @@ def generate_until(self, requests: List[Instance]) -> List[str]:

         # A dummy collate here to sort by doc id
         def _collate(x):
-            return x[2], x[2]
+            return x[0], x[0]

         # we group requests by their generation_kwargs,
         # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
         # in the same batch.
-        re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
+        re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=False)
         chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
         num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
         pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
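For context, below is a minimal, self-contained sketch of what the collate key is used for: requests are reordered by the key that _collate returns, batched, and the results are later restored to the original request order. This is not the real utils.Collator; the ToyCollator class, the get_original helper, and the example args tuples are hypothetical illustrations, and the assumption that x[0] is the request's text context comes from the shape of the diff rather than from the library itself. The commit also switches grouping by generation kwargs off (grouping=False); the sketch omits grouping entirely.

from typing import Callable, List, Tuple


class ToyCollator:
    """Toy stand-in for a collator: sort requests by a collate key, batch them, then undo the sort."""

    def __init__(self, args_list: List[tuple], sort_fn: Callable[[tuple], Tuple]):
        self._original = list(enumerate(args_list))
        # Sort by the key produced from each request's args (here, the context string).
        self._sorted = sorted(self._original, key=lambda pair: sort_fn(pair[1]))

    def get_batched(self, n: int):
        # Yield batches of args tuples in sorted order.
        for i in range(0, len(self._sorted), n):
            yield [args for _, args in self._sorted[i : i + n]]

    def get_original(self, results: List[str]) -> List[str]:
        # Map results (produced in sorted order) back to the order the requests arrived in.
        restored: List[str] = [""] * len(results)
        for (orig_idx, _), res in zip(self._sorted, results):
            restored[orig_idx] = res
        return restored


def _collate(x):
    # After the fix: key on x[0], assumed here to be the request's text context.
    # Keying on x[2], as before the fix, would sort by an unrelated element of the
    # args tuple, which may not even be a meaningful (or comparable) sort key.
    return x[0], x[0]


if __name__ == "__main__":
    requests_args = [
        ("what is shown in the image?", {"max_new_tokens": 32}),
        ("describe the chart.", {"max_new_tokens": 32}),
        ("caption this photo.", {"max_new_tokens": 32}),
    ]
    collator = ToyCollator(requests_args, _collate)
    # Fake "model responses" produced in sorted order, then restored to input order.
    fake_results = [args[0].upper() for batch in collator.get_batched(n=2) for args in batch]
    print(collator.get_original(fake_results))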
