
Commit 51135fd: move torchtune imports inside VLMEvalWrapper

Author: anirudh
Parent: 842be23


torchchat/usages/eval.py (1 file changed: 14 additions, 12 deletions)
@@ -36,18 +36,6 @@
 from lm_eval.models.hf_vlms import HFMultimodalLM
 from lm_eval.models.huggingface import HFLM as eval_wrapper
 from lm_eval.tasks import get_task_dict
-from torchtune import utils
-from torchtune.data import (
-    format_content_with_images,
-    left_pad_sequence,
-    Message,
-    padded_collate_tiled_images_and_mask,
-)
-from torchtune.generation import generate, sample
-
-from torchtune.modules.common_utils import local_kv_cache
-from torchtune.modules.model_fusion import DeepFusionModel
-from torchtune.modules.transforms import Transform
 
 
 def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
@@ -209,6 +197,20 @@ class VLMEvalWrapper(HFMultimodalLM):
         the max number of images in MMMU.
     """
 
+    # Having the imports here allow running other evals without installing torchtune
+    from torchtune import utils
+    from torchtune.data import (
+        format_content_with_images,
+        left_pad_sequence,
+        Message,
+        padded_collate_tiled_images_and_mask,
+    )
+    from torchtune.generation import generate, sample
+
+    from torchtune.modules.common_utils import local_kv_cache
+    from torchtune.modules.model_fusion import DeepFusionModel
+    from torchtune.modules.transforms import Transform
+
     def __init__(
         self,
         model: DeepFusionModel,
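
The added comment states the intent: keep the torchtune imports off the module's top level so that evals which do not need torchtune can run without it installed. Below is a minimal, self-contained sketch of that deferred-import idea. The wrapper names and the method-scope placement are illustrative assumptions, not torchchat code (the commit itself places the imports at class-body scope inside VLMEvalWrapper); only the torchtune import path is taken from the diff.

# Sketch only: hypothetical module illustrating deferred imports of an
# optional dependency (torchtune). Not part of torchchat.

class TextEvalWrapper:
    """Text-only eval path; importable and usable without torchtune."""

    def evaluate(self, prompts):
        # Stand-in metric so the example runs without any extra packages.
        return [len(p) for p in prompts]


class MultimodalEvalWrapper:
    """Multimodal eval path; torchtune is imported only when this is used."""

    def __init__(self, model):
        # The import executes at construction time, so a missing torchtune
        # only breaks this code path, not the whole module.
        try:
            from torchtune.modules.model_fusion import DeepFusionModel
        except ImportError as e:
            raise RuntimeError(
                "Multimodal evals require torchtune; install it first"
            ) from e
        if not isinstance(model, DeepFusionModel):
            raise TypeError("expected a torchtune DeepFusionModel")
        self.model = model

With this layout, importing the module and calling TextEvalWrapper().evaluate(...) succeeds on a machine without torchtune; only constructing MultimodalEvalWrapper requires it.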

Comments (0)