Commit 1b8b217

fix lm_eval import in eval_hf_models.py (#2674)
Update [ghstack-poisoned]
1 parent 2e361d7 commit 1b8b217

File tree

1 file changed: +3 −2 lines


benchmarks/_models/eval_hf_models.py

Lines changed: 3 additions & 2 deletions
@@ -13,7 +13,6 @@
 
 from benchmarks.microbenchmarks.utils import string_to_config
 from torchao.quantization import *  # noqa: F401, F403
-from torchao.quantization.utils import _lm_eval_available
 
 
 def quantize_model_and_save(model_id, quant_config, output_dir="results"):
@@ -113,7 +112,9 @@ def run(
 
 
 if __name__ == "__main__":
-    if not _lm_eval_available:
+    try:
+        import lm_eval  # noqa: F401
+    except:
         print(
             "lm_eval is required to run this script. Please install it using pip install lm-eval."
         )
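
For context, a minimal sketch of the pattern this commit switches to: probing for the optional lm_eval dependency with a direct import instead of the removed _lm_eval_available flag. The narrower except ImportError and the explicit sys.exit(1) below are illustrative assumptions; the committed hunk uses a bare except: and only the print call is visible in the diff.

# Sketch of an optional-dependency guard. Assumptions: "except ImportError"
# and "sys.exit(1)" are added here for illustration; the actual commit uses a
# bare "except:" and the diff does not show what follows the print call.
import sys

if __name__ == "__main__":
    try:
        import lm_eval  # noqa: F401  # imported only to confirm it is installed
    except ImportError:
        print(
            "lm_eval is required to run this script. Please install it using pip install lm-eval."
        )
        sys.exit(1)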
