Skip to content

Commit cded0cd

Browse files
committed
Refine optional eval dependency
1 parent 322a75f commit cded0cd

File tree

2 files changed

+7
-5
lines changed

2 files changed

+7
-5
lines changed

src/endpoints/evaluation/lm_evaluation_harness.py

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -11,15 +11,13 @@
1111

1212
try:
1313
from lm_eval.__main__ import setup_parser as lm_eval_setup_parser
14-
available = True
14+
from fastapi_utils.tasks import repeat_every
1515
except ImportError:
16-
available = False
16+
raise ImportError("The TrustyAI service was not built with LM-Evaluation-Harness support, use `pip install .[eval]`")
1717

1818
from pydantic import BaseModel, create_model
1919

2020
from fastapi import HTTPException
21-
from fastapi_utils.tasks import repeat_every
22-
2321
from fastapi import APIRouter
2422
import subprocess
2523
import logging

src/main.py

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -25,7 +25,11 @@
2525
from src.endpoints.metrics.metrics_info import router as metrics_info_router
2626
from src.endpoints.data.data_download import router as data_download_router
2727

28-
from src.endpoints.evaluation.lm_evaluation_harness import router as lm_evaluation_harness_router, available as lm_evaluation_harness_available
28+
try:
29+
from src.endpoints.evaluation.lm_evaluation_harness import router as lm_evaluation_harness_router
30+
lm_evaluation_harness_available = True
31+
except ImportError:
32+
lm_evaluation_harness_available = False
2933

3034
logging.basicConfig(
3135
level=logging.DEBUG,

0 commit comments

Comments (0)