@@ -5,7 +5,7 @@
 import sys
 from dataclasses import dataclass
 from pathlib import Path
-from typing import TYPE_CHECKING, Any
+from typing import Any
 
 import pytest
 from dirty_equals import HasRepr
@@ -16,17 +16,13 @@
 from .utils import render_table
 
 with try_import() as imports_successful:
+    import logfire
+    from logfire.testing import CaptureLogfire
+
     from pydantic_evals import Case, Dataset
     from pydantic_evals.dataset import increment_eval_metric, set_eval_attribute
     from pydantic_evals.evaluators import EvaluationResult, Evaluator, EvaluatorOutput, LLMJudge, Python
     from pydantic_evals.evaluators.context import EvaluatorContext
-    from pydantic_evals.reporting import ReportCase
-
-pytestmark = [pytest.mark.skipif(not imports_successful(), reason='pydantic-evals not installed'), pytest.mark.anyio]
-
-if TYPE_CHECKING or imports_successful():
-    import logfire
-    from logfire.testing import CaptureLogfire
 
     @dataclass
     class MockEvaluator(Evaluator[object, object, object]):
@@ -37,6 +33,10 @@ class MockEvaluator(Evaluator[object, object, object]):
         def evaluate(self, ctx: EvaluatorContext[object, object, object]) -> EvaluatorOutput:
             return self.output
 
+    from pydantic_evals.reporting import ReportCase
+
+pytestmark = [pytest.mark.skipif(not imports_successful(), reason='pydantic-evals not installed'), pytest.mark.anyio]
+
 
 if sys.version_info < (3, 11):
     from exceptiongroup import ExceptionGroup