
Commit 25b4ed1

evals: error when opentelemetry-sdk is not installed (#1403)
1 parent: 84c6c5d

File tree

2 files changed (+9, -9 lines)

pydantic_evals/pydantic_evals/dataset.py

Lines changed: 1 addition & 1 deletion

@@ -39,7 +39,7 @@
 from .evaluators.common import DEFAULT_EVALUATORS
 from .evaluators.context import EvaluatorContext
 from .otel import SpanTree
-from .otel._context_in_memory_span_exporter import context_subtree
+from .otel._context_subtree import context_subtree
 from .reporting import EvaluationReport, ReportCase

 if sys.version_info < (3, 11):  # pragma: no cover
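The renamed module backs pydantic-evals' OpenTelemetry integration, which per the commit title should fail loudly when opentelemetry-sdk is missing. As a rough illustration only — the module body and error message below are assumptions, not the library's actual source — a guarded import of that shape looks like this:

# Hypothetical sketch; not the actual contents of
# pydantic_evals/otel/_context_subtree.py.
# Guarding the import turns a missing opentelemetry-sdk into a clear,
# actionable error instead of an opaque failure deeper in the call stack.
try:
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
except ImportError as exc:
    raise ImportError(
        'pydantic-evals OpenTelemetry support requires the opentelemetry-sdk '
        'package; install it with `pip install opentelemetry-sdk`'
    ) from exc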

tests/evals/test_dataset.py

Lines changed: 8 additions & 8 deletions

@@ -5,7 +5,7 @@
 import sys
 from dataclasses import dataclass
 from pathlib import Path
-from typing import TYPE_CHECKING, Any
+from typing import Any

 import pytest
 from dirty_equals import HasRepr
@@ -16,17 +16,13 @@
 from .utils import render_table

 with try_import() as imports_successful:
+    import logfire
+    from logfire.testing import CaptureLogfire
+
     from pydantic_evals import Case, Dataset
     from pydantic_evals.dataset import increment_eval_metric, set_eval_attribute
     from pydantic_evals.evaluators import EvaluationResult, Evaluator, EvaluatorOutput, LLMJudge, Python
     from pydantic_evals.evaluators.context import EvaluatorContext
-    from pydantic_evals.reporting import ReportCase
-
-pytestmark = [pytest.mark.skipif(not imports_successful(), reason='pydantic-evals not installed'), pytest.mark.anyio]
-
-if TYPE_CHECKING or imports_successful():
-    import logfire
-    from logfire.testing import CaptureLogfire

 @dataclass
 class MockEvaluator(Evaluator[object, object, object]):
@@ -37,6 +33,10 @@ class MockEvaluator(Evaluator[object, object, object]):
     def evaluate(self, ctx: EvaluatorContext[object, object, object]) -> EvaluatorOutput:
         return self.output

+from pydantic_evals.reporting import ReportCase
+
+pytestmark = [pytest.mark.skipif(not imports_successful(), reason='pydantic-evals not installed'), pytest.mark.anyio]
+

 if sys.version_info < (3, 11):
     from exceptiongroup import ExceptionGroup
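The effect of the test change is that the logfire imports (and through them opentelemetry-sdk) now sit under the suite's try_import guard, so a missing optional dependency skips the module's tests instead of erroring at collection time. try_import comes from the tests' ../conftest, whose source isn't shown here; a minimal sketch of how such a guard is typically written, inferred from the usage above rather than the actual conftest:

from contextlib import contextmanager
from typing import Callable, Iterator

@contextmanager
def try_import() -> Iterator[Callable[[], bool]]:
    # Yield a callable that reports whether every import in the
    # `with` block succeeded. Sketch only; the real helper in
    # tests/conftest.py may differ in detail.
    success = False

    def imports_successful() -> bool:
        return success

    try:
        yield imports_successful
    except ImportError:
        # Swallow the error: the test module still imports cleanly,
        # and pytestmark's skipif(not imports_successful()) skips
        # every test in the module instead.
        pass
    else:
        success = True

Because the context manager swallows ImportError, module import always succeeds; the skipif marker in pytestmark is what actually keeps the tests from running when the guarded imports failed.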
