diff --git a/deepeval/__init__.py b/deepeval/__init__.py
index e56f1ad11..834c989ed 100644
--- a/deepeval/__init__.py
+++ b/deepeval/__init__.py
@@ -17,6 +17,7 @@ def _expose_public_api() -> None:
     # Do not do this at module level or ruff will complain with E402
     global __version__, evaluate, assert_test, compare
     global on_test_run_end, log_hyperparameters, login, telemetry
+    global AsyncConfig, DisplayConfig, CacheConfig, ErrorConfig

     from ._version import __version__ as _version
     from deepeval.evaluate import (
@@ -24,6 +25,12 @@ def _expose_public_api() -> None:
         assert_test as _assert_test,
     )
     from deepeval.evaluate.compare import compare as _compare
+    from deepeval.evaluate.configs import (
+        AsyncConfig as _AsyncConfig,
+        DisplayConfig as _DisplayConfig,
+        CacheConfig as _CacheConfig,
+        ErrorConfig as _ErrorConfig,
+    )
     from deepeval.test_run import (
         on_test_run_end as _on_end,
         log_hyperparameters as _log_hparams,
@@ -35,6 +42,10 @@ def _expose_public_api() -> None:
     evaluate = _evaluate
     assert_test = _assert_test
     compare = _compare
+    AsyncConfig = _AsyncConfig
+    DisplayConfig = _DisplayConfig
+    CacheConfig = _CacheConfig
+    ErrorConfig = _ErrorConfig
     on_test_run_end = _on_end
     log_hyperparameters = _log_hparams
     login = _login
@@ -60,6 +71,10 @@ def _expose_public_api() -> None:
     "assert_test",
     "on_test_run_end",
     "compare",
+    "AsyncConfig",
+    "DisplayConfig",
+    "CacheConfig",
+    "ErrorConfig",
 ]


diff --git a/docs/docs/evaluation-flags-and-configs.mdx b/docs/docs/evaluation-flags-and-configs.mdx
index d6f1a3be3..fc046e147 100644
--- a/docs/docs/evaluation-flags-and-configs.mdx
+++ b/docs/docs/evaluation-flags-and-configs.mdx
@@ -73,8 +73,7 @@ These flags control retry and backoff behavior for API calls.
 The `AsyncConfig` controls how concurrently `metrics`, `observed_callback`, and `test_cases` will be evaluated during `evaluate()`.

 ```python
-from deepeval.evaluate import AsyncConfig
-from deepeval import evaluate
+from deepeval import AsyncConfig, evaluate

 evaluate(async_config=AsyncConfig(), ...)
 ```
@@ -92,8 +91,7 @@ The `throttle_value` and `max_concurrent` parameter is only used when `run_async
 The `DisplayConfig` controls how results and intermediate execution steps are displayed during `evaluate()`.

 ```python
-from deepeval.evaluate import DisplayConfig
-from deepeval import evaluate
+from deepeval import DisplayConfig, evaluate

 evaluate(display_config=DisplayConfig(), ...)
 ```
@@ -111,8 +109,7 @@ There are **FOUR** optional parameters when creating an `DisplayConfig`:
 The `ErrorConfig` controls how error is handled in `evaluate()`.

 ```python
-from deepeval.evaluate import ErrorConfig
-from deepeval import evaluate
+from deepeval import ErrorConfig, evaluate

 evaluate(error_config=ErrorConfig(), ...)
 ```
@@ -129,8 +126,7 @@ If both `skip_on_missing_params` and `ignore_errors` are set to `True`, `skip_on
 The `CacheConfig` controls the caching behavior of `evaluate()`.

 ```python
-from deepeval.evaluate import CacheConfig
-from deepeval import evaluate
+from deepeval import CacheConfig, evaluate

 evaluate(cache_config=CacheConfig(), ...)
 ```
diff --git a/tests/test_core/test_imports.py b/tests/test_core/test_imports.py
index 7fa12ddf0..68885605e 100644
--- a/tests/test_core/test_imports.py
+++ b/tests/test_core/test_imports.py
@@ -256,6 +256,25 @@ def test_evaluate_imports():
     assert CacheConfig is not None
     assert ErrorConfig is not None

+    # Test that config classes can also be imported directly from deepeval (Issue #2216)
+    from deepeval import (
+        AsyncConfig as AsyncConfig2,
+        DisplayConfig as DisplayConfig2,
+        CacheConfig as CacheConfig2,
+        ErrorConfig as ErrorConfig2,
+    )
+
+    assert AsyncConfig2 is not None
+    assert DisplayConfig2 is not None
+    assert CacheConfig2 is not None
+    assert ErrorConfig2 is not None
+
+    # Verify they are the same classes
+    assert AsyncConfig is AsyncConfig2
+    assert DisplayConfig is DisplayConfig2
+    assert CacheConfig is CacheConfig2
+    assert ErrorConfig is ErrorConfig2
+

 def test_dataset_imports():
     """Test that dataset classes can be imported."""
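
For context, a minimal sketch of the usage this patch enables, outside the diff itself. The `Configs*` alias names below are illustrative only; everything else (the top-level imports and the path `deepeval.evaluate.configs`) comes from the change above. The point is that the new top-level re-exports resolve to the same class objects as the originals, which is exactly what the added test asserts.

```python
# After this patch, the config classes can be imported straight from the
# top-level package, as the updated docs show:
from deepeval import AsyncConfig, CacheConfig, DisplayConfig, ErrorConfig

# ...and they are the same objects as the ones defined in
# deepeval.evaluate.configs (the source used by _expose_public_api()).
# The "Configs*" aliases are hypothetical names for this sketch.
from deepeval.evaluate.configs import (
    AsyncConfig as ConfigsAsyncConfig,
    CacheConfig as ConfigsCacheConfig,
    DisplayConfig as ConfigsDisplayConfig,
    ErrorConfig as ConfigsErrorConfig,
)

# Identity checks mirroring the assertions added in test_evaluate_imports().
assert AsyncConfig is ConfigsAsyncConfig
assert CacheConfig is ConfigsCacheConfig
assert DisplayConfig is ConfigsDisplayConfig
assert ErrorConfig is ConfigsErrorConfig
```

This keeps the shorter `from deepeval import AsyncConfig, evaluate` form used in the updated docs consistent with the existing top-level exports such as `evaluate` and `assert_test`.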