The Open-source LLMOps Platform
Build reliable LLM applications faster with integrated prompt management, evaluation, and observability.
@@ -84,15 +83,15 @@ Agenta is a platform for building production-grade LLM applications. It helps **
Collaborate with Subject Matter Experts (SMEs) on prompt engineering and make sure nothing breaks in production.
- **Interactive Playground**: Compare prompts side by side against your test cases
-- **Multi-Model Support**: Experiment with 50+ LLM models or [bring-your-own models](https://docs.agenta.ai/prompt-engineering/playground/custom-providers?utm_source=github&utm_medium=referral&utm_campaign=readme)
+- **Multi-Model Support**: Experiment with 50+ LLM models or [bring-your-own models](https://docs.agenta.ai/prompt-engineering/playground/adding-custom-providers?utm_source=github&utm_medium=referral&utm_campaign=readme)
- **Version Control**: Version prompts and configurations with branching and environments
- **Complex Configurations**: Enable SMEs to collaborate on [complex configuration schemas](https://docs.agenta.ai/custom-workflows/overview?utm_source=github&utm_medium=referral&utm_campaign=readme) beyond simple prompts
-[Explore prompt management →](https://docs.agenta.ai/prompt-engineering/concepts?utm_source=github&utm_medium=referral&utm_campaign=readme)
+[Explore prompt management →](https://docs.agenta.ai/prompt-engineering/overview?utm_source=github&utm_medium=referral&utm_campaign=readme)
### 📊 Evaluation & Testing
Evaluate your LLM applications systematically with both human and automated feedback.
-- **Flexible Testsets**: Create testcases from production data, playground experiments, or upload CSVs
+- **Flexible Test Sets**: Create test cases from production data, playground experiments, or upload CSVs
- **Pre-built and Custom Evaluators**: Use LLM-as-judge, one of our 20+ pre-built evaluators, or your custom evaluators
- **UI and API Access**: Run evaluations via UI (for SMEs) or programmatically (for engineers); a sketch follows this list
- **Human Feedback Integration**: Collect and incorporate expert annotations
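A minimal sketch of the programmatic path, using the generated backend client that the SDK diff below touches. The `testsets` attribute on the client and the auth wiring are assumptions; `AgentaApi`, `get_testsets`, and `TestSetOutputResponse` appear verbatim in the hunks that follow:

```python
# Hedged sketch: list test sets via the generated backend client.
from agenta.client.client import AgentaApi  # imported this way in agenta_init.py

client = AgentaApi(
    base_url="https://cloud.agenta.ai/api",  # assumed cloud endpoint
    api_key="<AGENTA_API_KEY>",
)

# get_testsets() returns typing.List[TestSetOutputResponse];
# each item carries id, name, and created_at.
for testset in client.testsets.get_testsets():
    print(testset.id, testset.name, testset.created_at)
```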
@@ -182,7 +181,7 @@ We welcome contributions of all kinds — from filing issues and sharing ideas t
## Contributors ✨
-[](#contributors-)
+[](#contributors-)
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
diff --git a/sdk/agenta/__init__.py b/sdk/agenta/__init__.py
index a85ac5093f..427e846399 100644
--- a/sdk/agenta/__init__.py
+++ b/sdk/agenta/__init__.py
@@ -27,12 +27,8 @@
from .sdk.tracing import Tracing, get_tracer
from .sdk.tracing.conventions import Reference
from .sdk.decorators.tracing import instrument
-from .sdk.decorators.running import (
- workflow,
- application,
- evaluator,
-)
-from .sdk.decorators.serving import route, app
+from .sdk.decorators.running import workflow, workflows
+from .sdk.decorators.serving import entrypoint, app, route
from .sdk.context.running import workflow_mode_enabled
from .sdk.litellm import litellm as callbacks
from .sdk.managers.apps import AppManager
@@ -44,10 +40,6 @@
from .sdk import assets as assets
from .sdk import tracer
-# evaluations
-from .sdk import testsets as testsets
-
-
config = PreInitObject("agenta.config", Config)
DEFAULT_AGENTA_SINGLETON_INSTANCE = AgentaSingleton()
@@ -73,8 +65,7 @@ def init(
global api, async_api, tracing, tracer # pylint: disable=global-statement
_init(
- host=host,
- api_url=api_url,
+ host=host or api_url,
api_key=api_key,
config_fname=config_fname,
redact=redact,
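The two `__init__.py` hunks change the public surface: `application`/`evaluator` give way to `workflow`/`workflows`, `entrypoint` is re-exported alongside `route` and `app`, and a passed `api_url` is folded into `host`. A minimal sketch of the resulting usage, assuming the re-exports land on the top-level `agenta` package as before:

```python
# Hedged sketch of the updated import surface after this hunk.
import agenta as ag
from agenta import workflow, entrypoint, route, app  # re-exported above

ag.init(
    host="https://cloud.agenta.ai",  # api_url, if still passed, folds into host
    api_key="<AGENTA_API_KEY>",
)

@workflow()  # new decorator; takes optional version= and schemas= per the running.py hunk below
async def generate(topic: str) -> str:
    # Plain handler: @workflow only intercepts when workflow mode is enabled.
    return f"A short text about {topic}"
```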
diff --git a/sdk/agenta/client/backend/core/http_client.py b/sdk/agenta/client/backend/core/http_client.py
index 9726651786..8aee8acb89 100644
--- a/sdk/agenta/client/backend/core/http_client.py
+++ b/sdk/agenta/client/backend/core/http_client.py
@@ -148,9 +148,9 @@ def get_request_body(
json_body = maybe_filter_request_body(json, request_options, omit)
# If you have an empty JSON body, you should just send None
- return (json_body if json_body != {} else None), (
- data_body if data_body != {} else None
- )
+ return (
+ json_body if json_body != {} else None
+ ), data_body if data_body != {} else None
class HttpClient:
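The hunk above is formatting-only; the behavior it preserves is the one in the comment: an empty JSON or data body is normalized to `None` so no body is sent at all. A tiny standalone illustration:

```python
# Empty-body rule from get_request_body, in isolation.
def normalize(body):
    return body if body != {} else None

assert normalize({}) is None
assert normalize({"q": "hello"}) == {"q": "hello"}
assert normalize(None) is None  # an already-absent body stays absent
```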
diff --git a/sdk/agenta/client/backend/human_evaluations/client.py b/sdk/agenta/client/backend/human_evaluations/client.py
index 272a81d48e..f1edbf19c6 100644
--- a/sdk/agenta/client/backend/human_evaluations/client.py
+++ b/sdk/agenta/client/backend/human_evaluations/client.py
@@ -237,7 +237,7 @@ def update_human_evaluation(
Updates an evaluation's status.
Raises:
- HTTPException: If the columns in the testset do not match with the inputs in the variant.
+ HTTPException: If the columns in the test set do not match with the inputs in the variant.
Returns:
None: A 204 No Content status code, indicating that the update was successful.
@@ -785,7 +785,7 @@ async def update_human_evaluation(
Updates an evaluation's status.
Raises:
- HTTPException: If the columns in the testset do not match with the inputs in the variant.
+ HTTPException: If the columns in the test set do not match with the inputs in the variant.
Returns:
None: A 204 No Content status code, indicating that the update was successful.
diff --git a/sdk/agenta/client/backend/human_evaluations/raw_client.py b/sdk/agenta/client/backend/human_evaluations/raw_client.py
index 84c4c53a02..6f3bce69bf 100644
--- a/sdk/agenta/client/backend/human_evaluations/raw_client.py
+++ b/sdk/agenta/client/backend/human_evaluations/raw_client.py
@@ -336,7 +336,7 @@ def update_human_evaluation(
Updates an evaluation's status.
Raises:
- HTTPException: If the columns in the testset do not match with the inputs in the variant.
+ HTTPException: If the columns in the test set do not match with the inputs in the variant.
Returns:
None: A 204 No Content status code, indicating that the update was successful.
@@ -1112,7 +1112,7 @@ async def update_human_evaluation(
Updates an evaluation's status.
Raises:
- HTTPException: If the columns in the testset do not match with the inputs in the variant.
+ HTTPException: If the columns in the test set do not match with the inputs in the variant.
Returns:
None: A 204 No Content status code, indicating that the update was successful.
diff --git a/sdk/agenta/client/backend/testsets/client.py b/sdk/agenta/client/backend/testsets/client.py
index b9cd2862b8..48aef2271d 100644
--- a/sdk/agenta/client/backend/testsets/client.py
+++ b/sdk/agenta/client/backend/testsets/client.py
@@ -6,8 +6,8 @@
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.request_options import RequestOptions
from ..types.meta_request import MetaRequest
-from ..types.testset_output_response import TestsetOutputResponse
-from ..types.testset_simple_response import TestsetSimpleResponse
+from ..types.test_set_output_response import TestSetOutputResponse
+from ..types.test_set_simple_response import TestSetSimpleResponse
from ..types.testcase_response import TestcaseResponse
from ..types.testset import Testset
from ..types.testset_response import TestsetResponse
@@ -49,7 +49,7 @@ def upload_file(
upload_type: typing.Optional[str] = OMIT,
testset_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> TestsetSimpleResponse:
+ ) -> TestSetSimpleResponse:
"""
Uploads a CSV or JSON file and saves its data to Postgres.
@@ -75,7 +75,7 @@ def upload_file(
Returns
-------
- TestsetSimpleResponse
+ TestSetSimpleResponse
Successful Response
Examples
@@ -102,7 +102,7 @@ def import_testset(
endpoint: typing.Optional[str] = OMIT,
testset_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> TestsetSimpleResponse:
+ ) -> TestSetSimpleResponse:
"""
Import JSON testset data from an endpoint and save it to Postgres.
@@ -126,7 +126,7 @@ def import_testset(
Returns
-------
- TestsetSimpleResponse
+ TestSetSimpleResponse
Successful Response
Examples
@@ -148,7 +148,7 @@ def import_testset(
def get_testsets(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.List[TestsetOutputResponse]:
+ ) -> typing.List[TestSetOutputResponse]:
"""
Get all testsets.
@@ -165,7 +165,7 @@ def get_testsets(
Returns
-------
- typing.List[TestsetOutputResponse]
+ typing.List[TestSetOutputResponse]
Successful Response
Examples
@@ -313,11 +313,11 @@ def update_testset(
Update the testset with the given id and save the changes to Postgres.
Args:
- testset_id (str): id of the testset to be updated.
+ testset_id (str): id of the test set to be updated.
csvdata (NewTestset): New data to replace the old testset.
Returns:
- str: The id of the testset updated.
+ str: The id of the test set updated.
Parameters
----------
@@ -749,7 +749,7 @@ async def upload_file(
upload_type: typing.Optional[str] = OMIT,
testset_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> TestsetSimpleResponse:
+ ) -> TestSetSimpleResponse:
"""
Uploads a CSV or JSON file and saves its data to Postgres.
@@ -775,7 +775,7 @@ async def upload_file(
Returns
-------
- TestsetSimpleResponse
+ TestSetSimpleResponse
Successful Response
Examples
@@ -810,7 +810,7 @@ async def import_testset(
endpoint: typing.Optional[str] = OMIT,
testset_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> TestsetSimpleResponse:
+ ) -> TestSetSimpleResponse:
"""
Import JSON testset data from an endpoint and save it to Postgres.
@@ -834,7 +834,7 @@ async def import_testset(
Returns
-------
- TestsetSimpleResponse
+ TestSetSimpleResponse
Successful Response
Examples
@@ -864,7 +864,7 @@ async def main() -> None:
async def get_testsets(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.List[TestsetOutputResponse]:
+ ) -> typing.List[TestSetOutputResponse]:
"""
Get all testsets.
@@ -881,7 +881,7 @@ async def get_testsets(
Returns
-------
- typing.List[TestsetOutputResponse]
+ typing.List[TestSetOutputResponse]
Successful Response
Examples
@@ -1061,11 +1061,11 @@ async def update_testset(
Update the testset with the given id and save the changes to Postgres.
Args:
- testset_id (str): id of the testset to be updated.
+ testset_id (str): id of the test set to be updated.
csvdata (NewTestset): New data to replace the old testset.
Returns:
- str: The id of the testset updated.
+ str: The id of the test set updated.
Parameters
----------
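For downstream users, the visible change in this file is the model rename; imports need updating to the new names (the matching file renames appear in the `types/` hunks further down):

```python
# Old names (removed):
#   from agenta.client.backend.types import TestsetOutputResponse, TestsetSimpleResponse
# New names:
from agenta.client.backend.types import (
    TestSetOutputResponse,
    TestSetSimpleResponse,
)
```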
diff --git a/sdk/agenta/client/backend/testsets/raw_client.py b/sdk/agenta/client/backend/testsets/raw_client.py
index ff8421e847..078272bbde 100644
--- a/sdk/agenta/client/backend/testsets/raw_client.py
+++ b/sdk/agenta/client/backend/testsets/raw_client.py
@@ -14,8 +14,8 @@
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from ..types.meta_request import MetaRequest
-from ..types.testset_output_response import TestsetOutputResponse
-from ..types.testset_simple_response import TestsetSimpleResponse
+from ..types.test_set_output_response import TestSetOutputResponse
+from ..types.test_set_simple_response import TestSetSimpleResponse
from ..types.testcase_response import TestcaseResponse
from ..types.testset import Testset
from ..types.testset_response import TestsetResponse
@@ -45,7 +45,7 @@ def upload_file(
upload_type: typing.Optional[str] = OMIT,
testset_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[TestsetSimpleResponse]:
+ ) -> HttpResponse[TestSetSimpleResponse]:
"""
Uploads a CSV or JSON file and saves its data to Postgres.
@@ -71,7 +71,7 @@ def upload_file(
Returns
-------
- HttpResponse[TestsetSimpleResponse]
+ HttpResponse[TestSetSimpleResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -91,9 +91,9 @@ def upload_file(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- TestsetSimpleResponse,
+ TestSetSimpleResponse,
parse_obj_as(
- type_=TestsetSimpleResponse, # type: ignore
+ type_=TestSetSimpleResponse, # type: ignore
object_=_response.json(),
),
)
@@ -129,7 +129,7 @@ def import_testset(
endpoint: typing.Optional[str] = OMIT,
testset_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[TestsetSimpleResponse]:
+ ) -> HttpResponse[TestSetSimpleResponse]:
"""
Import JSON testset data from an endpoint and save it to Postgres.
@@ -153,7 +153,7 @@ def import_testset(
Returns
-------
- HttpResponse[TestsetSimpleResponse]
+ HttpResponse[TestSetSimpleResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -175,9 +175,9 @@ def import_testset(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- TestsetSimpleResponse,
+ TestSetSimpleResponse,
parse_obj_as(
- type_=TestsetSimpleResponse, # type: ignore
+ type_=TestSetSimpleResponse, # type: ignore
object_=_response.json(),
),
)
@@ -208,7 +208,7 @@ def import_testset(
def get_testsets(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> HttpResponse[typing.List[TestsetOutputResponse]]:
+ ) -> HttpResponse[typing.List[TestSetOutputResponse]]:
"""
Get all testsets.
@@ -225,7 +225,7 @@ def get_testsets(
Returns
-------
- HttpResponse[typing.List[TestsetOutputResponse]]
+ HttpResponse[typing.List[TestSetOutputResponse]]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -236,9 +236,9 @@ def get_testsets(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.List[TestsetOutputResponse],
+ typing.List[TestSetOutputResponse],
parse_obj_as(
- type_=typing.List[TestsetOutputResponse], # type: ignore
+ type_=typing.List[TestSetOutputResponse], # type: ignore
object_=_response.json(),
),
)
@@ -476,11 +476,11 @@ def update_testset(
Update the testset with the given id and save the changes to Postgres.
Args:
- testset_id (str): id of the testset to be updated.
+ testset_id (str): id of the test set to be updated.
csvdata (NewTestset): New data to replace the old testset.
Returns:
- str: The id of the testset updated.
+ str: The id of the test set updated.
Parameters
----------
@@ -1202,7 +1202,7 @@ async def upload_file(
upload_type: typing.Optional[str] = OMIT,
testset_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[TestsetSimpleResponse]:
+ ) -> AsyncHttpResponse[TestSetSimpleResponse]:
"""
Uploads a CSV or JSON file and saves its data to Postgres.
@@ -1228,7 +1228,7 @@ async def upload_file(
Returns
-------
- AsyncHttpResponse[TestsetSimpleResponse]
+ AsyncHttpResponse[TestSetSimpleResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -1248,9 +1248,9 @@ async def upload_file(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- TestsetSimpleResponse,
+ TestSetSimpleResponse,
parse_obj_as(
- type_=TestsetSimpleResponse, # type: ignore
+ type_=TestSetSimpleResponse, # type: ignore
object_=_response.json(),
),
)
@@ -1286,7 +1286,7 @@ async def import_testset(
endpoint: typing.Optional[str] = OMIT,
testset_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[TestsetSimpleResponse]:
+ ) -> AsyncHttpResponse[TestSetSimpleResponse]:
"""
Import JSON testset data from an endpoint and save it to Postgres.
@@ -1310,7 +1310,7 @@ async def import_testset(
Returns
-------
- AsyncHttpResponse[TestsetSimpleResponse]
+ AsyncHttpResponse[TestSetSimpleResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -1332,9 +1332,9 @@ async def import_testset(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- TestsetSimpleResponse,
+ TestSetSimpleResponse,
parse_obj_as(
- type_=TestsetSimpleResponse, # type: ignore
+ type_=TestSetSimpleResponse, # type: ignore
object_=_response.json(),
),
)
@@ -1365,7 +1365,7 @@ async def import_testset(
async def get_testsets(
self, *, request_options: typing.Optional[RequestOptions] = None
- ) -> AsyncHttpResponse[typing.List[TestsetOutputResponse]]:
+ ) -> AsyncHttpResponse[typing.List[TestSetOutputResponse]]:
"""
Get all testsets.
@@ -1382,7 +1382,7 @@ async def get_testsets(
Returns
-------
- AsyncHttpResponse[typing.List[TestsetOutputResponse]]
+ AsyncHttpResponse[typing.List[TestSetOutputResponse]]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -1393,9 +1393,9 @@ async def get_testsets(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- typing.List[TestsetOutputResponse],
+ typing.List[TestSetOutputResponse],
parse_obj_as(
- type_=typing.List[TestsetOutputResponse], # type: ignore
+ type_=typing.List[TestSetOutputResponse], # type: ignore
object_=_response.json(),
),
)
@@ -1633,11 +1633,11 @@ async def update_testset(
Update the testset with the given id and save the changes to Postgres.
Args:
- testset_id (str): id of the testset to be updated.
+ testset_id (str): id of the test set to be updated.
csvdata (NewTestset): New data to replace the old testset.
Returns:
- str: The id of the testset updated.
+ str: The id of the test set updated.
Parameters
----------
diff --git a/sdk/agenta/client/backend/types/__init__.py b/sdk/agenta/client/backend/types/__init__.py
index 06ad2a7e86..1dc35288f0 100644
--- a/sdk/agenta/client/backend/types/__init__.py
+++ b/sdk/agenta/client/backend/types/__init__.py
@@ -161,8 +161,8 @@
from .standard_provider_settings_dto import StandardProviderSettingsDto
from .status_code import StatusCode
from .status_dto import StatusDto
-from .testset_output_response import TestsetOutputResponse
-from .testset_simple_response import TestsetSimpleResponse
+from .test_set_output_response import TestSetOutputResponse
+from .test_set_simple_response import TestSetSimpleResponse
from .testcase_response import TestcaseResponse
from .testset import Testset
from .testset_request import TestsetRequest
@@ -351,8 +351,8 @@
"StandardProviderSettingsDto",
"StatusCode",
"StatusDto",
- "TestsetOutputResponse",
- "TestsetSimpleResponse",
+ "TestSetOutputResponse",
+ "TestSetSimpleResponse",
"TestcaseResponse",
"Testset",
"TestsetRequest",
diff --git a/sdk/agenta/client/backend/types/custom_provider_kind.py b/sdk/agenta/client/backend/types/custom_provider_kind.py
index 8c7cc2772e..5696c5c148 100644
--- a/sdk/agenta/client/backend/types/custom_provider_kind.py
+++ b/sdk/agenta/client/backend/types/custom_provider_kind.py
@@ -8,7 +8,7 @@
"azure",
"bedrock",
"sagemaker",
- "vertex_ai",
+ "vertex",
"openai",
"cohere",
"anyscale",
diff --git a/sdk/agenta/client/backend/types/testset_output_response.py b/sdk/agenta/client/backend/types/test_set_output_response.py
similarity index 93%
rename from sdk/agenta/client/backend/types/testset_output_response.py
rename to sdk/agenta/client/backend/types/test_set_output_response.py
index c73add9298..9537fa00a6 100644
--- a/sdk/agenta/client/backend/types/testset_output_response.py
+++ b/sdk/agenta/client/backend/types/test_set_output_response.py
@@ -8,7 +8,7 @@
from ..core.serialization import FieldMetadata
-class TestsetOutputResponse(UniversalBaseModel):
+class TestSetOutputResponse(UniversalBaseModel):
id: typing_extensions.Annotated[str, FieldMetadata(alias="_id")]
name: str
created_at: str
diff --git a/sdk/agenta/client/backend/types/testset_simple_response.py b/sdk/agenta/client/backend/types/test_set_simple_response.py
similarity index 91%
rename from sdk/agenta/client/backend/types/testset_simple_response.py
rename to sdk/agenta/client/backend/types/test_set_simple_response.py
index abd4e66869..433d409899 100644
--- a/sdk/agenta/client/backend/types/testset_simple_response.py
+++ b/sdk/agenta/client/backend/types/test_set_simple_response.py
@@ -6,7 +6,7 @@
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
-class TestsetSimpleResponse(UniversalBaseModel):
+class TestSetSimpleResponse(UniversalBaseModel):
id: str
name: str
created_at: str
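Both renamed models keep their fields; the notable detail is the `_id` alias on `TestSetOutputResponse`. An illustrative plain-pydantic equivalent (`UniversalBaseModel` and `FieldMetadata` are generated-client internals, so pydantic v2 stands in here):

```python
from pydantic import BaseModel, Field

class TestSetOutput(BaseModel):
    # The API payload uses "_id"; the model exposes it as `id`.
    id: str = Field(alias="_id")
    name: str
    created_at: str

ts = TestSetOutput.model_validate(
    {"_id": "abc123", "name": "demo", "created_at": "2025-01-01"}
)
assert ts.id == "abc123"
```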
diff --git a/sdk/agenta/sdk/__init__.py b/sdk/agenta/sdk/__init__.py
index 6f908168d5..b117dfe298 100644
--- a/sdk/agenta/sdk/__init__.py
+++ b/sdk/agenta/sdk/__init__.py
@@ -2,50 +2,6 @@
from .utils.preinit import PreInitObject # always the first import!
-__all__ = [
- # Decorators
- "workflow",
- "application",
- "evaluator",
- "instrument",
- "route",
- "app",
- # Initialization
- "init",
- "config",
- # Types
- "DictInput",
- "MultipleChoice",
- "FloatParam",
- "IntParam",
- "MultipleChoiceParam",
- "GroupedMultipleChoiceParam",
- "TextParam",
- "MessagesInput",
- "FileInputURL",
- "BinaryParam",
- "Prompt",
- # Tracing
- "Tracing",
- "tracing",
- "tracer",
- "get_tracer",
- "Reference",
- # Managers
- "AppManager",
- "VaultManager",
- "SecretsManager",
- "ConfigManager",
- "VariantManager",
- "DeploymentManager",
- # Utilities
- "calculate_token_usage",
- # API clients
- "api",
- "async_api",
- "types",
-]
-
import agenta.client.backend.types as client_types # pylint: disable=wrong-import-order
from .types import (
@@ -65,13 +21,9 @@
)
from .tracing import Tracing, get_tracer
-from agenta.sdk.decorators.tracing import instrument
-from agenta.sdk.decorators.running import (
- workflow,
- application,
- evaluator,
-)
-from agenta.sdk.decorators.serving import route, app
+from .decorators.tracing import instrument
+from .decorators.running import workflow, workflows
+from .decorators.serving import entrypoint, app, route
from .tracing.conventions import Reference
from .agenta_init import Config, AgentaSingleton, init as _init
from .utils.costs import calculate_token_usage
@@ -81,7 +33,6 @@
from .managers.config import ConfigManager
from .managers.variant import VariantManager
from .managers.deployment import DeploymentManager
-from .managers import testsets as testsets
config = PreInitObject("agenta.config", Config)
diff --git a/sdk/agenta/sdk/agenta_init.py b/sdk/agenta/sdk/agenta_init.py
index 45c85a13ff..b4bbbb740f 100644
--- a/sdk/agenta/sdk/agenta_init.py
+++ b/sdk/agenta/sdk/agenta_init.py
@@ -9,7 +9,7 @@
from agenta.client.client import AgentaApi, AsyncAgentaApi
from agenta.sdk.tracing import Tracing
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
log = get_module_logger(__name__)
@@ -27,7 +27,6 @@ class AgentaSingleton:
def __init__(self):
self.host = None
- self.api_url = None
self.api_key = None
self.scope_type = None
@@ -42,7 +41,6 @@ def init(
self,
*,
host: Optional[str] = None,
- api_url: Optional[str] = None,
api_key: Optional[str] = None,
config_fname: Optional[str] = None,
redact: Optional[Callable[..., Any]] = None,
@@ -70,7 +68,7 @@ def init(
"""
- log.info("Agenta - SDK ver: %s", version("agenta"))
+ log.info("Agenta - SDK version: %s", version("agenta"))
config = {}
if config_fname:
@@ -79,29 +77,16 @@ def init(
_host = (
host
or getenv("AGENTA_HOST")
+ or config.get("backend_host")
or config.get("host")
- or "https://cloud.agenta.ai"
+ or getenv("AGENTA_API_URL", "https://cloud.agenta.ai")
)
- _api_url = (
- api_url
- or getenv("AGENTA_API_URL")
- or config.get("api_url")
- or None # NO FALLBACK
- )
-
- if _api_url:
- _host = _api_url.rsplit("/api", 1)[0]
-
- if _host and not _api_url:
- _api_url = _host + "/api"
-
try:
assert _host and isinstance(
_host, str
), "Host is required. Please provide a valid host or set AGENTA_HOST environment variable."
self.host = parse_url(url=_host)
- self.api_url = self.host + "/api"
except AssertionError as e:
log.error(str(e))
raise
@@ -109,27 +94,19 @@ def init(
log.error(f"Failed to parse host URL '{_host}': {e}")
raise
- self.api_key = (
- api_key
- or getenv("AGENTA_API_KEY")
- or config.get("api_key")
- or None # NO FALLBACK
- )
+ log.info("Agenta - Host: %s", self.host)
- log.info("Agenta - API URL: %s", self.api_url)
+ self.api_key = api_key or getenv("AGENTA_API_KEY") or config.get("api_key")
self.scope_type = (
scope_type
or getenv("AGENTA_SCOPE_TYPE")
or config.get("scope_type")
- or None # NO FALLBACK
+ or None
)
self.scope_id = (
- scope_id
- or getenv("AGENTA_SCOPE_ID")
- or config.get("scope_id")
- or None # NO FALLBACK
+ scope_id or getenv("AGENTA_SCOPE_ID") or config.get("scope_id") or None
)
self.tracing = Tracing(
@@ -143,12 +120,12 @@ def init(
)
self.api = AgentaApi(
- base_url=self.api_url,
+ base_url=self.host + "/api",
api_key=self.api_key if self.api_key else "",
)
self.async_api = AsyncAgentaApi(
- base_url=self.api_url,
+ base_url=self.host + "/api",
api_key=self.api_key if self.api_key else "",
)
@@ -172,7 +149,7 @@ def get_default(self):
return self.default_parameters
def __getattr__(self, key):
- context = RoutingContext.get()
+ context = serving_context.get()
parameters = context.parameters
@@ -195,7 +172,6 @@ def __getattr__(self, key):
def init(
host: Optional[str] = None,
- api_url: Optional[str] = None,
api_key: Optional[str] = None,
config_fname: Optional[str] = None,
redact: Optional[Callable[..., Any]] = None,
@@ -224,7 +200,6 @@ def init(
singleton.init(
host=host,
- api_url=api_url,
api_key=api_key,
config_fname=config_fname,
redact=redact,
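The net effect in `agenta_init.py` is a single resolution chain for the host, with `AGENTA_API_URL` demoted to a fallback. The order, extracted from the hunk above into a standalone sketch:

```python
# Host resolution order after this change:
# explicit arg > AGENTA_HOST > config "backend_host" > config "host"
# > AGENTA_API_URL > cloud default.
from os import getenv
from typing import Optional

def resolve_host(host: Optional[str], config: dict) -> str:
    return (
        host
        or getenv("AGENTA_HOST")
        or config.get("backend_host")
        or config.get("host")
        or getenv("AGENTA_API_URL", "https://cloud.agenta.ai")
    )

# With no argument, env vars, or config file, the cloud default wins.
assert resolve_host(None, {}) == "https://cloud.agenta.ai"  # assumes unset env
```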
diff --git a/sdk/agenta/sdk/assets.py b/sdk/agenta/sdk/assets.py
index e4b0b44be4..6f9a2741d3 100644
--- a/sdk/agenta/sdk/assets.py
+++ b/sdk/agenta/sdk/assets.py
@@ -1,8 +1,6 @@
supported_llm_models = {
- "anthropic": [
+ "Anthropic": [
"anthropic/claude-sonnet-4-5",
- "anthropic/claude-haiku-4-5",
- "anthropic/claude-opus-4-1",
"anthropic/claude-sonnet-4-20250514",
"anthropic/claude-opus-4-20250514",
"anthropic/claude-3-7-sonnet-20250219",
@@ -15,32 +13,29 @@
"anthropic/claude-2.1",
"anthropic/claude-2",
],
- "cohere": [
+ "Cohere": [
"cohere/command-light",
"cohere/command-r-plus",
"cohere/command-nightly",
],
- "deepinfra": [
+ "DeepInfra": [
"deepinfra/meta-llama/Llama-2-70b-chat-hf",
"deepinfra/meta-llama/Llama-2-13b-chat-hf",
"deepinfra/codellama/CodeLlama-34b-Instruct-hf",
"deepinfra/mistralai/Mistral-7B-Instruct-v0.1",
],
- "gemini": [
- "gemini/gemini-2.5-pro",
- "gemini/gemini-2.5-pro-preview-05-06",
- "gemini/gemini-2.5-flash",
- "gemini/gemini-2.5-flash-preview-09-2025",
+ "Gemini": [
"gemini/gemini-2.5-flash-preview-05-20",
"gemini/gemini-2.5-flash-preview-04-17",
- "gemini/gemini-2.5-flash-lite",
- "gemini/gemini-2.5-flash-lite-preview-09-2025",
- "gemini/gemini-2.0-flash",
"gemini/gemini-2.0-flash-001",
- "gemini/gemini-2.0-flash-lite",
+ "gemini/gemini-2.5-pro-preview-05-06",
"gemini/gemini-2.0-flash-lite-preview-02-05",
+ "gemini/gemini-1.5-pro-latest",
+ "gemini/gemini-2.0-flash-lite",
+ "gemini/gemini-1.5-flash",
+ "gemini/gemini-1.5-flash-8b",
],
- "groq": [
+ "Groq": [
"groq/deepseek-r1-distill-llama-70b",
"groq/deepseek-r1-distill-llama-70b-specdec",
"groq/gemma2-9b-it",
@@ -56,13 +51,13 @@
"groq/llama3-8b-8192",
"groq/mixtral-8x7b-32768",
],
- "mistral": [
+ "Mistral": [
"mistral/mistral-tiny",
"mistral/mistral-small",
"mistral/mistral-medium",
"mistral/mistral-large-latest",
],
- "openai": [
+ "OpenAI": [
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
@@ -78,7 +73,7 @@
"gpt-4.1-nano",
"o4-mini",
],
- "openrouter": [
+ "OpenRouter": [
"openrouter/qwen/qwen3-235b-a22b",
"openrouter/qwen/qwen3-32b",
"openrouter/qwen/qwen3-30b-a3b",
@@ -148,13 +143,13 @@
"openrouter/google/gemini-2.0-flash-001",
"openrouter/perplexity/sonar-reasoning",
],
- "perplexity": [
+ "Perplexity AI": [
"perplexity/sonar",
"perplexity/sonar-pro",
"perplexity/sonar-reasoning",
"perplexity/sonar-reasoning-pro",
],
- "together_ai": [
+ "togetherai": [
"together_ai/deepseek-ai/DeepSeek-R1",
"together_ai/deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
"together_ai/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
diff --git a/sdk/agenta/sdk/context/serving.py b/sdk/agenta/sdk/context/serving.py
index 6fe648f59e..6d9e409ebf 100644
--- a/sdk/agenta/sdk/context/serving.py
+++ b/sdk/agenta/sdk/context/serving.py
@@ -1,7 +1,7 @@
from typing import Any, Dict, List, Optional
from contextlib import contextmanager
-from contextvars import ContextVar, Token
+from contextvars import ContextVar
from pydantic import BaseModel
@@ -11,29 +11,17 @@ class RoutingContext(BaseModel):
secrets: Optional[List[Any]] = None
mock: Optional[str] = None
- @classmethod
- def get(cls) -> "RoutingContext":
- try:
- return routing_context.get()
- except LookupError:
- return RoutingContext()
- @classmethod
- def set(cls, ctx: "RoutingContext") -> Token:
- return routing_context.set(ctx)
-
- @classmethod
- def reset(cls, token: Token) -> None:
- return routing_context.reset(token)
-
-
-routing_context: ContextVar[RoutingContext] = ContextVar("routing_context")
+serving_context = ContextVar("serving_context", default=RoutingContext())
@contextmanager
-def routing_context_manager(context: RoutingContext):
- token = RoutingContext.set(context)
+def serving_context_manager(
+ *,
+ context: Optional[RoutingContext] = None,
+):
+ token = serving_context.set(context)
try:
yield
finally:
- RoutingContext.reset(token)
+ serving_context.reset(token)
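The classmethod get/set/reset trio is replaced by a bare `ContextVar` with a default instance plus a context manager. The same pattern in a generic, self-contained sketch (names are illustrative; unlike the hunk above, the sketch guards against a `None` context):

```python
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Optional

from pydantic import BaseModel

class Ctx(BaseModel):
    mock: Optional[str] = None

# A default instance means .get() never raises LookupError.
ctx_var: ContextVar[Ctx] = ContextVar("ctx", default=Ctx())

@contextmanager
def ctx_manager(*, context: Optional[Ctx] = None):
    token = ctx_var.set(context or Ctx())
    try:
        yield
    finally:
        ctx_var.reset(token)

with ctx_manager(context=Ctx(mock="on")):
    assert ctx_var.get().mock == "on"
assert ctx_var.get().mock is None  # reset restores the default
```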
diff --git a/sdk/agenta/sdk/context/tracing.py b/sdk/agenta/sdk/context/tracing.py
index 8431fd2757..314ea0e845 100644
--- a/sdk/agenta/sdk/context/tracing.py
+++ b/sdk/agenta/sdk/context/tracing.py
@@ -1,7 +1,7 @@
from typing import Any, Dict, Optional
from contextlib import contextmanager
-from contextvars import ContextVar, Token
+from contextvars import ContextVar
from pydantic import BaseModel
@@ -15,60 +15,42 @@ class TracingContext(BaseModel):
link: Optional[Dict[str, Any]] = None
type: Optional[str] = None
- @classmethod
- def get(cls) -> "TracingContext":
- try:
- return tracing_context.get()
- except LookupError:
- return TracingContext()
- @classmethod
- def set(cls, ctx: "TracingContext") -> Token:
- return tracing_context.set(ctx)
-
- @classmethod
- def reset(cls, token: Token) -> None:
- return tracing_context.reset(token)
-
-
-tracing_context: ContextVar[TracingContext] = ContextVar("ag.tracing_context")
+tracing_context = ContextVar(
+ "ag.tracing_context",
+ default=TracingContext(),
+)
@contextmanager
-def tracing_context_manager(context: TracingContext):
- token = TracingContext.set(context)
+def tracing_context_manager(
+ *,
+ context: Optional[TracingContext] = None,
+):
+ token = tracing_context.set(context)
try:
yield
finally:
- TracingContext.reset(token)
+ tracing_context.reset(token)
-class OTLPContext(BaseModel):
+class TracingExporterContext(BaseModel):
credentials: Optional[str] = None
- @classmethod
- def get(cls) -> "OTLPContext":
- try:
- return otlp_context.get()
- except LookupError:
- return OTLPContext()
-
- @classmethod
- def set(cls, ctx: "OTLPContext") -> Token:
- return otlp_context.set(ctx)
-
- @classmethod
- def reset(cls, token: Token) -> None:
- return otlp_context.reset(token)
-
-otlp_context: ContextVar[OTLPContext] = ContextVar("ag.otlp_context")
+tracing_exporter_context = ContextVar(
+ "ag.tracing_exporter_context",
+ default=TracingExporterContext(),
+)
@contextmanager
-def otlp_context_manager(context: OTLPContext):
- token = otlp_context.set(context)
+def tracing_exporter_context_manager(
+ *,
+ context: Optional[TracingExporterContext] = None,
+):
+ token = tracing_exporter_context.set(context)
try:
yield
finally:
- otlp_context.reset(token)
+ tracing_exporter_context.reset(token)
diff --git a/sdk/agenta/sdk/contexts/routing.py b/sdk/agenta/sdk/contexts/routing.py
deleted file mode 100644
index 4996fdf1f6..0000000000
--- a/sdk/agenta/sdk/contexts/routing.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from typing import Optional
-from contextvars import ContextVar, Token
-from contextlib import contextmanager
-
-from pydantic import BaseModel
-
-
-class RoutingContext(BaseModel):
- parameters: Optional[dict] = None
- secrets: Optional[list] = None
- mock: Optional[str] = None
-
- @classmethod
- def get(cls) -> "RoutingContext":
- try:
- return routing_context.get()
- except LookupError:
- return RoutingContext()
-
- @classmethod
- def set(cls, ctx: "RoutingContext") -> Token:
- return routing_context.set(ctx)
-
- @classmethod
- def reset(cls, token: Token) -> None:
- return routing_context.reset(token)
-
-
-routing_context: ContextVar[RoutingContext] = ContextVar("routing_context")
-
-
-@contextmanager
-def routing_context_manager(context: RoutingContext):
- token = RoutingContext.set(context)
- try:
- yield
- finally:
- RoutingContext.reset(token)
diff --git a/sdk/agenta/sdk/contexts/running.py b/sdk/agenta/sdk/contexts/running.py
deleted file mode 100644
index 8359131a77..0000000000
--- a/sdk/agenta/sdk/contexts/running.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from typing import Optional, Union, Callable
-from contextvars import Token, ContextVar
-from contextlib import contextmanager
-
-
-from pydantic import BaseModel
-
-from agenta.sdk.models.workflows import (
- WorkflowServiceInterface,
- WorkflowServiceConfiguration,
-)
-
-
-class RunningContext(BaseModel):
- flags: Optional[dict] = None
- tags: Optional[dict] = None
- meta: Optional[dict] = None
-
- aggregate: Optional[Union[bool, Callable]] = None # stream to batch
- annotate: Optional[bool] = None # annotation vs invocation
-
- interface: Optional[WorkflowServiceInterface] = None
- configuration: Optional[WorkflowServiceConfiguration] = None
- parameters: Optional[dict] = None
- schemas: Optional[dict] = None
-
- secrets: Optional[list] = None
- credentials: Optional[str] = None
-
- handler: Optional[Callable] = None
-
- @classmethod
- def get(cls) -> "RunningContext":
- try:
- return running_context.get()
- except LookupError:
- return RunningContext()
-
- @classmethod
- def set(cls, ctx: "RunningContext") -> Token:
- return running_context.set(ctx)
-
- @classmethod
- def reset(cls, token: Token) -> None:
- return running_context.reset(token)
-
-
-running_context: ContextVar[RunningContext] = ContextVar("running_context")
-
-
-@contextmanager
-def running_context_manager(context: RunningContext):
- token = RunningContext.set(context)
- try:
- yield
- finally:
- RunningContext.reset(token)
diff --git a/sdk/agenta/sdk/contexts/tracing.py b/sdk/agenta/sdk/contexts/tracing.py
deleted file mode 100644
index 8425350ceb..0000000000
--- a/sdk/agenta/sdk/contexts/tracing.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from typing import Optional, Union, Callable
-from contextvars import ContextVar, Token
-from contextlib import contextmanager
-
-from pydantic import BaseModel
-
-
-class TracingContext(BaseModel):
- traceparent: Optional[dict] = None
- baggage: Optional[dict] = None
- #
- credentials: Optional[str] = None
- #
- script: Optional[dict] = None
- parameters: Optional[dict] = None
- #
- flags: Optional[dict] = None
- tags: Optional[dict] = None
- meta: Optional[dict] = None
- #
- references: Optional[dict] = None
- links: Optional[dict] = None
- #
- type: Optional[str] = None
- aggregate: Optional[Union[bool, Callable]] = None # stream to batch
- annotate: Optional[bool] = None # annotation vs invocation
- #
- link: Optional[dict] = None
-
- @classmethod
- def get(cls) -> "TracingContext":
- try:
- return tracing_context.get()
- except LookupError:
- return TracingContext()
-
- @classmethod
- def set(cls, ctx: "TracingContext") -> Token:
- return tracing_context.set(ctx)
-
- @classmethod
- def reset(cls, token: Token) -> None:
- return tracing_context.reset(token)
-
-
-tracing_context: ContextVar[TracingContext] = ContextVar("ag.tracing_context")
-
-
-@contextmanager
-def tracing_context_manager(context: TracingContext):
- token = TracingContext.set(context)
- try:
- yield
- finally:
- TracingContext.reset(token)
-
-
-class OTLPContext(BaseModel):
- credentials: Optional[str] = None
-
- @classmethod
- def get(cls) -> "OTLPContext":
- try:
- return otlp_context.get()
- except LookupError:
- return OTLPContext()
-
- @classmethod
- def set(cls, ctx: "OTLPContext") -> Token:
- return otlp_context.set(ctx)
-
- @classmethod
- def reset(cls, token: Token) -> None:
- return otlp_context.reset(token)
-
-
-otlp_context: ContextVar[OTLPContext] = ContextVar("ag.otlp_context")
-
-
-@contextmanager
-def otlp_context_manager(context: OTLPContext):
- token = OTLPContext.set(context)
- try:
- yield
- finally:
- OTLPContext.reset(token)
diff --git a/sdk/agenta/sdk/decorators/__init__.py b/sdk/agenta/sdk/decorators/__init__.py
index 629cec59bb..e69de29bb2 100644
--- a/sdk/agenta/sdk/decorators/__init__.py
+++ b/sdk/agenta/sdk/decorators/__init__.py
@@ -1 +0,0 @@
-from .running import application, evaluator
diff --git a/sdk/agenta/sdk/decorators/routing.py b/sdk/agenta/sdk/decorators/routing.py
deleted file mode 100644
index 6f4f8a2318..0000000000
--- a/sdk/agenta/sdk/decorators/routing.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# /agenta/sdk/decorators/routing.py
-
-from typing import Any, Callable, Optional, AsyncGenerator, Union
-from json import dumps
-from uuid import UUID
-from traceback import format_exception
-
-from fastapi import FastAPI, APIRouter, Request
-from fastapi.responses import JSONResponse, StreamingResponse, Response
-
-from agenta.sdk.utils.exceptions import suppress
-from agenta.sdk.models.workflows import (
- WorkflowServiceRequest,
- WorkflowServiceStatus,
- WorkflowServiceBatchResponse,
- WorkflowServiceStreamResponse,
- WorkflowServiceBaseResponse,
-)
-from agenta.sdk.middlewares.routing.cors import CORSMiddleware
-from agenta.sdk.middlewares.routing.auth import AuthMiddleware
-from agenta.sdk.middlewares.routing.otel import OTelMiddleware
-from agenta.sdk.contexts.running import running_context_manager, RunningContext
-from agenta.sdk.contexts.tracing import tracing_context_manager, TracingContext
-from agenta.sdk.decorators.running import auto_workflow, Workflow
-from agenta.sdk.workflows.errors import ErrorStatus
-
-
-def create_app(**kwargs: Any) -> FastAPI:
- app = FastAPI(**kwargs)
-
- app.add_middleware(CORSMiddleware)
- app.add_middleware(AuthMiddleware)
- app.add_middleware(OTelMiddleware)
-
- return app
-
-
-default_app: FastAPI = create_app()
-
-
-def _pick_stream_format(request: Request) -> str:
- if "text/event-stream" in request.headers.get("accept", ""):
- return "sse"
- return "ndjson"
-
-
-def _ndjson_stream(aiter: AsyncGenerator[Any, None]):
- async def gen():
- async for chunk in aiter:
- yield dumps(chunk, ensure_ascii=False) + "\n"
-
- return gen()
-
-
-def _sse_stream(aiter: AsyncGenerator[Any, None]):
- async def gen():
- async for chunk in aiter:
- yield "data: " + dumps(chunk, ensure_ascii=False) + "\n\n"
-
- return gen()
-
-
-def _set_common_headers(
- res: Response,
- response: WorkflowServiceBaseResponse,
-) -> Response:
- res.headers.setdefault("x-ag-version", response.version or "unknown")
-
- if response.trace_id:
- res.headers.setdefault("x-ag-trace-id", response.trace_id)
-
- if response.span_id:
- res.headers.setdefault("x-ag-span-id", response.span_id)
-
- return res
-
-
-def _make_json_response(
- response: WorkflowServiceBatchResponse,
-) -> JSONResponse:
- res = JSONResponse(
- status_code=((response.status.code or 200) if response.status else 200),
- content=response.model_dump(mode="json", exclude_none=True),
- )
-
- return _set_common_headers(res, response) # type: ignore
-
-
-def _make_stream_response(
- req: Request,
- response: WorkflowServiceStreamResponse,
-) -> StreamingResponse:
- aiter = response.iterator()
-
- if _pick_stream_format(req) == "sse":
- res = StreamingResponse(
- _sse_stream(aiter),
- media_type="text/event-stream",
- )
- else:
- res = StreamingResponse(
- _ndjson_stream(aiter),
- media_type="application/x-ndjson",
- )
-
- return _set_common_headers(res, response) # type: ignore
-
-
-async def handle_invoke_success(
- req: Request,
- response: Any,
-) -> Response:
- if isinstance(response, WorkflowServiceBatchResponse):
- return _make_json_response(response)
-
- if isinstance(response, WorkflowServiceStreamResponse):
- return _make_stream_response(req, response)
-
- batch = WorkflowServiceBatchResponse(data=response)
-
- return _make_json_response(batch)
-
-
-async def handle_invoke_failure(exception: Exception) -> Response:
- status = None
-
- if isinstance(exception, ErrorStatus):
- status = WorkflowServiceStatus(
- type=exception.type,
- code=exception.code,
- message=exception.message,
- stacktrace=exception.stacktrace,
- )
-
- else:
- type = "https://docs.agenta.ai/errors#v1:sdk:unknown-workflow-invoke-error"
-
- code = (
- getattr(exception, "status_code")
- if hasattr(exception, "status_code")
- else 500
- )
-
- if code in [401, 403]:
- code = 424
-
- message = str(exception) or "Internal Server Error"
-
- stacktrace = format_exception(
- exception, # type: ignore
- value=exception,
- tb=exception.__traceback__,
- )
-
- status = WorkflowServiceStatus(
- type=type,
- code=code,
- message=message,
- stacktrace=stacktrace,
- )
-
- trace_id = None
- span_id = None
-
- with suppress():
- link = (TracingContext.get().link) or {}
-
- _trace_id = link.get("trace_id") if link else None # in int format
- _span_id = link.get("span_id") if link else None # in int format
-
- trace_id = UUID(int=_trace_id).hex if _trace_id else None
- span_id = UUID(int=_span_id).hex[16:] if _span_id else None
-
- error = WorkflowServiceBatchResponse(
- status=status,
- trace_id=trace_id,
- span_id=span_id,
- )
-
- return _make_json_response(error)
-
-
-async def handle_inspect_success(
- request: Optional[WorkflowServiceRequest],
-):
- if request:
- return JSONResponse(request.model_dump(mode="json", exclude_none=True))
-
- return JSONResponse({"details": {"message": "Workflow not found"}}, status_code=404)
-
-
-async def handle_inspect_failure(exception: Exception) -> Response:
- code = (
- getattr(exception, "status_code") if hasattr(exception, "status_code") else 500
- )
-
- if code in [401, 403]:
- code = 424
-
- message = str(exception) or "Internal Server Error"
-
- return JSONResponse({"details": message}, status_code=code)
-
-
-class route:
- def __init__(
- self,
- path: str = "/",
- app: Optional[FastAPI] = None,
- router: Optional[APIRouter] = None,
- ):
- path = path.rstrip("/")
- path = path if path else "/"
- path = path if path.startswith("/") else "/" + path
- self.path = path
- self.root = app or router or default_app
-
- def __call__(self, foo: Optional[Union[Callable[..., Any], Workflow]] = None):
- if foo is None:
- return self
-
- workflow = auto_workflow(foo)
-
- async def invoke_endpoint(req: Request, request: WorkflowServiceRequest):
- credentials = req.state.auth.get("credentials")
-
- try:
- response = await workflow.invoke(
- request=request,
- credentials=credentials,
- )
-
- return await handle_invoke_success(req, response)
-
- except Exception as exception:
- return await handle_invoke_failure(exception)
-
- async def inspect_endpoint(req: Request):
- credentials = req.state.auth.get("credentials")
-
- try:
- request = await workflow.inspect(
- credentials=credentials,
- )
-
- return await handle_inspect_success(request)
-
- except Exception as exception:
- return await handle_inspect_failure(exception)
-
- invoke_responses: dict = {
- 200: {
- "description": "Response batch JSON or stream NDJSON/SSE",
- "content": {
- "application/json": {
- "schema": WorkflowServiceBatchResponse.model_json_schema()
- },
- "application/x-ndjson": {
- "schema": {"type": "string", "description": "NDJSON stream"}
- },
- "text/event-stream": {
- "schema": {"type": "string", "description": "SSE stream"}
- },
- },
- }
- }
-
- self.root.add_api_route(
- self.path + "/invoke",
- invoke_endpoint,
- methods=["POST"],
- responses=invoke_responses,
- )
-
- self.root.add_api_route(
- self.path + "/inspect",
- inspect_endpoint,
- methods=["GET"],
- response_model=WorkflowServiceRequest,
- )
-
- return foo
diff --git a/sdk/agenta/sdk/decorators/running.py b/sdk/agenta/sdk/decorators/running.py
index 0a44bcd072..351d50e175 100644
--- a/sdk/agenta/sdk/decorators/running.py
+++ b/sdk/agenta/sdk/decorators/running.py
@@ -1,732 +1,134 @@
-# /agenta/sdk/decorators/running.py
-
-from typing import Any, Callable, Optional, Protocol, Union, Dict
-from functools import update_wrapper, wraps
-from typing import Callable, Any
-from inspect import signature
-from uuid import UUID, uuid4
-
-from agenta.sdk.utils.logging import get_module_logger
-from agenta.sdk.models.workflows import (
- WorkflowRevisionData,
- WorkflowRevision,
- WorkflowServiceRequestData,
- WorkflowServiceResponseData,
+from typing import Any, Callable, List, Awaitable, Dict, Optional
+import asyncio
+from functools import wraps
+from inspect import iscoroutinefunction
+from copy import deepcopy
+
+from decorator import decorator
+
+from agenta.sdk.context.running import (
+ workflow_mode_enabled_context,
+ workflow_registry_context,
+ WorkflowRegistryContext,
+)
+from agenta.sdk.workflows.types import (
WorkflowServiceRequest,
+ WorkflowServiceResponse,
WorkflowServiceInterface,
- WorkflowServiceConfiguration,
- WorkflowServiceBatchResponse,
- WorkflowServiceStreamResponse,
- Reference,
- Link,
-)
-from agenta.sdk.contexts.running import RunningContext, running_context_manager
-from agenta.sdk.contexts.tracing import TracingContext, tracing_context_manager
-from agenta.sdk.middlewares.running.normalizer import (
- NormalizerMiddleware,
-)
-from agenta.sdk.middlewares.running.resolver import (
- ResolverMiddleware,
- resolve_interface,
- resolve_configuration,
-)
-from agenta.sdk.middlewares.running.vault import (
- VaultMiddleware,
- get_secrets,
-)
-from agenta.sdk.decorators.tracing import auto_instrument
-from agenta.sdk.workflows.utils import (
- register_handler,
- retrieve_handler,
- retrieve_interface,
- retrieve_configuration,
- is_custom_uri,
+ WorkflowRevision,
+ Schema,
)
+from agenta.sdk.middleware.base import WorkflowMiddlewareDecorator
-import agenta as ag
-
-
-log = get_module_logger(__name__)
-
-
-class InvokeFn(Protocol):
- async def __call__(
- self,
- request: Union[WorkflowServiceRequest, dict],
- ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
- ...
-
-
-class InspectFn(Protocol):
- async def __call__(self) -> WorkflowServiceRequest:
- ...
-
+from agenta.sdk.middleware.auth import AuthMiddleware
+from agenta.sdk.middleware.flags import FlagsMiddleware
+from agenta.sdk.middleware.adapt import AdaptMiddleware
-class Workflow:
- def __init__(self, fn: Callable[..., Any]):
- self._fn = fn
- update_wrapper(self, fn)
+LATEST_VERSION = "2025.07.14"
+DEFAULT_SCHEMAS = lambda: {} # pylint: disable=unnecessary-lambda-assignment
- # self.invoke: InvokeFn
- # self.inspect: InspectFn
- self.workflow: workflow
- async def invoke(
- self,
- *,
- request: Union[WorkflowServiceRequest, dict],
- #
- secrets: Optional[list] = None,
- credentials: Optional[str] = None,
- #
- **kwargs,
- ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
- ...
-
- async def inspect(
- self,
- *,
- credentials: Optional[str] = None,
- #
- **kwargs,
- ) -> WorkflowServiceRequest:
- ...
+class workflows:
+ @classmethod
+ def get_registry(cls) -> WorkflowRegistryContext:
+ return deepcopy(workflow_registry_context.get())
- def __call__(self, *args, **kwargs) -> Any:
- return self._fn(*args, **kwargs)
- def __repr__(self) -> str:
- return repr(self._fn)
-
- def __str__(self) -> str:
- return str(self._fn)
-
-
-class workflow:
+class workflow: # pylint: disable=invalid-name
def __init__(
self,
- *,
- # -------------------------------------------------------------------- #
- id: Optional[UUID] = None,
- slug: Optional[str] = None,
version: Optional[str] = None,
- #
- references: Optional[Dict[str, Union[Reference, Dict[str, Any]]]] = None,
- # -------------------------------------------------------------------- #
- links: Optional[Dict[str, Union[Link, Dict[str, Any]]]] = None,
- # -------------------------------------------------------------------- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- # -------------------------------------------------------------------- #
- flags: Optional[dict] = None,
- tags: Optional[dict] = None,
- meta: Optional[dict] = None,
- # -------------------------------------------------------------------- #
- uri: Optional[str] = None,
- url: Optional[str] = None,
- headers: Optional[dict] = None,
- schemas: Optional[dict] = None,
- #
- interface: Optional[
- Union[
- WorkflowServiceInterface,
- Dict[str, Any],
- ]
- ] = None,
- # -------------------------------------------------------------------- #
- script: Optional[dict] = None,
- parameters: Optional[dict] = None,
- #
- configuration: Optional[
- Union[
- WorkflowServiceConfiguration,
- Dict[str, Any],
- ]
- ] = None,
- # -------------------------------------------------------------------- #
- aggregate: Optional[Union[bool, Callable]] = None, # stream to batch
- annotate: Optional[bool] = None, # annotation vs invocation
- # -------------------------------------------------------------------- #
- **kwargs,
+ schemas: Optional[Dict[str, Schema]] = None,
):
- # -------------------------------------------------------------------- #
- self.id = id
- self.slug = slug
- self.version = version
- #
- self.references = references # FIX TYPING
- # -------------------------------------------------------------------- #
- self.links = links
- # -------------------------------------------------------------------- #
- self.name = name
- self.description = description
- # -------------------------------------------------------------------- #
- self.flags = flags
- self.tags = tags
- self.meta = meta
- # -------------------------------------------------------------------- #
- self.uri = uri
- self.url = url
- self.headers = headers
- self.schemas = schemas
- #
- self.interface = interface
- # -------------------------------------------------------------------- #
- self.script = script
- self.parameters = parameters
- #
- self.configuration = configuration
- # -------------------------------------------------------------------- #
- self.aggregate = aggregate
- self.annotate = annotate
- # -------------------------------------------------------------------- #
- self.kwargs = kwargs
- # -------------------------------------------------------------------- #
-
- self.handler = None
-
- self.middlewares = [
- VaultMiddleware(),
- ResolverMiddleware(),
- NormalizerMiddleware(),
+ self.middleware: List[WorkflowMiddlewareDecorator] = [
+ AuthMiddleware,
+ FlagsMiddleware,
+ AdaptMiddleware,
]
- self.default_request = None
-
- self.uri = uri or (interface.uri if interface else None)
+ self.version = version or LATEST_VERSION
+ self.schemas = schemas or DEFAULT_SCHEMAS()
- if self.uri is not None:
- self._retrieve_handler(self.uri)
+ def __call__(self, func: Callable[..., Any]) -> Callable[..., Any]:
+ is_async = iscoroutinefunction(func)
- if self.handler:
- self.interface = retrieve_interface(self.uri) or self.interface
- if isinstance(self.interface, WorkflowServiceInterface):
- self.uri = self.interface.uri or self.uri
- self.configuration = self.configuration or retrieve_configuration(
- self.uri
- )
- if not isinstance(self.configuration, WorkflowServiceConfiguration):
- self.configuration = WorkflowServiceConfiguration()
- self.configuration.parameters = (
- self.parameters or self.configuration.parameters
- )
- self.parameters = self.configuration.parameters
+ workflow_registry = workflow_registry_context.get()
- if is_custom_uri(self.uri):
- self.flags = self.flags or dict()
- self.flags["is_custom"] = True
+ workflow_registry.version = workflow_registry.version or self.version
- def __call__(self, handler: Optional[Callable[..., Any]] = None) -> Workflow:
- if self.handler is None and handler is not None:
- self._register_handler(
- handler,
- uri=self.uri,
- )
-
- if self.handler is not None:
- self._extend_handler()
-
- if is_custom_uri(self.uri):
- self.flags = self.flags or dict()
- self.flags["is_custom"] = True
-
- return self.handler
+ if is_async:
- raise NotImplementedError("workflow without handler is not implemented yet")
-
- def _register_handler(
- self,
- handler: Optional[Callable[..., Any]] = None,
- uri: Optional[str] = None,
- ):
- """Register a handler function with the workflow system.
+ @wraps(func)
+ async def async_wrapper(*args, **kwargs):
+ if workflow_mode_enabled_context.get():
+ return await self._wrapped_async(func)(*args, **kwargs)
+ return await func(*args, **kwargs)
- Takes a callable handler, instruments it for observability, and registers it
- in the global handler registry with a URI. Also initializes or updates the
- workflow's interface with the URI and schemas.
+ workflow_registry.handlers = {
+ "invoke": async_wrapper,
+ "inspect": self.make_interface_wrapper(self.version, self.schemas),
+ }
- Args:
- handler: The callable function to register as the workflow handler
- uri: Optional URI to use for registration; if None, one will be generated
- """
- if handler is not None and callable(handler):
- instrumented = auto_instrument(handler)
- uri = register_handler(instrumented, uri=uri)
- if self.interface is None:
- self.interface = WorkflowServiceInterface()
- self.uri = uri
- self.interface.uri = uri
- self.interface.schemas = self.schemas
- self.handler = instrumented
+ return async_wrapper
- def _retrieve_handler(self, uri: str):
- self.handler = retrieve_handler(uri)
- if self.handler is None:
- raise ValueError(f"Unable to retrieve handler for URI: {uri}")
- if self.interface is None:
- self.interface = WorkflowServiceInterface()
- self.uri = uri
- self.interface.uri = uri
- self.interface.schemas = self.schemas
+ else:
- def _extend_handler(self):
- """Extend the registered handler with additional workflow capabilities.
-
- Wraps the handler function to:
- 1. Automatically inject workflow parameters if the handler expects them
- 2. Expose workflow-specific methods (invoke, inspect) on the handler
- 3. Mark the handler with is_workflow flag for identification
- 4. Wrap everything in a Workflow object for consistent interface
-
- This transforms a plain function into a full-featured workflow that can be
- invoked programmatically via .invoke() or inspected via .inspect().
-
- Raises:
- RuntimeError: If no handler has been registered yet
- ValueError: If handler becomes None during extension (should never happen)
- """
- if self.handler is None:
- raise RuntimeError("No handler registered")
-
- func = self.handler
-
- @wraps(func)
- def wrapper(*args: Any, **kwargs: Any):
- if "parameters" in signature(func).parameters:
- return func(
- *args,
- **{**{"parameters": self.parameters}, **kwargs},
- )
- else:
+ @wraps(func)
+ def sync_wrapper(*args, **kwargs):
+ if workflow_mode_enabled_context.get():
+ return self._wrapped_async(func)(*args, **kwargs)
return func(*args, **kwargs)
- # expose workflow extras
- wrapper.invoke = self.invoke # type: ignore[attr-defined]
- wrapper.inspect = self.inspect # type: ignore[attr-defined]
- wrapper.is_workflow = True # type: ignore[attr-defined]
-
- if self.handler is None:
- raise ValueError("handler must be set before extending")
-
- self.handler = Workflow(wrapper)
-
- async def invoke(
- self,
- *,
- request: WorkflowServiceRequest,
- #
- secrets: Optional[list] = None,
- credentials: Optional[str] = None,
- #
- **kwargs,
- ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
- _flags = {**(self.flags or {}), **(request.flags or {})}
- _tags = {**(self.tags or {}), **(request.tags or {})}
- _meta = {**(self.meta or {}), **(request.meta or {})}
-
- credentials = credentials or (
- f"ApiKey {ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.api_key}"
- if ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.api_key
- else None
- )
-
- with tracing_context_manager(TracingContext.get()):
- tracing_ctx = TracingContext.get()
-
- tracing_ctx.credentials = credentials
-
- tracing_ctx.aggregate = self.aggregate
- tracing_ctx.annotate = self.annotate
-
- tracing_ctx.flags = _flags
- tracing_ctx.tags = _tags
- tracing_ctx.meta = _meta
-
- tracing_ctx.references = self.references
- tracing_ctx.links = self.links
-
- with running_context_manager(RunningContext.get()):
- running_ctx = RunningContext.get()
-
- running_ctx.secrets = secrets
- running_ctx.credentials = credentials
-
- running_ctx.interface = self.interface
- running_ctx.schemas = self.schemas
- running_ctx.configuration = self.configuration
- running_ctx.parameters = self.parameters
+ return sync_wrapper
- running_ctx.aggregate = self.aggregate
- running_ctx.annotate = self.annotate
-
- async def terminal(req: WorkflowServiceRequest):
- return None
-
- call_next = terminal
-
- for mw in reversed(self.middlewares):
- prev_next = call_next
-
- async def make_call(mw, prev_next):
- async def _call(
- req: WorkflowServiceRequest,
- ):
- return await mw(req, prev_next)
-
- return _call
-
- call_next = await make_call(mw, prev_next)
-
- return await call_next(request)
-
- async def inspect(
- self,
- *,
- credentials: Optional[str] = None,
- #
- **kwargs,
- ) -> WorkflowServiceRequest:
- with tracing_context_manager(TracingContext.get()):
- tracing_ctx = TracingContext.get()
-
- tracing_ctx.credentials = credentials
-
- tracing_ctx.aggregate = self.aggregate
- tracing_ctx.annotate = self.annotate
-
- tracing_ctx.references = self.references
- tracing_ctx.links = self.links
-
- with running_context_manager(RunningContext.get()):
- running_ctx = RunningContext.get()
-
- running_ctx.credentials = credentials
-
- running_ctx.interface = self.interface
- running_ctx.schemas = self.schemas
- running_ctx.configuration = self.configuration
- running_ctx.parameters = self.parameters
-
- running_ctx.aggregate = self.aggregate
- running_ctx.annotate = self.annotate
-
- if self.default_request is None:
- interface = await resolve_interface(
- interface=self.interface,
- **self.kwargs,
- )
- configuration = await resolve_configuration(
- configuration=self.configuration,
- **self.kwargs,
- )
-
- self.default_request = WorkflowServiceRequest(
- #
- interface=interface,
- configuration=configuration,
- #
- references=self.references,
- links=self.links,
- #
- flags=self.flags,
- tags=self.tags,
- meta=self.meta,
- #
- data=WorkflowServiceRequestData(
- revision=WorkflowRevision(
- id=self.id,
- slug=self.slug,
- version=self.version,
- #
- name=self.name,
- description=self.description,
- ).model_dump(
- mode="json",
- exclude_none=True,
- ),
- ),
- )
-
- return self.default_request
-
-
-def is_workflow(obj: Any) -> bool:
- return getattr(obj, "is_workflow", False) or isinstance(
- getattr(obj, "workflow", None), workflow
- )
-
-
-def auto_workflow(obj: Any, **kwargs) -> Workflow:
- if is_workflow(obj):
- return obj
- if isinstance(obj, workflow):
- return obj()
- if isinstance(getattr(obj, "workflow", None), workflow):
- return obj
-
- return workflow(**kwargs)(obj)
-
-
-async def invoke_workflow(
- request: WorkflowServiceRequest,
- #
- secrets: Optional[list] = None,
- credentials: Optional[str] = None,
- #
- **kwargs,
-) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
- return await workflow(
- data=request.data,
- #
- interface=request.interface,
- configuration=request.configuration,
- #
- flags=request.flags,
- tags=request.tags,
- meta=request.meta,
- #
- references=request.references,
- links=request.links,
- #
- **kwargs,
- )().invoke(
- request=request,
- #
- secrets=secrets,
- credentials=credentials,
- #
- **kwargs,
- )
-
-
-async def inspect_workflow(
- request: WorkflowServiceRequest,
- #
- credentials: Optional[str] = None,
- #
- **kwargs,
-) -> WorkflowServiceRequest:
- return await workflow(
- data=request.data,
- #
- interface=request.interface,
- configuration=request.configuration,
- #
- flags=request.flags,
- tags=request.tags,
- meta=request.meta,
- #
- references=request.references,
- links=request.links,
- )().inspect(
- credentials=credentials,
- #
- **kwargs,
- )
-
-
-class application(workflow):
- def __init__(
- self,
- #
- slug: Optional[str] = None,
- *,
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- parameters: Optional[dict] = None,
- schemas: Optional[dict] = None,
- #
- variant_slug: Optional[str] = None,
- #
- **kwargs,
- ):
- kwargs["flags"] = dict(
- # is_custom=False, # None / False / missing is the same
- # is_evaluator=False, # None / False / missing is the same
- # is_human=False, # None / False / missing is the same
- )
-
- if not "references" in kwargs or not isinstance(kwargs["references"], dict):
- kwargs["references"] = dict()
-
- for key in kwargs["references"]:
- if key.startswith("evaluator_"):
- del kwargs["references"][key]
-
- if slug is not None:
- kwargs["references"]["application"] = {"slug": slug}
- if variant_slug is not None:
- kwargs["references"]["application_variant"] = {"slug": variant_slug}
-
- super().__init__(
- name=name,
- description=description,
- #
- parameters=parameters,
- schemas=schemas,
- #
- **kwargs,
- )
-
-
-async def invoke_application(
- request: WorkflowServiceRequest,
- #
- secrets: Optional[list] = None,
- credentials: Optional[str] = None,
- #
- **kwargs,
-) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
- return await application(
- data=request.data,
- #
- interface=request.interface,
- configuration=request.configuration,
- #
- flags=request.flags,
- tags=request.tags,
- meta=request.meta,
- #
- references=request.references,
- links=request.links,
- #
- **kwargs,
- )().invoke(
- request=request,
- #
- secrets=secrets,
- credentials=credentials,
- #
- **kwargs,
- )
-
-
-async def inspect_application(
- request: WorkflowServiceRequest,
- #
- credentials: Optional[str] = None,
- #
- **kwargs,
-) -> WorkflowServiceRequest:
- return await application(
- data=request.data,
- #
- interface=request.interface,
- configuration=request.configuration,
- #
- flags=request.flags,
- tags=request.tags,
- meta=request.meta,
- #
- references=request.references,
- links=request.links,
- )().inspect(
- credentials=credentials,
- #
- **kwargs,
- )
-
-
-class evaluator(workflow):
- def __init__(
+ def _wrapped_async(
self,
- #
- slug: Optional[str] = None,
- *,
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- parameters: Optional[dict] = None,
- schemas: Optional[dict] = None,
- #
- variant_slug: Optional[str] = None,
- #
- **kwargs,
- ):
- kwargs["flags"] = dict(
- # is_custom=False, # None / False / missing is the same
- is_evaluator=True,
- # is_human=False, # None / False / missing is the same
- )
-
- if not "references" in kwargs or not isinstance(kwargs["references"], dict):
- kwargs["references"] = dict()
+ func: Callable[..., Any],
+ ) -> Callable[..., Awaitable[WorkflowServiceResponse]]:
+ @decorator
+ async def async_wrapper(func, *args, **kwargs):
+ result = (
+ await func(*args, **kwargs)
+ if iscoroutinefunction(func)
+ else await asyncio.to_thread(func, *args, **kwargs)
+ )
- for key in kwargs["references"]:
- if key.startswith("application_"):
- del kwargs["references"][key]
+ return result
- if slug is not None:
- kwargs["references"]["evaluator"] = {"slug": slug}
- if variant_slug is not None:
- kwargs["references"]["evaluator_variant"] = {"slug": variant_slug}
+ @wraps(func)
+ async def wrapper(*args, **kwargs):
+ handler = async_wrapper(func, *args, **kwargs)
+ request: WorkflowServiceRequest = (
+ kwargs.pop("request")
+ if "request" in kwargs
+ else args[0]
+ if len(args) > 0
+ else None
+ )
+ revision: WorkflowRevision = (
+ kwargs.pop("revision")
+ if "revision" in kwargs
+ else args[1]
+ if len(args) > 1
+ else None
+ )
- super().__init__(
- name=name,
- description=description,
- #
- parameters=parameters,
- schemas=schemas,
- #
- **kwargs,
- )
+ _handler = handler
+ for middleware in reversed(self.middleware):
+ _handler = middleware(_handler)
+ return await _handler(
+ request=request,
+ revision=revision,
+ )
-async def invoke_evaluator(
- request: WorkflowServiceRequest,
- #
- secrets: Optional[list] = None,
- credentials: Optional[str] = None,
- #
- **kwargs,
-) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
- return await evaluator(
- data=request.data,
- #
- interface=request.interface,
- configuration=request.configuration,
- #
- flags=request.flags,
- tags=request.tags,
- meta=request.meta,
- #
- references=request.references,
- links=request.links,
- #
- **kwargs,
- )().invoke(
- request=request,
- #
- secrets=secrets,
- credentials=credentials,
- #
- **kwargs,
- )
+ return wrapper
+ def make_interface_wrapper(self, path, schemas):
+ async def interface_wrapper() -> WorkflowServiceInterface:
+ return WorkflowServiceInterface(
+ schemas={path: schemas},
+ )
-async def inspect_evaluator(
- request: WorkflowServiceRequest,
- #
- credentials: Optional[str] = None,
- #
- **kwargs,
-) -> WorkflowServiceRequest:
- return await evaluator(
- data=request.data,
- #
- interface=request.interface,
- configuration=request.configuration,
- #
- flags=request.flags,
- tags=request.tags,
- meta=request.meta,
- #
- references=request.references,
- links=request.links,
- )().inspect(
- credentials=credentials,
- #
- **kwargs,
- )
+ return interface_wrapper
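
The replacement `wrapper` above composes `self.middleware` by folding in reverse, so the first middleware listed runs outermost. A minimal, self-contained sketch of that pattern (the `Handler` alias and `make_middleware` helper are illustrative, not part of the SDK):

```python
import asyncio
from typing import Any, Awaitable, Callable

Handler = Callable[..., Awaitable[Any]]

def make_middleware(name: str) -> Callable[[Handler], Handler]:
    # Each middleware takes the next handler and returns a wrapped handler.
    def middleware(next_handler: Handler) -> Handler:
        async def wrapped(**kwargs: Any) -> Any:
            print(f"enter {name}")
            result = await next_handler(**kwargs)
            print(f"exit {name}")
            return result
        return wrapped
    return middleware

async def handler(**kwargs: Any) -> Any:
    return kwargs.get("request")

async def main() -> None:
    middleware = [make_middleware("auth"), make_middleware("tracing")]
    _handler = handler
    # reversed() makes middleware[0] the outermost layer, mirroring
    # `for middleware in reversed(self.middleware)` in the hunk above.
    for mw in reversed(middleware):
        _handler = mw(_handler)
    print(await _handler(request="payload", revision=None))

asyncio.run(main())
```

Running this prints `enter auth`, `enter tracing`, `exit tracing`, `exit auth`, then `payload`: the list order is the call order.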
diff --git a/sdk/agenta/sdk/decorators/serving.py b/sdk/agenta/sdk/decorators/serving.py
index c8683e4f63..77f9826e8c 100644
--- a/sdk/agenta/sdk/decorators/serving.py
+++ b/sdk/agenta/sdk/decorators/serving.py
@@ -28,12 +28,13 @@
from agenta.sdk.middleware.auth import AuthHTTPMiddleware
from agenta.sdk.middleware.cors import CORSMiddleware
-from agenta.sdk.contexts.routing import (
- routing_context_manager,
+from agenta.sdk.context.serving import (
+ serving_context_manager,
RoutingContext,
)
-from agenta.sdk.contexts.tracing import (
+from agenta.sdk.context.tracing import (
tracing_context_manager,
+ tracing_context,
TracingContext,
)
from agenta.sdk.router import router
@@ -337,7 +338,7 @@ async def execute_wrapper(
inline = state.inline
mock = state.mock
- with routing_context_manager(
+ with serving_context_manager(
context=RoutingContext(
parameters=parameters,
secrets=secrets,
@@ -393,15 +394,15 @@ async def handle_success(
try:
if isinstance(result, StarletteResponse):
- result.headers.setdefault("x-ag-version", "3.0")
+ result.headers.setdefault("X-ag-version", "3.0")
if content_type:
- result.headers.setdefault("x-ag-content-type", content_type)
+ result.headers.setdefault("X-ag-content-type", content_type)
if tree_id:
- result.headers.setdefault("x-ag-tree-id", tree_id)
+ result.headers.setdefault("X-ag-tree-id", tree_id)
if trace_id:
- result.headers.setdefault("x-ag-trace-id", trace_id)
+ result.headers.setdefault("X-ag-trace-id", trace_id)
if span_id:
- result.headers.setdefault("x-ag-span-id", span_id)
+ result.headers.setdefault("X-ag-span-id", span_id)
return result
except:
@@ -529,7 +530,7 @@ def patch_result(
async def fetch_inline_trace_id(
self,
):
- context = TracingContext.get()
+ context = tracing_context.get()
link = context.link
@@ -548,7 +549,7 @@ async def fetch_inline_trace(
TIMESTEP = 0.1
NOFSTEPS = TIMEOUT / TIMESTEP
- context = TracingContext.get()
+ context = tracing_context.get()
link = context.link
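
These hunks also trade the `TracingContext.get()` class accessor for a module-level `tracing_context.get()`, which reads like a plain `contextvars.ContextVar`. A hedged sketch of that pattern, assuming a ContextVar-backed context (the `Ctx` dataclass is a stand-in, not the SDK's type):

```python
from contextvars import ContextVar
from dataclasses import dataclass
from typing import Optional

@dataclass
class Ctx:  # illustrative stand-in for the SDK's tracing context
    credentials: Optional[str] = None
    link: Optional[dict] = None

# A module-level ContextVar with a default, so .get() never raises LookupError.
tracing_context: ContextVar[Ctx] = ContextVar("tracing_context", default=Ctx())

def set_link(trace_id: int, span_id: int) -> None:
    # Mirrors _set_link(): only record the first trace/span pair seen.
    ctx = tracing_context.get()
    if not ctx.link:
        ctx.link = {"trace_id": trace_id, "span_id": span_id}
        tracing_context.set(ctx)

set_link(0xABC, 0x123)
print(tracing_context.get().link)  # {'trace_id': 2748, 'span_id': 291}
```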
diff --git a/sdk/agenta/sdk/decorators/tracing.py b/sdk/agenta/sdk/decorators/tracing.py
index e94dd7f3f1..e4cfdd5744 100644
--- a/sdk/agenta/sdk/decorators/tracing.py
+++ b/sdk/agenta/sdk/decorators/tracing.py
@@ -1,9 +1,8 @@
-# /agenta/sdk/decorators/tracing.py
-
from typing import Callable, Optional, Any, Dict, List, Union
from opentelemetry import context as otel_context
from opentelemetry.context import attach, detach
+from opentelemetry import trace
from functools import wraps
@@ -15,37 +14,21 @@
isasyncgenfunction,
)
-from pydantic import BaseModel
-
from opentelemetry import baggage
from opentelemetry.context import attach, detach, get_current
from opentelemetry.baggage import set_baggage, get_all
from agenta.sdk.utils.logging import get_module_logger
from agenta.sdk.utils.exceptions import suppress
-from agenta.sdk.contexts.tracing import (
- TracingContext,
- tracing_context_manager,
-)
+from agenta.sdk.utils.otel import debug_otel_context
+from agenta.sdk.context.tracing import tracing_context
from agenta.sdk.tracing.conventions import parse_span_kind
import agenta as ag
-
log = get_module_logger(__name__)
-def _has_instrument(handler: Callable[..., Any]) -> bool:
- return bool(getattr(handler, "__has_instrument__", False))
-
-
-def auto_instrument(handler: Callable[..., Any]) -> Callable[..., Any]:
- if _has_instrument(handler):
- return handler
-
- return instrument()(handler)
-
-
class instrument: # pylint: disable=invalid-name
DEFAULT_KEY = "__default__"
@@ -58,8 +41,7 @@ def __init__(
redact: Optional[Callable[..., Any]] = None,
redact_on_error: Optional[bool] = True,
max_depth: Optional[int] = 2,
- aggregate: Optional[Union[bool, Callable]] = None, # stream to batch
- annotate: Optional[bool] = None, # annotation vs invocation
+ aggregate: Optional[Callable[[List[Any]], Any]] = None,
# DEPRECATING
kind: str = "task",
spankind: Optional[str] = "TASK",
@@ -73,183 +55,142 @@ def __init__(
self.redact_on_error = redact_on_error
self.max_depth = max_depth
self.aggregate = aggregate
- self.annotate = annotate
- def __call__(self, handler: Callable[..., Any]):
- is_coroutine_function = iscoroutinefunction(handler)
- is_sync_generator = isgeneratorfunction(handler)
- is_async_generator = isasyncgenfunction(handler)
+ def __call__(self, func: Callable[..., Any]):
+ is_coroutine_function = iscoroutinefunction(func)
+ is_sync_generator = isgeneratorfunction(func)
+ is_async_generator = isasyncgenfunction(func)
# ---- ASYNC GENERATOR ----
if is_async_generator:
- @wraps(handler)
+ @wraps(func)
def astream_wrapper(*args, **kwargs):
- with tracing_context_manager(context=TracingContext.get()):
- # debug_otel_context("[BEFORE STREAM] [BEFORE SETUP]")
+ # debug_otel_context("[BEFORE STREAM] [BEFORE SETUP]")
- captured_ctx = otel_context.get_current()
+ captured_ctx = otel_context.get_current()
- self._parse_type_and_kind()
+ self._parse_type_and_kind()
- self._attach_baggage()
+ self._attach_baggage()
- ctx = self._get_traceparent()
+ ctx = self._get_traceparent()
- # debug_otel_context("[BEFORE STREAM] [AFTER SETUP]")
+ # debug_otel_context("[BEFORE STREAM] [AFTER SETUP]")
- async def wrapped_generator():
- # debug_otel_context("[WITHIN STREAM] [BEFORE ATTACH]")
+ async def wrapped_generator():
+ # debug_otel_context("[WITHIN STREAM] [BEFORE ATTACH]")
- otel_token = otel_context.attach(captured_ctx)
+ otel_token = otel_context.attach(captured_ctx)
- # debug_otel_context("[WITHIN STREAM] [AFTER ATTACH]")
+ # debug_otel_context("[WITHIN STREAM] [AFTER ATTACH]")
- try:
- with ag.tracer.start_as_current_span(
- name=handler.__name__,
- kind=self.kind,
- context=ctx,
- ):
- self._set_link()
- self._pre_instrument(handler, *args, **kwargs)
+ try:
+ with ag.tracer.start_as_current_span(
+ name=func.__name__,
+ kind=self.kind,
+ context=ctx,
+ ):
+ self._set_link()
+ self._pre_instrument(func, *args, **kwargs)
- _result = []
+ _result = []
- agen = handler(*args, **kwargs)
+ agen = func(*args, **kwargs)
- try:
- async for chunk in agen:
- _result.append(chunk)
- yield chunk
+ try:
+ async for chunk in agen:
+ _result.append(chunk)
+ yield chunk
- finally:
- if self.aggregate and callable(self.aggregate):
- result = self.aggregate(_result)
- elif all(isinstance(r, str) for r in _result):
- result = "".join(_result)
- elif all(isinstance(r, bytes) for r in _result):
- result = b"".join(_result)
- else:
- result = _result
+ finally:
+ if self.aggregate:
+ result = self.aggregate(_result)
+ elif all(isinstance(r, str) for r in _result):
+ result = "".join(_result)
+ elif all(isinstance(r, bytes) for r in _result):
+ result = b"".join(_result)
+ else:
+ result = _result
- self._post_instrument(result)
+ self._post_instrument(result)
- finally:
- # debug_otel_context("[WITHIN STREAM] [BEFORE DETACH]")
+ finally:
+ # debug_otel_context("[WITHIN STREAM] [BEFORE DETACH]")
- otel_context.detach(otel_token)
+ otel_context.detach(otel_token)
- # debug_otel_context("[WITHIN STREAM] [AFTER DETACH]")
+ # debug_otel_context("[WITHIN STREAM] [AFTER DETACH]")
return wrapped_generator()
- setattr(astream_wrapper, "__has_instrument__", True)
- setattr(astream_wrapper, "__original_handler__", handler)
return astream_wrapper
# ---- SYNC GENERATOR ----
if is_sync_generator:
- @wraps(handler)
+ @wraps(func)
def stream_wrapper(*args, **kwargs):
- with tracing_context_manager(context=TracingContext.get()):
- self._parse_type_and_kind()
+ self._parse_type_and_kind()
- token = self._attach_baggage()
+ token = self._attach_baggage()
- ctx = self._get_traceparent()
+ ctx = self._get_traceparent()
- def wrapped_generator():
- try:
- with ag.tracer.start_as_current_span(
- name=handler.__name__,
- kind=self.kind,
- context=ctx,
- ):
- self._set_link()
+ def wrapped_generator():
+ try:
+ with ag.tracer.start_as_current_span(
+ name=func.__name__,
+ kind=self.kind,
+ context=ctx,
+ ):
+ self._set_link()
- self._pre_instrument(handler, *args, **kwargs)
+ self._pre_instrument(func, *args, **kwargs)
- _result = []
+ _result = []
- gen = handler(*args, **kwargs)
+ gen = func(*args, **kwargs)
- gen_return = None
+ gen_return = None
- try:
- while True:
- try:
- chunk = next(gen)
- except StopIteration as e:
- gen_return = e.value
- break
+ try:
+ while True:
+ try:
+ chunk = next(gen)
+ except StopIteration as e:
+ gen_return = e.value
+ break
- _result.append(chunk)
- yield chunk
+ _result.append(chunk)
+ yield chunk
- finally:
- if self.aggregate and callable(self.aggregate):
- result = self.aggregate(_result)
- elif all(isinstance(r, str) for r in _result):
- result = "".join(_result)
- elif all(isinstance(r, bytes) for r in _result):
- result = b"".join(_result)
- else:
- result = _result
+ finally:
+ if self.aggregate:
+ result = self.aggregate(_result)
+ elif all(isinstance(r, str) for r in _result):
+ result = "".join(_result)
+ elif all(isinstance(r, bytes) for r in _result):
+ result = b"".join(_result)
+ else:
+ result = _result
- self._post_instrument(result)
+ self._post_instrument(result)
- return gen_return
+ return gen_return
- finally:
- self._detach_baggage(token)
+ finally:
+ self._detach_baggage(token)
return wrapped_generator()
- setattr(stream_wrapper, "__has_instrument__", True)
- setattr(stream_wrapper, "__original_handler__", handler)
return stream_wrapper
- # ---- ASYNC FUNCTION ----
+ # ---- ASYNC FUNCTION (non-generator) ----
if is_coroutine_function:
- @wraps(handler)
+ @wraps(func)
async def awrapper(*args, **kwargs):
- with tracing_context_manager(context=TracingContext.get()):
- self._parse_type_and_kind()
-
- token = self._attach_baggage()
-
- ctx = self._get_traceparent()
-
- try:
- with ag.tracer.start_as_current_span(
- name=handler.__name__,
- kind=self.kind,
- context=ctx,
- ):
- self._set_link()
-
- self._pre_instrument(handler, *args, **kwargs)
-
- result = await handler(*args, **kwargs)
-
- self._post_instrument(result)
-
- finally:
- self._detach_baggage(token)
-
- return result
-
- setattr(awrapper, "__has_instrument__", True)
- setattr(awrapper, "__original_handler__", handler)
- return awrapper
-
- # ---- SYNC FUNCTION ----
- @wraps(handler)
- def wrapper(*args, **kwargs):
- with tracing_context_manager(context=TracingContext.get()):
self._parse_type_and_kind()
token = self._attach_baggage()
@@ -258,15 +199,15 @@ def wrapper(*args, **kwargs):
try:
with ag.tracer.start_as_current_span(
- name=handler.__name__,
+ name=func.__name__,
kind=self.kind,
context=ctx,
):
self._set_link()
- self._pre_instrument(handler, *args, **kwargs)
+ self._pre_instrument(func, *args, **kwargs)
- result = handler(*args, **kwargs)
+ result = await func(*args, **kwargs)
self._post_instrument(result)
@@ -275,8 +216,36 @@ def wrapper(*args, **kwargs):
return result
- setattr(wrapper, "__has_instrument__", True)
- setattr(wrapper, "__original_handler__", handler)
+ return awrapper
+
+ # ---- SYNC FUNCTION (non-generator) ----
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ self._parse_type_and_kind()
+
+ token = self._attach_baggage()
+
+ ctx = self._get_traceparent()
+
+ try:
+ with ag.tracer.start_as_current_span(
+ name=func.__name__,
+ kind=self.kind,
+ context=ctx,
+ ):
+ self._set_link()
+
+ self._pre_instrument(func, *args, **kwargs)
+
+ result = func(*args, **kwargs)
+
+ self._post_instrument(result)
+
+ finally:
+ self._detach_baggage(token)
+
+ return result
+
return wrapper
def _parse_type_and_kind(self):
@@ -286,7 +255,7 @@ def _parse_type_and_kind(self):
self.kind = parse_span_kind(self.type)
def _get_traceparent(self):
- context = TracingContext.get()
+ context = tracing_context.get()
traceparent = context.traceparent
@@ -299,7 +268,7 @@ def _get_traceparent(self):
def _set_link(self):
span = ag.tracing.get_current_span()
- context = TracingContext.get()
+ context = tracing_context.get()
if not context.link:
context.link = {
@@ -307,10 +276,10 @@ def _set_link(self):
"span_id": span.get_span_context().span_id,
}
- TracingContext.set(context)
+ tracing_context.set(context)
def _attach_baggage(self):
- context = TracingContext.get()
+ context = tracing_context.get()
references = context.references
@@ -330,24 +299,26 @@ def _detach_baggage(
def _pre_instrument(
self,
- handler,
+ func,
*args,
**kwargs,
):
span = ag.tracing.get_current_span()
- context = TracingContext.get()
+ context = tracing_context.get()
with suppress():
trace_id = span.context.trace_id
ag.tracing.credentials.put(trace_id, context.credentials)
+ trace_type = context.type or "invocation"
span_type = self.type or "task"
span.set_attributes(
attributes={
"node": span_type,
+ "tree": trace_type,
},
namespace="type",
)
@@ -361,7 +332,7 @@ def _pre_instrument(
_inputs = self._redact(
name=span.name,
field="inputs",
- io=self._parse(handler, *args, **kwargs),
+ io=self._parse(func, *args, **kwargs),
ignore=self.ignore_inputs,
)
@@ -432,14 +403,14 @@ def _post_instrument(
def _parse(
self,
- handler,
+ func,
*args,
**kwargs,
) -> Dict[str, Any]:
inputs = {
key: value
for key, value in chain(
- zip(getfullargspec(handler).args, args),
+ zip(getfullargspec(func).args, args),
kwargs.items(),
)
}
@@ -493,22 +464,6 @@ def _redact(
if ag.tracing.redact_on_error:
io = {}
- if "request" in io:
- with suppress():
- if isinstance(io["request"], BaseModel):
- io["request"] = io["request"].model_dump(
- mode="json",
- exclude_none=True,
- )
-
- if "response" in io:
- with suppress():
- if isinstance(io["response"], BaseModel):
- io["response"] = io["response"].model_dump(
- mode="json",
- exclude_none=True,
- )
-
return io
def _patch(
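
With `aggregate` narrowed to `Optional[Callable[[List[Any]], Any]]`, the generator wrappers fold the collected stream chunks as follows: a provided callable wins, homogeneous `str`/`bytes` chunks are joined, and anything else stays a list. The same fallback logic in isolation:

```python
from typing import Any, Callable, List, Optional

def fold_chunks(
    chunks: List[Any],
    aggregate: Optional[Callable[[List[Any]], Any]] = None,
) -> Any:
    # Mirrors the `finally` block of the wrapped generators above.
    if aggregate:
        return aggregate(chunks)
    if all(isinstance(c, str) for c in chunks):
        return "".join(chunks)
    if all(isinstance(c, bytes) for c in chunks):
        return b"".join(chunks)
    return chunks

print(fold_chunks(["Hel", "lo"]))             # 'Hello'
print(fold_chunks([b"\x00", b"\x01"]))        # b'\x00\x01'
print(fold_chunks([1, 2, 3], aggregate=sum))  # 6
```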
diff --git a/sdk/agenta/sdk/engines/__init__.py b/sdk/agenta/sdk/engines/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sdk/agenta/sdk/engines/running/__init__.py b/sdk/agenta/sdk/engines/running/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sdk/agenta/sdk/engines/tracing/__init__.py b/sdk/agenta/sdk/engines/tracing/__init__.py
deleted file mode 100644
index 734c38b64d..0000000000
--- a/sdk/agenta/sdk/engines/tracing/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .tracing import Tracing, get_tracer
diff --git a/sdk/agenta/sdk/engines/tracing/attributes.py b/sdk/agenta/sdk/engines/tracing/attributes.py
deleted file mode 100644
index bdc042c9bb..0000000000
--- a/sdk/agenta/sdk/engines/tracing/attributes.py
+++ /dev/null
@@ -1,185 +0,0 @@
-from json import loads, dumps
-from typing import Optional, Union, Sequence, Any, Dict
-
-Primitive = Union[str, int, float, bool, bytes]
-PrimitivesSequence = Sequence[Primitive]
-Attribute = Union[Primitive, PrimitivesSequence]
-
-
-def _marshal(
- unmarshalled: Dict[str, Any],
- *,
- parent_key: Optional[str] = "",
- depth: Optional[int] = 0,
- max_depth: Optional[int] = None,
-) -> Dict[str, Any]:
- """
- Marshals a dictionary of unmarshalled attributes into a flat dictionary
-
- Example:
- unmarshalled = {
- "ag": {
- "type": "tree",
- "node": {
- "name": "root",
- "children": [
- {
- "name": "child1",
- },
- {
- "name": "child2",
- }
- ]
- }
- }
- }
- marshalled = {
- "ag.type": "tree",
- "ag.node.name": "root",
- "ag.node.children.0.name": "child1",
- "ag.node.children.1.name": "child2"
- }
- """
- marshalled = {}
-
- # If max_depth is set and we've reached it,
- # just return the unmarshalled attributes
- if max_depth is not None and depth >= max_depth:
- marshalled[parent_key] = unmarshalled
- # MISSING ENCODING TO JSON IF NOT PRIMITIVE
-
- return marshalled
-
- # Otherwise,
- # iterate over the unmarshalled attributes and marshall them
- for key, value in unmarshalled.items():
- child_key = f"{parent_key}.{key}" if parent_key else key
-
- if isinstance(value, dict):
- dict_key = child_key
-
- marshalled.update(
- _marshal(
- value,
- parent_key=dict_key,
- depth=depth + 1,
- max_depth=max_depth,
- )
- )
- elif isinstance(value, list):
- if max_depth is not None and depth + 1 >= max_depth:
- marshalled[child_key] = value
- # MISSING ENCODING TO JSON IF NOT PRIMITIVE
- else:
- for i, item in enumerate(value):
- list_key = f"{child_key}.{i}"
-
- if isinstance(item, (dict, list)):
- marshalled.update(
- _marshal(
- item,
- parent_key=list_key,
- depth=depth + 1,
- max_depth=max_depth,
- )
- )
- else:
- marshalled[list_key] = item
- # MISSING ENCODING TO JSON IF NOT PRIMITIVE
- else:
- marshalled[child_key] = value
- # MISSING ENCODING TO JSON IF NOT PRIMITIVE
-
- return marshalled
-
-
-def _encode_key(
- namespace: Optional[str] = None,
- key: str = "",
-) -> str:
- if namespace is None:
- return key
-
- return f"ag.{namespace}.{key}"
-
-
-def _make_serializable(value: Any) -> Any:
- """
- Transform complex nested structures into JSON-serializable form.
- Handles Pydantic models, nested dictionaries and lists recursively.
- """
- if value is None or isinstance(value, (str, int, float, bool, bytes)):
- return value
-
- # Handle Pydantic objects (prioritize v2 over v1 API)
- if hasattr(value, "model_dump"): # Pydantic v2
- return value.model_dump()
- elif hasattr(value, "dict"): # Pydantic v1
- return value.dict()
-
- if isinstance(value, dict):
- try:
- # Test serialization without modifying - optimizes for already-serializable dicts
- dumps(
- value
- ) # If serialization fails, we'll catch the exception and process deeply
- return value # Avoid unnecessary recursion for serializable dicts
- except TypeError:
- return {k: _make_serializable(v) for k, v in value.items()}
- elif isinstance(value, list):
- try:
- # Test serialization without modifying - optimizes for already-serializable lists
- dumps(
- value
- ) # If serialization fails, we'll catch the exception and process deeply
- return value # Avoid unnecessary recursion for serializable lists
- except TypeError:
- return [_make_serializable(item) for item in value]
-
- return repr(value)
-
-
-def _encode_value(value: Any) -> Optional[Attribute]:
- """
- Encode values for tracing, ensuring proper JSON serialization.
- Adds the @ag.type=json: prefix only to appropriate values.
- """
- if value is None:
- return None
-
- if isinstance(value, (str, int, float, bool, bytes)):
- return value
-
- try:
- if (
- isinstance(value, (dict, list))
- or hasattr(value, "model_dump")
- or hasattr(value, "dict")
- ):
- serializable_value = _make_serializable(value)
- return "@ag.type=json:" + dumps(serializable_value)
- except TypeError:
- pass
-
- return repr(value)
-
-
-def serialize(
- *,
- namespace: str,
- attributes: Dict[str, Any],
- max_depth: Optional[int] = None,
-) -> Dict[str, str]:
- if not isinstance(attributes, dict):
- return {}
-
- _attributes = {
- k: v
- for k, v in {
- _encode_key(namespace, key): _encode_value(value)
- for key, value in _marshal(attributes, max_depth=max_depth).items()
- }.items()
- if v is not None
- }
-
- return _attributes
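
For reference, the deleted `serialize` flattened nested attributes into dotted, namespaced keys, as its `_marshal` docstring shows. An independent, illustrative re-creation of that flattening (not the deleted code itself, and without the `@ag.type=json:` encoding step):

```python
from typing import Any, Dict

def flatten(ns: str, attrs: Dict[str, Any], parent: str = "") -> Dict[str, Any]:
    # Dicts recurse by key, lists by index; leaves get the ag.<ns>. prefix.
    flat: Dict[str, Any] = {}
    for key, value in attrs.items():
        child = f"{parent}.{key}" if parent else key
        if isinstance(value, dict):
            flat.update(flatten(ns, value, child))
        elif isinstance(value, list):
            flat.update(flatten(ns, {str(i): v for i, v in enumerate(value)}, child))
        else:
            flat[f"ag.{ns}.{child}"] = value
    return flat

print(flatten("data", {"inputs": {"q": "hi"}, "outputs": ["a", "b"]}))
# {'ag.data.inputs.q': 'hi', 'ag.data.outputs.0': 'a', 'ag.data.outputs.1': 'b'}
```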
diff --git a/sdk/agenta/sdk/engines/tracing/conventions.py b/sdk/agenta/sdk/engines/tracing/conventions.py
deleted file mode 100644
index 018cf64dea..0000000000
--- a/sdk/agenta/sdk/engines/tracing/conventions.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from enum import Enum
-from re import fullmatch
-
-from opentelemetry.trace import SpanKind
-
-
-class Reference(str, Enum):
- #
- VARIANT_ID = "variant.id"
- VARIANT_SLUG = "variant.slug"
- VARIANT_VERSION = "variant.version"
- #
- ENVIRONMENT_ID = "environment.id"
- ENVIRONMENT_SLUG = "environment.slug"
- ENVIRONMENT_VERSION = "environment.version"
- #
- APPLICATION_ID = "application.id"
- APPLICATION_SLUG = "application.slug"
- #
-
-
-_PATTERN = r"[A-Za-z0-9._-]+"
-
-
-def is_valid_attribute_key(
- string: str,
-):
- return bool(fullmatch(_PATTERN, string))
-
-
-def parse_span_kind(type: str) -> SpanKind:
- kind = SpanKind.INTERNAL
- if type in [
- "agent",
- "chain",
- "workflow",
- ]:
- kind = SpanKind.SERVER
- elif type in [
- "tool",
- "embedding",
- "query",
- "completion",
- "chat",
- "rerank",
- ]:
- kind = SpanKind.CLIENT
-
- return kind
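
The deleted `parse_span_kind` collapsed node types into three OTel span kinds: orchestration types map to `SERVER`, leaf model and retrieval calls to `CLIENT`, everything else to `INTERNAL`. A table-driven equivalent (illustrative, not the SDK's API):

```python
from opentelemetry.trace import SpanKind

SERVER_TYPES = {"agent", "chain", "workflow"}
CLIENT_TYPES = {"tool", "embedding", "query", "completion", "chat", "rerank"}

def span_kind_for(node_type: str) -> SpanKind:
    # Orchestration nodes are SERVER; leaf model/retrieval calls are CLIENT.
    if node_type in SERVER_TYPES:
        return SpanKind.SERVER
    if node_type in CLIENT_TYPES:
        return SpanKind.CLIENT
    return SpanKind.INTERNAL

print(span_kind_for("workflow"), span_kind_for("chat"), span_kind_for("task"))
```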
diff --git a/sdk/agenta/sdk/engines/tracing/exporters.py b/sdk/agenta/sdk/engines/tracing/exporters.py
deleted file mode 100644
index 0cea71d4d8..0000000000
--- a/sdk/agenta/sdk/engines/tracing/exporters.py
+++ /dev/null
@@ -1,130 +0,0 @@
-from typing import Sequence, Dict, List, Optional
-
-from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
-from opentelemetry.sdk.trace.export import (
- ConsoleSpanExporter,
- SpanExporter,
- SpanExportResult,
- ReadableSpan,
-)
-
-from agenta.sdk.utils.logging import get_module_logger
-from agenta.sdk.utils.exceptions import suppress
-from agenta.sdk.utils.cache import TTLLRUCache
-from agenta.sdk.contexts.tracing import (
- otlp_context_manager,
- otlp_context,
- OTLPContext,
-)
-
-
-log = get_module_logger(__name__)
-
-
-class InlineTraceExporter(SpanExporter):
- def __init__(
- self,
- registry: Dict[str, List[ReadableSpan]],
- ):
- self._shutdown = False
- self._registry = registry
-
- def export(
- self,
- spans: Sequence[ReadableSpan],
- ) -> SpanExportResult:
- if self._shutdown:
- return
-
- with suppress():
- for span in spans:
- trace_id = span.get_span_context().trace_id
-
- if trace_id not in self._registry:
- self._registry[trace_id] = []
-
- self._registry[trace_id].append(span)
-
- def shutdown(self) -> None:
- self._shutdown = True
-
- def force_flush(self, timeout_millis: int = 30000) -> bool:
- return True
-
- def is_ready(
- self,
- trace_id: int,
- ) -> bool:
- is_ready = trace_id in self._registry
- return is_ready
-
- def fetch(
- self,
- trace_id: int,
- ) -> List[ReadableSpan]:
- trace = self._registry.get(trace_id, [])
-
- if trace_id in self._registry:
- del self._registry[trace_id]
-
- return trace
-
-
-class OTLPExporter(OTLPSpanExporter):
- _MAX_RETRY_TIMEOUT = 2
-
- def __init__(
- self,
- *args,
- credentials: Optional[TTLLRUCache] = None,
- **kwargs,
- ):
- super().__init__(*args, **kwargs)
-
- self.credentials = credentials
-
- def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
- grouped_spans: Dict[str, List[str]] = {}
-
- for span in spans:
- trace_id = span.get_span_context().trace_id
-
- credentials = None
- if self.credentials:
- credentials = self.credentials.get(trace_id)
-
- if credentials not in grouped_spans:
- grouped_spans[credentials] = []
-
- grouped_spans[credentials].append(span)
-
- serialized_spans = []
-
- for credentials, _spans in grouped_spans.items():
- with otlp_context_manager(
- context=OTLPContext(
- credentials=credentials,
- )
- ):
- serialized_spans.append(super().export(_spans))
-
- if all(serialized_spans):
- return SpanExportResult.SUCCESS
- else:
- return SpanExportResult.FAILURE
-
- def _export(self, serialized_data: bytes, timeout_sec: Optional[float] = None):
- credentials = otlp_context.get().credentials
-
- if credentials:
- self._session.headers.update({"Authorization": credentials})
-
- with suppress():
- if timeout_sec is not None:
- return super()._export(serialized_data, timeout_sec)
- else:
- return super()._export(serialized_data)
-
-
-ConsoleExporter = ConsoleSpanExporter
-InlineExporter = InlineTraceExporter
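
The deleted `OTLPExporter.export` grouped spans by the credentials cached for their trace ID, so each batch could carry its own `Authorization` header. The grouping step in isolation, with plain dicts standing in for `ReadableSpan` objects:

```python
from typing import Any, Dict, List, Optional

def group_by_credentials(
    spans: List[Dict[str, Any]],
    credentials_by_trace: Dict[int, str],
) -> Dict[Optional[str], List[Dict[str, Any]]]:
    # One export batch per distinct credential (None = no credentials cached).
    grouped: Dict[Optional[str], List[Dict[str, Any]]] = {}
    for span in spans:
        creds = credentials_by_trace.get(span["trace_id"])
        grouped.setdefault(creds, []).append(span)
    return grouped

spans = [
    {"trace_id": 1, "name": "a"},
    {"trace_id": 2, "name": "b"},
    {"trace_id": 1, "name": "c"},
]
print(group_by_credentials(spans, {1: "ApiKey xyz"}))
# {'ApiKey xyz': [spans for trace 1], None: [span for trace 2]}
```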
diff --git a/sdk/agenta/sdk/engines/tracing/inline.py b/sdk/agenta/sdk/engines/tracing/inline.py
deleted file mode 100644
index 971c4a108d..0000000000
--- a/sdk/agenta/sdk/engines/tracing/inline.py
+++ /dev/null
@@ -1,1154 +0,0 @@
-############################
-### services.shared.dtos ###
-### -------------------- ###
-
-from typing import Optional
-
-from pydantic import BaseModel
-from uuid import UUID
-from datetime import datetime
-from enum import Enum
-from collections import OrderedDict
-
-
-class ProjectScopeDTO(BaseModel):
- project_id: UUID
-
-
-class LifecycleDTO(BaseModel):
- created_at: datetime
- updated_at: Optional[datetime] = None
-
- updated_by_id: Optional[UUID] = None
-
-
-### -------------------- ###
-### services.shared.dtos ###
-############################
-
-
-###################################
-### services.observability.dtos ###
-### --------------------------- ###
-
-from typing import List, Dict, Any, Union, Optional
-
-from enum import Enum
-from datetime import datetime
-from uuid import UUID
-
-
-class TimeDTO(BaseModel):
- start: datetime
- end: datetime
-
-
-class StatusCode(Enum):
- UNSET = "UNSET"
- OK = "OK"
- ERROR = "ERROR"
-
-
-class StatusDTO(BaseModel):
- code: StatusCode
- message: Optional[str] = None
- stacktrace: Optional[str] = None
-
-
-AttributeValueType = Any
-Attributes = Dict[str, AttributeValueType]
-
-
-class TreeType(Enum):
- # --- VARIANTS --- #
- INVOCATION = "invocation"
- ANNOTATION = "annotation"
- # --- VARIANTS --- #
-
-
-class NodeType(Enum):
- # --- VARIANTS --- #
- ## SPAN_KIND_SERVER
- AGENT = "agent"
- WORKFLOW = "workflow"
- CHAIN = "chain"
- ## SPAN_KIND_INTERNAL
- TASK = "task"
- ## SPAN_KIND_CLIENT
- TOOL = "tool"
- EMBEDDING = "embedding"
- QUERY = "query"
- COMPLETION = "completion"
- CHAT = "chat"
- RERANK = "rerank"
- # --- VARIANTS --- #
-
-
-class RootDTO(BaseModel):
- id: UUID
-
-
-class TreeDTO(BaseModel):
- id: UUID
- type: Optional[TreeType] = None
-
-
-class NodeDTO(BaseModel):
- id: UUID
- type: Optional[NodeType] = None
- name: str
-
-
-Data = Dict[str, Any]
-Metrics = Dict[str, Any]
-Metadata = Dict[str, Any]
-Tags = Dict[str, Any]
-Refs = Dict[str, Any]
-
-
-class LinkDTO(BaseModel):
- type: str
- id: UUID
- tree_id: Optional[UUID] = None
-
-
-class ParentDTO(BaseModel):
- id: UUID
-
-
-class OTelSpanKind(Enum):
- SPAN_KIND_UNSPECIFIED = "SPAN_KIND_UNSPECIFIED"
- # INTERNAL
- SPAN_KIND_INTERNAL = "SPAN_KIND_INTERNAL"
- # SYNCHRONOUS
- SPAN_KIND_SERVER = "SPAN_KIND_SERVER"
- SPAN_KIND_CLIENT = "SPAN_KIND_CLIENT"
- # ASYNCHRONOUS
- SPAN_KIND_PRODUCER = "SPAN_KIND_PRODUCER"
- SPAN_KIND_CONSUMER = "SPAN_KIND_CONSUMER"
-
-
-class OTelStatusCode(Enum):
- STATUS_CODE_OK = "STATUS_CODE_OK"
- STATUS_CODE_ERROR = "STATUS_CODE_ERROR"
- STATUS_CODE_UNSET = "STATUS_CODE_UNSET"
-
-
-class OTelContextDTO(BaseModel):
- trace_id: str
- span_id: str
-
-
-class OTelEventDTO(BaseModel):
- name: str
- timestamp: datetime
-
- attributes: Optional[Attributes] = None
-
-
-class OTelLinkDTO(BaseModel):
- context: OTelContextDTO
-
- attributes: Optional[Attributes] = None
-
-
-class OTelExtraDTO(BaseModel):
- kind: Optional[str] = None
-
- attributes: Optional[Attributes] = None
- events: Optional[List[OTelEventDTO]] = None
- links: Optional[List[OTelLinkDTO]] = None
-
-
-class SpanDTO(BaseModel):
- trace_id: str
- span_id: str
-
- scope: Optional[ProjectScopeDTO] = None
-
- lifecycle: Optional[LifecycleDTO] = None
-
- root: RootDTO
- tree: TreeDTO
- node: NodeDTO
-
- parent: Optional[ParentDTO] = None
-
- time: TimeDTO
- status: StatusDTO
-
- data: Optional[Data] = None
- metrics: Optional[Metrics] = None
- meta: Optional[Metadata] = None
- tags: Optional[Tags] = None
- refs: Optional[Refs] = None
-
- links: Optional[List[LinkDTO]] = None
-
- otel: Optional[OTelExtraDTO] = None
-
- nodes: Optional[Dict[str, Union["SpanDTO", List["SpanDTO"]]]] = None
-
-
-class OTelSpanDTO(BaseModel):
- context: OTelContextDTO
-
- name: str
- kind: OTelSpanKind = OTelSpanKind.SPAN_KIND_UNSPECIFIED
-
- start_time: datetime
- end_time: datetime
-
- status_code: OTelStatusCode = OTelStatusCode.STATUS_CODE_UNSET
- status_message: Optional[str] = None
-
- attributes: Optional[Attributes] = None
- events: Optional[List[OTelEventDTO]] = None
-
- parent: Optional[OTelContextDTO] = None
- links: Optional[List[OTelLinkDTO]] = None
-
-
-### --------------------------- ###
-### services.observability.dtos ###
-###################################
-
-
-####################################
-### services.observability.utils ###
-### ---------------------------- ###
-
-from typing import List, Dict, OrderedDict
-
-
-def parse_span_dtos_to_span_idx(
- span_dtos: List[SpanDTO],
-) -> Dict[str, SpanDTO]:
- span_idx = {span_dto.node.id: span_dto for span_dto in span_dtos}
-
- return span_idx
-
-
-def parse_span_idx_to_span_id_tree(
- span_idx: Dict[str, SpanDTO],
-) -> OrderedDict:
- span_id_tree = OrderedDict()
- index = {}
-
- def push(span_dto: SpanDTO) -> None:
- if span_dto.parent is None:
- span_id_tree[span_dto.node.id] = OrderedDict()
- index[span_dto.node.id] = span_id_tree[span_dto.node.id]
- elif span_dto.parent.id in index:
- index[span_dto.parent.id][span_dto.node.id] = OrderedDict()
- index[span_dto.node.id] = index[span_dto.parent.id][span_dto.node.id]
-
- for span_dto in sorted(span_idx.values(), key=lambda span_dto: span_dto.time.start):
- push(span_dto)
-
- return span_id_tree
-
-
-def cumulate_costs(
- spans_id_tree: OrderedDict,
- spans_idx: Dict[str, SpanDTO],
-) -> None:
- def _get_unit(span: SpanDTO):
- if span.metrics is not None:
- return span.metrics.get("unit.costs.total", 0.0)
-
- return 0.0
-
- def _get_acc(span: SpanDTO):
- if span.metrics is not None:
- return span.metrics.get("acc.costs.total", 0.0)
-
- return 0.0
-
- def _acc(a: float, b: float):
- return a + b
-
- def _set(span: SpanDTO, cost: float):
- if span.metrics is None:
- span.metrics = {}
-
- if cost != 0.0:
- span.metrics["acc.costs.total"] = cost
-
- _cumulate_tree_dfs(spans_id_tree, spans_idx, _get_unit, _get_acc, _acc, _set)
-
-
-def cumulate_tokens(
- spans_id_tree: OrderedDict,
- spans_idx: Dict[str, dict],
-) -> None:
- def _get_unit(span: SpanDTO):
- _tokens = {
- "prompt": 0.0,
- "completion": 0.0,
- "total": 0.0,
- }
-
- if span.metrics is not None:
- return {
- "prompt": span.metrics.get("unit.tokens.prompt", 0.0),
- "completion": span.metrics.get("unit.tokens.completion", 0.0),
- "total": span.metrics.get("unit.tokens.total", 0.0),
- }
-
- return _tokens
-
- def _get_acc(span: SpanDTO):
- _tokens = {
- "prompt": 0.0,
- "completion": 0.0,
- "total": 0.0,
- }
-
- if span.metrics is not None:
- return {
- "prompt": span.metrics.get("acc.tokens.prompt", 0.0),
- "completion": span.metrics.get("acc.tokens.completion", 0.0),
- "total": span.metrics.get("acc.tokens.total", 0.0),
- }
-
- return _tokens
-
- def _acc(a: dict, b: dict):
- return {
- "prompt": a.get("prompt", 0.0) + b.get("prompt", 0.0),
- "completion": a.get("completion", 0.0) + b.get("completion", 0.0),
- "total": a.get("total", 0.0) + b.get("total", 0.0),
- }
-
- def _set(span: SpanDTO, tokens: dict):
- if span.metrics is None:
- span.metrics = {}
-
- if tokens.get("prompt", 0.0) != 0.0:
- span.metrics["acc.tokens.prompt"] = tokens.get("prompt", 0.0)
- if tokens.get("completion", 0.0) != 0.0:
- span.metrics["acc.tokens.completion"] = (
- tokens.get("completion", 0.0)
- if tokens.get("completion", 0.0) != 0.0
- else None
- )
- if tokens.get("total", 0.0) != 0.0:
- span.metrics["acc.tokens.total"] = (
- tokens.get("total", 0.0) if tokens.get("total", 0.0) != 0.0 else None
- )
-
- _cumulate_tree_dfs(spans_id_tree, spans_idx, _get_unit, _get_acc, _acc, _set)
-
-
-def _cumulate_tree_dfs(
- spans_id_tree: OrderedDict,
- spans_idx: Dict[str, SpanDTO],
- get_unit_metric,
- get_acc_metric,
- accumulate_metric,
- set_metric,
-):
- for span_id, children_spans_id_tree in spans_id_tree.items():
- children_spans_id_tree: OrderedDict
-
- cumulated_metric = get_unit_metric(spans_idx[span_id])
-
- _cumulate_tree_dfs(
- children_spans_id_tree,
- spans_idx,
- get_unit_metric,
- get_acc_metric,
- accumulate_metric,
- set_metric,
- )
-
- for child_span_id in children_spans_id_tree.keys():
- marginal_metric = get_acc_metric(spans_idx[child_span_id])
- cumulated_metric = accumulate_metric(cumulated_metric, marginal_metric)
-
- set_metric(spans_idx[span_id], cumulated_metric)
-
-
-def connect_children(
- spans_id_tree: OrderedDict,
- spans_idx: Dict[str, dict],
-) -> None:
- _connect_tree_dfs(spans_id_tree, spans_idx)
-
-
-def _connect_tree_dfs(
- spans_id_tree: OrderedDict,
- spans_idx: Dict[str, SpanDTO],
-):
- for span_id, children_spans_id_tree in spans_id_tree.items():
- children_spans_id_tree: OrderedDict
-
- parent_span = spans_idx[span_id]
-
- parent_span.nodes = dict()
-
- _connect_tree_dfs(children_spans_id_tree, spans_idx)
-
- for child_span_id in children_spans_id_tree.keys():
- child_span_name = spans_idx[child_span_id].node.name
- if child_span_name not in parent_span.nodes:
- parent_span.nodes[child_span_name] = spans_idx[child_span_id]
- else:
- if not isinstance(parent_span.nodes[child_span_name], list):
- parent_span.nodes[child_span_name] = [
- parent_span.nodes[child_span_name]
- ]
-
- parent_span.nodes[child_span_name].append(spans_idx[child_span_id])
-
- if len(parent_span.nodes) == 0:
- parent_span.nodes = None
-
-
-### ---------------------------- ###
-### services.observability.utils ###
-####################################
-
-
-########################################################
-### apis.fastapi.observability.opentelemetry.semconv ###
-### ------------------------------------------------ ###
-
-from json import loads
-
-VERSION = "0.4.1"
-
-V_0_4_1_ATTRIBUTES_EXACT = [
- # OPENLLMETRY
- ("gen_ai.system", "ag.meta.system"),
- ("gen_ai.request.base_url", "ag.meta.request.base_url"),
- ("gen_ai.request.endpoint", "ag.meta.request.endpoint"),
- ("gen_ai.request.headers", "ag.meta.request.headers"),
- ("gen_ai.request.type", "ag.type.node"),
- ("gen_ai.request.streaming", "ag.meta.request.streaming"),
- ("gen_ai.request.model", "ag.meta.request.model"),
- ("gen_ai.request.max_tokens", "ag.meta.request.max_tokens"),
- ("gen_ai.request.temperature", "ag.meta.request.temperature"),
- ("gen_ai.request.top_p", "ag.meta.request.top_p"),
- ("gen_ai.response.model", "ag.meta.response.model"),
- ("gen_ai.usage.prompt_tokens", "ag.metrics.unit.tokens.prompt"),
- ("gen_ai.usage.completion_tokens", "ag.metrics.unit.tokens.completion"),
- ("gen_ai.usage.total_tokens", "ag.metrics.unit.tokens.total"),
- ("llm.headers", "ag.meta.request.headers"),
- ("llm.request.type", "ag.type.node"),
- ("llm.top_k", "ag.meta.request.top_k"),
- ("llm.is_streaming", "ag.meta.request.streaming"),
- ("llm.usage.total_tokens", "ag.metrics.unit.tokens.total"),
- ("gen_ai.openai.api_base", "ag.meta.request.base_url"),
- ("db.system", "ag.meta.system"),
- ("db.vector.query.top_k", "ag.meta.request.top_k"),
- ("pinecone.query.top_k", "ag.meta.request.top_k"),
- ("traceloop.span.kind", "ag.type.node"),
- ("traceloop.entity.name", "ag.node.name"),
- # OPENINFERENCE
- ("output.value", "ag.data.outputs"),
- ("input.value", "ag.data.inputs"),
- ("embedding.model_name", "ag.meta.request.model"),
- ("llm.invocation_parameters", "ag.meta.request"),
- ("llm.model_name", "ag.meta.request.model"),
- ("llm.provider", "ag.meta.provider"),
- ("llm.system", "ag.meta.system"),
-]
-V_0_4_1_ATTRIBUTES_PREFIX = [
- # OPENLLMETRY
- ("gen_ai.prompt", "ag.data.inputs.prompt"),
- ("gen_ai.completion", "ag.data.outputs.completion"),
- ("llm.request.functions", "ag.data.inputs.functions"),
- ("llm.request.tools", "ag.data.inputs.tools"),
- # OPENINFERENCE
- ("llm.token_count", "ag.metrics.unit.tokens"),
- ("llm.input_messages", "ag.data.inputs.prompt"),
- ("llm.output_messages", "ag.data.outputs.completion"),
-]
-
-V_0_4_1_ATTRIBUTES_DYNAMIC = [
- # OPENLLMETRY
- ("traceloop.entity.input", lambda x: ("ag.data.inputs", loads(x).get("inputs"))),
- ("traceloop.entity.output", lambda x: ("ag.data.outputs", loads(x).get("outputs"))),
-]
-
-
-V_0_4_1_MAPS = {
- "attributes": {
- "exact": {
- "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_EXACT[::-1]},
- "to": {agenta: otel for otel, agenta in V_0_4_1_ATTRIBUTES_EXACT[::-1]},
- },
- "prefix": {
- "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_PREFIX[::-1]},
- "to": {agenta: otel for otel, agenta in V_0_4_1_ATTRIBUTES_PREFIX[::-1]},
- },
- "dynamic": {
- "from": {otel: agenta for otel, agenta in V_0_4_1_ATTRIBUTES_DYNAMIC[::-1]}
- },
- },
-}
-V_0_4_1_KEYS = {
- "attributes": {
- "exact": {
- "from": list(V_0_4_1_MAPS["attributes"]["exact"]["from"].keys()),
- "to": list(V_0_4_1_MAPS["attributes"]["exact"]["to"].keys()),
- },
- "prefix": {
- "from": list(V_0_4_1_MAPS["attributes"]["prefix"]["from"].keys()),
- "to": list(V_0_4_1_MAPS["attributes"]["prefix"]["to"].keys()),
- },
- "dynamic": {
- "from": list(V_0_4_1_MAPS["attributes"]["dynamic"]["from"].keys()),
- },
- },
-}
-
-
-MAPS = {
- "0.4.1": V_0_4_1_MAPS, # LATEST
-}
-KEYS = {
- "0.4.1": V_0_4_1_KEYS, # LATEST
-}
-
-CODEX = {"maps": MAPS[VERSION], "keys": KEYS[VERSION]}
-
-
-### ------------------------------------------------ ###
-### apis.fastapi.observability.opentelemetry.semconv ###
-########################################################
-
-
-########################################
-### apis.fastapi.observability.utils ###
-### -------------------------------- ###
-
-from typing import Optional, Union, Tuple, Any, List, Dict
-from uuid import UUID
-from collections import OrderedDict
-from json import loads, JSONDecodeError, dumps
-from copy import copy
-
-
-def _unmarshal_attributes(
- marshalled: Dict[str, Any],
-) -> Dict[str, Any]:
- """
- Unmarshals a dictionary of marshalled attributes into a nested dictionary
-
- Example:
- marshalled = {
- "ag.type": "tree",
- "ag.node.name": "root",
- "ag.node.children.0.name": "child1",
- "ag.node.children.1.name": "child2"
- }
- unmarshalled = {
- "ag": {
- "type": "tree",
- "node": {
- "name": "root",
- "children": [
- {
- "name": "child1",
- },
- {
- "name": "child2",
- }
- ]
- }
- }
- }
- """
- unmarshalled = {}
-
- for key, value in marshalled.items():
- keys = key.split(".")
-
- level = unmarshalled
-
- for i, part in enumerate(keys[:-1]):
- if part.isdigit():
- part = int(part)
-
- if not isinstance(level, list):
- level = []
-
- while len(level) <= part:
- level.append({})
-
- level = level[part]
-
- else:
- if part not in level:
- level[part] = {} if not keys[i + 1].isdigit() else []
-
- level = level[part]
-
- last_key = keys[-1]
-
- if last_key.isdigit():
- last_key = int(last_key)
-
- if not isinstance(level, list):
- level = []
-
- while len(level) <= last_key:
- level.append(None)
-
- level[last_key] = value
-
- else:
- level[last_key] = value
-
- return unmarshalled
-
-
-def _encode_key(
- namespace,
- key: str,
-) -> str:
- return f"ag.{namespace}.{key}"
-
-
-def _decode_key(
- namespace,
- key: str,
-) -> str:
- return key.replace(f"ag.{namespace}.", "")
-
-
-def _decode_value(
- value: Any,
-) -> Any:
- if isinstance(value, (int, float, bool, bytes)):
- return value
-
- if isinstance(value, str):
- if value == "@ag.type=none:":
- return None
-
- if value.startswith("@ag.type=json:"):
- encoded = value[len("@ag.type=json:") :]
- value = loads(encoded)
- return value
-
- return value
-
- return value
-
-
-def _get_attributes(
- attributes: Attributes,
- namespace: str,
-):
- return {
- _decode_key(namespace, key): _decode_value(value)
- for key, value in attributes.items()
- if key != _decode_key(namespace, key)
- }
-
-
-def _parse_from_types(
- otel_span_dto: OTelSpanDTO,
-) -> dict:
- types = _get_attributes(otel_span_dto.attributes, "type")
-
- if types.get("tree"):
- del otel_span_dto.attributes[_encode_key("type", "tree")]
-
- if types.get("node"):
- del otel_span_dto.attributes[_encode_key("type", "node")]
-
- return types
-
-
-def _parse_from_semconv(
- attributes: Attributes,
-) -> None:
- _attributes = copy(attributes)
-
- for old_key, value in _attributes.items():
- if old_key in CODEX["keys"]["attributes"]["exact"]["from"]:
- new_key = CODEX["maps"]["attributes"]["exact"]["from"][old_key]
-
- attributes[new_key] = value
-
- del attributes[old_key]
-
- else:
- for prefix_key in CODEX["keys"]["attributes"]["prefix"]["from"]:
- if old_key.startswith(prefix_key):
- prefix = CODEX["maps"]["attributes"]["prefix"]["from"][prefix_key]
-
- new_key = old_key.replace(prefix_key, prefix)
-
- attributes[new_key] = value
-
- del attributes[old_key]
-
- for dynamic_key in CODEX["keys"]["attributes"]["dynamic"]["from"]:
- if old_key == dynamic_key:
- try:
- new_key, new_value = CODEX["maps"]["attributes"]["dynamic"][
- "from"
- ][dynamic_key](value)
-
- attributes[new_key] = new_value
-
- except: # pylint: disable=bare-except
- pass
-
-
-def _parse_from_links(
- otel_span_dto: OTelSpanDTO,
-) -> dict:
- # LINKS
- links = None
- otel_links = None
-
- if otel_span_dto.links:
- links = list()
- otel_links = list()
-
- for link in otel_span_dto.links:
- _links = _get_attributes(link.attributes, "type")
-
- if _links:
- link_type = _links.get("link")
- link_tree_id = str(UUID(link.context.trace_id[2:]))
- link_node_id = str(
- UUID(link.context.trace_id[2 + 16 :] + link.context.span_id[2:])
- )
-
- links.append(
- LinkDTO(
- type=link_type,
- tree_id=link_tree_id,
- id=link_node_id,
- )
- )
- else:
- otel_links.append(link)
-
- links = links if links else None
- otel_links = otel_links if otel_links else None
-
- otel_span_dto.links = otel_links
-
- return links
-
-
-def _parse_from_attributes(
- otel_span_dto: OTelSpanDTO,
-) -> Tuple[dict, dict, dict, dict, dict]:
- # DATA
- _data = _get_attributes(otel_span_dto.attributes, "data")
-
- for key in _data.keys():
- del otel_span_dto.attributes[_encode_key("data", key)]
-
- # _data = _unmarshal_attributes(_data)
- _data = _data if _data else None
-
- # METRICS
- _metrics = _get_attributes(otel_span_dto.attributes, "metrics")
-
- for key in _metrics.keys():
- del otel_span_dto.attributes[_encode_key("metrics", key)]
-
- # _metrics = _unmarshal_attributes(_metrics)
- _metrics = _metrics if _metrics else None
-
- # META
- _meta = _get_attributes(otel_span_dto.attributes, "meta")
-
- for key in _meta.keys():
- del otel_span_dto.attributes[_encode_key("meta", key)]
-
- # _meta = _unmarshal_attributes(_meta)
- _meta = _meta if _meta else None
-
- # TAGS
- _tags = _get_attributes(otel_span_dto.attributes, "tags")
-
- for key in _tags.keys():
- del otel_span_dto.attributes[_encode_key("tags", key)]
-
- _tags = _tags if _tags else None
-
- # REFS
- _refs = _get_attributes(otel_span_dto.attributes, "refs")
-
- for key in _refs.keys():
- del otel_span_dto.attributes[_encode_key("refs", key)]
-
- _refs = _refs if _refs else None
-
- if len(otel_span_dto.attributes.keys()) < 1:
- otel_span_dto.attributes = None
-
- return _data, _metrics, _meta, _tags, _refs
-
-
-def parse_from_otel_span_dto(
- otel_span_dto: OTelSpanDTO,
-) -> SpanDTO:
- trace_id = str(otel_span_dto.context.trace_id[2:])
- span_id = str(otel_span_dto.context.span_id[2:])
-
- lifecycle = LifecycleDTO(
- created_at=datetime.now(),
- )
-
- _parse_from_semconv(otel_span_dto.attributes)
-
- types = _parse_from_types(otel_span_dto)
-
- tree_id = UUID(trace_id)
-
- tree_type: str = types.get("tree")
-
- tree = TreeDTO(
- id=tree_id,
- type=tree_type.lower() if tree_type else None,
- )
-
- node_id = UUID(trace_id[16:] + span_id)
-
- node_type = NodeType.TASK
- try:
- node_type = NodeType(types.get("node", "").lower())
- except: # pylint: disable=bare-except
- pass
-
- node = NodeDTO(
- id=node_id,
- type=node_type,
- name=otel_span_dto.name,
- )
-
- parent = (
- ParentDTO(
- id=(
- UUID(
- otel_span_dto.parent.trace_id[2 + 16 :]
- + otel_span_dto.parent.span_id[2:]
- )
- )
- )
- if otel_span_dto.parent
- else None
- )
-
- time = TimeDTO(
- start=otel_span_dto.start_time,
- end=otel_span_dto.end_time,
- )
-
- status = StatusDTO(
- code=otel_span_dto.status_code.value.replace("STATUS_CODE_", ""),
- message=otel_span_dto.status_message,
- )
-
- links = _parse_from_links(otel_span_dto)
-
- data, metrics, meta, tags, refs = _parse_from_attributes(otel_span_dto)
-
- duration = (otel_span_dto.end_time - otel_span_dto.start_time).total_seconds()
-
- if metrics is None:
- metrics = dict()
-
- metrics["acc.duration.total"] = round(duration * 1_000, 3) # milliseconds
-
- root_id = str(tree_id)
- if refs is not None:
- root_id = refs.get("scenario.id", root_id)
-
- root = RootDTO(id=UUID(root_id))
-
- otel = OTelExtraDTO(
- kind=otel_span_dto.kind.value,
- attributes=otel_span_dto.attributes,
- events=otel_span_dto.events,
- links=otel_span_dto.links,
- )
-
- span_dto = SpanDTO(
- trace_id=trace_id,
- span_id=span_id,
- lifecycle=lifecycle,
- root=root,
- tree=tree,
- node=node,
- parent=parent,
- time=time,
- status=status,
- data=data,
- metrics=metrics,
- meta=meta,
- tags=tags,
- refs=refs,
- links=links,
- otel=otel,
- )
-
- return span_dto
-
-
-def parse_to_agenta_span_dto(
- span_dto: SpanDTO,
-) -> SpanDTO:
- # DATA
- if span_dto.data:
- span_dto.data = _unmarshal_attributes(span_dto.data)
-
- if "outputs" in span_dto.data:
- if "__default__" in span_dto.data["outputs"]:
- span_dto.data["outputs"] = span_dto.data["outputs"]["__default__"]
-
- # METRICS
- if span_dto.metrics:
- span_dto.metrics = _unmarshal_attributes(span_dto.metrics)
-
- # META
- if span_dto.meta:
- span_dto.meta = _unmarshal_attributes(span_dto.meta)
-
- # TAGS
- if span_dto.tags:
- span_dto.tags = _unmarshal_attributes(span_dto.tags)
-
- # REFS
- if span_dto.refs:
- span_dto.refs = _unmarshal_attributes(span_dto.refs)
-
- if isinstance(span_dto.links, list):
- for link in span_dto.links:
- link.tree_id = None
-
- if span_dto.nodes:
- for v in span_dto.nodes.values():
- if isinstance(v, list):
- for n in v:
- parse_to_agenta_span_dto(n)
- else:
- parse_to_agenta_span_dto(v)
-
- # MASK LINKS FOR NOW
- span_dto.links = None
- # ------------------
-
- # MASK LIFECYCLE FOR NOW
- # span_dto.lifecycle = None
- if span_dto.lifecycle:
- span_dto.lifecycle.updated_at = None
- span_dto.lifecycle.updated_by_id = None
- # ----------------------
-
- return span_dto
-
-
-### -------------------------------- ###
-### apis.fastapi.observability.utils ###
-########################################
-
-
-from litellm import cost_calculator
-from opentelemetry.sdk.trace import ReadableSpan
-
-from agenta.sdk.types import AgentaNodeDto, AgentaNodesResponse
-
-
-def parse_inline_trace(
- spans: Dict[str, ReadableSpan],
-):
- otel_span_dtos = _parse_readable_spans(spans)
-
- ############################################################
- ### apis.fastapi.observability.api.otlp_collect_traces() ###
- ### ---------------------------------------------------- ###
- span_dtos = [
- parse_from_otel_span_dto(otel_span_dto) for otel_span_dto in otel_span_dtos
- ]
- ### ---------------------------------------------------- ###
- ### apis.fastapi.observability.api.otlp_collect_traces() ###
- ############################################################
-
- #####################################################
- ### services.observability.service.ingest/query() ###
- ### --------------------------------------------- ###
- span_idx = parse_span_dtos_to_span_idx(span_dtos)
- span_id_tree = parse_span_idx_to_span_id_tree(span_idx)
- ### --------------------------------------------- ###
- ### services.observability.service.ingest/query() ###
- #####################################################
-
- ###############################################
- ### services.observability.service.ingest() ###
- ### --------------------------------------- ###
- calculate_costs(span_idx)
- cumulate_costs(span_id_tree, span_idx)
- cumulate_tokens(span_id_tree, span_idx)
- ### --------------------------------------- ###
- ### services.observability.service.ingest() ###
- ###############################################
-
- ##############################################
- ### services.observability.service.query() ###
- ### -------------------------------------- ###
- connect_children(span_id_tree, span_idx)
- root_span_dtos = [span_idx[span_id] for span_id in span_id_tree.keys()]
- agenta_span_dtos = [
- parse_to_agenta_span_dto(span_dto) for span_dto in root_span_dtos
- ]
- ### -------------------------------------- ###
- ### services.observability.service.query() ###
- ##############################################
-
- spans = [
- span_dto.model_dump(
- mode="json",
- exclude_none=True,
- exclude_defaults=True,
- )
- for span_dto in agenta_span_dtos
- ]
- inline_trace = AgentaNodesResponse(
- version="1.0.0",
- nodes=[AgentaNodeDto(**span) for span in spans],
- ).model_dump(
- mode="json",
- exclude_none=True,
- exclude_unset=True,
- )
- return inline_trace
-
-
-def _parse_readable_spans(
- spans: List[ReadableSpan],
-) -> List[OTelSpanDTO]:
- otel_span_dtos = list()
-
- for span in spans:
- otel_events = [
- OTelEventDTO(
- name=event.name,
- timestamp=_timestamp_ns_to_datetime(event.timestamp),
- attributes=event.attributes,
- )
- for event in span.events
- ]
- otel_links = [
- OTelLinkDTO(
- context=OTelContextDTO(
- trace_id=_int_to_hex(link.context.trace_id, 128),
- span_id=_int_to_hex(link.context.span_id, 64),
- ),
- attributes=link.attributes,
- )
- for link in span.links
- ]
- otel_span_dto = OTelSpanDTO(
- context=OTelContextDTO(
- trace_id=_int_to_hex(span.get_span_context().trace_id, 128),
- span_id=_int_to_hex(span.get_span_context().span_id, 64),
- ),
- name=span.name,
- kind=OTelSpanKind(
- "SPAN_KIND_"
- + (span.kind if isinstance(span.kind, str) else span.kind.name)
- ),
- start_time=_timestamp_ns_to_datetime(span.start_time),
- end_time=_timestamp_ns_to_datetime(span.end_time),
- status_code=OTelStatusCode("STATUS_CODE_" + span.status.status_code.name),
- status_message=span.status.description,
- attributes=span.attributes,
- events=otel_events if len(otel_events) > 0 else None,
- parent=(
- OTelContextDTO(
- trace_id=_int_to_hex(span.parent.trace_id, 128),
- span_id=_int_to_hex(span.parent.span_id, 64),
- )
- if span.parent and not span.parent.is_remote
- else None
- ),
- links=otel_links if len(otel_links) > 0 else None,
- )
-
- otel_span_dtos.append(otel_span_dto)
-
- return otel_span_dtos
-
-
-def _int_to_hex(integer, bits):
-    """Render an integer as a zero-padded, 0x-prefixed hex string of `bits` width."""
-    _hex = hex(integer)[2:]
-
-    _hex = _hex.zfill(bits // 4)
-
-    _hex = "0x" + _hex
-
-    return _hex
-
-
-def _timestamp_ns_to_datetime(timestamp_ns):
-    """Convert an epoch timestamp in nanoseconds to an ISO-8601 string
-    (note: returns a string, not a datetime object)."""
-    _datetime = datetime.fromtimestamp(
-        timestamp_ns / 1_000_000_000,
-    ).isoformat(
-        timespec="microseconds",
-    )
-
-    return _datetime
-
-
-class LlmTokens(BaseModel):
- prompt_tokens: Optional[int] = 0
- completion_tokens: Optional[int] = 0
- total_tokens: Optional[int] = 0
-
-
-TYPES_WITH_COSTS = [
- "embedding",
- "query",
- "completion",
- "chat",
- "rerank",
-]
-
-
-def calculate_costs(span_idx: Dict[str, SpanDTO]):
- for span in span_idx.values():
- if (
- span.node.type
- and span.node.type.name.lower() in TYPES_WITH_COSTS
- and span.meta
- and span.metrics
- ):
- model = span.meta.get("response.model") or span.meta.get(
- "configuration.model"
- )
- prompt_tokens = span.metrics.get("unit.tokens.prompt", 0.0)
- completion_tokens = span.metrics.get("unit.tokens.completion", 0.0)
-
- try:
- costs = cost_calculator.cost_per_token(
- model=model,
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
- )
-
- if not costs:
- continue
-
- prompt_cost, completion_cost = costs
- total_cost = prompt_cost + completion_cost
-
- span.metrics["unit.costs.prompt"] = prompt_cost
- span.metrics["unit.costs.completion"] = completion_cost
- span.metrics["unit.costs.total"] = total_cost
-
- except: # pylint: disable=bare-except
- pass
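For reference, the deleted `calculate_costs` above appears to delegate pricing to litellm's `cost_per_token` (the import sits outside this hunk), which returns a `(prompt_cost, completion_cost)` tuple in USD. A minimal standalone sketch of the same metric computation, assuming litellm is installed and recognizes the model name:

```python
from litellm import cost_per_token


def span_cost_metrics(model: str, prompt_tokens: int, completion_tokens: int) -> dict:
    # cost_per_token returns (prompt_cost, completion_cost) in USD
    prompt_cost, completion_cost = cost_per_token(
        model=model,
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
    )
    return {
        "unit.costs.prompt": prompt_cost,
        "unit.costs.completion": completion_cost,
        "unit.costs.total": prompt_cost + completion_cost,
    }


print(span_cost_metrics("gpt-4o-mini", prompt_tokens=1200, completion_tokens=300))
```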
diff --git a/sdk/agenta/sdk/engines/tracing/processors.py b/sdk/agenta/sdk/engines/tracing/processors.py
deleted file mode 100644
index 963738e5fe..0000000000
--- a/sdk/agenta/sdk/engines/tracing/processors.py
+++ /dev/null
@@ -1,190 +0,0 @@
-from typing import Optional, Dict, List
-from threading import Lock
-
-from opentelemetry.baggage import get_all as get_baggage
-from opentelemetry.context import Context
-from opentelemetry.sdk.trace import Span, SpanProcessor
-from opentelemetry.sdk.trace.export import (
- SpanExporter,
- ReadableSpan,
- BatchSpanProcessor,
-)
-
-from agenta.sdk.utils.logging import get_module_logger
-from agenta.sdk.engines.tracing.conventions import Reference
-
-log = get_module_logger(__name__)
-
-
-class TraceProcessor(SpanProcessor):
-    def __init__(
-        self,
-        span_exporter: SpanExporter,
-        references: Optional[Dict[str, str]] = None,
-        inline: bool = False,
-        max_queue_size: Optional[int] = None,
-        schedule_delay_millis: Optional[float] = None,
-        max_export_batch_size: Optional[int] = None,
-        export_timeout_millis: Optional[float] = None,
-    ):
- self.references = references or dict()
- self.inline = inline is True
-
- self._registry = dict()
- self._exporter = span_exporter
- self._spans: Dict[int, List[ReadableSpan]] = dict()
-
- # --- DISTRIBUTED
- if not self.inline:
- self._delegate = BatchSpanProcessor(
- span_exporter,
- max_queue_size,
- schedule_delay_millis,
- max_export_batch_size,
- export_timeout_millis,
- )
- # --- DISTRIBUTED
-
- def on_start(
- self,
- span: Span,
- parent_context: Optional[Context] = None,
- ) -> None:
- for key in self.references.keys():
- span.set_attribute(f"ag.refs.{key}", self.references[key])
-
- baggage = get_baggage(parent_context)
-
- for key in baggage.keys():
- if key.startswith("ag.refs."):
- _key = key.replace("ag.refs.", "")
- if _key in [_.value for _ in Reference.__members__.values()]:
- span.set_attribute(key, baggage[key])
-
- trace_id = span.context.trace_id
- span_id = span.context.span_id
-
- self._registry.setdefault(trace_id, {})
- self._registry[trace_id][span_id] = True
-
- def on_end(
- self,
- span: ReadableSpan,
- ):
- trace_id = span.context.trace_id
- span_id = span.context.span_id
-
- self._spans.setdefault(trace_id, []).append(span)
- self._registry.setdefault(trace_id, {})
- self._registry[trace_id].pop(span_id, None)
-
- if not self._registry[trace_id]:
- spans = self._spans.pop(trace_id, [])
- self._registry.pop(trace_id, None)
-
- # --- INLINE
- if self.inline:
- self._exporter.export(spans)
- # --- INLINE
-
- # --- DISTRIBUTED
- else:
- for span in spans:
- self._delegate.on_end(span)
-
- self._delegate.force_flush()
- # --- DISTRIBUTED
-
-    def force_flush(
-        self,
-        timeout_millis: Optional[int] = None,
-    ) -> bool:
- # --- INLINE
- if self.inline:
- try:
- ret = self._exporter.force_flush(timeout_millis)
- except: # pylint: disable=bare-except
- ret = True
- # --- INLINE
-
- # --- DISTRIBUTED
- else:
- ret = self._delegate.force_flush(timeout_millis)
- # --- DISTRIBUTED
-
- if not ret:
- log.warning("Agenta - Skipping export due to timeout.")
-
- return ret
-
- def shutdown(self) -> None:
- # --- INLINE
- if self.inline:
- self._exporter.shutdown()
- # --- INLINE
-
- # --- DISTRIBUTED
- else:
- self._delegate.shutdown()
- # --- DISTRIBUTED
-
- def is_ready(
- self,
- trace_id: Optional[int] = None,
- ) -> bool:
- is_ready = True
-
- # --- INLINE
- if self.inline:
- try:
- is_ready = self._exporter.is_ready(trace_id)
- except: # pylint: disable=bare-except
- pass
- # --- INLINE
-
- return is_ready
-
-    def fetch(
-        self,
-        trace_id: Optional[int] = None,
-    ) -> Optional[List[ReadableSpan]]:
- trace = None
-
- # --- INLINE
- if self.inline:
- try:
- trace = self._exporter.fetch(trace_id) # type: ignore
- except: # pylint: disable=bare-except
- pass
- # --- INLINE
-
- return trace
-
-
-# Internal storage for the last ended span context
-_last_ended_span_context = None
-_lock = Lock()
-
-
-def _set_last_ended(span_ctx) -> None:
- """Set the last ended span context"""
- with _lock:
- global _last_ended_span_context
- _last_ended_span_context = span_ctx
-
-
-def _get_last_ended():
- """Get the last ended span context"""
- with _lock:
- return _last_ended_span_context
-
-
-class EndedSpanRecorder(SpanProcessor):
- """Records the last ended span context for later reference.
-
- This allows accessing span information even after the span has been ended,
- which is useful for linking annotations to auto-instrumented spans.
- """
-
- def on_end(self, span):
- _set_last_ended(span.get_span_context())
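The heart of `TraceProcessor`'s inline mode is the per-trace bookkeeping: span ids are registered on start, ended spans are buffered, and the whole trace is flushed once its last open span closes. A simplified sketch of that pattern, stripped of the OpenTelemetry types:

```python
from collections import defaultdict


class TraceBuffer:
    """Buffers spans per trace and flushes once the last span has ended."""

    def __init__(self):
        self._open = defaultdict(set)    # trace_id -> span_ids still open
        self._ended = defaultdict(list)  # trace_id -> spans already ended

    def on_start(self, trace_id, span_id):
        self._open[trace_id].add(span_id)

    def on_end(self, trace_id, span_id, span):
        self._ended[trace_id].append(span)
        self._open[trace_id].discard(span_id)
        if not self._open[trace_id]:  # last open span just closed
            del self._open[trace_id]
            return self._ended.pop(trace_id)
        return None


buf = TraceBuffer()
buf.on_start("t1", "a")
buf.on_start("t1", "b")
print(buf.on_end("t1", "b", "span-b"))  # None: "a" is still open
print(buf.on_end("t1", "a", "span-a"))  # ['span-b', 'span-a']: trace flushed
```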
diff --git a/sdk/agenta/sdk/engines/tracing/propagation.py b/sdk/agenta/sdk/engines/tracing/propagation.py
deleted file mode 100644
index 8327c27ef1..0000000000
--- a/sdk/agenta/sdk/engines/tracing/propagation.py
+++ /dev/null
@@ -1,102 +0,0 @@
-from typing import Tuple, Optional, Dict, Any
-
-from opentelemetry.trace import Span, set_span_in_context, get_current_span
-from opentelemetry.baggage.propagation import W3CBaggagePropagator
-from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
-from opentelemetry.baggage import set_baggage
-from opentelemetry.context import get_current
-
-from agenta.sdk.contexts.tracing import TracingContext
-
-import agenta as ag
-
-
-def extract(
- headers: Dict[str, str],
-) -> Tuple[Optional[str], Optional[Any], Dict[str, str]]:
- # --- Extract credentials --- #
- credentials = None
-
- try:
- credentials = (
- headers.get("Authorization") # Uppercase
- or headers.get("authorization") # Lowercase
- or None
- )
-
- except: # pylint: disable=bare-except
- pass
-
- # --- Extract traceparent --- #
- traceparent = None
-
- try:
- _carrier = {
- "traceparent": headers.get("Traceparent") # Uppercase
- or headers.get("traceparent") # Lowercase
- or "",
- }
-
- _context = TraceContextTextMapPropagator().extract(_carrier)
-
- traceparent = _context
- except: # pylint: disable=bare-except
- pass
-
- # --- Extract baggage --- #
- baggage = {}
-
- try:
- _carrier = {
- "baggage": headers.get("Baggage") # Uppercase
- or headers.get("baggage") # Lowercase
- or "",
- }
-
- _context = W3CBaggagePropagator().extract(_carrier)
-
- if _context:
- for partial in _context.values():
- for key, value in partial.items():
- baggage[key] = value
-
- except: # pylint: disable=bare-except
- pass
-
- # --- #
- return credentials, traceparent, baggage
-
-
-def inject(
- headers: Optional[Dict[str, str]] = None,
-) -> Dict[str, str]:
- headers = headers or {}
-
- _context = get_current()
-
- ctx = TracingContext.get()
-
- # --- Inject traceparent --- #
- try:
- TraceContextTextMapPropagator().inject(headers, context=_context)
-
- except: # pylint: disable=bare-except
- pass
-
- # --- Inject baggage --- #
- try:
- if ctx.baggage:
- for key, value in ctx.baggage.items():
- _context = set_baggage(key, value, context=_context)
-
- W3CBaggagePropagator().inject(headers, context=_context)
-
- except: # pylint: disable=bare-except
- pass
-
- # --- Inject credentials --- #
- if ctx.credentials:
- headers["Authorization"] = ctx.credentials
-
- # --- #
- return headers
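Both helpers above are thin wrappers over the standard W3C propagators, using plain dicts as header carriers. A minimal round-trip with the OpenTelemetry API directly; note that `inject` only writes a `traceparent` header when there is an active, sampled span:

```python
from opentelemetry.baggage.propagation import W3CBaggagePropagator
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator

# Sender: inject the current context into an outgoing-headers carrier
headers: dict = {}
TraceContextTextMapPropagator().inject(headers)
W3CBaggagePropagator().inject(headers)

# Receiver: rebuild a Context from the incoming headers
ctx = TraceContextTextMapPropagator().extract(headers)
ctx = W3CBaggagePropagator().extract(headers, context=ctx)
print(headers, ctx)
```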
diff --git a/sdk/agenta/sdk/engines/tracing/spans.py b/sdk/agenta/sdk/engines/tracing/spans.py
deleted file mode 100644
index 8b581013a5..0000000000
--- a/sdk/agenta/sdk/engines/tracing/spans.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from typing import Optional, Union, Any, Dict
-
-from opentelemetry.trace import SpanContext
-from opentelemetry.trace.status import Status, StatusCode
-from opentelemetry.sdk.trace import Span
-
-from agenta.sdk.engines.tracing.attributes import serialize
-
-
-class CustomSpan(Span): # INHERITANCE FOR TYPING ONLY
- def __init__(
- self,
- span: Span,
- ) -> None:
- super().__init__( # INHERITANCE FOR TYPING ONLY
- name=span.name,
- context=span.context,
- parent=span.parent,
- sampler=span._sampler,
- trace_config=span._trace_config,
- resource=span.resource,
- attributes=span.attributes,
- events=span.events,
- links=span.links,
- kind=span.kind,
- span_processor=span._span_processor,
- instrumentation_info=span.instrumentation_info,
- record_exception=span._record_exception,
- set_status_on_exception=span._set_status_on_exception,
- limits=span._limits,
- instrumentation_scope=span.instrumentation_scope,
- )
-
- self._span = span
-
- ## --- PROXY METHODS --- ##
-
- def get_span_context(self):
- return self._span.get_span_context()
-
- def is_recording(self) -> bool:
- return self._span.is_recording()
-
- def update_name(
- self,
- name: str,
- ) -> None:
- self._span.update_name(name)
-
- def set_status(
- self,
- status: Union[Status, StatusCode],
- description: Optional[str] = None,
- ) -> None:
- self._span.set_status(
- status=status,
- description=description,
- )
-
- def end(self) -> None:
- self._span.end()
-
-    ## --- CUSTOM METHODS W/ ATTRIBUTES SERIALIZATION --- ##
-
- def set_attributes(
- self,
- attributes: Dict[str, Any],
- namespace: Optional[str] = None,
- max_depth: Optional[int] = None,
- ) -> None:
- self._span.set_attributes(
- attributes=serialize(
- namespace=namespace,
- attributes=attributes,
- max_depth=max_depth,
- )
- )
-
- def set_attribute(
- self,
- key: str,
- value: Any,
- namespace: Optional[str] = None,
- ) -> None:
- self.set_attributes(
- attributes={key: value},
- namespace=namespace,
- )
-
- def add_event(
- self,
- name: str,
- attributes: Optional[Dict[str, Any]] = None,
- timestamp: Optional[int] = None,
- namespace: Optional[str] = None,
- ) -> None:
- self._span.add_event(
- name=name,
- attributes=serialize(
- namespace=namespace,
- attributes=attributes,
- ),
- timestamp=timestamp,
- )
-
- def add_link(
- self,
- context: SpanContext,
- attributes: Optional[Dict[str, Any]] = None,
- namespace: Optional[str] = None,
- ) -> None:
- self._span.add_link(
- context=context,
- attributes=serialize(
- namespace=namespace,
- attributes=attributes,
- ),
- )
-
- def record_exception(
- self,
- exception: BaseException,
- attributes: Optional[Dict[str, Any]] = None,
- timestamp: Optional[int] = None,
- escaped: bool = False,
- namespace: Optional[str] = None,
- ) -> None:
- self._span.record_exception(
- exception=exception,
- attributes=serialize(
- namespace=namespace,
- attributes=attributes,
- ),
- timestamp=timestamp,
- escaped=escaped,
- )
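`CustomSpan` routes every attribute write through `serialize` from `agenta.sdk.engines.tracing.attributes`, which is not part of this diff. As a rough illustration of what namespaced serialization means here, a hypothetical `flatten` helper that turns nested dicts into dotted attribute keys (the real implementation and key prefix may differ):

```python
def flatten(namespace, attributes, prefix="ag"):
    """Hypothetical stand-in for the serialize() helper used above:
    flattens nested dicts into dotted, OTel-safe attribute keys."""
    flat = {}

    def _walk(key, value):
        if isinstance(value, dict):
            for k, v in value.items():
                _walk(f"{key}.{k}", v)
        else:
            flat[key] = value

    _walk(f"{prefix}.{namespace}" if namespace else prefix, attributes)
    return flat


print(flatten("data", {"internals": {"step": 1, "ok": True}}))
# -> {'ag.data.internals.step': 1, 'ag.data.internals.ok': True}
```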
diff --git a/sdk/agenta/sdk/engines/tracing/tracing.py b/sdk/agenta/sdk/engines/tracing/tracing.py
deleted file mode 100644
index 081effcd39..0000000000
--- a/sdk/agenta/sdk/engines/tracing/tracing.py
+++ /dev/null
@@ -1,324 +0,0 @@
-from typing import Optional, Any, Dict, Callable
-from enum import Enum
-
-from pydantic import BaseModel
-
-
-from opentelemetry.trace import (
- get_current_span,
- set_tracer_provider,
- get_tracer_provider,
- Status,
- StatusCode,
-)
-from opentelemetry.sdk import trace
-from opentelemetry.sdk.trace import Span, Tracer, TracerProvider
-from opentelemetry.sdk.resources import Resource
-
-
-from agenta.sdk.utils.singleton import Singleton
-from agenta.sdk.utils.exceptions import suppress
-from agenta.sdk.utils.logging import get_module_logger
-from agenta.sdk.engines.tracing.processors import (
- TraceProcessor,
- EndedSpanRecorder,
- _get_last_ended,
-)
-from agenta.sdk.engines.tracing.exporters import InlineExporter, OTLPExporter
-from agenta.sdk.engines.tracing.spans import CustomSpan
-from agenta.sdk.engines.tracing.inline import parse_inline_trace
-from agenta.sdk.engines.tracing.conventions import Reference, is_valid_attribute_key
-from agenta.sdk.engines.tracing.propagation import extract, inject
-from agenta.sdk.utils.cache import TTLLRUCache
-
-
-log = get_module_logger(__name__)
-
-
-# Patch TracerProvider so that every provider instance, including those
-# created by instrumentation libraries, records the last ended span.
-_original_init = trace.TracerProvider.__init__
-
-
-def patched_init(self, *args, **kwargs):
-    _original_init(self, *args, **kwargs)
-    self.add_span_processor(EndedSpanRecorder())
-
-
-trace.TracerProvider.__init__ = patched_init
-
-
-class Link(BaseModel):
- trace_id: str
- span_id: str
-
-
-class Tracing(metaclass=Singleton):
- Status = Status
- StatusCode = StatusCode
-
- def __init__(
- self,
- url: str,
- redact: Optional[Callable[..., Any]] = None,
- redact_on_error: Optional[bool] = True,
- ) -> None:
- # ENDPOINT (OTLP)
- self.otlp_url = url
- # HEADERS (OTLP)
- self.headers: Dict[str, str] = dict()
- # REFERENCES
- self.references: Dict[str, str] = dict()
- # CREDENTIALS
- self.credentials: TTLLRUCache = TTLLRUCache(ttl=(60 * 60)) # 1 hour x 512 keys
-
- # TRACER PROVIDER
- self.tracer_provider: Optional[TracerProvider] = None
- # TRACE PROCESSORS -- INLINE
- self.inline: Optional[TraceProcessor] = None
- # TRACER
- self.tracer: Optional[Tracer] = None
- # INLINE SPANS for INLINE TRACES (INLINE PROCESSOR)
- self.inline_spans: Dict[str, Any] = dict()
-
- # REDACT
- self.redact = redact
- self.redact_on_error = redact_on_error
-
- # PUBLIC
-
- def configure(
- self,
- api_key: Optional[str] = None,
- inline: Optional[bool] = True,
- ):
- # HEADERS (OTLP)
- if api_key:
- self.headers["Authorization"] = f"ApiKey {api_key}"
-
- # TRACER PROVIDER
- self.tracer_provider = TracerProvider(
- resource=Resource(attributes={"service.name": "agenta-sdk"})
- )
-
- # --- INLINE
- if inline:
- # TRACE PROCESSORS -- INLINE
- self.inline = TraceProcessor(
- InlineExporter(
- registry=self.inline_spans,
- ),
- references=self.references,
- inline=inline,
- )
- self.tracer_provider.add_span_processor(self.inline)
- # --- INLINE
-
- # TRACE PROCESSORS -- OTLP
- try:
-            log.info("Agenta - OTLP URL: %s", self.otlp_url)
-
- _otlp = TraceProcessor(
- OTLPExporter(
- endpoint=self.otlp_url,
- headers=self.headers,
- credentials=self.credentials,
- ),
- references=self.references,
- )
-
- self.tracer_provider.add_span_processor(_otlp)
- except: # pylint: disable=bare-except
-            log.warning("Agenta - OTLP unreachable, skipping exports.")
-
- # GLOBAL TRACER PROVIDER -- INSTRUMENTATION LIBRARIES
- set_tracer_provider(self.tracer_provider)
- # TRACER
- self.tracer: Tracer = self.tracer_provider.get_tracer("agenta.tracer")
-
- def get_current_span(self):
- _span = None
-
- with suppress():
- _span = get_current_span()
-
- if _span.is_recording():
- return CustomSpan(_span)
-
- return _span
-
- def store_internals(
- self,
- attributes: Dict[str, Any],
- span: Optional[Span] = None,
- ):
- with suppress():
- if span is None:
- span = self.get_current_span()
-
- span.set_attributes(
- attributes={"internals": attributes},
- namespace="data",
- )
-
- def store_refs(
- self,
- refs: Dict[str, str],
- span: Optional[Span] = None,
- ):
- with suppress():
- if span is None:
- span = self.get_current_span()
-
- for key in refs.keys():
- if key in [_.value for _ in Reference.__members__.values()]:
- # ADD REFERENCE TO THIS SPAN
- span.set_attribute(
- key.value if isinstance(key, Enum) else key,
- refs[key],
- namespace="refs",
- )
-
- # AND TO ALL SPANS CREATED AFTER THIS ONE
- self.references[key] = refs[key]
- # TODO: THIS SHOULD BE REPLACED BY A TRACE CONTEXT !!!
-
- def store_meta(
- self,
- meta: Dict[str, Any],
- span: Optional[Span] = None,
- ):
- with suppress():
- if span is None:
- span = self.get_current_span()
-
- for key in meta.keys():
- if is_valid_attribute_key(key):
- span.set_attribute(
- key,
- meta[key],
- namespace="meta",
- )
-
- def store_metrics(
- self,
- metrics: Dict[str, Any],
- span: Optional[Span] = None,
- ):
- with suppress():
- if span is None:
- span = self.get_current_span()
-
- for key in metrics.keys():
- if is_valid_attribute_key(key):
- span.set_attribute(
- key,
- metrics[key],
- namespace="metrics",
- )
-
- def is_inline_trace_ready(
- self,
- trace_id: Optional[int] = None,
- ) -> bool:
- is_ready = True
-
- with suppress():
- if self.inline and trace_id:
- is_ready = self.inline.is_ready(trace_id)
-
- return is_ready
-
- def get_inline_trace(
- self,
- trace_id: Optional[int] = None,
- ) -> Dict[str, Any]:
- _inline_trace = {}
-
- with suppress():
- if self.inline and trace_id:
- is_ready = self.inline.is_ready(trace_id)
-
- if is_ready is True:
- otel_spans = self.inline.fetch(trace_id)
-
- if otel_spans:
- _inline_trace = parse_inline_trace(otel_spans)
-
- return _inline_trace
-
- def extract(
- self,
- *args,
- **kwargs,
- ):
- return extract(*args, **kwargs)
-
- def inject(
- self,
- *args,
- **kwargs,
- ):
- return inject(*args, **kwargs)
-
- def get_current_span_context(self):
- """Get the current active span context if available.
-
- Returns:
- SpanContext or None if no active span
- """
- span = get_current_span()
- ctx = span.get_span_context()
- return ctx if ctx and ctx.is_valid else None
-
- def get_last_span_context(self):
- """Get the last closed span context if available.
-
- This is useful for accessing span information after a span has closed,
- particularly with auto-instrumentation libraries.
-
- Returns:
- SpanContext or None if no spans have been closed
- """
- return _get_last_ended()
-
- def get_span_context(self):
- """Get the most relevant span context.
-
- First tries to get the current active span context.
- If no active span exists, falls back to the last closed span.
-
- Returns:
- SpanContext or None if no relevant span context is available
- """
- return self.get_current_span_context() or self.get_last_span_context()
-
- def build_invocation_link(self, span_ctx=None) -> Optional[Link]:
- """
- Builds a Link object containing the hex-formatted trace_id and span_id
- from the current (or fallback last ended) span context.
- Useful to link annotations to spans.
-
- Args:
- span_ctx: Optional SpanContext to convert to a Link
-
- Returns:
- Link object with trace_id and span_id or None if no valid context
- """
- if span_ctx is None:
- span_ctx = self.get_span_context()
-
- if span_ctx and span_ctx.is_valid:
- return Link(
- trace_id=f"{span_ctx.trace_id:032x}",
- span_id=f"{span_ctx.span_id:016x}",
- )
-
- return None
-
-
-def get_tracer(
- tracing: Tracing,
-) -> Tracer:
- if tracing is None or tracing.tracer is None or tracing.tracer_provider is None:
- return get_tracer_provider().get_tracer("default.tracer")
-
- return tracing.tracer
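`build_invocation_link` formats the 128-bit trace id and 64-bit span id as fixed-width lowercase hex. The same formatting with the plain OpenTelemetry API, assuming an SDK `TracerProvider` is configured (otherwise the no-op span context is invalid):

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("example")

with tracer.start_as_current_span("invocation") as span:
    ctx = span.get_span_context()
    print(f"trace_id={ctx.trace_id:032x}")  # 128-bit id -> 32 hex chars
    print(f"span_id={ctx.span_id:016x}")    # 64-bit id  -> 16 hex chars
```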
diff --git a/sdk/agenta/sdk/evaluations/__init__.py b/sdk/agenta/sdk/evaluations/__init__.py
deleted file mode 100644
index 4e5223d757..0000000000
--- a/sdk/agenta/sdk/evaluations/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .preview.evaluate import aevaluate
-from .preview.utils import display_evaluation_results as display
diff --git a/sdk/agenta/sdk/evaluations/metrics.py b/sdk/agenta/sdk/evaluations/metrics.py
deleted file mode 100644
index be684c5875..0000000000
--- a/sdk/agenta/sdk/evaluations/metrics.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from typing import Optional
-from uuid import UUID
-
-from agenta.sdk.utils.client import authed_api
-from agenta.sdk.models.evaluations import EvaluationMetrics
-
-# TODO: ADD TYPES
-
-
-async def arefresh(
- run_id: UUID,
- scenario_id: Optional[UUID] = None,
- # timestamp: Optional[str] = None,
- # interval: Optional[float] = None,
-) -> EvaluationMetrics:
- payload = dict(
- run_id=str(run_id),
- scenario_id=str(scenario_id) if scenario_id else None,
- )
-
-    response = authed_api()(
-        method="POST",
-        endpoint="/preview/evaluations/metrics/refresh",
-        params=payload,
-    )
-
-    try:
-        response.raise_for_status()
-    except Exception:
-        print(response.text)
-        raise
-
- response = response.json()
-
- metrics = EvaluationMetrics(**response["metrics"][0])
-
- return metrics
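As removed here, `arefresh` posted to `/preview/evaluations/metrics/refresh` and returned the first metrics object. A usage sketch, assuming the SDK is initialized with valid credentials; the UUID below is a placeholder:

```python
import asyncio
from uuid import UUID

import agenta as ag
from agenta.sdk.evaluations.metrics import arefresh  # path as it stood before this removal


async def main():
    ag.init()  # assumes credentials are configured (e.g. via environment)
    metrics = await arefresh(run_id=UUID("12345678-1234-5678-1234-567812345678"))
    print(metrics.model_dump(exclude_none=True))


asyncio.run(main())
```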
diff --git a/sdk/agenta/sdk/evaluations/preview/__init__.py b/sdk/agenta/sdk/evaluations/preview/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sdk/agenta/sdk/evaluations/preview/evaluate.py b/sdk/agenta/sdk/evaluations/preview/evaluate.py
deleted file mode 100644
index c5e460ffb6..0000000000
--- a/sdk/agenta/sdk/evaluations/preview/evaluate.py
+++ /dev/null
@@ -1,770 +0,0 @@
-import asyncio
-
-from typing import Dict, List, Any, Union, Optional, Tuple
-from uuid import UUID
-from copy import deepcopy
-from datetime import datetime
-
-from pydantic import BaseModel
-
-from agenta.sdk.models.evaluations import (
- Origin,
- Target,
- Link,
- Reference,
- SimpleEvaluationData,
-)
-from agenta.sdk.models.workflows import (
- ApplicationRevision,
- EvaluatorRevision,
- WorkflowServiceRequestData,
- ApplicationServiceRequest,
- EvaluatorServiceRequest,
-)
-from agenta.sdk.models.testsets import TestsetRevision
-
-from agenta.sdk.utils.references import get_slug_from_name_and_id
-from agenta.sdk.evaluations.preview.utils import fetch_trace_data
-
-from agenta.sdk.managers.testsets import (
- acreate as acreate_testset,
- aretrieve as aretrieve_testset,
-)
-from agenta.sdk.managers.applications import (
- aupsert as aupsert_application,
- aretrieve as aretrieve_application,
-)
-from agenta.sdk.managers.evaluators import (
- aupsert as aupsert_evaluator,
- aretrieve as aretrieve_evaluator,
-)
-from agenta.sdk.evaluations.runs import (
- acreate as acreate_run,
- aclose as aclose_run,
- aurl as aget_url,
-)
-from agenta.sdk.evaluations.scenarios import (
- acreate as aadd_scenario,
-)
-from agenta.sdk.evaluations.results import (
- acreate as alog_result,
-)
-from agenta.sdk.evaluations.metrics import (
- arefresh as acompute_metrics,
-)
-
-
-from agenta.sdk.models.workflows import (
- WorkflowServiceInterface,
- WorkflowServiceConfiguration,
-)
-from agenta.sdk.decorators.running import (
- invoke_application,
- invoke_evaluator,
-)
-
-
-class EvaluateSpecs(BaseModel):
- testsets: Optional[Target] = None
- applications: Optional[Target] = None
- evaluators: Optional[Target] = None
-
- repeats: Optional[int] = None
-
-
-async def _parse_evaluate_kwargs(
- *,
- testsets: Optional[Target] = None,
- applications: Optional[Target] = None,
- evaluators: Optional[Target] = None,
- #
- repeats: Optional[int] = None,
- #
- specs: Optional[Union[EvaluateSpecs, Dict[str, Any]]] = None,
-) -> SimpleEvaluationData:
- _specs = deepcopy(specs)
- if isinstance(_specs, dict):
- _specs = EvaluateSpecs(**_specs)
- if _specs and not isinstance(_specs, EvaluateSpecs):
- _specs = None
-
- simple_evaluation_data = SimpleEvaluationData(
- testset_steps=testsets or (_specs.testsets if _specs else None),
- application_steps=applications or (_specs.applications if _specs else None),
- evaluator_steps=evaluators or (_specs.evaluators if _specs else None),
- #
- repeats=repeats or (_specs.repeats if _specs else None),
- )
-
- if not simple_evaluation_data.testset_steps:
- raise ValueError("Invalid 'evaluate()' specs: missing testsets")
- if not simple_evaluation_data.application_steps:
- raise ValueError("Invalid 'evaluate()' specs: missing applications")
- if not simple_evaluation_data.evaluator_steps:
- raise ValueError("Invalid 'evaluate()' specs: missing evaluators")
-
- return simple_evaluation_data
-
-
-async def _upsert_entities(
- simple_evaluation_data: SimpleEvaluationData,
-) -> SimpleEvaluationData:
- if simple_evaluation_data.testset_steps:
- if isinstance(simple_evaluation_data.testset_steps, list):
- testset_steps: Dict[str, Origin] = {}
-
- if all(
- isinstance(testset_revision_id, UUID)
- for testset_revision_id in simple_evaluation_data.testset_steps
- ):
- for testset_revision_id in simple_evaluation_data.testset_steps:
- if isinstance(testset_revision_id, UUID):
- testset_steps[str(testset_revision_id)] = "custom"
-
- elif all(
- isinstance(testcases_data, List)
- for testcases_data in simple_evaluation_data.testset_steps
- ):
- for testcases_data in simple_evaluation_data.testset_steps:
- if isinstance(testcases_data, List):
- if all(isinstance(step, Dict) for step in testcases_data):
- testset_revision_id = await acreate_testset(
- data=testcases_data,
- )
- testset_steps[str(testset_revision_id)] = "custom"
-
- simple_evaluation_data.testset_steps = testset_steps
-
- if not simple_evaluation_data.testset_steps or not isinstance(
- simple_evaluation_data.testset_steps, dict
- ):
- raise ValueError(
- "Invalid 'evaluate()' specs: missing or invalid testset steps",
- )
-
- if simple_evaluation_data.application_steps:
- if isinstance(simple_evaluation_data.application_steps, list):
- application_steps: Dict[str, Origin] = {}
-
- if all(
- isinstance(application_revision_id, UUID)
- for application_revision_id in simple_evaluation_data.application_steps
- ):
- for application_revision_id in simple_evaluation_data.application_steps:
- if isinstance(application_revision_id, UUID):
- application_steps[str(application_revision_id)] = "custom"
-
- elif all(
- callable(application_handler)
- for application_handler in simple_evaluation_data.application_steps
- ):
- for application_handler in simple_evaluation_data.application_steps:
- if callable(application_handler):
- application_revision_id = await aupsert_application(
- handler=application_handler,
- )
- application_steps[str(application_revision_id)] = "custom"
-
- simple_evaluation_data.application_steps = application_steps
-
- if not simple_evaluation_data.application_steps or not isinstance(
- simple_evaluation_data.application_steps, dict
- ):
- raise ValueError(
- "Invalid 'evaluate()' specs: missing or invalid application steps",
- )
-
- if simple_evaluation_data.evaluator_steps:
- if isinstance(simple_evaluation_data.evaluator_steps, list):
- evaluator_steps: Dict[str, Origin] = {}
-
- if all(
- isinstance(evaluator_revision_id, UUID)
- for evaluator_revision_id in simple_evaluation_data.evaluator_steps
- ):
- for evaluator_revision_id in simple_evaluation_data.evaluator_steps:
- if isinstance(evaluator_revision_id, UUID):
- evaluator_steps[str(evaluator_revision_id)] = "custom"
-
- elif all(
- callable(evaluator_handler)
- for evaluator_handler in simple_evaluation_data.evaluator_steps
- ):
- for evaluator_handler in simple_evaluation_data.evaluator_steps:
- if callable(evaluator_handler):
- evaluator_revision_id = await aupsert_evaluator(
- handler=evaluator_handler,
- )
- evaluator_steps[str(evaluator_revision_id)] = "custom"
-
- simple_evaluation_data.evaluator_steps = evaluator_steps
-
- if not simple_evaluation_data.evaluator_steps or not isinstance(
- simple_evaluation_data.evaluator_steps, dict
- ):
- raise ValueError(
- "Invalid 'evaluate()' specs: missing or invalid evaluator steps",
- )
-
- return simple_evaluation_data
-
-
-async def _retrieve_entities(
- simple_evaluation_data: SimpleEvaluationData,
-) -> Tuple[
- Dict[UUID, TestsetRevision],
- Dict[UUID, ApplicationRevision],
- Dict[UUID, EvaluatorRevision],
-]:
- testset_revisions: Dict[UUID, TestsetRevision] = {}
- # for testset_revision_id, origin in simple_evaluation_data.testset_steps.items():
- # testset_revision = await retrieve_testset(
- # testset_revision_id=testset_revision_id,
- # )
- for testset_id, origin in simple_evaluation_data.testset_steps.items():
- testset_revision = await aretrieve_testset(
- testset_id=testset_id,
- )
-
- if not testset_revision or not testset_revision.id:
- continue
-
- testset_revisions[testset_revision.id] = testset_revision
-
- application_revisions: Dict[UUID, ApplicationRevision] = {}
- for (
- application_revision_id,
- origin,
- ) in simple_evaluation_data.application_steps.items():
- application_revision = await aretrieve_application(
- application_revision_id=application_revision_id,
- )
-
- if not application_revision:
- continue
-
- application_revisions[application_revision_id] = application_revision
-
- evaluator_revisions: Dict[UUID, EvaluatorRevision] = {}
- for evaluator_revision_id, origin in simple_evaluation_data.evaluator_steps.items():
- evaluator_revision = await aretrieve_evaluator(
- evaluator_revision_id=evaluator_revision_id,
- )
-
- if not evaluator_revision:
- continue
-
- evaluator_revisions[evaluator_revision_id] = evaluator_revision
-
- return testset_revisions, application_revisions, evaluator_revisions
-
-
-def _timestamp_suffix():
- suffix = datetime.now().strftime("%y-%m-%d · %H:%M")
- return f" [{suffix}]"
-
-
-UNICODE = {
- "here": "• ",
- "root": "┌─ ",
- "next": "├─ ",
- "last": "└─ ",
- "pipe": "│ ",
- "skip": " ",
- "this": "── ",
-}
-
-
-# @debug
-async def aevaluate(
- *,
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- testsets: Optional[Target] = None,
- applications: Optional[Target] = None,
- evaluators: Optional[Target] = None,
- #
- repeats: Optional[int] = None,
- #
- specs: Optional[Union[EvaluateSpecs, Dict[str, Any]]] = None,
-):
- simple_evaluation_data = await _parse_evaluate_kwargs(
- testsets=testsets,
- applications=applications,
- evaluators=evaluators,
- repeats=repeats,
- specs=specs,
- )
-
- simple_evaluation_data = await _upsert_entities(
- simple_evaluation_data=simple_evaluation_data,
- )
-
- print()
- print(
- "──────────────────────────────────────"
- "──────────────────────────────────────"
- )
-    print("Evaluation running...")
- print(
- "──────────────────────────────────────"
- "──────────────────────────────────────"
- )
-
-    suffix = _timestamp_suffix()
-    name = f"{name or 'evaluation'}{suffix}"  # guard against a missing name
-
- run = await acreate_run(
- name=name,
- description=description,
- #
- testset_steps=simple_evaluation_data.testset_steps,
- application_steps=simple_evaluation_data.application_steps,
- evaluator_steps=simple_evaluation_data.evaluator_steps,
- #
- repeats=simple_evaluation_data.repeats,
- )
-
- print(
- f"{UNICODE['here']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f" run_id={str(run.id)}",
- )
-
- if not run.id:
- print("[failure] could not create evaluation")
- return None
-
- (
- testset_revisions,
- application_revisions,
- evaluator_revisions,
- ) = await _retrieve_entities(
- simple_evaluation_data=simple_evaluation_data,
- )
-
- scenarios = list()
-
- metrics = dict()
-
- for testset_revision in testset_revisions.values():
- if not testset_revision.data or not testset_revision.data.testcases:
- continue
-
- testcases = testset_revision.data.testcases
-
- print(
- f"{UNICODE['next']}"
- f"{UNICODE['here']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f" testset_id={str(testset_revision.testset_id)}",
- )
-
- for testcase_idx, testcase in enumerate(testcases):
- print(
- f"{UNICODE['pipe']}"
- f"{UNICODE['pipe']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- "-----------------------"
- "--------------------------------------"
- )
-
- print(
- f"{UNICODE['pipe']}"
-            f"{UNICODE['next' if testcase_idx < len(testcases) - 1 else 'last']}"
- f"{UNICODE['here']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f"testcase_id={str(testcase.id)}",
- )
-
- scenario = await aadd_scenario(
- run_id=run.id,
- )
-
- print(
- f"{UNICODE['pipe']}"
- f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
- f"{UNICODE['next']}"
- f"{UNICODE['here']}"
- f"{UNICODE['skip']}"
- f"scenario_id={str(scenario.id)}",
- )
-
- results = dict()
-
- result = await alog_result(
- run_id=run.id,
- scenario_id=scenario.id,
- step_key="testset-" + testset_revision.slug, # type: ignore
- testcase_id=testcase.id,
- )
-
- print(
- f"{UNICODE['pipe']}"
- f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
- f"{UNICODE['pipe']}"
- f"{UNICODE['next']}"
- f"{UNICODE['here']}"
- f" result_id={str(result.id)} (testcase)",
- )
-
- results[testset_revision.slug] = result
-
- _testcase = testcase.model_dump(
- mode="json",
- exclude_none=True,
- ) # type: ignore
- inputs = testcase.data
- if isinstance(inputs, dict):
- if "testcase_dedup_id" in inputs:
- del inputs["testcase_dedup_id"]
-
- for application_revision in application_revisions.values():
- if not application_revision or not application_revision.data:
- print("Missing or invalid application revision")
- if application_revision:
- print(application_revision.model_dump(exclude_none=True))
- continue
-
- # print(f" Application {application_revision.model_dump(exclude_none=True)}") # type: ignore
-
- references = dict(
- testset=Reference(
- id=testset_revision.testset_id,
- ),
- testset_variant=Reference(
- id=testset_revision.testset_variant_id,
- ),
- testset_revision=Reference(
- id=testset_revision.id,
- slug=testset_revision.slug,
- version=testset_revision.version,
- ),
- application=Reference(
- id=application_revision.application_id,
- ),
- application_variant=Reference(
- id=application_revision.application_variant_id,
- ),
- application_revision=Reference(
- id=application_revision.id,
- slug=application_revision.slug,
- version=application_revision.version,
- ),
- )
- links = None
-
- _revision = application_revision.model_dump(
- mode="json",
- exclude_none=True,
- )
- interface = WorkflowServiceInterface(
- **(
- application_revision.data.model_dump()
- if application_revision.data
- else {}
- )
- )
- configuration = WorkflowServiceConfiguration(
- **(
- application_revision.data.model_dump()
- if application_revision.data
- else {}
- )
- )
- parameters = application_revision.data.parameters
-
- _trace = None
- outputs = None
-
- workflow_service_request_data = WorkflowServiceRequestData(
- revision=_revision,
- parameters=parameters,
- #
- testcase=_testcase,
- inputs=inputs,
- #
- trace=_trace,
- outputs=outputs,
- )
-
- application_request = ApplicationServiceRequest(
- interface=interface,
- configuration=configuration,
- #
- data=workflow_service_request_data,
- #
- references=references, # type: ignore
- links=links, # type: ignore
- )
-
- application_response = await invoke_application(
- request=application_request,
- )
-
- if (
- not application_response
- or not application_response.data
- or not application_response.trace_id
- ):
- print("Missing or invalid application response")
- if application_response:
- print(application_response.model_dump(exclude_none=True))
- continue
-
- trace_id = application_response.trace_id
-
- if not application_revision.id or not application_revision.name:
- print("Missing application revision ID or name")
- continue
-
- application_slug = get_slug_from_name_and_id(
- name=application_revision.name,
- id=application_revision.id,
- )
-
-            # Schedule the fetch as a task so it runs while the result is
-            # logged below; a bare coroutine would not start until awaited.
-            trace = asyncio.create_task(
-                fetch_trace_data(trace_id, max_retries=30, delay=1.0)
-            )
-
- result = await alog_result(
- run_id=run.id,
- scenario_id=scenario.id,
- step_key="application-" + application_slug, # type: ignore
- trace_id=trace_id,
- )
-
- print(
- f"{UNICODE['pipe']}"
- f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
- f"{UNICODE['pipe']}"
- f"{UNICODE['next']}"
- f"{UNICODE['here']}"
- f" result_id={str(result.id)} (invocation)",
- )
-
- results[application_slug] = result
-
- trace = await trace
-
- if not trace:
- print("Failed to fetch trace data for application")
- continue
-
-            spans = list(trace.get("spans", {}).values())
-            if not spans:
-                print("Trace contains no spans for application")
-                continue
-
-            root_span = spans[0]
-            trace_attributes: dict = root_span.get("attributes", {})
-            trace_attributes_ag: dict = trace_attributes.get("ag", {})
-            trace_attributes_ag_data: dict = trace_attributes_ag.get("data", {})
-            outputs = trace_attributes_ag_data.get("outputs")
-            inputs = inputs or trace_attributes_ag_data.get("inputs")
-
- for i, evaluator_revision in enumerate(evaluator_revisions.values()):
- if not evaluator_revision or not evaluator_revision.data:
- print("Missing or invalid evaluator revision")
- if evaluator_revision:
- print(evaluator_revision.model_dump(exclude_none=True))
- continue
-
- references = dict(
- testset=Reference(
- id=testset_revision.testset_id,
- ),
- testset_variant=Reference(
- id=testset_revision.testset_variant_id,
- ),
- testset_revision=Reference(
- id=testset_revision.id,
- slug=testset_revision.slug,
- version=testset_revision.version,
- ),
- evaluator=Reference(
- id=evaluator_revision.evaluator_id,
- ),
- evaluator_variant=Reference(
- id=evaluator_revision.evaluator_variant_id,
- ),
- evaluator_revision=Reference(
- id=evaluator_revision.id,
- slug=evaluator_revision.slug,
- version=evaluator_revision.version,
- ),
- )
- links = (
- dict(
- invocation=Link(
- trace_id=application_response.trace_id,
- span_id=application_response.span_id,
- )
- )
- if application_response.trace_id
- and application_response.span_id
- else None
- )
-
- _revision = evaluator_revision.model_dump(
- mode="json",
- exclude_none=True,
- )
- interface = WorkflowServiceInterface(
- **(
- evaluator_revision.data.model_dump()
- if evaluator_revision.data
- else {}
- )
- )
- configuration = WorkflowServiceConfiguration(
- **(
- evaluator_revision.data.model_dump()
- if evaluator_revision.data
- else {}
- )
- )
- parameters = evaluator_revision.data.parameters
-
- workflow_service_request_data = WorkflowServiceRequestData(
- revision=_revision,
- parameters=parameters,
- #
- testcase=_testcase,
- inputs=inputs,
- #
- trace=trace,
- outputs=outputs,
- )
-
- evaluator_request = EvaluatorServiceRequest(
- version="2025.07.14",
- #
- interface=interface,
- configuration=configuration,
- #
- data=workflow_service_request_data,
- #
- references=references, # type: ignore
- links=links, # type: ignore
- )
-
- evaluator_response = await invoke_evaluator(
- request=evaluator_request,
- #
- annotate=True,
- )
-
- if (
- not evaluator_response
- or not evaluator_response.data
- or not evaluator_response.trace_id
- ):
- print("Missing or invalid evaluator response")
- if evaluator_response:
- print(evaluator_response.model_dump(exclude_none=True))
- continue
-
- trace_id = evaluator_response.trace_id
-
-                # Same pattern as above: schedule the fetch as a task
-                trace = asyncio.create_task(
-                    fetch_trace_data(trace_id, max_retries=20, delay=1.0)
-                )
-
- result = await alog_result(
- run_id=run.id,
- scenario_id=scenario.id,
- step_key="evaluator-" + evaluator_revision.slug, # type: ignore
- trace_id=trace_id,
- )
-
- print(
- f"{UNICODE['pipe']}"
- f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
- f"{UNICODE['pipe']}"
- f"{UNICODE['last' if (i == len(evaluator_revisions) - 1) else 'next']}"
- f"{UNICODE['here']}"
- f" result_id={str(result.id)} (annotation)",
- )
-
- results[evaluator_revision.slug] = result
-
- trace = await trace
-
- if not trace:
- print("Failed to fetch trace data for evaluator")
- continue
-
- metrics = await acompute_metrics(
- run_id=run.id,
- scenario_id=scenario.id,
- )
-
- print(
- f"{UNICODE['pipe']}"
- f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
- f"{UNICODE['last']}"
- f"{UNICODE['here']}"
- f"{UNICODE['skip']}"
- f" metrics_id={str(metrics.id)}",
- )
-
- scenarios.append(
- {
- "scenario": scenario,
- "results": results,
- "metrics": metrics,
- },
- )
-
- print(
- f"{UNICODE['pipe']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- f"{UNICODE['skip']}"
- "-----------------------"
- "--------------------------------------"
- )
-
- metrics = dict()
-
-    if len(scenarios) > 0:
-        metrics = await acompute_metrics(
-            run_id=run.id,
-        )
-
-        # Only print the id when run-level metrics were actually computed;
-        # otherwise `metrics` is still the empty dict initialized above.
-        print(
-            f"{UNICODE['last']}"
-            f"{UNICODE['here']}"
-            f"{UNICODE['skip']}"
-            f"{UNICODE['skip']}"
-            f"{UNICODE['skip']}"
-            f" metrics_id={str(metrics.id)}",
-        )
-
- run = await aclose_run(
- run_id=run.id,
- )
-
- run_url = await aget_url(run_id=run.id)
-
- print(
- "──────────────────────────────────────"
- "──────────────────────────────────────"
- )
-    print("Evaluation finished.")
- print(
- "--------------------------------------"
- "--------------------------------------"
- )
- print(f"Evaluation URL: {run_url or '[unavailable]'}")
- print(
- "──────────────────────────────────────"
- "──────────────────────────────────────"
- )
- print()
-
- return dict(
- run=run,
- scenarios=scenarios,
- metrics=metrics,
- )
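Tying the deleted `aevaluate` together: per `_upsert_entities` above, `testsets` may be given as lists of testcase dicts (a new testset is created) and `applications`/`evaluators` as callables (upserted as revisions). A self-contained usage sketch; the handler signatures below are assumptions, since only the upsert path is shown in this diff:

```python
import asyncio

import agenta as ag
from agenta.sdk.evaluations import aevaluate  # as exported before this removal


async def echo_app(question: str) -> str:          # hypothetical application handler
    return question.upper()


async def exact_match(outputs, testcase) -> dict:  # hypothetical evaluator handler
    return {"score": float(outputs == testcase.get("expected"))}


async def main():
    ag.init()  # assumes credentials are configured (e.g. via environment)
    result = await aevaluate(
        name="smoke-test",
        testsets=[[{"question": "ping", "expected": "PING"}]],  # inline testcases
        applications=[echo_app],
        evaluators=[exact_match],
        repeats=1,
    )
    print(result["run"].id if result else "evaluation failed")


asyncio.run(main())
```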
diff --git a/sdk/agenta/sdk/evaluations/preview/utils.py b/sdk/agenta/sdk/evaluations/preview/utils.py
deleted file mode 100644
index 609ecfce47..0000000000
--- a/sdk/agenta/sdk/evaluations/preview/utils.py
+++ /dev/null
@@ -1,861 +0,0 @@
-"""
-Utilities for formatting and displaying evaluation results.
-Contains helper functions for Rich text formatting and table generation.
-"""
-
-import json
-from typing import Dict, List, Any, Optional
-import asyncio
-from uuid import UUID
-from dataclasses import dataclass, field
-
-import unicodedata
-import re
-
-
-@dataclass
-class EvaluationTestcaseData:
- """
- Data model for a single evaluation testcase.
-
- Attributes:
- case_id: Unique identifier for the testcase
- inputs: Input data for the testcase
- application_outputs: Outputs from the application under test
- evaluator_outputs: Outputs from evaluators (scores and assertions)
- """
-
- case_id: str = ""
- inputs: Dict[str, Any] = field(default_factory=dict)
- application_outputs: Dict[str, Any] = field(default_factory=dict)
- evaluator_outputs: Dict[str, Any] = field(default_factory=dict)
-
- def get_scores(self) -> Dict[str, float]:
- """Extract numeric scores from evaluator outputs."""
- scores = {}
- for key, value in self.evaluator_outputs.items():
- if isinstance(value, (int, float)) and not isinstance(value, bool):
- scores[key] = value
- return scores
-
- def get_assertions(self) -> Dict[str, Any]:
- """Extract boolean assertions from evaluator outputs."""
- assertions = {}
- for key, value in self.evaluator_outputs.items():
- if isinstance(value, bool):
- assertions[key] = value
- elif isinstance(value, list) and all(isinstance(v, bool) for v in value):
- assertions[key] = value
- return assertions
-
-
-@dataclass
-class EvaluationReport:
- """
- Data model for the complete evaluation report.
-
- Attributes:
- run_id: Unique identifier for the evaluation run
- cases: List of evaluation case data
- summary: Summary statistics for the evaluation
- """
-
- run_id: str = ""
- cases: List[EvaluationTestcaseData] = field(default_factory=list)
- summary: Dict[str, Any] = field(default_factory=dict)
-
- def get_total_cases(self) -> int:
- """Get total number of testcases."""
- return len(self.cases)
-
- def get_all_evaluator_keys(self) -> set[str]:
- """Get all unique evaluator keys across all cases."""
- all_keys = set()
- for case in self.cases:
- all_keys.update(case.evaluator_outputs.keys())
- return all_keys
-
- def calculate_averages(self) -> Dict[str, float]:
- """Calculate average scores across all cases."""
- averages = {}
- all_scores = {}
-
- # Collect all scores
- for case in self.cases:
- case_scores = case.get_scores()
- for key, value in case_scores.items():
- if key not in all_scores:
- all_scores[key] = []
- all_scores[key].append(value)
-
- # Calculate averages
- for key, values in all_scores.items():
- if values:
- averages[key] = sum(values) / len(values)
-
- return averages
-
- def calculate_assertion_percentage(self) -> float:
- """Calculate overall assertion success percentage."""
- all_assertions = []
-
- for case in self.cases:
- case_assertions = case.get_assertions()
- for value in case_assertions.values():
- if isinstance(value, bool):
- all_assertions.append(value)
- elif isinstance(value, list):
- all_assertions.extend(value)
-
- if not all_assertions:
- return 0.0
-
- return (sum(all_assertions) / len(all_assertions)) * 100
-
-
-# Rich imports for progress tracking
-try:
- from rich.progress import track
-
- RICH_AVAILABLE = True
-except ImportError:
- RICH_AVAILABLE = False
-
- # Use simple iteration when Rich is not available
- def track(iterable, description="Processing..."):
- return iterable
-
-
-# Try to import Rich for enhanced formatting, fall back to plain text if not available
-try:
- from rich.console import Console
- from rich.table import Table
- from rich.text import Text
- from rich import box
-
- _HAS_RICH = True
-except ImportError:
- _HAS_RICH = False
-
- # Fallback implementations for when Rich is not available
- class Text:
- def __init__(self, text="", style=None):
- self.text = str(text)
-
- def __str__(self):
- return self.text
-
-        @staticmethod
-        def from_markup(text):
-            # Strip Rich markup tags for the plain-text fallback
-            # (re is already imported at module level)
-            clean_text = re.sub(r'\[/?\w+(?:\s+\w+="[^"]*")*\]', "", text)
-            return Text(clean_text)
-
- class Table:
- def __init__(self, *args, **kwargs):
- self.rows = []
- self.headers = []
-
- def add_column(self, header, **kwargs):
- self.headers.append(header)
-
- def add_row(self, *args):
- self.rows.append([str(arg) for arg in args])
-
- def add_section(self):
- # Add separator in fallback mode
- pass
-
- class Console:
- def __init__(self, width=None, **kwargs):
- self.width = width
-
-
-def smart_format_content(content: Any, max_length: int = 200) -> str:
- """
- Smart content formatting with size awareness and Rich markup support.
-
- Args:
- content: Content to format (dict, list, str, etc.)
- max_length: Maximum character length before truncation
-
- Returns:
- Formatted string with optional Rich markup
- """
- if content is None:
- return ""
-
- if isinstance(content, str):
- if len(content) <= max_length:
- return content
- else:
- return f"{content[:max_length-3]}..."
-
- if isinstance(content, (dict, list)):
- try:
- json_str = json.dumps(content, indent=None, separators=(",", ":"))
- if len(json_str) <= max_length:
- return json_str
- else:
- # For large objects, show structure with key-value pairs
- if isinstance(content, dict):
- items = list(content.items())[:3]
- item_preview = ", ".join(f'"{k}": "{v}"' for k, v in items)
- more_indicator = (
- f" (+{len(content) - len(items)} more)"
- if len(content) > len(items)
- else ""
- )
- full_preview = f"{{{item_preview}{more_indicator}}}"
- # Truncate the entire string to fit the column width
- if len(full_preview) <= max_length:
- return full_preview
- else:
- return f"{full_preview[:max_length-3]}..."
- else: # list
- count = len(content)
- item_preview = (
- str(content[0])[:50] + "..."
- if content and len(str(content[0])) > 50
- else str(content[0])
- if content
- else ""
- )
- return (
- f"[{item_preview}] ({count} items)"
- if count > 1
- else f"[{item_preview}]"
- )
- except (TypeError, ValueError):
- # Fallback for non-serializable objects
- str_repr = str(content)
- return (
- str_repr[: max_length - 3] + "..."
- if len(str_repr) > max_length
- else str_repr
- )
-
- # For other types
- str_repr = str(content)
- return (
- str_repr[: max_length - 3] + "..." if len(str_repr) > max_length else str_repr
- )
-
-
-def format_number(value: float, max_precision: int = 3) -> str:
- """
- Format numbers with intelligent precision and comma separators.
-
- Args:
- value: The numeric value to format
- max_precision: Maximum decimal places to show
-
- Returns:
- Formatted number string
- """
- if abs(value) >= 1000:
- # Use comma separators for large numbers
- return f"{value:,.{max_precision}f}".rstrip("0").rstrip(".")
- elif abs(value) < 0.001 and value != 0:
- # Use scientific notation for very small numbers
- return f"{value:.{max_precision}e}"
- else:
- # Standard formatting with up to max_precision decimal places
- formatted = f"{value:.{max_precision}f}".rstrip("0").rstrip(".")
- return formatted if formatted else "0"
-
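A quick check of `format_number`'s three branches, with values chosen to hit each path:

```python
print(format_number(1234567.8912))  # '1,234,567.891' -- comma separators
print(format_number(0.000042))      # '4.200e-05'     -- scientific notation
print(format_number(0.5))           # '0.5'           -- trailing zeros stripped
```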
-
-def format_evaluation_report_rich(
- report_data: List[Dict[str, Any]], console_width: Optional[int] = None
-) -> str:
- """Format evaluation results using Rich tables with enhanced styling."""
- if not _HAS_RICH:
- return _format_with_unicode_table(report_data, console_width)
-
- if not report_data:
- return "No evaluation data available"
-
- # Create Rich table with responsive design
- table = Table(
- title="Evaluation Results",
- box=box.ROUNDED,
- show_header=True,
- header_style="bold magenta",
- width=console_width,
- )
-
- # Add columns with responsive widths
- table.add_column("Testcases", style="cyan", width=10)
- table.add_column("Inputs", style="green", width=40, overflow="fold")
- table.add_column("Outputs", style="blue", width=40, overflow="fold")
- table.add_column("Scores", style="yellow", width=40)
- table.add_column("Assertions", style="red", width=10)
-
- # Collect totals for summary
- total_scores = {}
- total_assertions = []
-
- for case_data in report_data:
- case_id = case_data.get("case_id", "unknown")
- inputs = case_data.get("inputs", {})
- outputs = case_data.get("application_outputs", {})
-
- # Format inputs and outputs with Rich Text for better display
- inputs_text = Text.from_markup(smart_format_content(inputs, 400))
- outputs_text = Text.from_markup(smart_format_content(outputs, 500))
-
- # Format scores (numeric values). One score per line for readability.
- scores_parts = []
- for key, value in case_data.get("evaluator_outputs", {}).items():
-
- def _maybe_add(k: str, v: Any):
- if isinstance(v, bool):
- return
- num: Optional[float] = None
- if isinstance(v, (int, float)):
- num = float(v)
- elif isinstance(v, str):
- try:
- num = float(v)
- except Exception:
- num = None
- if num is not None:
- formatted_value = format_number(num)
- scores_parts.append(f"{k}: {formatted_value}")
- if k not in total_scores:
- total_scores[k] = []
- total_scores[k].append(num)
-
-        if isinstance(value, list):
-            for v in value:
-                _maybe_add(key, v)
-        else:
-            _maybe_add(key, value)
- scores_text = Text("\n".join(scores_parts))
-
- # Format assertions (boolean values) - show each evaluator's result
- assertions_parts = []
- for key, value in case_data.get("evaluator_outputs", {}).items():
- if isinstance(value, bool):
- symbol = "[green]✔[/green]" if value else "[red]✗[/red]"
- assertions_parts.append(symbol)
- total_assertions.append(value)
- elif isinstance(value, list) and all(isinstance(v, bool) for v in value):
- # Handle multiple evaluators with same key name
- for v in value:
- symbol = "[green]✔[/green]" if v else "[red]✗[/red]"
- assertions_parts.append(symbol)
- total_assertions.append(v)
- # Join with spaces to show multiple assertions clearly
- assertions_text = Text.from_markup(
- " ".join(assertions_parts) if assertions_parts else ""
- )
-
- table.add_row(case_id, inputs_text, outputs_text, scores_text, assertions_text)
- # Add a separator after each data row for readability
- table.add_section()
-
- # Add a separator line before averages
- table.add_section()
-
- # Add averages row
- avg_scores_parts = []
- for key, values in total_scores.items():
- avg = sum(values) / len(values) if values else 0
- avg_scores_parts.append(f"{key}: {format_number(avg)}")
-
- assertion_pct = (
- (sum(total_assertions) / len(total_assertions) * 100) if total_assertions else 0
- )
- assertion_summary = f"{assertion_pct:.1f}%"
-
- table.add_row(
- "[bold italic]Averages[/bold italic]",
- "",
- "",
- Text("\n".join(avg_scores_parts)),
- Text(assertion_summary),
- )
-
-    # Render the table into a string buffer
-    from io import StringIO
-
-    string_buffer = StringIO()
-    console = Console(width=console_width, file=string_buffer)
-    console.print(table)
-    return string_buffer.getvalue()
-
-
-def _format_with_unicode_table(
- report_data: List[Dict[str, Any]], console_width: Optional[int]
-) -> str:
- """Fallback Unicode table formatting (enhanced version)"""
- if not report_data:
- return "No evaluation data available"
-
- # Enhanced table formatting helpers
- def make_border(widths, left="┏", mid="┳", right="┓", fill="━"):
- return left + mid.join(fill * w for w in widths) + right
-
- def make_separator(widths, left="├", mid="┼", right="┤", fill="─"):
- return left + mid.join(fill * w for w in widths) + right
-
- def make_row(values, widths, left="┃", mid="┃", right="┃"):
- formatted = []
- for val, width in zip(values, widths):
- # Handle multi-line content better
- val_str = str(val)
- if "\n" in val_str:
- # Take first line for table display
- val_str = val_str.split("\n")[0]
- formatted.append(f" {val_str:<{width-2}} ")
- return left + mid.join(formatted) + right
-
- # Responsive column widths
- if console_width and console_width < 120:
- col_widths = [12, 20, 30, 20, 10] # Compact
- else:
- col_widths = [15, 30, 40, 25, 12] # Full width
-
- # Build enhanced table
- lines = []
-
- # Header with styling
- lines.append(make_border(col_widths))
- lines.append(
- make_row(
- ["Testcase ID", "Inputs", "Outputs", "Scores", "Assertions"], col_widths
- )
- )
- lines.append(make_border(col_widths, "┡", "╇", "┩", "━"))
-
- # Data rows with improved formatting
- total_scores = {}
- total_assertions = []
-
- for case_data in report_data:
- case_id = case_data.get("case_id", "unknown")
-
- # Smart content formatting
- inputs = case_data.get("inputs", {})
- outputs = case_data.get("application_outputs", {})
-
- inputs_str = smart_format_content(inputs, col_widths[1] - 4)
- outputs_str = smart_format_content(outputs, col_widths[2] - 4)
-
- # Format scores with proper number formatting, one per line
- scores_parts = []
- for key, value in case_data.get("evaluator_outputs", {}).items():
- if isinstance(value, (int, float)) and not isinstance(value, bool):
- formatted_value = format_number(value)
- scores_parts.append(f"{key}: {formatted_value}")
- if key not in total_scores:
- total_scores[key] = []
- total_scores[key].append(value)
-        # NOTE: make_row() only renders the first line, so multi-line
-        # scores collapse to their first entry in the plain table
-        scores_str = "\n".join(scores_parts)
-
- # Format assertions with colored symbols (fallback) - show each evaluator's result
- assertions_parts = []
- for key, value in case_data.get("evaluator_outputs", {}).items():
- if isinstance(value, bool):
- assertions_parts.append("✔" if value else "✗")
- total_assertions.append(value)
- elif isinstance(value, list) and all(isinstance(v, bool) for v in value):
- # Handle multiple evaluators with same key name
- for v in value:
- assertions_parts.append("✔" if v else "✗")
- total_assertions.append(v)
- # Join with spaces to show multiple assertions clearly
- assertions_str = " ".join(assertions_parts) if assertions_parts else ""
-
- lines.append(
- make_row(
- [case_id, inputs_str, outputs_str, scores_str, assertions_str],
- col_widths,
- )
- )
- lines.append(make_separator(col_widths))
-
- # Enhanced summary row
- avg_scores_parts = []
- for key, values in total_scores.items():
- avg = sum(values) / len(values) if values else 0
- avg_scores_parts.append(f"{key}: {format_number(avg)}")
- avg_scores_str = smart_format_content(
- ", ".join(avg_scores_parts), col_widths[3] - 4
- )
-
- assertion_pct = (
- (sum(total_assertions) / len(total_assertions) * 100) if total_assertions else 0
- )
- assertion_summary = f"{assertion_pct:.1f}%"
-
- # Add separator line before averages for clarity
- lines.append(make_border(col_widths, "┠", "╂", "┨", "━"))
- lines.append(
- make_row(["Averages", "", "", avg_scores_str, assertion_summary], col_widths)
- )
- lines.append(make_border(col_widths, "└", "┴", "┘", "─"))
-
- return "\n".join(lines)
-
-
-# Main function that chooses the best available formatting
-def format_evaluation_report(
- report_data: List[Dict[str, Any]], console_width: Optional[int] = None
-) -> str:
- """Format evaluation results with best available method"""
- return format_evaluation_report_rich(report_data, console_width)
-
-
-async def display_evaluation_results(
- eval_data, show_detailed_logs=True, console_width=None
-):
- """Enhanced display evaluation results with Rich-like formatting and progress tracking"""
- # Give traces a moment to be stored
- print()
- print("⏳ Waiting for traces to be available...")
- await asyncio.sleep(2)
-
- print()
- print("📊 Processing evaluation results...")
- print(f" run_id={eval_data['run'].id}") # type:ignore
-
- # Collect data for the report table with progress tracking
- report_data = []
- scenarios_to_process = eval_data["scenarios"]
-
- # Use Rich progress bar if available, otherwise simple iteration
- if RICH_AVAILABLE:
- scenario_iterator = track(
- scenarios_to_process, description="📋 Processing scenarios"
- )
- else:
- scenario_iterator = scenarios_to_process
- print(f"📋 Processing {len(scenarios_to_process)} scenarios...")
-
- for i, scenario in enumerate(scenario_iterator):
- if not RICH_AVAILABLE and show_detailed_logs:
- print(
- f" 📄 scenario {i+1}/{len(scenarios_to_process)}: {scenario['scenario'].id}"
- ) # type:ignore
- elif show_detailed_logs:
- print(f" scenario_id={scenario['scenario'].id}") # type:ignore
-
- case_data = EvaluationTestcaseData().__dict__
-
- for step_key, result in scenario["results"].items(): # type:ignore
- if result.testcase_id:
- if show_detailed_logs:
- print(
- f" step_key={str(step_key).ljust(32)}, testcase_id={result.testcase_id}"
- )
- # Use a more readable case ID
- testcase_short = str(result.testcase_id)[:8]
- case_data["case_id"] = f"{testcase_short}..."
-
- elif result.trace_id:
- if show_detailed_logs:
- print(
- f" step_key={str(step_key).ljust(32)}, trace_id={result.trace_id}"
- )
-
- # Fetch and process trace data using services module
- try:
- trace_data = await fetch_trace_data(result.trace_id)
- if trace_data and "spans" in trace_data:
- for span_key in trace_data["spans"].keys():
- step_data = extract_trace_step_data(trace_data, span_key)
- if step_data:
- inputs = step_data["inputs"]
- outputs = step_data["outputs"]
- trace_type = step_data["trace_type"]
- trace_evaluator_name = step_data.get("evaluator_name")
-
- # Store inputs for report
- if inputs:
- case_data["inputs"] = clean_inputs_for_display(
- **(inputs if isinstance(inputs, dict) else {})
- )
- if show_detailed_logs:
- print(
- f" inputs={inputs}"
- )
-
- # Determine if this is application or evaluator
- if outputs:
- # Heuristic to classify outputs:
- # 1. If outputs is a single string value, it's likely the application output
- # 2. If outputs is a dict with keys like 'score', 'myscore', 'success', it's evaluator output
- # 3. If we already have application_outputs, everything else is evaluator output
-
- is_application_output = False
- if not case_data.get("application_outputs"):
- # Check if this looks like a simple application output (single string)
- if isinstance(outputs, str):
- is_application_output = True
- elif (
- isinstance(outputs, dict)
- and len(outputs) == 0
- ):
- # Empty dict, skip
- is_application_output = False
- elif isinstance(outputs, dict):
- # If it's a dict with typical evaluator keys, it's an evaluator
- evaluator_keys = {
- "score",
- "myscore",
- "success",
- "failure",
- "passed",
- "failed",
- }
- if any(
- key in evaluator_keys
- for key in outputs.keys()
- ):
- is_application_output = False
- else:
- # Otherwise, it might be application output
- is_application_output = True
-
- if is_application_output:
- case_data["application_outputs"] = outputs
- else:
- # This is an evaluator output
- # Use the evaluator name from trace data, or fall back to step_key hash
- evaluator_name = trace_evaluator_name or (
- step_key[:8] if step_key else None
- )
- process_evaluator_outputs(
- case_data,
- outputs,
- evaluator_name=evaluator_name,
- )
-
- if show_detailed_logs:
- print(
- f" outputs={outputs}"
- )
- else:
- if show_detailed_logs:
- print(
- f" ⚠️ no_trace_data"
- )
- except Exception as e:
- if show_detailed_logs:
- print(
- f" ❌ trace_fetch_error: {e}"
- )
- else:
- if show_detailed_logs:
- print(
- f" step_key={str(step_key).ljust(32)}, ❌ error={result.error}"
- )
-
- if case_data["case_id"]:
- report_data.append(case_data)
-
- # if show_detailed_logs:
- # print(
- # f"📈 metrics={json.dumps(eval_data['metrics'].data, indent=4)}"
- # ) # type:ignore
-
- # Display the enhanced formatted report table
- print()
- print("📋 Evaluation Report:")
- print(format_evaluation_report(report_data, console_width))
-
- # Add summary statistics
- if report_data:
- print()
- print(f"✅ Successfully processed {len(report_data)} testcases")
-
- # Count total evaluators
- all_evaluator_keys = set()
- for case in report_data:
- all_evaluator_keys.update(case.get("evaluator_outputs", {}).keys())
-
- if all_evaluator_keys:
- print(
- f"🔍 Evaluated with {len(all_evaluator_keys)} metrics: {', '.join(sorted(all_evaluator_keys))}"
- )
- else:
- print("⚠️ No evaluation data found")
-
-
-from typing import Callable, Dict, Optional, Any
-
-from agenta.sdk.utils.client import authed_api
-import asyncio
-import json
-from typing import Dict, Any, Optional
-
-
-async def fetch_trace_data(
- trace_id: str, max_retries: int = 3, delay: float = 1.0
-) -> Optional[Dict[str, Any]]:
- """
- Fetch trace data from the API with retry logic.
-
- Args:
- trace_id: The trace ID to fetch
- max_retries: Maximum number of retry attempts
- delay: Delay between retries in seconds
-
- Returns:
- Trace data dictionary or None if not found
- """
- for attempt in range(max_retries):
- try:
- response = authed_api()(
- method="GET", endpoint=f"/preview/tracing/traces/{trace_id}"
- )
- response.raise_for_status()
- trace_data = response.json()
-
- # print(trace_data)
-
- # Get the traces dictionary
- traces = trace_data.get("traces", {})
- if traces:
- # Get the first (and usually only) trace
- for trace_key, trace_content in traces.items():
- if (
- trace_content
- and "spans" in trace_content
- and trace_content["spans"]
- ):
- return trace_content
-
- # If no data yet, retry on next iteration
- if attempt < max_retries - 1:
- await asyncio.sleep(delay)
-
- except Exception as e:
- if attempt < max_retries - 1:
- await asyncio.sleep(delay)
- continue
- else:
- print(f"Error fetching trace data: {e}")
- return None
-
- print("Failed to fetch trace data after retries")
- return None
-
-
-def extract_trace_step_data(
- trace_data: Dict[str, Any], step_key: str
-) -> Optional[Dict[str, Any]]:
- """
- Extract step data from trace information.
-
- Args:
- trace_data: The complete trace data
- step_key: The step key to extract data for
-
- Returns:
- Step data dictionary or None if not found
- """
- if not trace_data:
- return None
-
- spans = trace_data.get("spans", {})
- if not spans or step_key not in spans:
- return None
-
- span_info = spans[step_key]
- # Extract the actual evaluation data using the correct data structure
- ag_data = span_info.get("attributes", {}).get("ag", {}).get("data", {})
-
- if not ag_data:
- return None
-
- # Try to extract evaluator/application name from span
- # The span_name field contains the workflow/evaluator name
- evaluator_name = span_info.get("span_name") or span_info.get("name")
-
- return {
- "inputs": ag_data.get("inputs", {}),
- "outputs": ag_data.get("outputs", {}),
- "trace_type": span_info.get("trace_type"),
- "evaluator_name": evaluator_name,
- "span_info": span_info,
- }
-
-
-def process_evaluator_outputs(
- case_data: Dict[str, Any],
- outputs: Dict[str, Any],
- evaluator_name: Optional[str] = None,
-) -> None:
- """
- Process evaluator outputs and handle multiple evaluators with same key names.
-
- Args:
- case_data: The case data to update
- outputs: The evaluator outputs to process
- evaluator_name: Optional evaluator identifier for labeling
- """
- # Handle multiple evaluators with same key names (like 'success', 'score')
- for key, value in outputs.items():
- # Label numeric scores by evaluator to distinguish between multiple evaluators
- display_key = key
-
- # If we have an evaluator name and this is a numeric value, prefix it
- if (
- evaluator_name
- and isinstance(value, (int, float))
- and not isinstance(value, bool)
- ):
- display_key = f"{evaluator_name}.{key}"
-
- # Store the value - if the key already exists, convert to list to preserve all values
- if display_key in case_data["evaluator_outputs"]:
- # Create lists for duplicate keys to preserve all values
- existing = case_data["evaluator_outputs"][display_key]
- if not isinstance(existing, list):
- case_data["evaluator_outputs"][display_key] = [existing]
- case_data["evaluator_outputs"][display_key].append(value)
- else:
- case_data["evaluator_outputs"][display_key] = value
-
-
-def clean_inputs_for_display(**kwargs) -> Dict[str, Any]:
- """
- Clean inputs by removing internal IDs and trace data for cleaner display.
-
- Args:
- inputs: Raw inputs dictionary
-
- Returns:
- Cleaned inputs dictionary with only user-facing testcase fields
- """
- inputs = kwargs.get("inputs")
- if inputs:
- # List of keys to exclude from display
- # - Internal IDs (ending with _id)
- # - Testcase internal fields (starting with testcase_)
- # - Trace data (the 'trace' key which contains the full trace structure)
- excluded_keys = {
- "revision",
- "parameters",
- "testcase",
- # "inputs",
- "trace",
- "outputs",
- }
-
- clean_inputs = {
- k: v
- for k, v in inputs.items()
- if not k.endswith("_id")
- and not k.startswith("testcase_")
- and k not in excluded_keys
- }
- return clean_inputs or inputs
- return inputs
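
The deleted display module above polls for traces with a fixed delay because traces are stored asynchronously after a run. A minimal standalone sketch of that retry shape, assuming only the standard library; the `fetch` callable is a stand-in for the SDK client, not part of the SDK API:

```python
import asyncio
from typing import Any, Awaitable, Callable, Optional

async def fetch_with_retries(
    fetch: Callable[[], Awaitable[Optional[Any]]],
    max_retries: int = 3,
    delay: float = 1.0,
) -> Optional[Any]:
    # Retry a flaky async fetch with a fixed delay between attempts,
    # mirroring the shape of fetch_trace_data above.
    for attempt in range(max_retries):
        try:
            result = await fetch()
            if result is not None:
                return result
        except Exception as exc:
            if attempt == max_retries - 1:
                print(f"Error fetching data: {exc}")
                return None
        if attempt < max_retries - 1:
            await asyncio.sleep(delay)
    return None
```
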
diff --git a/sdk/agenta/sdk/evaluations/results.py b/sdk/agenta/sdk/evaluations/results.py
deleted file mode 100644
index 56ab1b99cb..0000000000
--- a/sdk/agenta/sdk/evaluations/results.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from typing import Optional, Dict, Any
-from uuid import UUID
-
-from agenta.sdk.utils.client import authed_api
-from agenta.sdk.models.evaluations import EvaluationResult
-
-# TODO: ADD TYPES
-
-
-async def acreate(
- *,
- run_id: UUID,
- scenario_id: UUID,
- step_key: str,
- # repeat_idx: str,
- # timestamp: datetime,
- # interval: float,
- #
- testcase_id: Optional[UUID] = None,
- trace_id: Optional[str] = None,
- error: Optional[dict] = None,
- #
- flags: Optional[Dict[str, Any]] = None,
- tags: Optional[Dict[str, Any]] = None,
- meta: Optional[Dict[str, Any]] = None,
-) -> EvaluationResult:
- payload = dict(
- results=[
- dict(
- flags=flags,
- tags=tags,
- meta=meta,
- #
- testcase_id=str(testcase_id) if testcase_id else None,
- trace_id=trace_id,
- error=error,
- #
- # interval=interval,
- # timestamp=timestamp,
- # repeat_idx=repeat_idx,
- step_key=step_key,
- run_id=str(run_id),
- scenario_id=str(scenario_id),
- #
- status="success",
- )
- ]
- )
-
- response = authed_api()(
- method="POST",
- endpoint=f"/preview/evaluations/results/",
- json=payload,
- )
-
- try:
- response.raise_for_status()
- except:
- print(response.text)
- raise
-
- response = response.json()
-
- result = EvaluationResult(**response["results"][0])
-
- return result
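
The deleted `results.py` prints the response body before re-raising on HTTP errors. A hedged sketch of the same pattern with a narrower exception type, assuming the underlying client returns an `httpx.Response` (the actual return type of `authed_api` is not shown in this patch):

```python
import httpx

def raise_with_body(response: httpx.Response) -> None:
    # Surface the server's error payload before propagating the HTTP error,
    # rather than hiding it behind a bare `except:`.
    try:
        response.raise_for_status()
    except httpx.HTTPStatusError:
        print(response.text)
        raise
```
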
diff --git a/sdk/agenta/sdk/evaluations/runs.py b/sdk/agenta/sdk/evaluations/runs.py
deleted file mode 100644
index c4a12a40f1..0000000000
--- a/sdk/agenta/sdk/evaluations/runs.py
+++ /dev/null
@@ -1,153 +0,0 @@
-from typing import Optional, Dict, Any
-from uuid import UUID
-
-from agenta.sdk.utils.client import authed_api
-from agenta.sdk.models.evaluations import EvaluationRun, Target
-
-import agenta as ag
-
-# TODO: ADD TYPES
-
-
-async def afetch(
- *,
- run_id: UUID,
-) -> Optional[EvaluationRun]:
- response = authed_api()(
- method="GET",
- endpoint=f"/preview/evaluations/runs/{run_id}",
- )
-
- try:
- response.raise_for_status()
- except:
- print(response.text)
- raise
-
- response = response.json()
-
- if (not "count" in response) or (response["count"] == 0) or (not "run" in response):
- return None
-
- run = EvaluationRun(**response["run"])
-
- return run
-
-
-async def acreate(
- *,
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- flags: Optional[Dict[str, Any]] = None,
- tags: Optional[Dict[str, Any]] = None,
- meta: Optional[Dict[str, Any]] = None,
- #
- query_steps: Optional[Target] = None,
- testset_steps: Optional[Target] = None,
- application_steps: Optional[Target] = None,
- evaluator_steps: Optional[Target] = None,
- #
- repeats: Optional[int] = None,
-) -> Optional[EvaluationRun]:
- payload = dict(
- evaluation=dict(
- name=name,
- description=description,
- #
- flags=flags,
- tags=tags,
- meta=meta,
- #
- data=dict(
- status="running",
- query_steps=query_steps,
- testset_steps=testset_steps,
- application_steps=application_steps,
- evaluator_steps=evaluator_steps,
- repeats=repeats,
- ),
- #
- jit={"testsets": True, "evaluators": False},
- )
- )
-
- response = authed_api()(
- method="POST",
- endpoint=f"/preview/simple/evaluations/",
- json=payload,
- )
-
- try:
- response.raise_for_status()
- except:
- print(response.text)
- raise
-
- response = response.json()
-
- if (not "evaluation" in response) or (not "id" in response["evaluation"]):
- return None
-
- run_id = UUID(response["evaluation"]["id"])
-
- return await afetch(run_id=run_id)
-
-
-async def aclose(
- *,
- run_id: UUID,
- #
- status: Optional[str] = "success",
-) -> Optional[EvaluationRun]:
- response = authed_api()(
- method="POST",
- endpoint=f"/preview/evaluations/runs/{run_id}/close/{status}",
- )
-
- try:
- response.raise_for_status()
- except:
- print(response.text)
- raise
-
- response = response.json()
-
- if (not "run" in response) or (not "id" in response["run"]):
- return None
-
- run_id = UUID(response["run"]["id"])
-
- return await afetch(run_id=run_id)
-
-
-async def aurl(
- *,
- run_id: UUID,
-) -> str:
- response = authed_api()(
- method="GET",
- endpoint=f"/projects",
- params={"scope": "project"},
- )
-
- try:
- response.raise_for_status()
- except:
- print(response.text)
- raise
-
- if len(response.json()) != 1:
- return None
-
- project_info = response.json()[0]
-
- workspace_id = project_info.get("workspace_id")
- project_id = project_info.get("project_id")
-
- return (
- f"{ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host}"
- f"/w/{workspace_id}"
- f"/p/{project_id}"
- f"/evaluations/results/{run_id}"
- )
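
The deleted `aurl` composes the dashboard link from the workspace and project IDs of the single visible project. A minimal sketch of that path scheme, with illustrative values:

```python
def build_results_url(host: str, workspace_id: str, project_id: str, run_id: str) -> str:
    # Same path scheme as aurl: /w/<workspace>/p/<project>/evaluations/results/<run>.
    return f"{host}/w/{workspace_id}/p/{project_id}/evaluations/results/{run_id}"

print(build_results_url("https://cloud.agenta.ai", "ws-123", "prj-456", "run-789"))
```
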
diff --git a/sdk/agenta/sdk/evaluations/scenarios.py b/sdk/agenta/sdk/evaluations/scenarios.py
deleted file mode 100644
index 98c9c47e1f..0000000000
--- a/sdk/agenta/sdk/evaluations/scenarios.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from typing import Optional, Dict, Any
-from uuid import UUID
-
-from agenta.sdk.utils.client import authed_api
-from agenta.sdk.models.evaluations import EvaluationScenario
-
-# TODO: ADD TYPES
-
-
-async def acreate(
- *,
- run_id: UUID,
- #
- flags: Optional[Dict[str, Any]] = None,
- tags: Optional[Dict[str, Any]] = None,
- meta: Optional[Dict[str, Any]] = None,
-) -> EvaluationScenario:
- payload = dict(
- scenarios=[
- dict(
- flags=flags,
- tags=tags,
- meta=meta,
- #
- run_id=str(run_id),
- #
- status="success",
- )
- ]
- )
-
- response = authed_api()(
- method="POST",
- endpoint=f"/preview/evaluations/scenarios/",
- json=payload,
- )
-
- try:
- response.raise_for_status()
- except:
- print(response.text)
- raise
-
- response = response.json()
-
- scenario = EvaluationScenario(**response["scenarios"][0])
-
- return scenario
diff --git a/sdk/agenta/sdk/litellm/mockllm.py b/sdk/agenta/sdk/litellm/mockllm.py
index 01b9fecff2..e0acf13307 100644
--- a/sdk/agenta/sdk/litellm/mockllm.py
+++ b/sdk/agenta/sdk/litellm/mockllm.py
@@ -2,12 +2,10 @@
from os import environ
from contextlib import contextmanager
-import litellm
-
from agenta.sdk.utils.logging import get_module_logger
from agenta.sdk.litellm.mocks import MOCKS
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
AGENTA_LITELLM_MOCK = environ.get("AGENTA_LITELLM_MOCK") or None
@@ -71,11 +69,14 @@ async def acompletion(self, *args: Any, **kwargs: Any) -> Any:
...
+litellm: Optional[LitellmProtocol] = None # pylint: disable=invalid-name
+
+
async def acompletion(*args, **kwargs):
- mock = AGENTA_LITELLM_MOCK or RoutingContext.get().mock
+ mock = AGENTA_LITELLM_MOCK or serving_context.get().mock
if mock:
- # log.debug("Mocking litellm: %s.", mock)
+ log.debug("Mocking litellm: %s.", mock)
if mock not in MOCKS:
mock = "hello"
diff --git a/sdk/agenta/sdk/litellm/mocks/__init__.py b/sdk/agenta/sdk/litellm/mocks/__init__.py
index 59f3947c8b..9d60694428 100644
--- a/sdk/agenta/sdk/litellm/mocks/__init__.py
+++ b/sdk/agenta/sdk/litellm/mocks/__init__.py
@@ -3,7 +3,7 @@
from pydantic import BaseModel
-from agenta.sdk.decorators.tracing import instrument
+import agenta as ag
class MockMessageModel(BaseModel):
@@ -18,7 +18,7 @@ class MockResponseModel(BaseModel):
choices: list[MockChoiceModel]
-@instrument()
+@ag.instrument()
def hello_mock_response(*args, **kwargs) -> MockResponseModel:
return MockResponseModel(
choices=[
@@ -31,7 +31,7 @@ def hello_mock_response(*args, **kwargs) -> MockResponseModel:
)
-@instrument()
+@ag.instrument()
def chat_mock_response(*args, **kwargs) -> MockResponseModel:
return MockResponseModel(
choices=[
@@ -45,7 +45,7 @@ def chat_mock_response(*args, **kwargs) -> MockResponseModel:
)
-@instrument()
+@ag.instrument()
def delay_mock_response(*args, **kwargs) -> MockResponseModel:
sleep(2)
@@ -60,7 +60,7 @@ def delay_mock_response(*args, **kwargs) -> MockResponseModel:
)
-@instrument()
+@ag.instrument()
def capital_mock_response(*args, **kwargs) -> MockResponseModel:
country = kwargs.get("messages", [{}, {}])[1].get(
"content", "What is the capital of _____?"
diff --git a/sdk/agenta/sdk/managers/applications.py b/sdk/agenta/sdk/managers/applications.py
deleted file mode 100644
index a5600bc3d8..0000000000
--- a/sdk/agenta/sdk/managers/applications.py
+++ /dev/null
@@ -1,304 +0,0 @@
-from typing import Dict, Any, Callable, Optional
-from uuid import uuid4, UUID
-
-from agenta.sdk.utils.client import authed_api
-from agenta.sdk.decorators.running import auto_workflow, is_workflow
-from agenta.sdk.models.workflows import (
- ApplicationRevision,
- #
- ApplicationRevisionResponse,
- #
- LegacyApplicationFlags,
- LegacyApplicationData,
- LegacyApplicationCreate,
- LegacyApplicationEdit,
- #
- LegacyApplicationResponse,
- #
- Reference,
-)
-
-from agenta.sdk.utils.references import get_slug_from_name_and_id
-
-
-async def _retrieve_application(
- application_id: Optional[UUID] = None,
- application_slug: Optional[str] = None,
- application_revision_id: Optional[UUID] = None,
- application_revision_slug: Optional[str] = None,
-) -> Optional[ApplicationRevision]:
- payload = {
- "application_ref": (
- {
- "id": str(application_id) if application_id else None,
- "slug": str(application_slug),
- }
- if application_id or application_slug
- else None
- ),
- "application_revision_ref": (
- {
- "id": (
- str(application_revision_id) if application_revision_id else None
- ),
- "slug": application_revision_slug,
- }
- if application_revision_id or application_revision_slug
- else None
- ),
- }
-
- # print(" --- payload:", payload)
-
- response = authed_api()(
- method="POST",
- endpoint=f"/preview/legacy/applications/revisions/retrieve",
- json=payload,
- )
- response.raise_for_status()
-
- application_revision_response = ApplicationRevisionResponse(**response.json())
-
- application_revision = application_revision_response.application_revision
-
- # print(" --- application_revision:", application_revision)
-
- return application_revision
-
-
-async def aretrieve(
- application_revision_id: Optional[UUID] = None,
-) -> Optional[ApplicationRevision]:
- # print("\n--------- RETRIEVE APPLICATION")
-
- response = await _retrieve_application(
- application_revision_id=application_revision_id,
- )
-
- return response
-
-
-async def aupsert(
- *,
- application_id: Optional[UUID] = None,
- application_slug: Optional[str] = None,
- application_revision_id: Optional[UUID] = None,
- application_revision_slug: Optional[str] = None,
- #
- handler: Callable,
- script: Optional[str] = None,
- parameters: Optional[Dict[str, Any]] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
-) -> Optional[UUID]:
- # print("\n--------- UPSERT APPLICATION")
- try:
- if not is_workflow(handler):
- application_workflow = auto_workflow(
- handler,
- #
- script=script,
- parameters=parameters,
- #
- name=name,
- description=description,
- )
- else:
- application_workflow = handler
-
- req = await application_workflow.inspect()
-
- legacy_application_flags = LegacyApplicationFlags(**req.flags)
-
- legacy_application_data = LegacyApplicationData(
- **(
- req.interface.model_dump(mode="json", exclude_none=True)
- if req and req.interface
- else {}
- ),
- **(
- req.configuration.model_dump(mode="json", exclude_none=True)
- if req and req.configuration
- else {}
- ),
- )
-
- # print(
- # " ---:", legacy_application_data.model_dump(mode="json", exclude_none=True)
- # )
-
- retrieve_response = None
-
- if req.references is not None:
- _application_revision_ref = req.references.get("application_revision", {})
- if isinstance(_application_revision_ref, Reference):
- _application_revision_ref = _application_revision_ref.model_dump(
- mode="json",
- exclude_none=True,
- )
- if not isinstance(_application_revision_ref, dict):
- _application_revision_ref = {}
- _application_revision_id = _application_revision_ref.get("id")
- _application_revision_slug = _application_revision_ref.get("slug")
-
- application_revision_id = (
- application_revision_id or _application_revision_id
- )
- application_revision_slug = (
- application_revision_slug or _application_revision_slug
- )
-
- _application_ref = req.references.get("application", {})
- if isinstance(_application_ref, Reference):
- _application_ref = _application_ref.model_dump(
- mode="json",
- exclude_none=True,
- )
- if not isinstance(_application_ref, dict):
- _application_ref = {}
- _application_id = _application_ref.get("id")
- _application_slug = _application_ref.get("slug")
-
- application_id = application_id or _application_id
- application_slug = application_slug or _application_slug
-
- revision = req.data.revision if req and req.data else None
- if revision:
- name = name or revision.get("name")
- description = description or revision.get("description")
-
- name = (
- name or req.data.revision.get("name")
- if req and req.data and req.data.revision
- else None
- )
-
- description = (
- description or req.data.revision.get("description")
- if req and req.data and req.data.revision
- else None
- )
-
- application_slug = (
- application_slug
- or get_slug_from_name_and_id(
- name=name,
- id=application_id or uuid4(),
- )
- if name
- else uuid4().hex[-12:]
- )
-
- # print(
- # application_id,
- # application_slug,
- # application_revision_id,
- # application_revision_slug,
- # )
-
- if application_revision_id or application_revision_slug:
- retrieve_response = await _retrieve_application(
- application_revision_id=application_revision_id,
- application_revision_slug=application_revision_slug,
- )
- elif application_id or application_slug:
- retrieve_response = await _retrieve_application(
- application_id=application_id,
- application_slug=application_slug,
- )
-
- except Exception as e:
- print("[ERROR]: Failed to prepare application:", e)
- return None
-
- # print("Retrieve response:", retrieve_response)
-
- if retrieve_response and retrieve_response.id and retrieve_response.application_id:
- application_id = retrieve_response.application_id
- # print(" --- Updating application...", application_id)
- application_edit_request = LegacyApplicationEdit(
- id=application_id,
- #
- name=name,
- description=description,
- #
- flags=legacy_application_flags,
- #
- data=legacy_application_data,
- )
-
- # print(" --- application_edit_request:", application_edit_request)
-
- response = authed_api()(
- method="PUT",
- endpoint=f"/preview/legacy/applications/{application_id}",
- json={
- "application": application_edit_request.model_dump(
- mode="json",
- exclude_none=True,
- )
- },
- )
-
- # print(" --- response:", response.status_code, response.text)
-
- try:
- response.raise_for_status()
- except Exception as e:
- print("[ERROR]: Failed to update application:", e)
- return None
-
- else:
- # print(" --- Creating application...")
- application_create_request = LegacyApplicationCreate(
- slug=application_slug or uuid4().hex[-12:],
- #
- name=name,
- description=description,
- #
- flags=legacy_application_flags,
- #
- data=legacy_application_data,
- )
-
- # print(" --- application_create_request:", application_create_request)
-
- response = authed_api()(
- method="POST",
- endpoint="/preview/legacy/applications/",
- json={
- "application": application_create_request.model_dump(
- mode="json",
- exclude_none=True,
- )
- },
- )
-
- # print(" --- response:", response.status_code, response.text)
-
- try:
- response.raise_for_status()
- except Exception as e:
- print("[ERROR]: Failed to create application:", e)
- return None
-
- application_response = LegacyApplicationResponse(**response.json())
-
- application = application_response.application
-
- if not application or not application.id:
- return None
-
- # print(" --- application:", application)
-
- application_revision = await _retrieve_application(
- application_id=application.id,
- )
-
- if not application_revision or not application_revision.id:
- return None
-
- # print(application_revision, "----------")
-
- return application_revision.id
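
The deleted `applications.py` implements upsert as retrieve-then-edit-or-create: resolve references, try to fetch an existing revision, then either PUT an edit or POST a create. A compact sketch of that control flow, with `retrieve`, `edit`, and `create` as assumed stand-ins for the legacy endpoints:

```python
from typing import Any, Awaitable, Callable, Dict, Optional

async def upsert(
    slug: str,
    data: Dict[str, Any],
    retrieve: Callable[..., Awaitable[Optional[Dict[str, Any]]]],
    edit: Callable[..., Awaitable[Optional[str]]],
    create: Callable[..., Awaitable[Optional[str]]],
) -> Optional[str]:
    # Edit in place when the resource already exists; otherwise create it.
    existing = await retrieve(slug=slug)
    if existing is not None:
        return await edit(id=existing["id"], data=data)
    return await create(slug=slug, data=data)
```
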
diff --git a/sdk/agenta/sdk/managers/config.py b/sdk/agenta/sdk/managers/config.py
index 7e2513a74b..9b6420a41c 100644
--- a/sdk/agenta/sdk/managers/config.py
+++ b/sdk/agenta/sdk/managers/config.py
@@ -7,7 +7,7 @@
from agenta.sdk.utils.logging import get_module_logger
from agenta.sdk.managers.shared import SharedManager
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
T = TypeVar("T", bound=BaseModel)
@@ -45,7 +45,7 @@ def get_from_route(
Only one of these should be provided.
"""
- context = RoutingContext.get()
+ context = serving_context.get()
parameters = context.parameters
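
Across these hunks, `RoutingContext.get()` is replaced by `serving_context.get()`, i.e. per-request state now lives in a context variable rather than a class-based context. A minimal sketch of that pattern; the field names here are illustrative:

```python
from contextvars import ContextVar
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

@dataclass
class ServingContext:
    parameters: Optional[Dict[str, Any]] = None
    secrets: Optional[List[Dict[str, Any]]] = None

serving_context: ContextVar[ServingContext] = ContextVar(
    "serving_context", default=ServingContext()
)

def get_parameters() -> Optional[Dict[str, Any]]:
    # Read whatever parameters the middleware chain stored for this request.
    return serving_context.get().parameters
```
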
diff --git a/sdk/agenta/sdk/managers/evaluations.py b/sdk/agenta/sdk/managers/evaluations.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sdk/agenta/sdk/managers/evaluators.py b/sdk/agenta/sdk/managers/evaluators.py
deleted file mode 100644
index c948e23da4..0000000000
--- a/sdk/agenta/sdk/managers/evaluators.py
+++ /dev/null
@@ -1,303 +0,0 @@
-from typing import Dict, Any, Callable, Optional
-from uuid import uuid4, UUID
-from traceback import print_exc
-
-from agenta.sdk.utils.client import authed_api
-from agenta.sdk.decorators.running import auto_workflow, is_workflow
-from agenta.sdk.models.workflows import (
- EvaluatorRevision,
- #
- EvaluatorRevisionResponse,
- #
- SimpleEvaluatorFlags,
- SimpleEvaluatorData,
- SimpleEvaluatorCreate,
- SimpleEvaluatorEdit,
- #
- SimpleEvaluatorResponse,
- #
- Reference,
-)
-
-from agenta.sdk.utils.references import get_slug_from_name_and_id
-
-
-async def _retrieve_evaluator(
- evaluator_id: Optional[UUID] = None,
- evaluator_slug: Optional[str] = None,
- evaluator_revision_id: Optional[UUID] = None,
- evaluator_revision_slug: Optional[str] = None,
-) -> Optional[EvaluatorRevision]:
- payload = {
- "evaluator_ref": (
- {
- "id": str(evaluator_id) if evaluator_id else None,
- "slug": str(evaluator_slug),
- }
- if evaluator_id or evaluator_slug
- else None
- ),
- "evaluator_revision_ref": (
- {
- "id": str(evaluator_revision_id) if evaluator_revision_id else None,
- "slug": evaluator_revision_slug,
- }
- if evaluator_revision_id or evaluator_revision_slug
- else None
- ),
- }
-
- # print(" --- payload:", payload)
-
- response = authed_api()(
- method="POST",
- endpoint=f"/preview/evaluators/revisions/retrieve",
- json=payload,
- )
-
- response.raise_for_status()
-
- evaluator_revision_response = EvaluatorRevisionResponse(**response.json())
-
- evaluator_revision = evaluator_revision_response.evaluator_revision
-
- # print(" --- evaluator_revision:", evaluator_revision)
-
- return evaluator_revision
-
-
-async def aretrieve(
- evaluator_revision_id: Optional[UUID] = None,
-) -> Optional[EvaluatorRevision]:
- # print("\n--------- RETRIEVE EVALUATOR")
- response = await _retrieve_evaluator(
- evaluator_revision_id=evaluator_revision_id,
- )
-
- return response
-
-
-async def aupsert(
- *,
- evaluator_id: Optional[UUID] = None,
- evaluator_slug: Optional[str] = None,
- evaluator_revision_id: Optional[UUID] = None,
- evaluator_revision_slug: Optional[str] = None,
- #
- handler: Callable,
- script: Optional[str] = None,
- parameters: Optional[Dict[str, Any]] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
-) -> Optional[UUID]:
- # print("\n--------- UPSERT EVALUATOR")
- try:
- if not is_workflow(handler):
- evaluator_workflow = auto_workflow(
- handler,
- #
- script=script,
- parameters=parameters,
- #
- name=name,
- description=description,
- )
- else:
- evaluator_workflow = handler
-
- req = await evaluator_workflow.inspect()
-
- legacy_application_flags = SimpleEvaluatorFlags(**req.flags)
-
- simple_evaluator_data = SimpleEvaluatorData(
- **(
- req.interface.model_dump(mode="json", exclude_none=True)
- if req and req.interface
- else {}
- ),
- **(
- req.configuration.model_dump(mode="json", exclude_none=True)
- if req and req.configuration
- else {}
- ),
- )
- # print(" ---:", simple_evaluator_data.model_dump(mode="json", exclude_none=True))
-
- retrieve_response = None
-
- if req.references is not None:
- _evaluator_revision_ref = req.references.get("evaluator_revision", {})
- if isinstance(_evaluator_revision_ref, Reference):
- _evaluator_revision_ref = _evaluator_revision_ref.model_dump(
- mode="json",
- exclude_none=True,
- )
- if not isinstance(_evaluator_revision_ref, dict):
- _evaluator_revision_ref = {}
-
- _evaluator_revision_id = _evaluator_revision_ref.get("id")
- _evaluator_revision_slug = _evaluator_revision_ref.get("slug")
-
- evaluator_revision_id = evaluator_revision_id or _evaluator_revision_id
- evaluator_revision_slug = (
- evaluator_revision_slug or _evaluator_revision_slug
- )
-
- _evaluator_ref = req.references.get("evaluator", {})
- if isinstance(_evaluator_ref, Reference):
- _evaluator_ref = _evaluator_ref.model_dump(
- mode="json",
- exclude_none=True,
- )
- if not isinstance(_evaluator_ref, dict):
- _evaluator_ref = {}
-
- _evaluator_id = _evaluator_ref.get("id")
- _evaluator_slug = _evaluator_ref.get("slug")
-
- evaluator_id = evaluator_id or _evaluator_id
- evaluator_slug = evaluator_slug or _evaluator_slug
-
- revision = req.data.revision if req and req.data else None
- if revision:
- name = name or revision.get("name")
- description = description or revision.get("description")
-
- name = (
- name or req.data.revision.get("name")
- if req and req.data and req.data.revision
- else None
- )
-
- description = (
- description or req.data.revision.get("description")
- if req and req.data and req.data.revision
- else None
- )
-
- evaluator_slug = (
- evaluator_slug
- or get_slug_from_name_and_id(
- name=name,
- id=evaluator_id or uuid4(),
- )
- if name
- else uuid4().hex[-12:]
- )
-
- # print(
- # evaluator_id,
- # evaluator_slug,
- # evaluator_revision_id,
- # evaluator_revision_slug,
- # )
-
- if evaluator_revision_id or evaluator_revision_slug:
- retrieve_response = await _retrieve_evaluator(
- evaluator_revision_id=evaluator_revision_id,
- evaluator_revision_slug=evaluator_revision_slug,
- )
- elif evaluator_id or evaluator_slug:
- retrieve_response = await _retrieve_evaluator(
- evaluator_id=evaluator_id,
- evaluator_slug=evaluator_slug,
- )
-
- except Exception as e:
- print("[ERROR]: Failed to prepare evaluator:")
- print_exc()
- return None
-
- # print("Retrieve response:", retrieve_response)
-
- if retrieve_response and retrieve_response.id and retrieve_response.evaluator_id:
- evaluator_id = retrieve_response.evaluator_id
- # print(" --- Updating evaluator...", evaluator_id)
- evaluator_edit_request = SimpleEvaluatorEdit(
- id=evaluator_id,
- #
- name=name,
- description=description,
- #
- flags=legacy_application_flags,
- #
- data=simple_evaluator_data,
- )
-
- # print(" --- evaluator_edit_request:", evaluator_edit_request)
-
- response = authed_api()(
- method="PUT",
- endpoint=f"/preview/simple/evaluators/{evaluator_id}",
- json={
- "evaluator": evaluator_edit_request.model_dump(
- mode="json",
- exclude_none=True,
- )
- },
- )
-
- # print(" --- response:", response.status_code, response.text)
-
- try:
- response.raise_for_status()
- except Exception as e:
- print("[ERROR]: Failed to update evaluator:", e)
- print_exc()
- return None
-
- else:
- # print(" --- Creating evaluator...")
- evaluator_create_request = SimpleEvaluatorCreate(
- slug=evaluator_slug or uuid4().hex[-12:],
- #
- name=name,
- description=description,
- #
- flags=legacy_application_flags,
- #
- data=simple_evaluator_data,
- )
-
- # print(" --- evaluator_create_request:", evaluator_create_request)
-
- response = authed_api()(
- method="POST",
- endpoint="/preview/simple/evaluators/",
- json={
- "evaluator": evaluator_create_request.model_dump(
- mode="json",
- exclude_none=True,
- )
- },
- )
-
- # print(" --- response:", response.status_code, response.text)
-
- try:
- response.raise_for_status()
- except Exception as e:
- print("[ERROR]: Failed to create evaluator:", e)
- print_exc()
- return None
-
- evaluator_response = SimpleEvaluatorResponse(**response.json())
-
- evaluator = evaluator_response.evaluator
-
- if not evaluator or not evaluator.id:
- return None
-
- # print(" --- evaluator:", evaluator)
-
- evaluator_revision = await _retrieve_evaluator(
- evaluator_id=evaluator.id,
- )
-
- if not evaluator_revision or not evaluator_revision.id:
- return None
-
- # print(evaluator_revision, "----------")
-
- return evaluator_revision.id
diff --git a/sdk/agenta/sdk/managers/secrets.py b/sdk/agenta/sdk/managers/secrets.py
index 1e3344e9cf..299a3d4af1 100644
--- a/sdk/agenta/sdk/managers/secrets.py
+++ b/sdk/agenta/sdk/managers/secrets.py
@@ -1,24 +1,19 @@
import re
from typing import Optional, Dict, Any, List
-from agenta.sdk.contexts.routing import RoutingContext
-from agenta.sdk.contexts.running import RunningContext
+from agenta.sdk.context.serving import serving_context
from agenta.sdk.assets import model_to_provider_mapping as _standard_providers
-from agenta.sdk.middlewares.running.vault import get_secrets
-
-import agenta as ag
-
class SecretsManager:
@staticmethod
def get_from_route() -> Optional[List[Dict[str, Any]]]:
- context = RoutingContext.get()
+ context = serving_context.get()
secrets = context.secrets
if not secrets:
- return []
+ return None
return secrets
@@ -124,17 +119,11 @@ def _get_compatible_model(*, model: str, provider_slug: str):
# The reason is that custom providers are in fact openai compatible providers
# They need to be passed in litellm as openai/modelname
- modified_model = model
-
- if "custom" in modified_model:
- modified_model = modified_model.replace(
- f"{provider_slug}/custom/", "openai/"
- )
-
- if provider_slug:
- modified_model = modified_model.replace(f"{provider_slug}/", "")
+ if "custom" in model:
+ modified_model = model.replace(f"{provider_slug}/custom/", "openai/")
+ return modified_model.replace(f"{provider_slug}/", "")
- return modified_model
+ return model.replace(f"{provider_slug}/", "")
@staticmethod
def get_provider_settings(model: str) -> Optional[Dict]:
@@ -148,12 +137,10 @@ def get_provider_settings(model: str) -> Optional[Dict]:
Dict: A dictionary containing all parameters needed for litellm.completion
"""
- request_provider_model = model
-
# STEP 1: get vault secrets from route context and transform it
secrets = SecretsManager.get_from_route()
if not secrets:
- return []
+ return None
# STEP 1b: Parse secrets into usable format
secrets = SecretsManager._parse_secrets(secrets=secrets)
@@ -213,108 +200,3 @@ def get_provider_settings(model: str) -> Optional[Dict]:
continue
return provider_settings
-
- @staticmethod
- async def retrieve_secrets():
- return await get_secrets(
- f"{ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host}/api",
- RunningContext.get().credentials,
- )
-
- @staticmethod
- async def ensure_secrets_in_workflow():
- ctx = RunningContext.get()
-
- ctx.secrets = await SecretsManager.retrieve_secrets()
-
- RunningContext.set(ctx)
-
- return ctx.secrets
-
- @staticmethod
- def get_provider_settings_from_workflow(model: str) -> Optional[Dict]:
- """
- Builds the LLM request with appropriate kwargs based on the custom provider/model
-
- Args:
- model (str): The name of the model
-
- Returns:
- Dict: A dictionary containing all parameters needed for litellm.completion
- """
-
- request_provider_model = model
-
- # STEP 1: get vault secrets from route context and transform it
- secrets = RunningContext.get().secrets
- if not secrets:
- return []
-
- # STEP 1b: Parse secrets into usable format
- secrets = SecretsManager._parse_secrets(secrets=secrets)
-
- # STEP 2: check model exists in supported standard models
- provider = _standard_providers.get(request_provider_model)
- if not provider:
- # check and get provider kind if model exists in custom provider models
- provider = SecretsManager._custom_providers_get(
- model=request_provider_model,
- secrets=secrets,
- )
-
- # STEP 2b: return None in the case provider is None
- if not provider:
- return None
-
- # STEP 2c: get litellm compatible model
- request_provider_slug = (
- SecretsManager._custom_provider_slug_get(
- model=request_provider_model, secrets=secrets
- )
- or ""
- )
- compatible_provider_model = SecretsManager._get_compatible_model(
- model=request_provider_model, provider_slug=request_provider_slug
- )
-
- # STEP 3: initialize provider settings and simplify provider name
- provider_settings = dict(model=compatible_provider_model)
- request_provider_kind = re.sub(
- r"[\s-]+", "", provider.lower()
- ) # normalizing other special characters too (azure-openai)
-
- # STEP 4: get credentials for model
- for secret in secrets:
- secret_data = secret.get("data", {})
- provider_info = secret_data.get("provider", {})
-
- # i). Extract API key if present
- # (for standard models -- openai/anthropic/gemini, etc)
- if secret.get("kind") == "provider_key":
- secret_provider_kind = secret_data.get("kind", "")
-
- if request_provider_kind == secret_provider_kind:
- if "key" in provider_info:
- provider_settings["api_key"] = provider_info["key"]
- continue
-
- # ii). Extract Credentials if present
- # (for custom providers -- aws bedrock/sagemaker, vertex_ai, etc)
- elif secret.get("kind") == "custom_provider":
- secret_provider_kind = (
- provider_info.get("kind", "").lower().replace(" ", "")
- )
- secret_provider_slug = secret_data.get("provider_slug", "")
- secret_provider_models = secret_data.get("models", "")
- secret_provider_extras = provider_info.get("extras", {})
-
- if (
- request_provider_kind == secret_provider_kind
- and request_provider_slug == secret_provider_slug
- and request_provider_model in secret_provider_models
- ):
- if secret_provider_extras:
- provider_settings.update(secret_provider_extras)
- continue
-
- return provider_settings
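
The reworked `_get_compatible_model` rewrites custom-provider models to the `openai/` prefix (custom providers are OpenAI-compatible) and otherwise strips the provider slug. A standalone sketch of that rewrite, with illustrative slugs and model names:

```python
def get_compatible_model(model: str, provider_slug: str) -> str:
    # "slug/custom/name" becomes "openai/name"; otherwise the slug prefix is dropped.
    if "custom" in model:
        modified = model.replace(f"{provider_slug}/custom/", "openai/")
        return modified.replace(f"{provider_slug}/", "")
    return model.replace(f"{provider_slug}/", "")

assert get_compatible_model("myslug/custom/gpt-4o", "myslug") == "openai/gpt-4o"
assert get_compatible_model("myslug/gpt-4o", "myslug") == "gpt-4o"
```
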
diff --git a/sdk/agenta/sdk/managers/testsets.py b/sdk/agenta/sdk/managers/testsets.py
deleted file mode 100644
index c10cb0f49e..0000000000
--- a/sdk/agenta/sdk/managers/testsets.py
+++ /dev/null
@@ -1,441 +0,0 @@
-from typing import List, Dict, Any, Optional
-from uuid import UUID
-
-from agenta.sdk.utils.client import authed_api
-from agenta.sdk.utils.references import get_slug_from_name_and_id
-from agenta.sdk.models.testsets import (
- LegacyTestset,
- #
- Testcase,
- TestsetRevisionData,
- TestsetRevision,
- #
- TestsetRevisionResponse,
-)
-
-
-async def _create_legacy_testset(
- *,
- csvdata: List[Dict[str, Any]],
- name: str,
- testset_id: Optional[UUID] = None,
-) -> Optional[TestsetRevision]:
- response = authed_api()(
- method="POST",
- endpoint="/testsets/",
- json={
- "testset_id": str(testset_id) if testset_id else None,
- "name": name,
- "csvdata": csvdata,
- },
- )
-
- if response.status_code != 200:
- print("Failed to create testset:", response.status_code, response.text)
- return None
-
- legacy_testset = LegacyTestset(**response.json())
-
- # print(" --- legacy_testset:", legacy_testset)
-
- if not legacy_testset.id or not legacy_testset.name:
- return None
-
- testset_revision = TestsetRevision(
- id=UUID(legacy_testset.id),
- slug=get_slug_from_name_and_id(
- name=legacy_testset.name,
- id=UUID(legacy_testset.id),
- ),
- name=legacy_testset.name,
- data=TestsetRevisionData(
- testcases=[
- Testcase(
- data=testcase_data,
- testset_id=UUID(legacy_testset.id),
- )
- for testcase_data in csvdata
- ]
- ),
- )
-
- # print(" --- testset_revision:", testset_revision)
-
- return testset_revision
-
-
-async def _fetch_legacy_testset(
- testset_id: Optional[UUID] = None,
- #
- name: Optional[str] = None,
-) -> Optional[TestsetRevision]:
- legacy_testset = None
-
- if testset_id:
- response = authed_api()(
- method="GET",
- endpoint=f"/testsets/{testset_id}",
- )
-
- if response.status_code != 200:
- if response.status_code != 404:
- print("Failed to fetch testset:", response.status_code, response.text)
- return None
-
- legacy_testset = LegacyTestset(**response.json())
- elif name:
- response = authed_api()(
- method="GET",
- endpoint="/testsets/",
- params={"name": name},
- )
-
- if response.status_code != 200:
- print("Failed to list testsets:", response.status_code, response.text)
- return None
-
- _testsets = response.json()
-
- for testset in _testsets:
- _id = testset.pop("_id", None)
- testset["id"] = _id
-
- legacy_testsets = [LegacyTestset(**testset) for testset in _testsets]
-
- if len(legacy_testsets) != 1:
- print("Expected exactly one testset with name:", name)
- return None
-
- legacy_testset = legacy_testsets[0]
-
- # print(" --- legacy_testset:", legacy_testset)
-
- if not legacy_testset.id or not legacy_testset.name:
- return None
-
- testset_revision = TestsetRevision(
- testset_id=UUID(legacy_testset.id),
- slug=get_slug_from_name_and_id(
- name=legacy_testset.name,
- id=UUID(legacy_testset.id),
- ),
- name=legacy_testset.name,
- data=(
- TestsetRevisionData(
- testcases=[
- Testcase(
- data=testcase_data,
- testset_id=UUID(legacy_testset.id),
- )
- for testcase_data in legacy_testset.csvdata
- ]
- )
- if legacy_testset.csvdata
- else None
- ),
- )
-
- # print(" --- testset_revision:", testset_revision)
-
- return testset_revision
-
-
-async def _edit_legacy_testset(
- *,
- testset_id: UUID,
- csvdata: List[Dict[str, Any]],
- name: Optional[str] = None,
-) -> Optional[TestsetRevision]:
- response = authed_api()(
- method="PUT",
- endpoint=f"/testsets/{testset_id}",
- json={
- "name": name,
- "csvdata": csvdata,
- },
- )
-
- if response.status_code != 200:
- print("Failed to edit testset:", response.status_code, response.text)
- return None
-
- response = authed_api()(
- method="GET",
- endpoint=f"/testsets/{testset_id}",
- )
-
- legacy_testset = LegacyTestset(**response.json())
-
- # print(" --- legacy_testset:", legacy_testset)
-
- if not legacy_testset.id or not legacy_testset.name:
- return None
-
- testset_revision = TestsetRevision(
- id=UUID(legacy_testset.id),
- slug=get_slug_from_name_and_id(
- name=legacy_testset.name,
- id=UUID(legacy_testset.id),
- ),
- name=legacy_testset.name,
- data=(
- TestsetRevisionData(
- testcases=[
- Testcase(
- data=testcase_data,
- testset_id=UUID(legacy_testset.id),
- )
- for testcase_data in legacy_testset.csvdata
- ]
- )
- if legacy_testset.csvdata
- else None
- ),
- )
-
- # print(" --- testset_revision:", testset_revision)
-
- return testset_revision
-
-
-async def _list_legacy_testsets(
- #
-) -> List[TestsetRevision]:
- response = authed_api()(
- method="GET",
- endpoint="/testsets/",
- )
-
- if response.status_code != 200:
- print("Failed to list testsets:", response.status_code, response.text)
- return []
-
- legacy_testsets = [LegacyTestset(**testset) for testset in response.json()]
-
- # print(" --- legacy_testsets:", legacy_testsets)
-
- testset_revisions = [
- TestsetRevision(
- id=UUID(legacy_testset.id),
- slug=get_slug_from_name_and_id(
- name=legacy_testset.name,
- id=UUID(legacy_testset.id),
- ),
- name=legacy_testset.name,
- data=(
- TestsetRevisionData(
- testcases=[
- Testcase(
- data=testcase_data,
- testset_id=UUID(legacy_testset.id),
- )
- for testcase_data in legacy_testset.csvdata
- ]
- )
- if legacy_testset.csvdata
- else None
- ),
- )
- for legacy_testset in legacy_testsets
- if legacy_testset.id and legacy_testset.name
- ]
-
- # print(" --- testset_revisions:", testset_revisions)
-
- return testset_revisions
-
-
-async def _retrieve_testset(
- testset_id: Optional[UUID] = None,
- testset_revision_id: Optional[UUID] = None,
-) -> Optional[TestsetRevision]:
- payload = {
- "testset_ref": (
- {
- "id": str(testset_id) if testset_id else None,
- }
- if testset_id
- else None
- ),
- "testset_revision_ref": (
- {
- "id": str(testset_revision_id) if testset_revision_id else None,
- }
- if testset_revision_id
- else None
- ),
- }
-
- # print(" --- payload:", payload)
-
- response = authed_api()(
- method="POST",
- endpoint="/preview/testsets/revisions/retrieve",
- json=payload,
- )
- response.raise_for_status()
-
- testset_revision_response = TestsetRevisionResponse(**response.json())
-
- testset_revision = testset_revision_response.testset_revision
-
- # print(" --- testset_revision:", testset_revision)
-
- return testset_revision
-
-
-async def _sync_legacy_testset(
- *,
- testset_id: Optional[UUID] = None,
- #
- csvdata: List[Dict[str, Any]],
- #
- name: Optional[str] = None,
-) -> Optional[TestsetRevision]:
- try:
- # print("\n--------- UPSERT TESTSET")
-
- # print(" ---:", testset_revision_data.model_dump(mode="json", exclude_none=True))
-
- testset_revision = await _fetch_legacy_testset(
- testset_id=testset_id,
- name=name,
- )
-
- except Exception as e:
- print("[ERROR]: Failed to prepare testset:", e)
- return None
-
- # print("Fetch response:", testset_revision)
-
- if testset_revision and testset_revision.testset_id:
- # print(" --- Editing testset...", testset_id)
-
- testset_revision = await _edit_legacy_testset(
- testset_id=testset_revision.testset_id,
- name=name,
- csvdata=csvdata,
- )
-
- # print("Edit response:", testset_revision)
-
- else:
- # print(" --- Creating testset...", name, data)
-
- testset_revision = await _create_legacy_testset(
- testset_id=testset_id,
- name=name,
- csvdata=csvdata,
- )
-
- if not testset_revision or not testset_revision.id:
- return None
-
- # print(" --- testset_revision:", testset_revision)
-
- return testset_revision
-
-
-async def aupsert(
- *,
- testset_id: Optional[UUID] = None,
- #
- name: Optional[str] = None,
- #
- data: List[Dict[str, Any]] | TestsetRevisionData,
-) -> Optional[TestsetRevision]:
- csvdata = list()
- if isinstance(data, TestsetRevisionData) and data.testcases:
- csvdata = [testcase.data for testcase in data.testcases]
- elif isinstance(data, list):
- csvdata = data
- else:
- csvdata = list()
-
- return await _sync_legacy_testset(
- testset_id=testset_id,
- name=name,
- csvdata=csvdata, # type: ignore
- )
-
-
-async def acreate(
- *,
- testset_id: Optional[UUID | str] = None,
- #
- name: Optional[str] = None,
- #
- data: List[Dict[str, Any]] | TestsetRevisionData,
-) -> Optional[TestsetRevision]:
- csvdata = list()
- if isinstance(data, TestsetRevisionData) and data.testcases:
- csvdata = [testcase.data for testcase in data.testcases]
- elif isinstance(data, list):
- csvdata = data
- else:
- csvdata = list()
-
- return await _create_legacy_testset(
- testset_id=(
- testset_id
- if isinstance(testset_id, UUID)
- else UUID(testset_id)
- if testset_id
- else None
- ),
- name=name,
- csvdata=csvdata, # type: ignore
- )
-
-
-async def aedit(
- *,
- testset_id: UUID | str,
- #
- name: Optional[str] = None,
- #
- data: List[Dict[str, Any]] | TestsetRevisionData,
-) -> Optional[TestsetRevision]:
- csvdata = list()
- if isinstance(data, TestsetRevisionData) and data.testcases:
- csvdata = [testcase.data for testcase in data.testcases]
- elif isinstance(data, list):
- csvdata = data
- else:
- csvdata = list()
-
- return await _edit_legacy_testset(
- testset_id=testset_id if isinstance(testset_id, UUID) else UUID(testset_id),
- name=name,
- csvdata=csvdata, # type: ignore
- )
-
-
-async def afetch(
- *,
- testset_id: UUID | str,
-) -> Optional[TestsetRevision]:
- return await _fetch_legacy_testset(
- testset_id=testset_id if isinstance(testset_id, UUID) else UUID(testset_id)
- )
-
-
-async def alist(
- #
-) -> List[TestsetRevision]:
- return await _list_legacy_testsets()
-
-
-async def aretrieve(
- testset_id: Optional[UUID] = None,
- #
- testset_revision_id: Optional[UUID] = None,
-) -> Optional[TestsetRevision]:
- # print("\n--------- RETRIEVE TESTSET")
-
- response = await _retrieve_testset(
- testset_id=testset_id,
- testset_revision_id=testset_revision_id,
- )
-
- return response
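
The deleted `aupsert`, `acreate`, and `aedit` all share the same normalization from either raw rows or a `TestsetRevisionData` into legacy `csvdata`. A self-contained sketch of that step, with simplified stand-in models (the real ones live in `agenta.sdk.models.testsets`):

```python
from dataclasses import dataclass
from typing import Any, Dict, List, Union

@dataclass
class Testcase:
    data: Dict[str, Any]

@dataclass
class TestsetRevisionData:
    testcases: List[Testcase]

def to_csvdata(data: Union[List[Dict[str, Any]], TestsetRevisionData]) -> List[Dict[str, Any]]:
    # Accept either raw rows or a revision payload; always return plain rows.
    if isinstance(data, TestsetRevisionData) and data.testcases:
        return [testcase.data for testcase in data.testcases]
    if isinstance(data, list):
        return data
    return []

assert to_csvdata([{"question": "hi"}]) == [{"question": "hi"}]
```
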
diff --git a/sdk/agenta/sdk/managers/vault.py b/sdk/agenta/sdk/managers/vault.py
index ec641881d4..fa6a4a39e8 100644
--- a/sdk/agenta/sdk/managers/vault.py
+++ b/sdk/agenta/sdk/managers/vault.py
@@ -1,16 +1,16 @@
from typing import Optional, Dict, Any
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
class VaultManager:
@staticmethod
def get_from_route() -> Optional[Dict[str, Any]]:
- context = RoutingContext.get()
+ context = serving_context.get()
secrets = context.secrets
if not secrets:
- return []
+ return None
return secrets
diff --git a/sdk/agenta/sdk/middleware/adapt.py b/sdk/agenta/sdk/middleware/adapt.py
new file mode 100644
index 0000000000..26b527a3d8
--- /dev/null
+++ b/sdk/agenta/sdk/middleware/adapt.py
@@ -0,0 +1,253 @@
+from typing import Callable
+from inspect import signature
+from uuid import uuid4
+
+from agenta.sdk.utils.logging import get_module_logger
+
+from agenta.sdk.middleware.base import (
+ WorkflowMiddleware,
+ middleware_as_decorator,
+)
+from agenta.sdk.workflows.types import (
+ WorkflowServiceRequest,
+ WorkflowServiceResponse,
+ WorkflowServiceData,
+ WorkflowRevision,
+ Status,
+)
+
+log = get_module_logger(__name__)
+
+DEFAULT_INPUTS_MAPPINGS = {
+ "request": "request",
+ "revision": "revision",
+ "parameters": "revision.data.parameters",
+ "inputs": "request.data.inputs",
+ "outputs": "request.data.traces.0.attributes.ag.data.outputs",
+ "trace": "request.data.traces.0",
+ "trace_outputs": "request.data.traces.0.attributes.ag.data.outputs",
+ "traces": "request.data.traces",
+ "traces_outputs": "request.data.traces.{}.attributes.ag.data.outputs",
+}
+
+ALLOWED_INPUTS_KEYS = set(DEFAULT_INPUTS_MAPPINGS.keys())
+
+ALLOWED_OUTPUTS_KEYS = {
+ "outputs",
+ "trace",
+}
+
+DEFAULT_MAPPINGS = {}
+
+CURRENT_VERSION = "2025.07.14"
+
+
+@middleware_as_decorator
+class AdaptMiddleware(WorkflowMiddleware):
+ def __init__(self):
+ pass
+
+ async def __call__(
+ self,
+ request: WorkflowServiceRequest,
+ revision: WorkflowRevision,
+ handler: Callable,
+ ) -> WorkflowServiceResponse:
+ request_data_dict = request.data.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+
+ revision_data_dict = revision.data.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+
+ provided_request_keys = sorted(
+ {"request", "revision", "parameters"} | set(request_data_dict.keys())
+ )
+
+ handler_signature = signature(handler)
+
+ requested_inputs_keys = sorted(set(handler_signature.parameters.keys()))
+
+ kwargs = dict()
+
+ try:
+ for requested_input_key in requested_inputs_keys:
+ if requested_input_key not in ALLOWED_INPUTS_KEYS:
+ kwargs[requested_input_key] = None
+ continue
+
+ if requested_input_key in provided_request_keys:
+ if requested_input_key == "parameters":
+ kwargs[requested_input_key] = (
+ revision.data.parameters
+ if revision.data.parameters
+ else None
+ )
+ elif requested_input_key == "request":
+ kwargs[requested_input_key] = request
+ elif requested_input_key == "revision":
+ kwargs[requested_input_key] = revision
+ else:
+ kwargs[requested_input_key] = request_data_dict[
+ requested_input_key
+ ]
+
+ else:
+ kwargs[requested_input_key] = self._apply_request_mapping(
+ request=request_data_dict,
+ revision=revision_data_dict,
+ key=requested_input_key,
+ )
+
+ except: # pylint: disable=bare-except
+ # handle the error
+ pass
+
+ try:
+ # inputs = kwargs.get("inputs", None)
+
+ # inputs_schema =
+
+ # self._check_request_schema(
+ # inputs,
+ # inputs_schema,
+ # )
+
+ # parameters = kwargs.get("parameters", None)
+
+ # parameters_schema =
+
+ # self._check_request_schema(
+ # parameters,
+ # parameters_schema,
+ # )
+
+ pass
+
+ except: # pylint: disable=bare-except
+ # handle the error
+ pass
+
+ try:
+ handler_signature.bind(**kwargs)
+
+ except: # pylint: disable=bare-except
+ # handle the error
+ pass
+
+ try:
+ outputs = await handler(**kwargs)
+
+ trace = None # get trace
+
+ except: # pylint: disable=bare-except
+ # handle the error
+ log.debug("Workflow handler raised an exception; re-raising")
+ raise
+
+ try:
+ # outputs_schema =
+
+ # self._check_request_schema(
+ # outputs,
+ # outputs_schema,
+ # )
+
+ pass
+
+ except: # pylint: disable=bare-except
+ # handle the error
+ pass
+
+ return WorkflowServiceResponse(
+ id=uuid4(),
+ version=CURRENT_VERSION,
+ # status=Status(code=200, message="Success"),
+ data=WorkflowServiceData(
+ outputs=outputs,
+ trace=trace,
+ ),
+ )
+
+ def _apply_request_mapping(
+ self,
+ request: dict,
+ revision: dict,
+ key: str,
+ ):
+ mapping = DEFAULT_INPUTS_MAPPINGS[key]
+
+ parts = mapping.split(".")
+
+ base_part = parts.pop(0)
+ data_part = parts.pop(0)
+
+ base = (
+ request
+ if base_part == "request" and data_part == "data"
+ else (revision if base_part == "revision" and data_part == "data" else {})
+ )
+
+ scalar = True
+ is_list = False
+ is_dict = False
+
+ for part in parts:
+ _is_index = part.isdigit()
+ _is_list = part == "[]"
+ _is_dict = part == "{}"
+
+ _scalar = not (_is_list or _is_dict)
+
+ if not scalar and not _scalar:
+ # handle error once we start using mappings
+ pass
+
+ if _is_index:
+ if isinstance(base, list):
+ base = base[int(part)]
+ elif isinstance(base, dict):
+ base = base[list(base.keys())[int(part)]]
+ else:
+ # handle error once we start using mappings
+ pass
+
+ elif _is_list:
+ if not isinstance(base, list):
+ # handle error once we start using mappings
+ pass
+ elif _is_dict:
+ if not isinstance(base, dict):
+ # handle error once we start using mappings
+ pass
+
+ else:
+ if isinstance(base, dict):
+ if is_list:
+ base = [
+ (item.get(part, None) if isinstance(item, dict) else None)
+ for item in base
+ ]
+ elif is_dict:
+ base = {
+ key: (
+ value.get(part, None)
+ if isinstance(value, dict)
+ else None
+ )
+ for key, value in base.items()
+ }
+ else:
+ base = base.get(part, None)
+ else:
+ # handle error once we start using mappings
+ pass
+
+ scalar = _scalar
+ is_list = _is_list
+ is_dict = _is_dict
+
+ return base
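
`_apply_request_mapping` above walks dotted paths such as `request.data.traces.0.attributes.ag.data.outputs`. A minimal resolver sketch covering only the scalar and numeric-index cases (the `[]`/`{}` fan-out cases are omitted):

```python
from typing import Any

def resolve_path(root: Any, mapping: str) -> Any:
    # Follow dot-separated keys; numeric parts index into lists.
    node = root
    for part in mapping.split("."):
        if part.isdigit() and isinstance(node, list):
            node = node[int(part)]
        elif isinstance(node, dict):
            node = node.get(part)
        else:
            return None
    return node

payload = {"data": {"traces": [{"attributes": {"ag": {"data": {"outputs": "ok"}}}}]}}
assert resolve_path(payload, "data.traces.0.attributes.ag.data.outputs") == "ok"
```
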
diff --git a/sdk/agenta/sdk/middleware/auth.py b/sdk/agenta/sdk/middleware/auth.py
index 0e77dae130..0174529f3c 100644
--- a/sdk/agenta/sdk/middleware/auth.py
+++ b/sdk/agenta/sdk/middleware/auth.py
@@ -15,6 +15,17 @@
import agenta as ag
+from agenta.sdk.middleware.base import (
+ WorkflowMiddleware,
+ middleware_as_decorator,
+)
+from agenta.sdk.workflows.types import (
+ WorkflowServiceRequest,
+ WorkflowServiceResponse,
+ WorkflowRevision,
+ WorkflowServiceHandler,
+)
+
log = get_module_logger(__name__)
AGENTA_RUNTIME_PREFIX = getenv("AGENTA_RUNTIME_PREFIX", "")
@@ -253,3 +264,168 @@ async def _get_credentials(self, request: Request) -> Optional[str]:
status_code=500,
content=f"Could not verify credentials: unexpected error - {str(exc)}. Please try again later or contact support if the issue persists.",
) from exc
+
+
+from agenta.sdk.context.tracing import (
+ tracing_context_manager,
+ tracing_context,
+ TracingContext,
+)
+
+
+@middleware_as_decorator
+class AuthMiddleware(WorkflowMiddleware):
+ def __init__(self):
+ pass
+
+ async def __call__(
+ self,
+ request: WorkflowServiceRequest,
+ revision: WorkflowRevision,
+ handler: WorkflowServiceHandler,
+ ) -> WorkflowServiceResponse:
+ ctx = tracing_context.get()
+
+ ctx.credentials = request.credentials
+
+ with tracing_context_manager(context=ctx):
+ return await handler(request, revision)
+
+
+# @middleware_as_decorator
+# class AuthMiddleware(WorkflowMiddleware):
+# def __init__(self):
+# self.host = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host
+# self.scope_type = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.scope_type
+# self.scope_id = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.scope_id
+
+# async def __call__(
+# self,
+# request: WorkflowServiceRequest,
+# revision: WorkflowRevision,
+# handler: WorkflowServiceHandler,
+# ) -> WorkflowServiceResponse:
+# try:
+# request.credentials = await self._get_credentials(request)
+
+# except DenyException as deny:
+# display_exception("Auth Middleware Exception")
+
+# raise deny
+
+# except Exception as exc:
+# display_exception("Auth Middleware Exception")
+
+# raise DenyException(
+# status_code=500,
+# content="Auth Middleware Unexpected Error.",
+# ) from exc
+
+# return await handler(request, revision)
+
+# async def _get_credentials(
+# self,
+# request: WorkflowServiceRequest,
+# ) -> Optional[str]:
+# credentials = request.credentials
+
+# headers = {"Authorization": credentials} if credentials else None
+
+# params = {
+# "action": "run_workflow",
+# "resource_type": "workflow",
+# }
+# if self.scope_type and self.scope_id:
+# params["scope_type"] = self.scope_type
+# params["scope_id"] = self.scope_id
+
+# _hash = dumps(
+# {
+# "headers": headers,
+# "params": params,
+# },
+# sort_keys=True,
+# )
+
+# if _CACHE_ENABLED:
+# cached = _cache.get(_hash)
+# if cached:
+# return cached
+
+# try:
+# async with httpx.AsyncClient() as client:
+# response = await client.get(
+# f"{self.host}/api/permissions/verify",
+# params=params,
+# headers=headers,
+# timeout=5 * 60,
+# )
+
+# except httpx.TimeoutException as exc:
+# raise DenyException(
+# status_code=504,
+# content=f"Could not verify credentials: connection to {self.host} timed out. Please check your network connection.",
+# ) from exc
+# except httpx.ConnectError as exc:
+# raise DenyException(
+# status_code=503,
+# content=f"Could not verify credentials: connection to {self.host} failed. Please check if agenta is available.",
+# ) from exc
+# except httpx.NetworkError as exc:
+# raise DenyException(
+# status_code=503,
+# content=f"Could not verify credentials: connection to {self.host} failed. Please check your network connection.",
+# ) from exc
+# except httpx.HTTPError as exc:
+# raise DenyException(
+# status_code=502,
+# content=f"Could not verify credentials: connection to {self.host} failed. Please check if agenta is available.",
+# ) from exc
+# except Exception as exc:
+# raise DenyException(
+# 500,
+# f"Could not verify credentials: unexpected error.\n {exc}",
+# ) from exc
+
+# if response.status_code == 401:
+# raise DenyException(
+# status_code=401,
+# content="Invalid credentials. Please check your credentials or login again.",
+# )
+# if response.status_code == 403:
+# raise DenyException(
+# status_code=403,
+# content="Permission denied. Please check your permissions or contact your administrator.",
+# )
+# if response.status_code != 200:
+# raise DenyException(
+# status_code=500,
+# content=f"Could not verify credentials: {self.host} returned unexpected status code {response.status_code}. Please try again later or contact support if the issue persists.",
+# )
+
+# try:
+# auth = response.json()
+# except ValueError as exc:
+# raise DenyException(
+# status_code=500,
+# content=f"Could not verify credentials: {self.host} returned unexpected invalid JSON response. Please try again later or contact support if the issue persists.",
+# ) from exc
+
+# if not isinstance(auth, dict):
+# raise DenyException(
+# status_code=500,
+# content=f"Could not verify credentials: {self.host} returned unexpected invalid response format. Please try again later or contact support if the issue persists.",
+# )
+
+# if auth.get("effect") != "allow":
+# raise DenyException(
+# status_code=403,
+# content="Permission denied. Please check your permissions or contact your administrator.",
+# )
+
+# credentials: str = auth.get("credentials")
+
+# if credentials is not None:
+# _cache.put(_hash, credentials)
+
+# return credentials
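
The active `AuthMiddleware` above is intentionally thin: it copies the request's credentials into the tracing context before invoking the handler, so downstream code can read them without threading them through every signature. A hedged, standard-library-only sketch of that pattern (the names below are simplified stand-ins, not the SDK's actual `tracing_context` utilities):

```python
import asyncio
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import Optional


@dataclass
class Ctx:
    credentials: Optional[str] = None


_tracing_ctx: ContextVar[Ctx] = ContextVar("tracing_ctx", default=Ctx())


@contextmanager
def ctx_scope(context: Ctx):
    token = _tracing_ctx.set(context)
    try:
        yield context
    finally:
        _tracing_ctx.reset(token)


async def handler(request: dict, revision: dict) -> str:
    # Anything in the handler's call stack can read the ambient credentials.
    return f"authorized as: {_tracing_ctx.get().credentials}"


async def auth_middleware(request: dict, revision: dict) -> str:
    ctx = Ctx(credentials=request.get("credentials"))
    with ctx_scope(ctx):
        return await handler(request, revision)


print(asyncio.run(auth_middleware({"credentials": "ApiKey sk-..."}, {})))
# -> authorized as: ApiKey sk-...
```
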
diff --git a/sdk/agenta/sdk/middleware/base.py b/sdk/agenta/sdk/middleware/base.py
new file mode 100644
index 0000000000..435fdee97d
--- /dev/null
+++ b/sdk/agenta/sdk/middleware/base.py
@@ -0,0 +1,40 @@
+from typing import Protocol, Callable, Any, Union
+
+from agenta.sdk.workflows.types import (
+ WorkflowServiceRequest,
+ WorkflowServiceResponse,
+ WorkflowRevision,
+ WorkflowServiceHandler,
+)
+
+
+class WorkflowMiddleware(Protocol):
+ async def __call__(
+ self,
+ request: WorkflowServiceRequest,
+ revision: WorkflowRevision,
+ handler: Callable,
+ ) -> Any:
+ ...
+
+
+WorkflowMiddlewareDecorator = Callable[[WorkflowServiceHandler], WorkflowServiceHandler]
+
+
+def middleware_as_decorator(
+ middleware: Union[WorkflowMiddleware, type[WorkflowMiddleware]],
+) -> WorkflowMiddlewareDecorator:
+ middleware = middleware() if isinstance(middleware, type) else middleware
+
+ def decorator(
+ handler: WorkflowServiceHandler,
+ ) -> WorkflowServiceHandler:
+ async def wrapped(
+ request: WorkflowServiceRequest,
+ revision: WorkflowRevision,
+ ) -> WorkflowServiceResponse:
+ return await middleware(request, revision, handler)
+
+ return wrapped
+
+ return decorator
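
To make the adapter concrete: `middleware_as_decorator` is applied directly on a middleware class (as `AuthMiddleware` and `FlagsMiddleware` do above), which rebinds the class name to a handler decorator. A hedged, self-contained replica with plain dicts standing in for the SDK's `Workflow*` models:

```python
import asyncio
from typing import Any, Awaitable, Callable, Union

Handler = Callable[[dict, dict], Awaitable[Any]]


def middleware_as_decorator(middleware: Union[Callable, type]) -> Callable[[Handler], Handler]:
    # Accept either an instance or a class; instantiate classes on the spot.
    middleware = middleware() if isinstance(middleware, type) else middleware

    def decorator(handler: Handler) -> Handler:
        async def wrapped(request: dict, revision: dict) -> Any:
            return await middleware(request, revision, handler)
        return wrapped

    return decorator


@middleware_as_decorator
class TimingMiddleware:
    async def __call__(self, request: dict, revision: dict, handler: Handler) -> Any:
        response = await handler(request, revision)
        return {"response": response, "observed": True}


@TimingMiddleware
async def run(request: dict, revision: dict) -> str:
    return request["inputs"]["q"].upper()


print(asyncio.run(run({"inputs": {"q": "hello"}}, {})))
# -> {'response': 'HELLO', 'observed': True}
```
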
diff --git a/sdk/agenta/sdk/middleware/flags.py b/sdk/agenta/sdk/middleware/flags.py
new file mode 100644
index 0000000000..02ced47b34
--- /dev/null
+++ b/sdk/agenta/sdk/middleware/flags.py
@@ -0,0 +1,40 @@
+from agenta.sdk.utils.logging import get_module_logger
+
+from agenta.sdk.middleware.base import (
+ WorkflowMiddleware,
+ middleware_as_decorator,
+)
+from agenta.sdk.workflows.types import (
+ WorkflowServiceRequest,
+ WorkflowServiceResponse,
+ WorkflowRevision,
+ WorkflowServiceHandler,
+)
+
+from agenta.sdk.context.tracing import (
+ tracing_context_manager,
+ tracing_context,
+)
+
+
+log = get_module_logger(__name__)
+
+
+@middleware_as_decorator
+class FlagsMiddleware(WorkflowMiddleware):
+ def __init__(self):
+ pass
+
+ async def __call__(
+ self,
+ request: WorkflowServiceRequest,
+ revision: WorkflowRevision,
+ handler: WorkflowServiceHandler,
+ ) -> WorkflowServiceResponse:
+ ctx = tracing_context.get()
+
+ if isinstance(request.flags, dict) and request.flags.get("is_annotation"):
+ ctx.type = "annotation"
+
+ with tracing_context_manager(context=ctx):
+ return await handler(request, revision)
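
The flag check above is the whole contract: an annotation request is traced with type `"annotation"` instead of the default, so downstream spans can be told apart. Reduced to a pure function for illustration (a sketch, not the SDK API):

```python
def trace_type(flags) -> str:
    # Mirrors the guard in FlagsMiddleware: only a dict with a truthy
    # "is_annotation" key switches the trace type.
    if isinstance(flags, dict) and flags.get("is_annotation"):
        return "annotation"
    return "invocation"


assert trace_type({"is_annotation": True}) == "annotation"
assert trace_type({"is_annotation": False}) == "invocation"
assert trace_type(None) == "invocation"
```
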
diff --git a/sdk/agenta/sdk/middleware/vault.py b/sdk/agenta/sdk/middleware/vault.py
index 52c02fa186..b03e638184 100644
--- a/sdk/agenta/sdk/middleware/vault.py
+++ b/sdk/agenta/sdk/middleware/vault.py
@@ -85,7 +85,7 @@ async def _get_secrets(self, request: Request) -> Optional[Dict]:
continue
secret = SecretDTO(
- kind="provider_key", # type: ignore
+ kind="provider_kind", # type: ignore
data=StandardProviderDTO(
kind=provider,
provider=StandardProviderSettingsDTO(key=key),
diff --git a/sdk/agenta/sdk/middlewares/__init__.py b/sdk/agenta/sdk/middlewares/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sdk/agenta/sdk/middlewares/routing/__init__.py b/sdk/agenta/sdk/middlewares/routing/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sdk/agenta/sdk/middlewares/routing/auth.py b/sdk/agenta/sdk/middlewares/routing/auth.py
deleted file mode 100644
index f176a118fb..0000000000
--- a/sdk/agenta/sdk/middlewares/routing/auth.py
+++ /dev/null
@@ -1,263 +0,0 @@
-from typing import Callable, Optional
-from os import getenv
-from json import dumps
-
-import httpx
-
-from starlette.types import ASGIApp
-from starlette.middleware.base import BaseHTTPMiddleware
-from fastapi import Request
-from fastapi.responses import JSONResponse
-
-from agenta.sdk.utils.logging import get_module_logger
-from agenta.sdk.utils.exceptions import display_exception
-from agenta.sdk.utils.cache import TTLLRUCache
-from agenta.sdk.utils.constants import TRUTHY
-
-# import agenta as ag
-
-
-log = get_module_logger(__name__)
-
-AGENTA_RUNTIME_PREFIX = getenv("AGENTA_RUNTIME_PREFIX", "")
-
-_AUTH_ENABLED = (
- getenv("AGENTA_SERVICE_MIDDLEWARE_AUTH_ENABLED", "true").lower() in TRUTHY
-)
-
-_CACHE_ENABLED = (
- getenv("AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED", "true").lower() in TRUTHY
-)
-
-_ALWAYS_ALLOW_LIST = [f"{AGENTA_RUNTIME_PREFIX}/health"]
-
-_cache = TTLLRUCache()
-
-
-class DenyResponse(JSONResponse):
- def __init__(
- self,
- status_code: int = 401,
- detail: str = "Unauthorized",
- ) -> None:
- super().__init__(
- status_code=status_code,
- content={"detail": detail},
- )
-
-
-class DenyException(Exception):
- def __init__(
- self,
- status_code: int = 401,
- content: str = "Unauthorized",
- ) -> None:
- super().__init__()
-
- self.status_code = status_code
- self.content = content
-
-
-class AuthMiddleware(BaseHTTPMiddleware):
- def __init__(self, app: ASGIApp, **options):
- super().__init__(app)
-
- # self.host = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host
-
- # self.scope_type = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.scope_type
- # self.scope_id = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.scope_id
-
- async def dispatch(self, request: Request, call_next: Callable):
- try:
- if request.url.path in _ALWAYS_ALLOW_LIST:
- request.state.auth = {}
-
- else:
- credentials = await self._get_credentials(request)
-
- request.state.auth = {"credentials": credentials}
-
- return await call_next(request)
-
- except DenyException as deny:
- display_exception("Auth Middleware Exception")
-
- return DenyResponse(
- status_code=deny.status_code,
- detail=deny.content,
- )
-
- except: # pylint: disable=bare-except
- display_exception("Auth Middleware Exception")
-
- return DenyResponse(
- status_code=500,
- detail="Auth: Unexpected Error.",
- )
-
- async def _get_credentials(self, request: Request) -> Optional[str]:
- try:
- if not _AUTH_ENABLED:
- return request.headers.get("authorization", None)
-
- # HEADERS
- authorization = request.headers.get("authorization", None)
- headers = {"Authorization": authorization} if authorization else None
-
- # COOKIES
- access_token = request.cookies.get("sAccessToken", None)
- cookies = {"sAccessToken": access_token} if access_token else None
-
- # if not headers and not cookies:
- # log.debug("No auth header nor auth cookie found in the request")
-
- # PARAMS
- params = {}
- ## PROJECT_ID
- project_id = (
- # CLEANEST
- request.state.otel["baggage"].get("project_id")
- # ALTERNATIVE
- or request.query_params.get("project_id")
- )
- # if not project_id:
- # log.debug("No project ID found in request")
-
- if project_id:
- params["project_id"] = project_id
- ## SCOPE
- if self.scope_type and self.scope_id:
- params["scope_type"] = self.scope_type
- params["scope_id"] = self.scope_id
- ## ACTION
- params["action"] = "run_service"
- ## RESOURCE
- params["resource_type"] = "service"
- # params["resource_id"] = None
-
- _hash = dumps(
- {
- "headers": headers,
- "cookies": cookies,
- "params": params,
- },
- sort_keys=True,
- )
-
- if _CACHE_ENABLED:
- credentials = _cache.get(_hash)
-
- if credentials:
- # log.debug("Using cached credentials")
- return credentials
-
- try:
- async with httpx.AsyncClient() as client:
- try:
- response = await client.get(
- f"{self.host}/api/permissions/verify",
- headers=headers,
- cookies=cookies,
- params=params,
- timeout=30.0,
- )
- except httpx.TimeoutException as exc:
-                    # log.debug(f"Timeout error while verifying credentials: {exc}")
- raise DenyException(
- status_code=504,
- content=f"Could not verify credentials: connection to {self.host} timed out. Please check your network connection.",
- ) from exc
- except httpx.ConnectError as exc:
-                    # log.debug(f"Connection error while verifying credentials: {exc}")
- raise DenyException(
- status_code=503,
- content=f"Could not verify credentials: connection to {self.host} failed. Please check if agenta is available.",
- ) from exc
- except httpx.NetworkError as exc:
-                    # log.debug(f"Network error while verifying credentials: {exc}")
- raise DenyException(
- status_code=503,
- content=f"Could not verify credentials: connection to {self.host} failed. Please check your network connection.",
- ) from exc
- except httpx.HTTPError as exc:
-                    # log.debug(f"HTTP error while verifying credentials: {exc}")
- raise DenyException(
- status_code=502,
- content=f"Could not verify credentials: connection to {self.host} failed. Please check if agenta is available.",
- ) from exc
-
- if response.status_code == 401:
- # log.debug("Agenta returned 401 - Invalid credentials")
- raise DenyException(
- status_code=401,
- content="Invalid credentials. Please check your credentials or login again.",
- )
- elif response.status_code == 403:
- # log.debug("Agenta returned 403 - Permission denied")
- raise DenyException(
- status_code=403,
- content="Permission denied. Please check your permissions or contact your administrator.",
- )
- elif response.status_code != 200:
- # log.debug(
- # f"Agenta returned {response.status_code} - Unexpected status code"
- # )
- raise DenyException(
- status_code=500,
- content=f"Could not verify credentials: {self.host} returned unexpected status code {response.status_code}. Please try again later or contact support if the issue persists.",
- )
-
- try:
- auth = response.json()
- except ValueError as exc:
- # log.debug(f"Agenta returned invalid JSON response: {exc}")
- raise DenyException(
- status_code=500,
- content=f"Could not verify credentials: {self.host} returned unexpected invalid JSON response. Please try again later or contact support if the issue persists.",
- ) from exc
-
- if not isinstance(auth, dict):
- # log.debug(
- # f"Agenta returned invalid response format: {type(auth)}"
- # )
- raise DenyException(
- status_code=500,
- content=f"Could not verify credentials: {self.host} returned unexpected invalid response format. Please try again later or contact support if the issue persists.",
- )
-
- effect = auth.get("effect")
- if effect != "allow":
-            # log.debug(f"Access denied by Agenta - effect: {effect}")
- raise DenyException(
- status_code=403,
- content="Permission denied. Please check your permissions or contact your administrator.",
- )
-
- credentials = auth.get("credentials")
-
- # if not credentials:
- # log.debug("No credentials found in the response")
-
- _cache.put(_hash, credentials)
-
- return credentials
-
- except DenyException as deny:
- raise deny
-        except Exception as exc:  # pylint: disable=broad-except
- # log.debug(
- # f"Unexpected error while verifying credentials (remote): {exc}"
- # )
- raise DenyException(
- status_code=500,
- content=f"Could not verify credentials: unexpected error - {str(exc)}. Please try again later or contact support if the issue persists.",
- ) from exc
-
- except DenyException as deny:
- raise deny
- except Exception as exc:
- # log.debug(f"Unexpected error while verifying credentials (local): {exc}")
- raise DenyException(
- status_code=500,
- content=f"Could not verify credentials: unexpected error - {str(exc)}. Please try again later or contact support if the issue persists.",
- ) from exc
diff --git a/sdk/agenta/sdk/middlewares/routing/cors.py b/sdk/agenta/sdk/middlewares/routing/cors.py
deleted file mode 100644
index 934efa86f1..0000000000
--- a/sdk/agenta/sdk/middlewares/routing/cors.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from os import getenv
-
-from starlette.types import ASGIApp, Receive, Scope, Send
-from fastapi.middleware.cors import CORSMiddleware as BaseCORSMiddleware
-
-from agenta.sdk.utils.constants import TRUTHY
-
-_USE_CORS = getenv("AGENTA_USE_CORS", "enable").lower() in TRUTHY
-
-
-class CORSMiddleware(BaseCORSMiddleware):
- def __init__(self, app: ASGIApp, **options):
- self.app = app
-
- if _USE_CORS:
- super().__init__(
- app=app,
- allow_origins=["*"],
- allow_methods=["*"],
- allow_headers=["*"],
- allow_credentials=True,
- expose_headers=None,
- max_age=None,
- )
-
- async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
- if _USE_CORS:
- return await super().__call__(scope, receive, send)
-
- return await self.app(scope, receive, send)
diff --git a/sdk/agenta/sdk/middlewares/routing/otel.py b/sdk/agenta/sdk/middlewares/routing/otel.py
deleted file mode 100644
index e1aeecc698..0000000000
--- a/sdk/agenta/sdk/middlewares/routing/otel.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from typing import Callable
-
-from starlette.types import ASGIApp
-from starlette.middleware.base import BaseHTTPMiddleware
-from fastapi import Request
-
-from agenta.sdk.utils.logging import get_module_logger
-from agenta.sdk.utils.exceptions import suppress
-from agenta.sdk.engines.tracing.propagation import extract
-
-
-log = get_module_logger(__name__)
-
-
-class OTelMiddleware(BaseHTTPMiddleware):
- async def dispatch(self, request: Request, call_next: Callable):
- request.state.otel = {"baggage": {}, "traceparent": None}
-
- headers: dict = dict(request.headers)
-
- if "newrelic" in headers:
- headers["traceparent"] = None
-
- with suppress():
- _, traceparent, baggage = extract(headers)
-
- request.state.otel = {"baggage": baggage, "traceparent": traceparent}
-
- return await call_next(request)
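
For reference, the removed middleware's `extract()` call pulled the W3C trace context out of incoming headers (note the guard that drops `traceparent` when a New Relic header is present). A hedged, stdlib-only sketch of that parsing, not the SDK's actual propagation code:

```python
from typing import Dict, Optional, Tuple


def parse_trace_headers(headers: Dict[str, str]) -> Tuple[Optional[str], Dict[str, str]]:
    traceparent = headers.get("traceparent")
    if traceparent is not None:
        # version-traceid-spanid-flags, e.g. 00-<32 hex>-<16 hex>-01
        parts = traceparent.split("-")
        if len(parts) != 4 or len(parts[1]) != 32 or len(parts[2]) != 16:
            traceparent = None

    baggage: Dict[str, str] = {}
    for item in headers.get("baggage", "").split(","):
        if "=" in item:
            key, _, value = item.partition("=")
            baggage[key.strip()] = value.strip()

    return traceparent, baggage


tp, bag = parse_trace_headers({
    "traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
    "baggage": "project_id=123,session_id=abc",
})
assert tp is not None and bag["project_id"] == "123"
```
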
diff --git a/sdk/agenta/sdk/middlewares/running/__init__.py b/sdk/agenta/sdk/middlewares/running/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sdk/agenta/sdk/middlewares/running/normalizer.py b/sdk/agenta/sdk/middlewares/running/normalizer.py
deleted file mode 100644
index e3fdd2acdd..0000000000
--- a/sdk/agenta/sdk/middlewares/running/normalizer.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# /agenta/sdk/middlewares/running/normalizer.py
-import inspect
-from typing import Any, Dict, Callable, Union
-from inspect import isawaitable, isasyncgen, isgenerator
-from traceback import format_exception
-from uuid import UUID
-
-
-from agenta.sdk.utils.exceptions import suppress
-from agenta.sdk.models.workflows import (
- WorkflowServiceStatus,
- WorkflowServiceRequestData,
- WorkflowServiceResponseData,
- WorkflowServiceRequest,
- WorkflowServiceBatchResponse,
- WorkflowServiceStreamResponse,
-)
-from agenta.sdk.workflows.errors import ErrorStatus
-from agenta.sdk.contexts.running import RunningContext
-from agenta.sdk.contexts.tracing import TracingContext
-
-
-class NormalizerMiddleware:
- """Middleware that normalizes workflow service requests and responses.
-
- This middleware performs two key normalization operations:
-
- 1. **Request Normalization**: Transforms a WorkflowServiceRequest into the appropriate
- keyword arguments for the workflow handler function by:
- - Mapping request data fields to handler function parameters
- - Extracting inputs from request.data.inputs and mapping them to function parameters
- - Handling special parameters like 'request' and WorkflowServiceRequestData fields
- - Supporting **kwargs expansion for additional fields
-
- 2. **Response Normalization**: Transforms handler function results into standardized
- WorkflowServiceBatchResponse or WorkflowServiceStreamResponse objects by:
- - Handling various return types (plain values, awaitables, generators, async generators)
- - Aggregating streaming results into batches when aggregate flag is set
- - Extracting trace_id and span_id from TracingContext for observability
- - Wrapping raw outputs in proper response structures
-
- The middleware ensures consistent interfaces between the workflow service layer and
- the actual handler functions, allowing handlers to use simple function signatures
- while maintaining structured request/response formats at the service boundary.
- """
-
- DATA_FIELDS = set(("request",)) | set(
- WorkflowServiceRequestData.model_fields.keys()
- )
-
- async def _normalize_request(
- self,
- request: WorkflowServiceRequest,
- handler: Callable,
- ) -> Dict[str, Any]:
- """Transform a WorkflowServiceRequest into kwargs for the handler function.
-
- Inspects the handler's function signature and maps the request data to the
- appropriate parameter names and values. The mapping follows this priority order:
-
- 1. If parameter name is 'request': passes the entire WorkflowServiceRequest
- 2. If parameter name matches DATA_FIELDS (like 'inputs', 'outputs', 'parameters'):
- extracts that field from request.data
- 3. If parameter is **kwargs: includes all unconsumed DATA_FIELDS
- 4. Otherwise: looks up the parameter name in request.data.inputs dict
-
- Args:
- request: The workflow service request containing inputs and data
- handler: The callable workflow handler whose signature to inspect
-
- Returns:
- Dictionary mapping parameter names to values for calling the handler
- """
- sig = inspect.signature(handler)
- params = sig.parameters
- normalized: Dict[str, Any] = {}
- consumed = set()
-
- for name, param in params.items():
- if name == "request":
- normalized[name] = request
- consumed.add(name)
-
- elif name in self.DATA_FIELDS:
- normalized[name] = (
- getattr(request.data, name, None) if request.data else None
- )
- consumed.add(name)
-
- elif param.kind == inspect.Parameter.VAR_KEYWORD:
- if request.data:
- for f in self.DATA_FIELDS - consumed:
- normalized[f] = getattr(request.data, f, None)
- consumed |= self.DATA_FIELDS
-
- else:
- if request.data and isinstance(request.data.inputs, dict):
- if name in request.data.inputs:
- normalized[name] = request.data.inputs[name]
- consumed.add(name)
- continue
- normalized[name] = None
-
- return normalized
-
- async def _normalize_response(
- self,
- result: Any,
-    ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
- if isawaitable(result):
- result = await result
-
- if isinstance(
- result, (WorkflowServiceBatchResponse, WorkflowServiceStreamResponse)
- ):
- trace_id = None
- span_id = None
-
- with suppress():
- link = (TracingContext.get().link) or {}
-
- _trace_id = link.get("trace_id") if link else None # in int format
- _span_id = link.get("span_id") if link else None # in int format
-
- trace_id = UUID(int=_trace_id).hex if _trace_id else None
- span_id = UUID(int=_span_id).hex[16:] if _span_id else None
-
- result.trace_id = trace_id
- result.span_id = span_id
-
- return result
-
- if isasyncgen(result):
- if RunningContext.get().aggregate:
- collected = [item async for item in result]
-
- trace_id = None
- span_id = None
-
- with suppress():
- link = (TracingContext.get().link) or {}
-
- _trace_id = link.get("trace_id") if link else None # in int format
- _span_id = link.get("span_id") if link else None # in int format
-
- trace_id = UUID(int=_trace_id).hex if _trace_id else None
- span_id = UUID(int=_span_id).hex[16:] if _span_id else None
-
- return WorkflowServiceBatchResponse(
- data=WorkflowServiceResponseData(outputs=collected),
- trace_id=trace_id,
- span_id=span_id,
- )
-
- async def iterator():
- async for item in result:
- yield item
-
- trace_id = None
- span_id = None
-
- with suppress():
- link = (TracingContext.get().link) or {}
-
- _trace_id = link.get("trace_id") if link else None # in int format
- _span_id = link.get("span_id") if link else None # in int format
-
- trace_id = UUID(int=_trace_id).hex if _trace_id else None
- span_id = UUID(int=_span_id).hex[16:] if _span_id else None
-
- return WorkflowServiceStreamResponse(
- generator=iterator,
- trace_id=trace_id,
- span_id=span_id,
- )
-
- if isgenerator(result):
- if RunningContext.get().aggregate:
- collected = list(result)
-
- trace_id = None
- span_id = None
-
- with suppress():
- link = (TracingContext.get().link) or {}
-
- _trace_id = link.get("trace_id") if link else None # in int format
- _span_id = link.get("span_id") if link else None # in int format
-
- trace_id = UUID(int=_trace_id).hex if _trace_id else None
- span_id = UUID(int=_span_id).hex[16:] if _span_id else None
-
- return WorkflowServiceBatchResponse(
- data=WorkflowServiceResponseData(outputs=collected),
- trace_id=trace_id,
- span_id=span_id,
- )
-
- async def iterator():
- for item in result:
- yield item
-
- trace_id = None
- span_id = None
-
- with suppress():
- link = (TracingContext.get().link) or {}
-
- _trace_id = link.get("trace_id") if link else None # in int format
- _span_id = link.get("span_id") if link else None # in int format
-
- trace_id = UUID(int=_trace_id).hex if _trace_id else None
- span_id = UUID(int=_span_id).hex[16:] if _span_id else None
-
- return WorkflowServiceStreamResponse(
- generator=iterator,
- trace_id=trace_id,
- span_id=span_id,
- )
-
- trace_id = None
- span_id = None
-
- with suppress():
- link = (TracingContext.get().link) or {}
-
- _trace_id = link.get("trace_id") if link else None # in int format
- _span_id = link.get("span_id") if link else None # in int format
-
- trace_id = UUID(int=_trace_id).hex if _trace_id else None
- span_id = UUID(int=_span_id).hex[16:] if _span_id else None
-
- return WorkflowServiceBatchResponse(
- data=WorkflowServiceResponseData(outputs=result),
- trace_id=trace_id,
- span_id=span_id,
- )
-
- async def _normalize_exception(
- self,
- exc: Exception,
- ) -> WorkflowServiceBatchResponse:
- error_status = None
-
- if isinstance(exc, ErrorStatus):
- error_status = WorkflowServiceStatus(
- type=exc.type,
- code=exc.code,
- message=exc.message,
- stacktrace=exc.stacktrace,
- )
- else:
-            error_type = "https://docs.agenta.ai/errors#v1:sdk:unknown-workflow-invoke-error"
-
-            code = getattr(exc, "status_code", 500)
-
- if code in [401, 403]:
- code = 424
-
- message = str(exc) or "Internal Server Error"
-
- stacktrace = format_exception(
- exc, # type: ignore
- value=exc,
- tb=exc.__traceback__,
- )
-
- error_status = WorkflowServiceStatus(
-                type=error_type,
- code=code,
- message=message,
- stacktrace=stacktrace,
- )
-
- trace_id = None
- span_id = None
-
- with suppress():
- link = (TracingContext.get().link) or {}
-
- _trace_id = link.get("trace_id") if link else None # in int format
- _span_id = link.get("span_id") if link else None # in int format
-
- trace_id = UUID(int=_trace_id).hex if _trace_id else None
- span_id = UUID(int=_span_id).hex[16:] if _span_id else None
-
- error_response = WorkflowServiceBatchResponse(
- status=error_status,
- trace_id=trace_id,
- span_id=span_id,
- )
-
- return error_response
-
- async def __call__(
- self,
- request: WorkflowServiceRequest,
- call_next: Callable[[WorkflowServiceRequest], Any],
- ):
- ctx = RunningContext.get()
- handler = ctx.handler
-
- if not handler:
- raise RuntimeError("NormalizerMiddleware: no handler set in context")
-
- kwargs = await self._normalize_request(request, handler)
-
- try:
- response = handler(**kwargs)
-
- normalized = await self._normalize_response(response)
-
- except Exception as exception:
- normalized = await self._normalize_exception(exception)
-
-        return normalized
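
The request-normalization rule documented above is easiest to see in miniature: inspect the handler's signature and pull each parameter out of the request's inputs, defaulting misses to `None`. A reduced sketch with a plain dict standing in for `WorkflowServiceRequest` (and skipping the `request`/`DATA_FIELDS`/`**kwargs` special cases):

```python
import inspect
from typing import Any, Callable, Dict


def normalize_request(inputs: Dict[str, Any], handler: Callable) -> Dict[str, Any]:
    kwargs: Dict[str, Any] = {}
    for name in inspect.signature(handler).parameters:
        kwargs[name] = inputs.get(name)  # unmatched parameters default to None
    return kwargs


def my_workflow(question: str, context: str = ""):
    return f"{question} | {context}"


kwargs = normalize_request({"question": "What is RAG?"}, my_workflow)
assert kwargs == {"question": "What is RAG?", "context": None}
```
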
diff --git a/sdk/agenta/sdk/middlewares/running/resolver.py b/sdk/agenta/sdk/middlewares/running/resolver.py
deleted file mode 100644
index 8081313c75..0000000000
--- a/sdk/agenta/sdk/middlewares/running/resolver.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# /agenta/sdk/middlewares/running/resolver.py
-from typing import Callable, Any, Optional
-
-from agenta.sdk.utils.logging import get_module_logger
-from agenta.sdk.models.workflows import (
- WorkflowServiceRequestData,
- WorkflowServiceResponseData,
- WorkflowServiceRequest,
- WorkflowServiceInterface,
- WorkflowServiceConfiguration,
-)
-from agenta.sdk.contexts.running import RunningContext
-from agenta.sdk.workflows.utils import (
- retrieve_handler,
- retrieve_interface,
- retrieve_configuration,
-)
-from agenta.sdk.workflows.errors import InvalidInterfaceURIV0Error
-
-
-log = get_module_logger(__name__)
-
-
-async def resolve_interface(
- *,
- request: Optional[WorkflowServiceRequest] = None,
- interface: Optional[WorkflowServiceInterface] = None,
-) -> Optional[WorkflowServiceInterface]:
- """Resolve the workflow service interface from multiple sources.
-
- Checks for interface in this priority order:
- 1. Provided interface parameter
- 2. Interface from the request
- 3. Interface from the RunningContext
-
- Args:
- request: Optional workflow service request that may contain an interface
- interface: Optional interface to use directly
-
- Returns:
- The resolved WorkflowServiceInterface or None if not found
- """
- if interface is not None:
- return interface
-
- if request and request.interface:
- return request.interface
-
- ctx = RunningContext.get()
- return ctx.interface
-
-
-async def resolve_configuration(
- *,
- request: Optional[WorkflowServiceRequest] = None,
- configuration: Optional[WorkflowServiceConfiguration] = None,
-) -> Optional[WorkflowServiceConfiguration]:
- """Resolve workflow parameters from multiple sources.
-
- Checks for parameters in this priority order:
- 1. Provided parameters parameter
- 2. Parameters from request.data.parameters
- 3. Parameters from the RunningContext
-
- Args:
- request: Optional workflow service request that may contain parameters
- parameters: Optional parameters dict to use directly
-
- Returns:
- The resolved parameters dict or None if not found
- """
- if configuration is not None:
- return configuration
-
- if request and request.configuration:
- return request.configuration
-
- ctx = RunningContext.get()
- return ctx.configuration
-
-
-async def resolve_handler(
- *,
- uri: Optional[str] = None,
-):
- """Retrieve and validate a workflow handler by its URI.
-
- Looks up a registered handler function using the provided URI.
- Raises an exception if the URI is None or if no handler is found.
-
- Args:
- uri: The service URI identifying the handler to retrieve
-
- Returns:
- The resolved handler callable
-
- Raises:
- InvalidInterfaceURIV0Error: If uri is None or if no handler found for the URI
- """
- if uri is None:
- raise InvalidInterfaceURIV0Error(got="None")
-
- handler = retrieve_handler(uri)
-
- if handler is None:
- raise InvalidInterfaceURIV0Error(got=uri)
-
- return handler
-
-
-class ResolverMiddleware:
- """Middleware that resolves workflow components before execution.
-
- This middleware is responsible for resolving three critical components needed
- to execute a workflow:
-
- 1. **Interface**: The WorkflowServiceInterface containing the service URI and schemas
- 2. **Parameters**: Configuration parameters for the workflow
- 3. **Handler**: The actual callable function that implements the workflow logic
-
- The middleware resolves these components from various sources (request, context, registry)
- and stores them in the RunningContext for downstream middleware and the handler to use.
- It also ensures the request.data.parameters is populated for the workflow execution.
- """
-
- async def __call__(
- self,
- request: WorkflowServiceRequest,
- call_next: Callable[[WorkflowServiceRequest], Any],
- ):
- """Resolve workflow components and populate the running context.
-
- Args:
- request: The workflow service request being processed
- call_next: The next middleware or handler in the chain
-
- Returns:
- The result from calling the next middleware/handler in the chain
-
- Raises:
- InvalidInterfaceURIV0Error: If the handler cannot be resolved from the interface URI
- """
- interface = await resolve_interface(request=request)
- configuration = await resolve_configuration(request=request)
- handler = await resolve_handler(uri=(interface.uri if interface else None))
-
- ctx = RunningContext.get()
- ctx.interface = interface
- ctx.configuration = configuration
- ctx.handler = handler
-
- if not request.data:
- request.data = WorkflowServiceRequestData()
-
-        request.data.parameters = request.data.parameters or (
-            configuration.parameters if configuration else None
-        )
-
- return await call_next(request)
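
All three `resolve_*` helpers above share the same shape: explicit argument first, then the request, then the ambient context. Reduced to a hedged one-liner per component (plain values stand in for the SDK models):

```python
from typing import Any, Optional


def resolve(explicit: Optional[Any], from_request: Optional[Any], from_context: Optional[Any]) -> Optional[Any]:
    # Priority order: explicit argument > request > running context.
    if explicit is not None:
        return explicit
    if from_request is not None:
        return from_request
    return from_context


assert resolve(None, {"uri": "workflow:v1"}, {"uri": "fallback"}) == {"uri": "workflow:v1"}
assert resolve(None, None, {"uri": "fallback"}) == {"uri": "fallback"}
```
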
diff --git a/sdk/agenta/sdk/middlewares/running/vault.py b/sdk/agenta/sdk/middlewares/running/vault.py
deleted file mode 100644
index f35a233e95..0000000000
--- a/sdk/agenta/sdk/middlewares/running/vault.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from os import getenv
-from json import dumps
-from typing import Callable, Dict, Optional, List, Any
-
-import httpx
-
-from agenta.sdk.utils.constants import TRUTHY
-from agenta.sdk.utils.cache import TTLLRUCache
-from agenta.sdk.utils.exceptions import suppress, display_exception
-
-from agenta.sdk.models.workflows import WorkflowServiceRequest
-from agenta.sdk.contexts.running import RunningContext
-
-from agenta.client.backend.types import SecretDto as SecretDTO
-from agenta.client.backend.types import (
- StandardProviderKind,
- StandardProviderDto as StandardProviderDTO,
- StandardProviderSettingsDto as StandardProviderSettingsDTO,
-)
-
-import agenta as ag
-
-
-_PROVIDER_KINDS = []
-
-for provider_kind in StandardProviderKind.__args__[0].__args__: # type: ignore
- _PROVIDER_KINDS.append(provider_kind)
-
-_CACHE_ENABLED = (
- getenv("AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED", "true").lower() in TRUTHY
-)
-
-_cache = TTLLRUCache()
-
-
-async def get_secrets(api_url, credentials) -> list:
- headers = None
- if credentials:
- headers = {"Authorization": credentials}
-
- _hash = dumps(
- {
- "headers": headers,
- },
- sort_keys=True,
- )
-
- if _CACHE_ENABLED:
- secrets_cache = _cache.get(_hash)
-
- if secrets_cache:
- secrets = secrets_cache.get("secrets")
-
- return secrets
-
- local_secrets: List[Dict[str, Any]] = []
-
- try:
- for provider_kind in _PROVIDER_KINDS:
- provider = provider_kind
- key_name = f"{provider.upper()}_API_KEY"
- key = getenv(key_name)
-
- if not key:
- continue
-
- secret = SecretDTO(
- kind="provider_key", # type: ignore
- data=StandardProviderDTO(
- kind=provider,
- provider=StandardProviderSettingsDTO(key=key),
- ),
- )
-
- local_secrets.append(secret.model_dump())
- except: # pylint: disable=bare-except
- display_exception("Vault: Local Secrets Exception")
-
- vault_secrets: List[Dict[str, Any]] = []
-
- try:
- async with httpx.AsyncClient() as client:
- response = await client.get(
- f"{api_url}/vault/v1/secrets",
- headers=headers,
- )
-
- if response.status_code != 200:
- vault_secrets = []
-
- else:
- vault_secrets = response.json()
- except: # pylint: disable=bare-except
- display_exception("Vault: Vault Secrets Exception")
-
- secrets = local_secrets + vault_secrets
-
- standard_secrets = {}
- custom_secrets = []
-
- if local_secrets:
- for secret in local_secrets:
- standard_secrets[secret["data"]["kind"]] = secret # type: ignore
-
- if vault_secrets:
- for secret in vault_secrets:
- if secret["kind"] == "provider_key": # type: ignore
- standard_secrets[secret["data"]["kind"]] = secret # type: ignore
- elif secret["kind"] == "custom_provider": # type: ignore
- custom_secrets.append(secret)
-
- standard_secrets = list(standard_secrets.values())
-
- secrets = standard_secrets + custom_secrets
-
- _cache.put(_hash, {"secrets": secrets})
-
- return secrets
-
-
-class VaultMiddleware:
- async def __call__(
- self,
- request: WorkflowServiceRequest,
- call_next: Callable[[WorkflowServiceRequest], Any],
- ):
- api_url = f"{ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host}/api"
-
- with suppress():
- ctx = RunningContext.get()
- credentials = ctx.credentials
-
- secrets = await get_secrets(api_url, credentials)
-
- ctx.secrets = secrets
-
- return await call_next(request)
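
The merge rule in the removed `get_secrets()` is worth spelling out: standard provider secrets are deduplicated by provider kind, with vault entries overriding local env ones (they are written into the dict last), while custom providers pass through untouched. A hedged, self-contained re-statement of that logic:

```python
from typing import Any, Dict, List


def merge_secrets(local: List[Dict[str, Any]], vault: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    standard: Dict[str, Dict[str, Any]] = {}
    custom: List[Dict[str, Any]] = []

    for secret in local:
        standard[secret["data"]["kind"]] = secret
    for secret in vault:
        if secret["kind"] == "provider_key":
            standard[secret["data"]["kind"]] = secret  # vault wins per kind
        elif secret["kind"] == "custom_provider":
            custom.append(secret)

    return list(standard.values()) + custom


local = [{"kind": "provider_key", "data": {"kind": "openai", "provider": {"key": "env"}}}]
vault = [{"kind": "provider_key", "data": {"kind": "openai", "provider": {"key": "vault"}}}]
assert merge_secrets(local, vault)[0]["data"]["provider"]["key"] == "vault"
```
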
diff --git a/sdk/agenta/sdk/models/__init__.py b/sdk/agenta/sdk/models/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sdk/agenta/sdk/models/blobs.py b/sdk/agenta/sdk/models/blobs.py
deleted file mode 100644
index 188dbe2601..0000000000
--- a/sdk/agenta/sdk/models/blobs.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from typing import Optional
-from uuid import UUID
-
-
-from agenta.sdk.models.shared import (
- TraceID,
- SpanID,
- Link,
- Identifier,
- Slug,
- Version,
- Reference,
- Lifecycle,
- Header,
- Flags,
- Tags,
- Meta,
- Metadata,
- Data,
- Commit,
- AliasConfig,
- sync_alias,
-)
-
-
-class Blob(Identifier, Lifecycle):
- flags: Optional[Flags] = None # type: ignore
- tags: Optional[Tags] = None # type: ignore
- meta: Optional[Meta] = None # type: ignore
-
- data: Optional[Data] = None # type: ignore
-
- set_id: Optional[UUID] = None
diff --git a/sdk/agenta/sdk/models/evaluations.py b/sdk/agenta/sdk/models/evaluations.py
deleted file mode 100644
index 38c22cf6cd..0000000000
--- a/sdk/agenta/sdk/models/evaluations.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from typing import Dict, List, Optional, Union, Literal, Callable, Any
-from enum import Enum
-from uuid import UUID
-from datetime import datetime
-
-from pydantic import BaseModel
-
-from agenta.sdk.models.shared import (
- TraceID,
- SpanID,
- Link,
- Identifier,
- Slug,
- Version,
- Reference,
- Lifecycle,
- Header,
- Flags,
- Tags,
- Meta,
- Metadata,
- Data,
- Commit,
- AliasConfig,
- sync_alias,
-)
-
-
-# ------------------------------------------------------------------------------
-
-
-Origin = Literal["custom", "human", "auto"]
-# Target = Union[List[UUID], Dict[UUID, Origin], List[Callable]]
-Target = Union[
- List[List[Dict[str, Any]]], # testcases_data
- List[Callable], # workflow_handlers
- List[UUID], # entity_ids
- Dict[UUID, Origin], # entity_ids with origins
-]
-
-
-# oss.src.core.evaluations.types
-
-
-class EvaluationStatus(str, Enum):
- PENDING = "pending"
- QUEUED = "queued"
- RUNNING = "running"
- SUCCESS = "success"
- FAILURE = "failure"
- ERRORS = "errors"
- CANCELLED = "cancelled"
-
-
-class EvaluationRunFlags(BaseModel):
- is_closed: Optional[bool] = None # Indicates if the run is immutable
- is_live: Optional[bool] = None # Indicates if the run is updated periodically
- is_active: Optional[bool] = None # Indicates if the run is currently active
-
-
-class SimpleEvaluationFlags(EvaluationRunFlags):
- pass
-
-
-SimpleEvaluationStatus = EvaluationStatus
-
-
-class SimpleEvaluationData(BaseModel):
- status: Optional[SimpleEvaluationStatus] = None
-
- query_steps: Optional[Target] = None
- testset_steps: Optional[Target] = None
- application_steps: Optional[Target] = None
- evaluator_steps: Optional[Target] = None
-
- repeats: Optional[int] = None
-
-
-class EvaluationRun(BaseModel):
- id: UUID
-
-
-class EvaluationScenario(BaseModel):
- id: UUID
-
- run_id: UUID
-
-
-class EvaluationResult(BaseModel):
- id: UUID
-
- run_id: UUID
- scenario_id: UUID
- step_key: str
-
- testcase_id: Optional[UUID] = None
- trace_id: Optional[UUID] = None
- error: Optional[dict] = None
-
- flags: Optional[Dict[str, Any]] = None
- tags: Optional[Dict[str, Any]] = None
- meta: Optional[Dict[str, Any]] = None
-
-
-class EvaluationMetrics(Identifier, Lifecycle):
- flags: Optional[Dict[str, Any]] = None
- tags: Optional[Dict[str, Any]] = None
- meta: Optional[Dict[str, Any]] = None
-
- status: Optional[EvaluationStatus] = None
-
- timestamp: Optional[datetime] = None
- interval: Optional[int] = None
-
- data: Optional[Data] = None
-
- scenario_id: Optional[UUID] = None
-
- run_id: UUID
diff --git a/sdk/agenta/sdk/models/git.py b/sdk/agenta/sdk/models/git.py
deleted file mode 100644
index 57c2028038..0000000000
--- a/sdk/agenta/sdk/models/git.py
+++ /dev/null
@@ -1,126 +0,0 @@
-from typing import List, Optional
-from uuid import UUID
-
-from pydantic import BaseModel, Field
-
-from agenta.sdk.models.shared import (
- TraceID,
- SpanID,
- Link,
- Identifier,
- Slug,
- Version,
- Reference,
- Lifecycle,
- Header,
- Flags,
- Tags,
- Meta,
- Metadata,
- Data,
- Commit,
- AliasConfig,
- sync_alias,
-)
-
-
-# artifacts --------------------------------------------------------------------
-
-
-class Artifact(Identifier, Slug, Lifecycle, Header, Metadata):
- pass
-
-
-class ArtifactCreate(Slug, Header, Metadata):
- pass
-
-
-class ArtifactEdit(Identifier, Header, Metadata):
- pass
-
-
-class ArtifactQuery(Metadata):
- pass
-
-
-# variants ---------------------------------------------------------------------
-
-
-class Variant(Identifier, Slug, Lifecycle, Header, Metadata):
- artifact_id: Optional[UUID] = None
-
-
-class VariantCreate(Slug, Header, Metadata):
- artifact_id: Optional[UUID] = None
-
-
-class VariantEdit(Identifier, Header, Metadata):
- pass
-
-
-class VariantQuery(Metadata):
- pass
-
-
-# revisions --------------------------------------------------------------------
-
-
-class Revision(Identifier, Slug, Version, Lifecycle, Header, Metadata, Commit):
- data: Optional[Data] = None
-
- artifact_id: Optional[UUID] = None
- variant_id: Optional[UUID] = None
-
-
-class RevisionCreate(Slug, Header, Metadata):
- artifact_id: Optional[UUID] = None
- variant_id: Optional[UUID] = None
-
-
-class RevisionEdit(Identifier, Header, Metadata):
- pass
-
-
-class RevisionQuery(Metadata):
- authors: Optional[List[UUID]] = None
-
-
-class RevisionCommit(Slug, Header, Metadata):
- data: Optional[Data] = None
-
- message: Optional[str] = None
-
- artifact_id: Optional[UUID] = None
- variant_id: Optional[UUID] = None
-
-
-class RevisionsLog(BaseModel):
- artifact_id: Optional[UUID] = None
- variant_id: Optional[UUID] = None
- revision_id: Optional[UUID] = None
-
- depth: Optional[int] = None
-
-
-# forks ------------------------------------------------------------------------
-
-
-class RevisionFork(Slug, Header, Metadata):
- data: Optional[Data] = None
-
- message: Optional[str] = None
-
-
-class VariantFork(Slug, Header, Metadata):
- pass
-
-
-class ArtifactFork(RevisionsLog):
- variant: Optional[VariantFork] = None
- revision: Optional[RevisionFork] = None
diff --git a/sdk/agenta/sdk/models/shared.py b/sdk/agenta/sdk/models/shared.py
deleted file mode 100644
index 4425e0fd50..0000000000
--- a/sdk/agenta/sdk/models/shared.py
+++ /dev/null
@@ -1,167 +0,0 @@
-from typing import Optional, Dict, List, Union, Literal
-from typing_extensions import TypeAliasType
-from datetime import datetime
-from uuid import UUID
-from re import match
-
-from pydantic import BaseModel, field_validator
-
-BoolJson = TypeAliasType( # type: ignore
- "BoolJson",
- Union[bool, Dict[str, "BoolJson"]], # type: ignore
-)
-
-StringJson = TypeAliasType( # type: ignore
- "StringJson",
- Union[str, Dict[str, "StringJson"]], # type: ignore
-)
-
-FullJson = TypeAliasType( # type: ignore
- "FullJson",
- Union[str, int, float, bool, None, Dict[str, "FullJson"], List["FullJson"]], # type: ignore
-)
-
-NumericJson = TypeAliasType( # type: ignore
- "NumericJson",
- Union[int, float, Dict[str, "NumericJson"]], # type: ignore
-)
-
-NoListJson = TypeAliasType( # type: ignore
- "NoListJson",
- Union[str, int, float, bool, None, Dict[str, "NoListJson"]], # type: ignore
-)
-
-LabelJson = TypeAliasType( # type: ignore
- "LabelJson",
- Union[bool, str, Dict[str, "LabelJson"]], # type: ignore
-)
-
-Json = Dict[str, FullJson] # type: ignore
-
-Data = Dict[str, FullJson] # type: ignore
-
-Flags = Dict[str, LabelJson] # type: ignore
-
-Tags = Dict[str, LabelJson] # type: ignore
-
-Meta = Dict[str, FullJson] # type: ignore
-
-Hashes = Dict[str, StringJson] # type: ignore
-
-Metrics = Dict[str, NumericJson] # type: ignore
-
-Schema = Dict[str, FullJson] # type: ignore
-
-Mappings = Dict[str, str]
-
-
-class Lifecycle(BaseModel):
- created_at: Optional[datetime] = None
- updated_at: Optional[datetime] = None
- deleted_at: Optional[datetime] = None
-
- created_by_id: Optional[UUID] = None
- updated_by_id: Optional[UUID] = None
- deleted_by_id: Optional[UUID] = None
-
-
-class TraceID(BaseModel):
- trace_id: Optional[str] = None
-
-
-class SpanID(BaseModel):
- span_id: Optional[str] = None
-
-
-class Link(TraceID, SpanID):
- pass
-
-
-class Identifier(BaseModel):
- id: Optional[UUID] = None
-
-
-class Slug(BaseModel):
- slug: Optional[str] = None
-
- @field_validator("slug")
- def check_url_safety(cls, v):
- if v is not None:
- if not match(r"^[a-zA-Z0-9_-]+$", v):
- raise ValueError("slug must be URL-safe.")
- return v
-
-
-class Version(BaseModel):
- version: Optional[str] = None
-
-
-class Reference(Identifier, Slug, Version):
- pass
-
-
-class Header(BaseModel):
- name: Optional[str] = None
- description: Optional[str] = None
-
-
-class Metadata(BaseModel):
- flags: Optional[Flags] = None
- tags: Optional[Tags] = None
- meta: Optional[Meta] = None
-
-
-class Commit(BaseModel):
- author: Optional[UUID] = None
- date: Optional[datetime] = None
- message: Optional[str] = None
-
-
-class Status(BaseModel):
- code: Optional[int] = 200
- message: Optional[str] = "Success"
-
-
-class AliasConfig(BaseModel):
- model_config = {
- "populate_by_name": True,
- "from_attributes": True,
- }
-
-
-def sync_alias(primary: str, alias: str, instance: BaseModel) -> None:
- primary_val = getattr(instance, primary)
- alias_val = getattr(instance, alias)
-
- if primary_val and alias_val is None:
- object.__setattr__(instance, alias, primary_val)
- elif alias_val and primary_val is None:
- object.__setattr__(instance, primary, alias_val)
-
-
-class Windowing(BaseModel):
- # RANGE
- newest: Optional[datetime] = None
- oldest: Optional[datetime] = None
- # TOKEN
- next: Optional[UUID] = None
- # LIMIT
- limit: Optional[int] = None
- # ORDER
- order: Optional[Literal["ascending", "descending"]] = None
- # BUCKETS
- interval: Optional[int] = None
- # SAMPLES
- rate: Optional[float] = None
-
- @field_validator("rate")
- def check_rate(cls, v):
- if v is not None and (v < 0.0 or v > 1.0):
- raise ValueError("Sampling rate must be between 0.0 and 1.0.")
- return v
-
- @field_validator("interval")
- def check_interval(cls, v):
- if v is not None and v <= 0:
- raise ValueError("Bucket interval must be a positive integer.")
- return v
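
The `sync_alias()` helper above underpins the alias models in the files that follow: whichever of the two fields is set first is mirrored onto the other, so a model can expose a domain-specific name alongside a generic one. A hedged, self-contained illustration (a simplified model, not one of the SDK's):

```python
from typing import Optional
from uuid import UUID, uuid4

from pydantic import BaseModel


def sync_alias(primary: str, alias: str, instance: BaseModel) -> None:
    primary_val = getattr(instance, primary)
    alias_val = getattr(instance, alias)
    if primary_val and alias_val is None:
        object.__setattr__(instance, alias, primary_val)
    elif alias_val and primary_val is None:
        object.__setattr__(instance, primary, alias_val)


class Example(BaseModel):
    testset_id: Optional[UUID] = None
    set_id: Optional[UUID] = None

    def model_post_init(self, __context) -> None:
        sync_alias("testset_id", "set_id", self)


some_id = uuid4()
assert Example(testset_id=some_id).set_id == some_id
```
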
diff --git a/sdk/agenta/sdk/models/testsets.py b/sdk/agenta/sdk/models/testsets.py
deleted file mode 100644
index 25b73f0224..0000000000
--- a/sdk/agenta/sdk/models/testsets.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from typing import List, Optional, Dict, Any
-from uuid import UUID
-
-from pydantic import BaseModel, Field
-
-from agenta.sdk.models.shared import (
- TraceID,
- SpanID,
- Link,
- Identifier,
- Slug,
- Version,
- Reference,
- Lifecycle,
- Header,
- Flags,
- Tags,
- Meta,
- Metadata,
- Data,
- Commit,
- AliasConfig,
- sync_alias,
-)
-
-from agenta.sdk.models.git import (
- Artifact,
- ArtifactCreate,
- ArtifactEdit,
- ArtifactQuery,
- ArtifactFork,
- Variant,
- VariantCreate,
- VariantEdit,
- VariantQuery,
- VariantFork,
- Revision,
- RevisionCreate,
- RevisionEdit,
- RevisionQuery,
- RevisionCommit,
- RevisionsLog,
- RevisionFork,
-)
-
-from agenta.sdk.models.blobs import (
- Blob,
-)
-
-
-class TestsetIdAlias(AliasConfig):
- testset_id: Optional[UUID] = None
- set_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="testset_id",
- )
-
-
-class TestsetVariantIdAlias(AliasConfig):
- testset_variant_id: Optional[UUID] = None
- variant_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="testset_variant_id",
- )
-
-
-class Testcase(Blob, TestsetIdAlias):
- def model_post_init(self, __context) -> None:
- sync_alias("testset_id", "set_id", self)
-
-
-class TestsetFlags(BaseModel):
- has_testcases: Optional[bool] = None
- has_traces: Optional[bool] = None
-
-
-class TestsetRevisionData(BaseModel):
- testcase_ids: Optional[List[UUID]] = None
- testcases: Optional[List[Testcase]] = None
-
-
-class SimpleTestset(
- Identifier,
- Slug,
- Lifecycle,
- Header,
-):
- flags: Optional[TestsetFlags] = None
- tags: Optional[Tags] = None # type: ignore
- meta: Optional[Meta] = None # type: ignore
-
- data: Optional[TestsetRevisionData] = None
-
-
-class Testset(Artifact):
- flags: Optional[TestsetFlags] = None # type: ignore
-
-
-class TestsetRevision(
- Revision,
- TestsetIdAlias,
- TestsetVariantIdAlias,
-):
- flags: Optional[TestsetFlags] = None # type: ignore
-
- data: Optional[TestsetRevisionData] = None # type: ignore
-
- def model_post_init(self, __context) -> None:
- sync_alias("testset_id", "artifact_id", self)
- sync_alias("testset_variant_id", "variant_id", self)
-
-
-class SimpleTestsetCreate(Slug, Header):
- tags: Optional[Tags] = None # type: ignore
- meta: Optional[Meta] = None # type: ignore
- data: Optional[TestsetRevisionData] = None
-
-
-class SimpleTestsetEdit(
- Identifier,
- Header,
-):
- # flags: Optional[TestsetFlags] = None
- tags: Optional[Tags] = None # type: ignore
- meta: Optional[Meta] = None # type: ignore
-
- data: Optional[TestsetRevisionData] = None
-
-
-class TestsetResponse(BaseModel):
- count: int = 0
- testset: Optional[Testset] = None
-
-
-class TestsetRevisionResponse(BaseModel):
- count: int = 0
- testset_revision: Optional[TestsetRevision] = None
-
-
-class SimpleTestsetResponse(BaseModel):
- count: int = 0
- testset: Optional[SimpleTestset] = None
-
-
-class TestsetsResponse(BaseModel):
- count: int = 0
- testsets: List[Testset] = []
-
-
-class SimpleTestsetsResponse(BaseModel):
- count: int = 0
- testsets: List[SimpleTestset] = []
-
-
-# LEGACY TESTSETS --------------------------------------------------------------
-
-
-class LegacyTestset(BaseModel):
- id: str
- name: Optional[str] = None
- csvdata: Optional[List[Dict[str, Any]]] = None
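
The `TestsetIdAlias` pattern above combines two pydantic features: the legacy field accepts the new name via `alias=...` and is hidden from serialized output via `exclude=True`, with `sync_alias` (see shared.py above) mirroring whichever side was set. A hedged sketch of the serialization half:

```python
from typing import Optional
from uuid import UUID, uuid4

from pydantic import BaseModel, Field


class TestsetIdAlias(BaseModel):
    model_config = {"populate_by_name": True}

    testset_id: Optional[UUID] = None
    set_id: Optional[UUID] = Field(default=None, exclude=True, alias="testset_id")


obj = TestsetIdAlias(testset_id=uuid4())
dumped = obj.model_dump()
# The excluded alias never leaks into serialized output.
assert "set_id" not in dumped and dumped["testset_id"] is not None
```
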
diff --git a/sdk/agenta/sdk/models/tracing.py b/sdk/agenta/sdk/models/tracing.py
deleted file mode 100644
index 6b13ad0e0a..0000000000
--- a/sdk/agenta/sdk/models/tracing.py
+++ /dev/null
@@ -1,202 +0,0 @@
-import random
-import string
-from enum import Enum
-from datetime import datetime, timezone
-from typing import List, Dict, Any, Union, Optional
-
-from pydantic import BaseModel, model_validator, Field
-
-
-from agenta.sdk.models.shared import (
- Json,
- Data,
- Flags,
- Tags,
- Meta,
- Metrics,
- Lifecycle,
- TraceID,
- SpanID,
- Identifier,
- Reference,
-)
-
-
-class TraceType(Enum):
- INVOCATION = "invocation"
- ANNOTATION = "annotation"
- UNKNOWN = "unknown"
-
-
-class SpanType(Enum):
- AGENT = "agent"
- CHAIN = "chain"
- WORKFLOW = "workflow"
- TASK = "task"
- TOOL = "tool"
- EMBEDDING = "embedding"
- QUERY = "query"
- LLM = "llm"
- COMPLETION = "completion"
- CHAT = "chat"
- RERANK = "rerank"
- UNKNOWN = "unknown"
-
-
-class AgMetricEntryAttributes(BaseModel):
- cumulative: Optional[Metrics] = None
- incremental: Optional[Metrics] = None
-
-
-class AgMetricsAttributes(BaseModel):
- duration: Optional[AgMetricEntryAttributes] = None
- errors: Optional[AgMetricEntryAttributes] = None
- tokens: Optional[AgMetricEntryAttributes] = None
- costs: Optional[AgMetricEntryAttributes] = None
-
-
-class AgTypeAttributes(BaseModel):
- trace: Optional[TraceType] = TraceType.INVOCATION
- span: Optional[SpanType] = SpanType.TASK
-
-
-class AgDataAttributes(BaseModel):
- inputs: Optional[Dict[str, Any]] = None
- outputs: Optional[Any] = None
- internals: Optional[Dict[str, Any]] = None
-
-
-class AgAttributes(BaseModel):
- type: AgTypeAttributes = Field(default_factory=AgTypeAttributes)
- data: AgDataAttributes = Field(default_factory=AgDataAttributes)
-
- metrics: Optional[AgMetricsAttributes] = None
- flags: Optional[Flags] = None
- tags: Optional[Tags] = None
- meta: Optional[Meta] = None
- exception: Optional[Data] = None
- references: Optional[Dict[str, "OTelReference"]] = None
- unsupported: Optional[Data] = None
-
-
-class OTelStatusCode(Enum):
- STATUS_CODE_UNSET = "STATUS_CODE_UNSET"
- STATUS_CODE_OK = "STATUS_CODE_OK"
- STATUS_CODE_ERROR = "STATUS_CODE_ERROR"
-
-
-class OTelSpanKind(Enum):
- SPAN_KIND_UNSPECIFIED = "SPAN_KIND_UNSPECIFIED"
- SPAN_KIND_INTERNAL = "SPAN_KIND_INTERNAL"
- SPAN_KIND_SERVER = "SPAN_KIND_SERVER"
- SPAN_KIND_CLIENT = "SPAN_KIND_CLIENT"
- SPAN_KIND_PRODUCER = "SPAN_KIND_PRODUCER"
- SPAN_KIND_CONSUMER = "SPAN_KIND_CONSUMER"
-
-
-OTelAttributes = Json
-OTelMetrics = Metrics
-OTelTags = Tags
-
-
-class OTelEvent(BaseModel):
- name: str
- timestamp: Union[datetime, int]
-
- attributes: Optional[OTelAttributes] = None
-
-
-OTelEvents = List[OTelEvent]
-
-
-class OTelHash(Identifier):
- attributes: Optional[OTelAttributes] = None
-
-
-OTelHashes = List[OTelHash]
-
-
-class OTelLink(TraceID, SpanID):
- attributes: Optional[OTelAttributes] = None
-
-
-OTelLinks = List[OTelLink]
-
-
-class OTelReference(Reference):
- attributes: Optional[OTelAttributes] = None
-
-
-OTelReferences = List[OTelReference]
-
-
-class OTelSpansTree(BaseModel):
- spans: Optional["OTelNestedSpans"] = None
-
-
-OTelSpansTrees = List[OTelSpansTree]
-
-
-class OTelFlatSpan(Lifecycle):
- trace_id: str
- span_id: str
- parent_id: Optional[str] = None
-
- trace_type: Optional[TraceType] = None
- span_type: Optional[SpanType] = None
-
- span_kind: Optional[OTelSpanKind] = None
- span_name: Optional[str] = None
-
- start_time: Optional[Union[datetime, int]] = None
- end_time: Optional[Union[datetime, int]] = None
-
- status_code: Optional[OTelStatusCode] = None
- status_message: Optional[str] = None
-
- attributes: Optional[OTelAttributes] = None
- references: Optional[OTelReferences] = None
- links: Optional[OTelLinks] = None
- hashes: Optional[OTelHashes] = None
-
- exception: Optional[Data] = None
-
- events: Optional[OTelEvents] = None
-
- @model_validator(mode="after")
- def set_defaults(self):
- if self.trace_type is None:
- self.trace_type = TraceType.INVOCATION
- if self.span_type is None:
- self.span_type = SpanType.TASK
- if self.span_kind is None:
- self.span_kind = OTelSpanKind.SPAN_KIND_UNSPECIFIED
- if self.status_code is None:
- self.status_code = OTelStatusCode.STATUS_CODE_UNSET
- if self.end_time is None and self.start_time is not None:
- self.end_time = self.start_time
- if self.start_time is None and self.end_time is not None:
- self.start_time = self.end_time
- if self.start_time is None and self.end_time is None:
- now = datetime.now(timezone.utc)
- self.start_time = now
- self.end_time = now
- if self.span_name is None:
- self.span_name = "".join(
- random.choices(string.ascii_letters + string.digits, k=8)
- )
- return self
-
-
-class OTelSpan(OTelFlatSpan, OTelSpansTree):
- pass
-
-
-OTelFlatSpans = List[OTelFlatSpan]
-OTelNestedSpans = Dict[str, Union[OTelSpan, List[OTelSpan]]]
-OTelTraceTree = Dict[str, OTelSpansTree]
-OTelTraceTrees = List[OTelTraceTree]
-OTelSpans = List[OTelSpan]
-
-Attributes = OTelAttributes
-Trace = OTelTraceTree
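
The `set_defaults` validator in `OTelFlatSpan` above is doing quiet but important work: missing timestamps are mirrored from whichever end is present, and both fall back to "now" when neither is given. A hedged, reduced re-creation of just that rule:

```python
from datetime import datetime, timezone
from typing import Optional

from pydantic import BaseModel, model_validator


class Span(BaseModel):
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None

    @model_validator(mode="after")
    def set_defaults(self):
        if self.end_time is None and self.start_time is not None:
            self.end_time = self.start_time
        if self.start_time is None and self.end_time is not None:
            self.start_time = self.end_time
        if self.start_time is None and self.end_time is None:
            now = datetime.now(timezone.utc)
            self.start_time = now
            self.end_time = now
        return self


span = Span()
assert span.start_time == span.end_time  # a zero-duration span, never None
```
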
diff --git a/sdk/agenta/sdk/models/workflows.py b/sdk/agenta/sdk/models/workflows.py
deleted file mode 100644
index 43f4350d7b..0000000000
--- a/sdk/agenta/sdk/models/workflows.py
+++ /dev/null
@@ -1,753 +0,0 @@
-# /agenta/sdk/models/workflows.py
-
-from typing import Any, Dict, Optional, Union, List
-from uuid import UUID
-from urllib.parse import urlparse
-
-from jsonschema.exceptions import SchemaError
-from jsonschema import (
- Draft202012Validator,
- Draft201909Validator,
- Draft7Validator,
- Draft4Validator,
- Draft6Validator,
-)
-from pydantic import (
- BaseModel,
- ConfigDict,
- model_validator,
- ValidationError,
- Field,
-)
-
-from agenta.sdk.models.shared import (
- TraceID,
- SpanID,
- Link,
- Identifier,
- Slug,
- Reference,
- Lifecycle,
- Header,
- Metadata,
- Data,
- Schema,
- Status,
- Commit,
- AliasConfig,
- sync_alias,
-)
-
-from agenta.sdk.models.git import (
- Artifact,
- ArtifactCreate,
- ArtifactEdit,
- ArtifactQuery,
- ArtifactFork,
- Variant,
- VariantCreate,
- VariantEdit,
- VariantQuery,
- VariantFork,
- Revision,
- RevisionCreate,
- RevisionEdit,
- RevisionQuery,
- RevisionCommit,
- RevisionsLog,
- RevisionFork,
-)
-
-
-# oss.src.core.workflows.dtos
-from uuid import uuid4
-
-
-class JsonSchemas(BaseModel):
- parameters: Optional[Schema] = None
- inputs: Optional[Schema] = None
- outputs: Optional[Schema] = None
-
-
-class WorkflowFlags(BaseModel):
- is_custom: bool = False
- is_evaluator: bool = False
- is_human: bool = False
-
-
-class WorkflowServiceInterface(BaseModel):
- version: str = "2025.07.14"
-
- uri: Optional[str] = None
- url: Optional[str] = None
- headers: Optional[Dict[str, Union[str, Reference]]] = None
- schemas: Optional[JsonSchemas] = None
-
- @model_validator(mode="after")
- def validate_jsonschemas_and_url(self) -> "WorkflowServiceInterface":
- errors = []
-
- if self.schemas:
- for key, schema in self.schemas.model_dump().items():
- try:
- if not schema:
- continue
-
- validator_class = self._get_validator_class_from_schema(schema)
- validator_class.check_schema(schema)
- except SchemaError as e:
- errors.append(
- {
- "loc": ("schemas", key),
- "msg": f"Invalid JSON Schema: {e.message}",
- "type": "value_error.jsonschema",
- "ctx": {"error": str(e)},
- "input": schema,
- }
- )
-
- if self.url:
- if not self._is_valid_http_url(self.url):
- errors.append(
- {
- "loc": ("url",),
- "msg": "Invalid HTTP(S) URL",
- "type": "value_error.url",
- "ctx": {"error": "Invalid URL format"},
- "input": self.url,
- }
- )
-
- if errors:
- raise ValidationError.from_exception_data(
- self.__class__.__name__,
- errors, # type: ignore
- )
-
- return self
-
- @staticmethod
- def _get_validator_class_from_schema(schema: Dict[str, Any]):
- """Detect JSON Schema draft from $schema or fallback to 2020-12."""
- schema_uri = schema.get(
- "$schema", "https://json-schema.org/draft/2020-12/schema"
- )
- if "2020-12" in schema_uri:
- return Draft202012Validator
- elif "2019-09" in schema_uri:
- return Draft201909Validator
- elif "draft-07" in schema_uri:
- return Draft7Validator
- elif "draft-06" in schema_uri:
- return Draft6Validator
- elif "draft-04" in schema_uri:
- return Draft4Validator
- else:
- return Draft202012Validator
-
- @staticmethod
- def _is_valid_http_url(url: str) -> bool:
- parsed = urlparse(url)
- return parsed.scheme in ("http", "https") and bool(parsed.netloc)
-
-
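
One detail of the validator above that is easy to miss: schemas that carry no `$schema` key are checked against Draft 2020-12 by default. A hedged, standalone sketch of that dispatch rule (reusing the `jsonschema` validators the model imports; this is not the SDK's actual method):

```python
from jsonschema import Draft7Validator, Draft202012Validator


def validator_for(schema: dict):
    uri = schema.get("$schema", "https://json-schema.org/draft/2020-12/schema")
    if "draft-07" in uri:
        return Draft7Validator
    return Draft202012Validator  # default when no draft is declared


assert validator_for({"$schema": "http://json-schema.org/draft-07/schema#"}) is Draft7Validator
assert validator_for({"type": "object"}) is Draft202012Validator
```
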
-class WorkflowServiceConfiguration(BaseModel):
- script: Optional[Data] = None
- parameters: Optional[Data] = None
-
-
-class WorkflowRevisionData(
- WorkflowServiceInterface,
- WorkflowServiceConfiguration,
-):
- pass
-
-
-class WorkflowServiceStatus(Status):
- type: Optional[str] = None
- stacktrace: Optional[Union[list[str], str]] = None
-
-
-class WorkflowServiceRequestData(BaseModel):
- revision: Optional[dict] = None
- parameters: Optional[dict] = None
- #
- testcase: Optional[dict] = None
- inputs: Optional[dict] = None
- #
- trace: Optional[dict] = None
- outputs: Optional[Any] = None
-
-
-class WorkflowServiceResponseData(BaseModel):
- outputs: Optional[Any] = None
-
-
-class WorkflowServiceBaseRequest(Metadata):
- version: str = "2025.07.14"
-
- interface: Optional[Union[WorkflowServiceInterface, Dict[str, Any]]] = None
- configuration: Optional[Union[WorkflowServiceConfiguration, Dict[str, Any]]] = None
-
- references: Optional[Dict[str, Union[Reference, Dict[str, Any]]]] = None
- links: Optional[Dict[str, Union[Link, Dict[str, Any]]]] = None
-
- secrets: Optional[Dict[str, Any]] = None
- credentials: Optional[str] = None
-
- @model_validator(mode="before")
- def _coerce_nested_models(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- """Convert dicts into their respective Pydantic models."""
- if "interface" in values and isinstance(values["interface"], dict):
- values["interface"] = WorkflowServiceInterface(**values["interface"])
-
- if "configuration" in values and isinstance(values["configuration"], dict):
- values["configuration"] = WorkflowServiceConfiguration(
- **values["configuration"]
- )
-
- if "references" in values and isinstance(values["references"], dict):
- values["references"] = {
- k: (Reference(**v) if isinstance(v, dict) else v)
- for k, v in values["references"].items()
- }
-
- if "links" in values and isinstance(values["links"], dict):
- values["links"] = {
- k: (Link(**v) if isinstance(v, dict) else v)
- for k, v in values["links"].items()
- }
-
- return values
-
-
-class WorkflowServiceRequest(WorkflowServiceBaseRequest):
- data: Optional[WorkflowServiceRequestData] = None
-
-
-class WorkflowServiceBaseResponse(TraceID, SpanID):
- version: str = "2025.07.14"
-
- status: Optional[WorkflowServiceStatus] = WorkflowServiceStatus()
-
-
-class WorkflowServiceBatchResponse(WorkflowServiceBaseResponse):
- data: Optional[WorkflowServiceResponseData] = None
-
-
-class WorkflowServiceStreamResponse(WorkflowServiceBaseResponse):
- generator: Any # Callable[[], AsyncGenerator[Any, None]]
-
- model_config = ConfigDict(arbitrary_types_allowed=True)
-
- async def iterator(self):
- async for item in self.generator():
- yield item
-
-
-WorkflowServiceResponse = Union[
- WorkflowServiceBatchResponse,
- WorkflowServiceStreamResponse,
-]
-
-
-# aliases ----------------------------------------------------------------------
-
-
-class WorkflowIdAlias(AliasConfig):
- workflow_id: Optional[UUID] = None
- artifact_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="workflow_id",
- )
-
-
-class WorkflowVariantIdAlias(AliasConfig):
- workflow_variant_id: Optional[UUID] = None
- variant_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="workflow_variant_id",
- )
-
-
-class WorkflowRevisionIdAlias(AliasConfig):
- workflow_revision_id: Optional[UUID] = None
- revision_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="workflow_revision_id",
- )
-
-
-# workflows --------------------------------------------------------------------
-
-
-class Workflow(Artifact):
- flags: Optional[WorkflowFlags] = None
-
-
-class WorkflowCreate(ArtifactCreate):
- flags: Optional[WorkflowFlags] = None
-
-
-class WorkflowEdit(ArtifactEdit):
- flags: Optional[WorkflowFlags] = None
-
-
-# workflow variants ------------------------------------------------------------
-
-
-class WorkflowVariant(
- Variant,
- WorkflowIdAlias,
-):
- flags: Optional[WorkflowFlags] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("workflow_id", "artifact_id", self)
-
-
-class WorkflowVariantCreate(
- VariantCreate,
- WorkflowIdAlias,
-):
- flags: Optional[WorkflowFlags] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("workflow_id", "artifact_id", self)
-
-
-class WorkflowVariantEdit(VariantEdit):
- flags: Optional[WorkflowFlags] = None
-
-
-class WorkflowVariantQuery(VariantQuery):
- flags: Optional[WorkflowFlags] = None
-
-
-# workflow revisions -----------------------------------------------------------
-
-from agenta.sdk.models.workflows import WorkflowRevisionData
-
-
-class WorkflowRevision(
- Revision,
- WorkflowIdAlias,
- WorkflowVariantIdAlias,
-):
- flags: Optional[WorkflowFlags] = None
-
- data: Optional[WorkflowRevisionData] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("workflow_id", "artifact_id", self)
- sync_alias("workflow_variant_id", "variant_id", self)
-
-
-class WorkflowRevisionCreate(
- RevisionCreate,
- WorkflowIdAlias,
- WorkflowVariantIdAlias,
-):
- flags: Optional[WorkflowFlags] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("workflow_id", "artifact_id", self)
- sync_alias("workflow_variant_id", "variant_id", self)
-
-
-class WorkflowRevisionEdit(RevisionEdit):
- flags: Optional[WorkflowFlags] = None
-
-
-class WorkflowRevisionQuery(RevisionQuery):
- flags: Optional[WorkflowFlags] = None
-
-
-class WorkflowRevisionCommit(
- RevisionCommit,
- WorkflowIdAlias,
- WorkflowVariantIdAlias,
-):
- flags: Optional[WorkflowFlags] = None
-
- data: Optional[WorkflowRevisionData] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("workflow_id", "artifact_id", self)
- sync_alias("workflow_variant_id", "variant_id", self)
-
-
-class WorkflowRevisionsLog(
- RevisionsLog,
- WorkflowIdAlias,
- WorkflowVariantIdAlias,
- WorkflowRevisionIdAlias,
-):
- def model_post_init(self, __context) -> None:
- sync_alias("workflow_id", "artifact_id", self)
- sync_alias("workflow_variant_id", "variant_id", self)
- sync_alias("workflow_revision_id", "revision_id", self)
-
-
-# forks ------------------------------------------------------------------------
-
-
-class WorkflowRevisionFork(RevisionFork):
- flags: Optional[WorkflowFlags] = None
-
- data: Optional[WorkflowRevisionData] = None
-
-
-class WorkflowRevisionForkAlias(AliasConfig):
- workflow_revision: Optional[WorkflowRevisionFork] = None
-
- revision: Optional[RevisionFork] = Field(
- default=None,
- exclude=True,
- alias="workflow_revision",
- )
-
-
-class WorkflowVariantFork(VariantFork):
- flags: Optional[WorkflowFlags] = None
-
-
-class WorkflowVariantForkAlias(AliasConfig):
- workflow_variant: Optional[WorkflowVariantFork] = None
-
- variant: Optional[VariantFork] = Field(
- default=None,
- exclude=True,
- alias="workflow_variant",
- )
-
-
-class WorkflowFork(
- ArtifactFork,
- WorkflowIdAlias,
- WorkflowVariantIdAlias,
- WorkflowVariantForkAlias,
- WorkflowRevisionIdAlias,
- WorkflowRevisionForkAlias,
-):
- def model_post_init(self, __context) -> None:
- sync_alias("workflow_id", "artifact_id", self)
- sync_alias("workflow_variant_id", "variant_id", self)
- sync_alias("workflow_variant", "variant", self)
- sync_alias("workflow_revision_id", "revision_id", self)
- sync_alias("workflow_revision", "revision", self)
-
-
-# ------------------------------------------------------------------------------
-
-
-class EvaluatorRevision(BaseModel):
- id: Optional[UUID] = None
- slug: Optional[str] = None
- version: Optional[str] = None
-
- data: Optional[WorkflowRevisionData] = None
-
- evaluator_id: Optional[UUID] = None
- evaluator_variant_id: Optional[UUID] = None
-
-
-class ApplicationServiceRequest(WorkflowServiceRequest):
- pass
-
-
-class ApplicationServiceBatchResponse(WorkflowServiceBatchResponse):
- pass
-
-
-class EvaluatorServiceRequest(WorkflowServiceRequest):
- pass
-
-
-class EvaluatorServiceBatchResponse(WorkflowServiceBatchResponse):
- pass
-
-
-# oss.src.core.evaluators.dtos
-
-
-class EvaluatorIdAlias(AliasConfig):
- evaluator_id: Optional[UUID] = None
- workflow_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="evaluator_id",
- )
-
-
-class EvaluatorVariantIdAlias(AliasConfig):
- evaluator_variant_id: Optional[UUID] = None
- workflow_variant_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="evaluator_variant_id",
- )
-
-
-class EvaluatorRevisionData(WorkflowRevisionData):
- pass
-
-
-class EvaluatorFlags(WorkflowFlags):
- def __init__(self, **data):
- data["is_evaluator"] = True
-
- super().__init__(**data)
-
-
-class SimpleEvaluatorFlags(EvaluatorFlags):
- pass
-
-
-class SimpleEvaluatorData(EvaluatorRevisionData):
- pass
-
-
-class Evaluator(Workflow):
- flags: Optional[EvaluatorFlags] = None
-
-
-class SimpleEvaluatorRevision(
- WorkflowRevision,
- EvaluatorIdAlias,
- EvaluatorVariantIdAlias,
-):
- flags: Optional[EvaluatorFlags] = None
-
- data: Optional[EvaluatorRevisionData] = None
-
-
-class SimpleEvaluator(Identifier, Slug, Lifecycle, Header, Metadata):
- flags: Optional[SimpleEvaluatorFlags] = None
-
- data: Optional[SimpleEvaluatorData] = None
-
-
-class SimpleEvaluatorCreate(Slug, Header, Metadata):
- flags: Optional[SimpleEvaluatorFlags] = None
-
- data: Optional[SimpleEvaluatorData] = None
-
-
-class SimpleEvaluatorEdit(Identifier, Header, Metadata):
- flags: Optional[SimpleEvaluatorFlags] = None
-
- data: Optional[SimpleEvaluatorData] = None
-
-
-class SimpleEvaluatorResponse(BaseModel):
- count: int = 0
- evaluator: Optional[SimpleEvaluator] = None
-
-
-class EvaluatorRevisionResponse(BaseModel):
- count: int = 0
- evaluator_revision: Optional[EvaluatorRevision] = None
-
-
-# oss.src.core.applications.dtos
-
-# aliases ----------------------------------------------------------------------
-
-
-class ApplicationIdAlias(AliasConfig):
- application_id: Optional[UUID] = None
- workflow_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="application_id",
- )
-
-
-class ApplicationVariantIdAlias(AliasConfig):
- application_variant_id: Optional[UUID] = None
- workflow_variant_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="application_variant_id",
- )
-
-
-class ApplicationRevisionIdAlias(AliasConfig):
- application_revision_id: Optional[UUID] = None
- workflow_revision_id: Optional[UUID] = Field(
- default=None,
- exclude=True,
- alias="application_revision_id",
- )
-
-
-# globals ----------------------------------------------------------------------
-
-
-class ApplicationFlags(WorkflowFlags):
- def __init__(self, **data):
- data["is_evaluator"] = False
-
- super().__init__(**data)
-
-
-# applications -------------------------------------------------------------------
-
-
-class Application(Workflow):
- flags: Optional[ApplicationFlags] = None
-
-
-class ApplicationCreate(WorkflowCreate):
- flags: Optional[ApplicationFlags] = None
-
-
-class ApplicationEdit(WorkflowEdit):
- flags: Optional[ApplicationFlags] = None
-
-
-# application variants -----------------------------------------------------------
-
-
-class ApplicationVariant(
- WorkflowVariant,
- ApplicationIdAlias,
-):
- flags: Optional[ApplicationFlags] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("application_id", "workflow_id", self)
-
-
-class ApplicationVariantCreate(
- WorkflowVariantCreate,
- ApplicationIdAlias,
-):
- flags: Optional[ApplicationFlags] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("application_id", "workflow_id", self)
-
-
-class ApplicationVariantEdit(WorkflowVariantEdit):
- flags: Optional[ApplicationFlags] = None
-
-
-# application revisions -----------------------------------------------------
-
-
-class ApplicationRevisionData(WorkflowRevisionData):
- pass
-
-
-class ApplicationRevision(
- WorkflowRevision,
- ApplicationIdAlias,
- ApplicationVariantIdAlias,
-):
- flags: Optional[ApplicationFlags] = None
-
- data: Optional[ApplicationRevisionData] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("application_id", "workflow_id", self)
- sync_alias("application_variant_id", "workflow_variant_id", self)
-
-
-class ApplicationRevisionCreate(
- WorkflowRevisionCreate,
- ApplicationIdAlias,
- ApplicationVariantIdAlias,
-):
- flags: Optional[ApplicationFlags] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("application_id", "workflow_id", self)
- sync_alias("application_variant_id", "workflow_variant_id", self)
-
-
-class ApplicationRevisionEdit(WorkflowRevisionEdit):
- flags: Optional[ApplicationFlags] = None
-
-
-class ApplicationRevisionCommit(
- WorkflowRevisionCommit,
- ApplicationIdAlias,
- ApplicationVariantIdAlias,
-):
- flags: Optional[ApplicationFlags] = None
-
- data: Optional[ApplicationRevisionData] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("application_id", "workflow_id", self)
- sync_alias("application_variant_id", "workflow_variant_id", self)
-
-
-class ApplicationRevisionResponse(BaseModel):
- count: int = 0
- application_revision: Optional[ApplicationRevision] = None
-
-
-class ApplicationRevisionsResponse(BaseModel):
- count: int = 0
- application_revisions: List[ApplicationRevision] = []
-
-
-# simple applications ------------------------------------------------------------
-
-
-class LegacyApplicationFlags(WorkflowFlags):
- pass
-
-
-class LegacyApplicationData(WorkflowRevisionData):
- pass
-
-
-class LegacyApplication(Identifier, Slug, Lifecycle, Header, Metadata):
- flags: Optional[LegacyApplicationFlags] = None
-
- data: Optional[LegacyApplicationData] = None
-
-
-class LegacyApplicationCreate(Slug, Header, Metadata):
- flags: Optional[LegacyApplicationFlags] = None
-
- data: Optional[LegacyApplicationData] = None
-
-
-class LegacyApplicationEdit(Identifier, Header, Metadata):
- flags: Optional[LegacyApplicationFlags] = None
-
- data: Optional[LegacyApplicationData] = None
-
-
-class LegacyApplicationResponse(BaseModel):
- count: int = 0
- application: Optional[LegacyApplication] = None
-
-
-# end of oss.src.core.applications.dtos
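Note on the deleted DTO layer above: every domain model (workflow, application, evaluator) mirrors a generic git-like model (artifact, variant, revision) through alias fields kept in sync inside `model_post_init`. A minimal sketch of that pattern, assuming simplified stand-ins for the SDK's `AliasConfig` and `sync_alias`, which are not shown in this diff:

```python
# Sketch of the alias-sync pattern used throughout the deleted DTOs.
# AliasConfig and sync_alias below are simplified stand-ins, not the
# SDK's actual implementations.
from typing import Optional
from uuid import UUID, uuid4

from pydantic import BaseModel, Field


class AliasConfig(BaseModel):
    model_config = {"populate_by_name": True}


def sync_alias(alias: str, canonical: str, obj: BaseModel) -> None:
    # Copy whichever of the two fields was populated onto the other,
    # so workflow_id and artifact_id always agree.
    alias_val = getattr(obj, alias, None)
    canonical_val = getattr(obj, canonical, None)
    if alias_val is None and canonical_val is not None:
        object.__setattr__(obj, alias, canonical_val)
    elif canonical_val is None and alias_val is not None:
        object.__setattr__(obj, canonical, alias_val)


class WorkflowIdAlias(AliasConfig):
    workflow_id: Optional[UUID] = None
    artifact_id: Optional[UUID] = Field(
        default=None,
        exclude=True,
        alias="workflow_id",
    )


class WorkflowVariant(WorkflowIdAlias):
    def model_post_init(self, __context) -> None:
        sync_alias("workflow_id", "artifact_id", self)


variant = WorkflowVariant(workflow_id=uuid4())
assert variant.artifact_id == variant.workflow_id
```

The `exclude=True` keeps the generic field name out of serialized payloads while the domain-specific name remains the public one.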
diff --git a/sdk/agenta/sdk/tracing/exporters.py b/sdk/agenta/sdk/tracing/exporters.py
index a121bd857a..5618b10178 100644
--- a/sdk/agenta/sdk/tracing/exporters.py
+++ b/sdk/agenta/sdk/tracing/exporters.py
@@ -1,7 +1,4 @@
-from typing import Sequence, Dict, List, Optional, Any
-from threading import Thread
-from os import environ
-from uuid import UUID
+from typing import Sequence, Dict, List, Optional
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.export import (
@@ -11,21 +8,18 @@
ReadableSpan,
)
-from agenta.sdk.utils.constants import TRUTHY
from agenta.sdk.utils.logging import get_module_logger
from agenta.sdk.utils.exceptions import suppress
from agenta.sdk.utils.cache import TTLLRUCache
-from agenta.sdk.contexts.tracing import (
- otlp_context_manager,
- otlp_context,
- OTLPContext,
+from agenta.sdk.context.tracing import (
+ tracing_exporter_context_manager,
+ tracing_exporter_context,
+ TracingExporterContext,
)
log = get_module_logger(__name__)
-_ASYNC_EXPORT = environ.get("AGENTA_OTLP_ASYNC_EXPORT", "true").lower() in TRUTHY
-
class InlineTraceExporter(SpanExporter):
def __init__(
@@ -51,8 +45,6 @@ def export(
self._registry[trace_id].append(span)
- return
-
def shutdown(self) -> None:
self._shutdown = True
@@ -92,38 +84,28 @@ def __init__(
self.credentials = credentials
def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
- grouped_spans: Dict[Optional[str], List[ReadableSpan]] = dict()
+        grouped_spans: Dict[Optional[str], List[ReadableSpan]] = {}
for span in spans:
trace_id = span.get_span_context().trace_id
credentials = None
if self.credentials:
- credentials = str(self.credentials.get(trace_id))
+ credentials = self.credentials.get(trace_id)
if credentials not in grouped_spans:
- grouped_spans[credentials] = list()
+ grouped_spans[credentials] = []
grouped_spans[credentials].append(span)
serialized_spans = []
for credentials, _spans in grouped_spans.items():
- with otlp_context_manager(
- context=OTLPContext(
+ with tracing_exporter_context_manager(
+ context=TracingExporterContext(
credentials=credentials,
)
):
- for _span in _spans:
- trace_id = _span.get_span_context().trace_id
- span_id = _span.get_span_context().span_id
-
- # log.debug(
- # "[SPAN] [EXPORT]",
- # trace_id=UUID(int=trace_id).hex,
- # span_id=UUID(int=span_id).hex[-16:],
- # )
-
serialized_spans.append(super().export(_spans))
if all(serialized_spans):
@@ -132,48 +114,16 @@ def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
return SpanExportResult.FAILURE
def _export(self, serialized_data: bytes, timeout_sec: Optional[float] = None):
- try:
- credentials = otlp_context.get().credentials
-
- if credentials:
- self._session.headers.update({"Authorization": credentials})
-
- def __export():
- with suppress():
- resp = None
- if timeout_sec is not None:
- resp = super(OTLPExporter, self)._export(
- serialized_data,
- timeout_sec,
- )
- else:
- resp = super(OTLPExporter, self)._export(
- serialized_data,
- )
-
- # log.debug(
- # "[SPAN] [_EXPORT]",
- # data=serialized_data,
- # resp=resp,
- # )
-
- if _ASYNC_EXPORT is True:
- # log.debug("[SPAN] [ASYNC.X]")
- thread = Thread(target=__export, daemon=True)
- thread.start()
- else:
- # log.debug("[SPAN] [ SYNC.X]")
- return __export()
-
- except Exception as e:
- log.error(f"Export failed with error: {e}", exc_info=True)
-
- finally:
+ credentials = tracing_exporter_context.get().credentials
- class Response:
- ok = True
+ if credentials:
+ self._session.headers.update({"Authorization": credentials})
- return Response()
+        with suppress():
+ if timeout_sec is not None:
+ return super()._export(serialized_data, timeout_sec)
+ else:
+ return super()._export(serialized_data)
ConsoleExporter = ConsoleSpanExporter
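The rewritten `export` groups spans by the credentials attached to their trace before handing each group to the base OTLP exporter, so one batch can carry spans for multiple tenants. A hedged sketch of just the grouping step (types simplified; the `credentials` map from trace id to Authorization value is assumed to be populated elsewhere):

```python
# Grouping sketch only; the real exporter then exports each group under
# a tracing_exporter_context carrying that group's credentials.
from typing import Any, Dict, List, Optional


def group_spans_by_credentials(
    spans: List[Any],  # opentelemetry ReadableSpan instances
    credentials: Optional[Dict[int, str]],  # trace_id -> Authorization value
) -> Dict[Optional[str], List[Any]]:
    grouped: Dict[Optional[str], List[Any]] = {}
    for span in spans:
        trace_id = span.get_span_context().trace_id
        creds = credentials.get(trace_id) if credentials else None
        grouped.setdefault(creds, []).append(span)
    return grouped
```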
diff --git a/sdk/agenta/sdk/tracing/processors.py b/sdk/agenta/sdk/tracing/processors.py
index 88c2f40a12..165ddcb638 100644
--- a/sdk/agenta/sdk/tracing/processors.py
+++ b/sdk/agenta/sdk/tracing/processors.py
@@ -1,7 +1,5 @@
from typing import Optional, Dict, List
from threading import Lock
-from json import dumps
-from uuid import UUID
from opentelemetry.baggage import get_all as get_baggage
from opentelemetry.context import Context
@@ -11,13 +9,10 @@
ReadableSpan,
BatchSpanProcessor,
)
-from opentelemetry.trace import SpanContext
from agenta.sdk.utils.logging import get_module_logger
from agenta.sdk.tracing.conventions import Reference
-from agenta.sdk.contexts.tracing import TracingContext
-
log = get_module_logger(__name__)
@@ -55,15 +50,6 @@ def on_start(
span: Span,
parent_context: Optional[Context] = None,
) -> None:
- trace_id = span.context.trace_id
- span_id = span.context.span_id
-
- # log.debug(
- # "[SPAN] [START] ",
- # trace_id=UUID(int=trace_id).hex,
- # span_id=UUID(int=span_id).hex[-16:],
- # )
-
for key in self.references.keys():
span.set_attribute(f"ag.refs.{key}", self.references[key])
@@ -75,83 +61,6 @@ def on_start(
if _key in [_.value for _ in Reference.__members__.values()]:
span.set_attribute(key, baggage[key])
- context = TracingContext.get()
-
- trace_type = span.attributes.get("trace_type") if span.attributes else None
-
- context.annotate = (
- context.annotate
- or (context.type == "annotation")
- or (trace_type == "annotation")
- )
- context.type = (
- (str(trace_type) if trace_type else None)
- or context.type
- or ("annotation" if context.annotate else "invocation")
- )
-
- span.set_attribute("ag.type.tree", context.type)
-
- if context.flags:
- for key in context.flags.keys():
- span.set_attribute(f"ag.flags.{key}", context.flags[key])
- # if context.tags:
- # for key in context.tags.keys():
- # span.set_attribute(f"ag.tags.{key}", context.tags[key])
- # if context.meta:
- # span.set_attribute(f"ag.meta.", dumps(context.meta))
-
- # --- DISTRIBUTED
- if not self.inline:
- if context.links:
- for key, link in context.links.items():
- try:
- link = link.model_dump(mode="json", exclude_none=True)
- except: # pylint: disable=bare-except
- pass
- if not isinstance(link, dict):
- continue
- if not link.get("trace_id") or not link.get("span_id"):
- continue
-
- span.add_link(
- context=SpanContext(
- trace_id=int(str(link.get("trace_id")), 16),
- span_id=int(str(link.get("span_id")), 16),
- is_remote=True,
- ),
- attributes=dict(
- key=str(key),
- ),
- )
-
- if context.references:
- for key, ref in context.references.items():
- try:
- ref = ref.model_dump(mode="json", exclude_none=True)
- except: # pylint: disable=bare-except
- pass
- if not isinstance(ref, dict):
- continue
- if not ref.get("id") and not ref.get("slug") and not ref.get("version"):
- continue
-
- if ref.get("id"):
- span.set_attribute(
- f"ag.refs.{key}.id",
- str(ref.get("id")),
- )
- if ref.get("slug"):
- span.set_attribute(
- f"ag.refs.{key}.slug",
- str(ref.get("slug")),
- )
- if ref.get("version"):
- span.set_attribute(
- f"ag.refs.{key}.version",
- str(ref.get("version")),
- )
-
trace_id = span.context.trace_id
span_id = span.context.span_id
@@ -165,12 +74,6 @@ def on_end(
trace_id = span.context.trace_id
span_id = span.context.span_id
- # log.debug(
- # "[SPAN] [END] ",
- # trace_id=UUID(int=trace_id).hex,
- # span_id=UUID(int=span_id).hex[-16:],
- # )
-
self._spans.setdefault(trace_id, []).append(span)
self._registry.setdefault(trace_id, {})
self._registry[trace_id].pop(span_id, None)
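After this cleanup, `on_start` keeps only the reference propagation: attributes from `self.references`, plus any baggage entry whose key suffix is a known `Reference` value. A rough sketch of that surviving logic; the suffix derivation is an assumption, since the hunk does not show how `_key` is computed:

```python
# Assumed shape of the surviving baggage propagation; `span` stands in
# for an OTel Span and `reference_values` for the Reference enum values.
def propagate_reference_baggage(span, baggage: dict, reference_values: set) -> None:
    for key, value in baggage.items():
        _key = key.replace("ag.refs.", "")  # assumed derivation of _key
        if _key in reference_values:
            span.set_attribute(key, value)
```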
diff --git a/sdk/agenta/sdk/tracing/propagation.py b/sdk/agenta/sdk/tracing/propagation.py
index 8327c27ef1..32da48b322 100644
--- a/sdk/agenta/sdk/tracing/propagation.py
+++ b/sdk/agenta/sdk/tracing/propagation.py
@@ -6,8 +6,6 @@
from opentelemetry.baggage import set_baggage
from opentelemetry.context import get_current
-from agenta.sdk.contexts.tracing import TracingContext
-
import agenta as ag
@@ -74,7 +72,7 @@ def inject(
_context = get_current()
- ctx = TracingContext.get()
+ ctx = ag.sdk.context.tracing.tracing_context.get()
# --- Inject traceparent --- #
try:
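For callers, the practical change here is the accessor: the tracing context is now read through the SDK's context module instead of a `TracingContext` class. A usage sketch, assuming `ag.init()` has already been called:

```python
import agenta as ag

# New accessor, as used in the hunk above.
ctx = ag.sdk.context.tracing.tracing_context.get()
```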
diff --git a/sdk/agenta/sdk/tracing/spans.py b/sdk/agenta/sdk/tracing/spans.py
index 5145d6cbbf..eaee49b0f8 100644
--- a/sdk/agenta/sdk/tracing/spans.py
+++ b/sdk/agenta/sdk/tracing/spans.py
@@ -35,10 +35,6 @@ def __init__(
## --- PROXY METHODS --- ##
- @property
- def name(self) -> str:
- return self._span.name
-
def get_span_context(self):
return self._span.get_span_context()
diff --git a/sdk/agenta/sdk/types.py b/sdk/agenta/sdk/types.py
index da45b2c9b8..4d999d8274 100644
--- a/sdk/agenta/sdk/types.py
+++ b/sdk/agenta/sdk/types.py
@@ -71,15 +71,15 @@ def __init__(
):
headers = dict(extra_headers or {})
if version is not None:
- headers["x-ag-version"] = version
+ headers["X-ag-version"] = version
if content_type:
- headers["x-ag-content-type"] = content_type
+ headers["X-ag-content-type"] = content_type
if tree_id:
- headers["x-ag-tree-id"] = tree_id
+ headers["X-ag-tree-id"] = tree_id
if trace_id:
- headers["x-ag-trace-id"] = trace_id
+ headers["X-ag-trace-id"] = trace_id
if span_id:
- headers["x-ag-span-id"] = span_id
+ headers["X-ag-span-id"] = span_id
super().__init__(
content=content,
@@ -387,7 +387,7 @@ class ModelConfig(BaseModel):
"""Configuration for model parameters"""
model: str = MCField(
- default="gpt-4o-mini",
+ default="gpt-3.5-turbo",
choices=supported_llm_models,
)
@@ -462,154 +462,6 @@ def __init__(self, message: str, original_error: Optional[Exception] = None):
super().__init__(message)
-import json
-import re
-from typing import Any, Dict, Iterable, Tuple, Optional
-
-# --- Optional dependency: python-jsonpath (provides JSONPath + JSON Pointer) ---
-try:
- import jsonpath # ✅ use module API
- from jsonpath import JSONPointer # pointer class is fine to use
-except Exception:
- jsonpath = None
- JSONPointer = None
-
-# ========= Scheme detection =========
-
-
-def detect_scheme(expr: str) -> str:
- """Return 'json-path', 'json-pointer', or 'dot-notation' based on the placeholder prefix."""
- if expr.startswith("$"):
- return "json-path"
- if expr.startswith("/"):
- return "json-pointer"
- return "dot-notation"
-
-
-# ========= Resolvers =========
-
-
-def resolve_dot_notation(expr: str, data: dict) -> object:
- if "[" in expr or "]" in expr:
- raise KeyError(f"Bracket syntax is not supported in dot-notation: {expr!r}")
-
- # First, check if the expression exists as a literal key (e.g., "topic.story" as a single key)
- # This allows users to use dots in their variable names without nested access
- if expr in data:
- return data[expr]
-
- # If not found as a literal key, try to parse as dot-notation path
- cur = data
- for token in (p for p in expr.split(".") if p):
- if isinstance(cur, list) and token.isdigit():
- cur = cur[int(token)]
- else:
- if not isinstance(cur, dict):
- raise KeyError(
- f"Cannot access key {token!r} on non-dict while resolving {expr!r}"
- )
- if token not in cur:
- raise KeyError(f"Missing key {token!r} while resolving {expr!r}")
- cur = cur[token]
- return cur
-
-
-def resolve_json_path(expr: str, data: dict) -> object:
- if jsonpath is None:
- raise ImportError("python-jsonpath is required for json-path ($...)")
-
- if not (expr == "$" or expr.startswith("$.") or expr.startswith("$[")):
- raise ValueError(
- f"Invalid json-path expression {expr!r}. "
- "Must start with '$', '$.' or '$[' (no implicit normalization)."
- )
-
-    # Use package-level API
- results = jsonpath.findall(expr, data) # always returns a list
- return results[0] if len(results) == 1 else results
-
-
-def resolve_json_pointer(expr: str, data: Dict[str, Any]) -> Any:
- """Resolve a JSON Pointer; returns a single value."""
- if JSONPointer is None:
- raise ImportError("python-jsonpath is required for json-pointer (/...)")
- return JSONPointer(expr).resolve(data)
-
-
-def resolve_any(expr: str, data: Dict[str, Any]) -> Any:
- """Dispatch to the right resolver based on detected scheme."""
- scheme = detect_scheme(expr)
- if scheme == "json-path":
- return resolve_json_path(expr, data)
- if scheme == "json-pointer":
- return resolve_json_pointer(expr, data)
- return resolve_dot_notation(expr, data)
-
-
-# ========= Placeholder & coercion helpers =========
-
-_PLACEHOLDER_RE = re.compile(r"\{\{\s*(.*?)\s*\}\}")
-
-
-def extract_placeholders(template: str) -> Iterable[str]:
- """Yield the inner text of all {{ ... }} occurrences (trimmed)."""
- for m in _PLACEHOLDER_RE.finditer(template):
- yield m.group(1).strip()
-
-
-def coerce_to_str(value: Any) -> str:
- """Pretty stringify values for embedding into templates."""
- if isinstance(value, (dict, list)):
- return json.dumps(value, ensure_ascii=False)
- return str(value)
-
-
-def build_replacements(
- placeholders: Iterable[str], data: Dict[str, Any]
-) -> Tuple[Dict[str, str], set]:
- """
- Resolve all placeholders against data.
- Returns (replacements, unresolved_placeholders).
- """
- replacements: Dict[str, str] = {}
- unresolved: set = set()
- for expr in set(placeholders):
- try:
- val = resolve_any(expr, data)
- # Escape backslashes to avoid regex replacement surprises
- replacements[expr] = coerce_to_str(val).replace("\\", "\\\\")
- except Exception:
- unresolved.add(expr)
- return replacements, unresolved
-
-
-def apply_replacements(template: str, replacements: Dict[str, str]) -> str:
- """Replace {{ expr }} using a callback to avoid regex-injection issues."""
-
- def _repl(m: re.Match) -> str:
- expr = m.group(1).strip()
- return replacements.get(expr, m.group(0))
-
- return _PLACEHOLDER_RE.sub(_repl, template)
-
-
-def compute_truly_unreplaced(original: set, rendered: str) -> set:
- """Only count placeholders that were in the original template and remain."""
- now = set(extract_placeholders(rendered))
- return original & now
-
-
-def missing_lib_hints(unreplaced: set) -> Optional[str]:
- """Suggest installing python-jsonpath if placeholders indicate json-path or json-pointer usage."""
- if any(expr.startswith("$") or expr.startswith("/") for expr in unreplaced) and (
- jsonpath is None or JSONPointer is None
- ):
- return (
- "Install python-jsonpath to enable json-path ($...) and json-pointer (/...)"
- )
- return None
-
-
class PromptTemplate(BaseModel):
"""A template for generating prompts with formatting capabilities"""
@@ -656,7 +508,6 @@ def _format_with_template(self, content: str, kwargs: Dict[str, Any]) -> str:
try:
if self.template_format == "fstring":
return content.format(**kwargs)
-
elif self.template_format == "jinja2":
from jinja2 import Template, TemplateError
@@ -667,33 +518,36 @@ def _format_with_template(self, content: str, kwargs: Dict[str, Any]) -> str:
f"Jinja2 template error in content: '{content}'. Error: {str(e)}",
original_error=e,
)
-
elif self.template_format == "curly":
- original_placeholders = set(extract_placeholders(content))
+ import re
- replacements, _unresolved = build_replacements(
- original_placeholders, kwargs
- )
+ # Find all variables in the template
+ template_variables = set(re.findall(r"\{\{(.*?)\}\}", content))
- result = apply_replacements(content, replacements)
+ # Check which template variables are missing from inputs
+ # Validate BEFORE replacement to avoid false positives
+ provided_variables = set(kwargs.keys())
+ missing_variables = template_variables - provided_variables
- truly_unreplaced = compute_truly_unreplaced(
- original_placeholders, result
- )
- if truly_unreplaced:
- hint = missing_lib_hints(truly_unreplaced)
- suffix = f" Hint: {hint}" if hint else ""
+ if missing_variables:
raise TemplateFormatError(
- f"Unreplaced variables in curly template: {sorted(truly_unreplaced)}.{suffix}"
+ f"Unreplaced variables in curly template: {sorted(missing_variables)}"
)
- return result
+ # Replace all variables in a single pass
+ # This prevents cascading replacements and handles self-referential values
+ def replace_var(match):
+ var_name = match.group(1)
+ if var_name in kwargs:
+ return str(kwargs[var_name])
+ return match.group(0)
+ result = re.sub(r"\{\{(.*?)\}\}", replace_var, content)
+ return result
else:
raise TemplateFormatError(
f"Unknown template format: {self.template_format}"
)
-
except KeyError as e:
key = str(e).strip("'")
raise TemplateFormatError(
@@ -701,8 +555,7 @@ def _format_with_template(self, content: str, kwargs: Dict[str, Any]) -> str:
)
except Exception as e:
raise TemplateFormatError(
- f"Error formatting template '{content}': {str(e)}",
- original_error=e,
+ f"Error formatting template '{content}': {str(e)}", original_error=e
)
def _substitute_variables(self, obj: Any, kwargs: Dict[str, Any]) -> Any:
@@ -777,7 +630,7 @@ def format(self, **kwargs) -> "PromptTemplate":
)
)
- new_llm_config = self.llm_config.model_copy(deep=True)
+ new_llm_config = self.llm_config.copy(deep=True)
if new_llm_config.response_format is not None:
rf_dict = new_llm_config.response_format.model_dump(by_alias=True)
substituted = self._substitute_variables(rf_dict, kwargs)
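The curly branch now validates missing variables before substituting and replaces everything in a single regex pass, so a value that itself contains `{{...}}` is never re-expanded. A behavior sketch under those semantics (a standalone re-implementation, not the SDK method):

```python
import re


def format_curly(content: str, **kwargs) -> str:
    # Validate before replacement, mirroring the new _format_with_template.
    variables = set(re.findall(r"\{\{(.*?)\}\}", content))
    missing = variables - set(kwargs)
    if missing:
        raise ValueError(f"Unreplaced variables: {sorted(missing)}")
    # Single pass: replaced values are never scanned again.
    return re.sub(
        r"\{\{(.*?)\}\}",
        lambda m: str(kwargs.get(m.group(1), m.group(0))),
        content,
    )


# Self-referential values stay literal instead of recursing.
assert format_curly("Hi {{name}}", name="{{name}}") == "Hi {{name}}"
```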
diff --git a/sdk/agenta/sdk/utils/client.py b/sdk/agenta/sdk/utils/client.py
deleted file mode 100644
index dba0cb6309..0000000000
--- a/sdk/agenta/sdk/utils/client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import requests
-
-BASE_TIMEOUT = 10
-
-from agenta.sdk.utils.logging import get_module_logger
-
-import agenta as ag
-
-log = get_module_logger(__name__)
-
-
-def authed_api():
- """
- Preconfigured requests for authenticated endpoints (supports all methods).
- """
-
- api_url = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.api_url
- api_key = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.api_key
-
- if not api_url or not api_key:
- log.error("Please call ag.init() first.")
- log.error("And don't forget to set AGENTA_API_URL and AGENTA_API_KEY.")
- raise ValueError("API URL and API Key must be set.")
-
- def _request(method: str, endpoint: str, **kwargs):
- url = f"{api_url}{endpoint}"
- headers = kwargs.pop("headers", {})
- headers.setdefault("Authorization", f"ApiKey {api_key}")
-
- return requests.request(
- method=method,
- url=url,
- headers=headers,
- timeout=BASE_TIMEOUT,
- **kwargs,
- )
-
- return _request
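For reference, callers used the removed helper like this; the endpoint below is illustrative, not a documented route:

```python
request = authed_api()  # removed helper; returned a preconfigured requests wrapper
response = request("GET", "/api/testsets")  # illustrative endpoint
response.raise_for_status()
```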
diff --git a/sdk/agenta/sdk/utils/logging.py b/sdk/agenta/sdk/utils/logging.py
index cc4789b93c..1091ceefd0 100644
--- a/sdk/agenta/sdk/utils/logging.py
+++ b/sdk/agenta/sdk/utils/logging.py
@@ -8,6 +8,15 @@
import structlog
from structlog.typing import EventDict, WrappedLogger, Processor
+# from datetime import datetime
+# from logging.handlers import RotatingFileHandler
+
+# from opentelemetry.trace import get_current_span
+# from opentelemetry._logs import set_logger_provider
+# from opentelemetry.sdk._logs import LoggingHandler, LoggerProvider
+# from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
+# from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
+
TRACE_LEVEL = 1
logging.TRACE = TRACE_LEVEL
logging.addLevelName(TRACE_LEVEL, "TRACE")
@@ -31,6 +40,15 @@ def bound_logger_trace(self, *args, **kwargs):
AGENTA_LOG_CONSOLE_ENABLED = os.getenv("AGENTA_LOG_CONSOLE_ENABLED", "true") == "true"
AGENTA_LOG_CONSOLE_LEVEL = os.getenv("AGENTA_LOG_CONSOLE_LEVEL", "TRACE").upper()
+# AGENTA_LOG_OTLP_ENABLED = os.getenv("AGENTA_LOG_OTLP_ENABLED", "false") == "true"
+# AGENTA_LOG_OTLP_LEVEL = os.getenv("AGENTA_LOG_OTLP_LEVEL", "INFO").upper()
+
+# AGENTA_LOG_FILE_ENABLED = os.getenv("AGENTA_LOG_FILE_ENABLED", "true") == "true"
+# AGENTA_LOG_FILE_LEVEL = os.getenv("AGENTA_LOG_FILE_LEVEL", "WARNING").upper()
+# AGENTA_LOG_FILE_BASE = os.getenv("AGENTA_LOG_FILE_PATH", "error")
+# LOG_FILE_DATE = datetime.utcnow().strftime("%Y-%m-%d")
+# AGENTA_LOG_FILE_PATH = f"{AGENTA_LOG_FILE_BASE}-{LOG_FILE_DATE}.log"
+
# COLORS
LEVEL_COLORS = {
"TRACE": "\033[97m",
@@ -70,6 +88,15 @@ def process_positional_args(_, __, event_dict: EventDict) -> EventDict:
return event_dict
+# def add_trace_context(_, __, event_dict: EventDict) -> EventDict:
+# span = get_current_span()
+# if span and span.get_span_context().is_valid:
+# ctx = span.get_span_context()
+# event_dict["TraceId"] = format(ctx.trace_id, "032x")
+# event_dict["SpanId"] = format(ctx.span_id, "016x")
+# return event_dict
+
+
def add_logger_info(
logger: WrappedLogger, method_name: str, event_dict: EventDict
) -> EventDict:
@@ -116,9 +143,36 @@ def render(_, __, event_dict: EventDict) -> str:
return render
+# def plain_renderer() -> Processor:
+# hidden = {
+# "SeverityText",
+# "SeverityNumber",
+# "MethodName",
+# "logger_factory",
+# "LoggerName",
+# "level",
+# }
+
+# def render(_, __, event_dict: EventDict) -> str:
+# ts = event_dict.pop("Timestamp", "")[:23] + "Z"
+# level = event_dict.get("level", "")
+# msg = event_dict.pop("event", "")
+# padded = f"[{level:<5}]"
+# logger = f"[{event_dict.pop('logger', '')}]"
+# extras = " ".join(f"{k}={v}" for k, v in event_dict.items() if k not in hidden)
+# return f"{ts} {padded} {msg} {logger} {extras}"
+
+# return render
+
+
+# def json_renderer() -> Processor:
+# return structlog.processors.JSONRenderer()
+
+
SHARED_PROCESSORS: list[Processor] = [
structlog.processors.TimeStamper(fmt="iso", utc=True, key="Timestamp"),
process_positional_args,
+ # add_trace_context,
add_logger_info,
structlog.processors.format_exc_info,
structlog.processors.dict_tracebacks,
@@ -139,30 +193,36 @@ def create_struct_logger(
)
-# Guard against double initialization
-_LOGGING_CONFIGURED = False
-
# CONFIGURE HANDLERS AND STRUCTLOG LOGGERS
handlers = []
loggers = []
-if AGENTA_LOG_CONSOLE_ENABLED and not _LOGGING_CONFIGURED:
- _LOGGING_CONFIGURED = True
-
- # Check if console logger already has handlers (from OSS module)
- console_logger = logging.getLogger("console")
-
- if not console_logger.handlers:
- # Only add handler if it doesn't exist yet
- h = logging.StreamHandler(sys.stdout)
- h.setLevel(getattr(logging, AGENTA_LOG_CONSOLE_LEVEL, TRACE_LEVEL))
- h.setFormatter(logging.Formatter("%(message)s"))
- console_logger.addHandler(h)
- console_logger.setLevel(TRACE_LEVEL)
- console_logger.propagate = False
-
+if AGENTA_LOG_CONSOLE_ENABLED:
+ h = logging.StreamHandler(sys.stdout)
+ h.setLevel(getattr(logging, AGENTA_LOG_CONSOLE_LEVEL, TRACE_LEVEL))
+ h.setFormatter(logging.Formatter("%(message)s"))
+ logging.getLogger("console").addHandler(h)
loggers.append(create_struct_logger([colored_console_renderer()], "console"))
+# if AGENTA_LOG_FILE_ENABLED:
+# h = RotatingFileHandler(AGENTA_LOG_FILE_PATH, maxBytes=10 * 1024 * 1024, backupCount=5)
+# h.setLevel(getattr(logging, AGENTA_LOG_FILE_LEVEL, logging.WARNING))
+# h.setFormatter(logging.Formatter("%(message)s"))
+# logging.getLogger("file").addHandler(h)
+# loggers.append(create_struct_logger([plain_renderer()], "file"))
+
+# if AGENTA_LOG_OTLP_ENABLED:
+# provider = LoggerProvider()
+# exporter = OTLPLogExporter()
+# provider.add_log_record_processor(BatchLogRecordProcessor(exporter))
+# set_logger_provider(provider)
+# h = LoggingHandler(
+# level=getattr(logging, AGENTA_LOG_OTLP_LEVEL, logging.INFO), logger_provider=provider
+# )
+# h.setFormatter(logging.Formatter("%(message)s"))
+# logging.getLogger("otel").addHandler(h)
+# loggers.append(create_struct_logger([json_renderer()], "otel"))
+
class MultiLogger:
def __init__(self, *loggers: structlog.stdlib.BoundLogger):
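Console logging is still driven by the same two environment variables; the defaults read at import time are unchanged by this diff:

```python
import os

# Defaults as read by the module (see the unchanged lines above).
console_enabled = os.getenv("AGENTA_LOG_CONSOLE_ENABLED", "true") == "true"
console_level = os.getenv("AGENTA_LOG_CONSOLE_LEVEL", "TRACE").upper()
```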
diff --git a/sdk/agenta/sdk/utils/references.py b/sdk/agenta/sdk/utils/references.py
deleted file mode 100644
index 865be8d1ab..0000000000
--- a/sdk/agenta/sdk/utils/references.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from uuid import UUID
-import re
-import unicodedata
-
-
-def get_slug_from_name_and_id(
- name: str,
- id: UUID, # pylint: disable=redefined-builtin
-) -> str:
- # Normalize Unicode (e.g., é → e)
- name = unicodedata.normalize("NFKD", name)
- # Remove non-ASCII characters
- name = name.encode("ascii", "ignore").decode("ascii")
- # Lowercase and remove non-word characters except hyphens and spaces
- name = re.sub(r"[^\w\s-]", "", name.lower())
- # Replace any sequence of hyphens or whitespace with a single hyphen
- name = re.sub(r"[-\s]+", "-", name)
- # Trim leading/trailing hyphens
- name = name.strip("-")
- # Last 12 characters of the ID
- slug = f"{name}-{id.hex[-12:]}"
-
- return slug.lower()
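For readers tracking the removal: the helper produced ASCII slugs suffixed with the last 12 hex characters of the id, for example:

```python
# Example output of the removed helper (worked through its steps above):
#   get_slug_from_name_and_id("Café Crème!", UUID("12345678-1234-5678-1234-567812345678"))
#   -> "cafe-creme-567812345678"
```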
diff --git a/sdk/agenta/sdk/workflows/builtin.py b/sdk/agenta/sdk/workflows/builtin.py
deleted file mode 100644
index 96fe546f66..0000000000
--- a/sdk/agenta/sdk/workflows/builtin.py
+++ /dev/null
@@ -1,600 +0,0 @@
-from typing import Optional, Union, Dict
-
-from agenta.sdk.models.workflows import Reference
-from agenta.sdk.decorators.running import workflow, Workflow, application, evaluator
-from agenta.sdk.workflows.handlers import SinglePromptConfig
-
-
-def echo(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
-) -> Workflow:
- return workflow(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="echo",
- )()
-
-
-def auto_exact_match(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- correct_answer_key: Optional[str] = "correct_answer",
-) -> Workflow:
- parameters = dict(
- correct_answer_key=correct_answer_key,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_exact_match",
- #
- parameters=parameters,
- )()
-
-
-def auto_regex_test(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- regex_pattern: str,
- #
- regex_should_match: Optional[bool] = True,
- case_sensitive: Optional[bool] = True,
-) -> Workflow:
- parameters = dict(
- regex_pattern=regex_pattern,
- regex_should_match=regex_should_match,
- case_sensitive=case_sensitive,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_regex_test",
- #
- parameters=parameters,
- )()
-
-
-def field_match_test(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- json_field: str,
- #
- correct_answer_key: Optional[str] = "correct_answer",
-) -> Workflow:
- parameters = dict(
- json_field=json_field,
- correct_answer_key=correct_answer_key,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="field_match_test",
- #
- parameters=parameters,
- )()
-
-
-def auto_webhook_test(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- webhook_url: str,
- #
- correct_answer_key: Optional[str] = "correct_answer",
-) -> Workflow:
- parameters = dict(
- webhook_url=webhook_url,
- correct_answer_key=correct_answer_key,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_webhook_test",
- #
- parameters=parameters,
- )()
-
-
-def auto_custom_code_run(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- code: str,
- #
- correct_answer_key: Optional[str] = "correct_answer",
- threshold: Optional[float] = 0.5,
-) -> Workflow:
- parameters = dict(
- code=code,
- correct_answer_key=correct_answer_key,
- threshold=threshold,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_custom_code_run",
- #
- parameters=parameters,
- )()
-
-
-def auto_ai_critique(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- prompt_template: list[dict[str, str]],
- #
- correct_answer_key: Optional[str] = "correct_answer",
- model: Optional[str] = "gpt-3.5-turbo",
-) -> Workflow:
- parameters = dict(
- prompt_template=prompt_template,
- correct_answer_key=correct_answer_key,
- model=model,
- version=3,
- template_format="curly",
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_ai_critique",
- #
- parameters=parameters,
- )()
-
-
-def auto_starts_with(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- prefix: str,
- #
- case_sensitive: Optional[bool] = True,
-) -> Workflow:
- parameters = dict(
- prefix=prefix,
- case_sensitive=case_sensitive,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_starts_with",
- #
- parameters=parameters,
- )()
-
-
-def auto_ends_with(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- suffix: str,
- #
- case_sensitive: Optional[bool] = True,
-) -> Workflow:
- parameters = dict(
- suffix=suffix,
- case_sensitive=case_sensitive,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_ends_with",
- #
- parameters=parameters,
- )()
-
-
-def auto_contains(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- substring: str,
- #
- case_sensitive: Optional[bool] = True,
-) -> Workflow:
- parameters = dict(
- substring=substring,
- case_sensitive=case_sensitive,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_contains",
- #
- parameters=parameters,
- )()
-
-
-def auto_contains_any(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- substrings: list[str],
- #
- case_sensitive: Optional[bool] = True,
-) -> Workflow:
- parameters = dict(
- substrings=substrings,
- case_sensitive=case_sensitive,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_contains_any",
- #
- parameters=parameters,
- )()
-
-
-def auto_contains_all(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- substrings: list[str],
- #
- case_sensitive: Optional[bool] = True,
-) -> Workflow:
- parameters = dict(
- substrings=substrings,
- case_sensitive=case_sensitive,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_contains_all",
- #
- parameters=parameters,
- )()
-
-
-def auto_contains_json(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
-) -> Workflow:
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_contains_json",
- )()
-
-
-def auto_json_diff(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- correct_answer_key: Optional[str] = "correct_answer",
- threshold: Optional[float] = 0.5,
- predict_keys: Optional[bool] = False,
- case_insensitive_keys: Optional[bool] = False,
- compare_schema_only: Optional[bool] = False,
-) -> Workflow:
- parameters = dict(
- correct_answer_key=correct_answer_key,
- threshold=threshold,
- predict_keys=predict_keys,
- case_insensitive_keys=case_insensitive_keys,
- compare_schema_only=compare_schema_only,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_json_diff",
- #
- parameters=parameters,
- )()
-
-
-def auto_levenshtein_distance(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- correct_answer_key: Optional[str] = "correct_answer",
- case_sensitive: Optional[bool] = True,
- threshold: Optional[float] = 0.5,
-) -> Workflow:
- parameters = dict(
- correct_answer_key=correct_answer_key,
- case_sensitive=case_sensitive,
- threshold=threshold,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_levenshtein_distance",
- #
- parameters=parameters,
- )()
-
-
-def auto_similarity_match(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- correct_answer_key: Optional[str] = "correct_answer",
- case_sensitive: Optional[bool] = True,
- threshold: Optional[float] = 0.5,
-) -> Workflow:
- parameters = dict(
- correct_answer_key=correct_answer_key,
- case_sensitive=case_sensitive,
- threshold=threshold,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_similarity_match",
- #
- parameters=parameters,
- )()
-
-
-def auto_semantic_similarity(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- correct_answer_key: Optional[str] = "correct_answer",
- threshold: Optional[float] = 0.5,
- embedding_model: Optional[str] = "text-embedding-3-small",
-) -> Workflow:
- parameters = dict(
- correct_answer_key=correct_answer_key,
- threshold=threshold,
- embedding_model=embedding_model,
- )
-
- return evaluator(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="auto_semantic_similarity",
- #
- parameters=parameters,
- )()
-
-
-def completion(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- config: SinglePromptConfig,
-) -> Workflow:
- parameters = config.model_dump(
- mode="json",
- exclude_none=True,
- )
-
- return application(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="completion",
- #
- parameters=parameters,
- )()
-
-
-def chat(
- *,
- slug: Optional[str] = None,
- #
- name: Optional[str] = None,
- description: Optional[str] = None,
- #
- variant_slug: Optional[str] = None,
- #
- config: SinglePromptConfig,
-) -> Workflow:
- parameters = config.model_dump(
- mode="json",
- exclude_none=True,
- )
-
- return application(
- slug=slug,
- #
- name=name,
- description=description,
- #
- variant_slug=variant_slug,
- #
- uri="chat",
- #
- parameters=parameters,
- )()
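Before deletion, these builders were the public way to declare built-in evaluators and applications; each returned a `Workflow` bound to a built-in URI with its parameters baked in. A usage sketch derived from the signatures above (slug and values are illustrative):

```python
# Built from the deleted factory; slug/substring values are illustrative.
contains_check = auto_contains(
    slug="contains-check",
    name="Contains check",
    substring="hello",
    case_sensitive=False,
)
```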
diff --git a/sdk/agenta/sdk/workflows/configurations.py b/sdk/agenta/sdk/workflows/configurations.py
deleted file mode 100644
index 9086047c53..0000000000
--- a/sdk/agenta/sdk/workflows/configurations.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from agenta.sdk.models.workflows import WorkflowServiceConfiguration
-
-
-echo_v0_configuration = WorkflowServiceConfiguration()
-auto_exact_match_v0_configuration = WorkflowServiceConfiguration()
-auto_regex_test_v0_configuration = WorkflowServiceConfiguration()
-field_match_test_v0_configuration = WorkflowServiceConfiguration()
-auto_webhook_test_v0_configuration = WorkflowServiceConfiguration()
-auto_custom_code_run_v0_configuration = WorkflowServiceConfiguration()
-auto_ai_critique_v0_configuration = WorkflowServiceConfiguration()
-auto_starts_with_v0_configuration = WorkflowServiceConfiguration()
-auto_ends_with_v0_configuration = WorkflowServiceConfiguration()
-auto_contains_v0_configuration = WorkflowServiceConfiguration()
-auto_contains_any_v0_configuration = WorkflowServiceConfiguration()
-auto_contains_all_v0_configuration = WorkflowServiceConfiguration()
-auto_contains_json_v0_configuration = WorkflowServiceConfiguration()
-auto_json_diff_v0_configuration = WorkflowServiceConfiguration()
-auto_levenshtein_distance_v0_configuration = WorkflowServiceConfiguration()
-auto_similarity_match_v0_configuration = WorkflowServiceConfiguration()
-auto_semantic_similarity_v0_configuration = WorkflowServiceConfiguration()
-completion_v0_configuration = WorkflowServiceConfiguration()
-chat_v0_configuration = WorkflowServiceConfiguration()
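Each of the deleted constants was an empty default; none carried a script or parameters:

```python
from agenta.sdk.models.workflows import WorkflowServiceConfiguration  # as imported above

default_configuration = WorkflowServiceConfiguration()
assert default_configuration.script is None
assert default_configuration.parameters is None
```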
diff --git a/sdk/agenta/sdk/workflows/handlers.py b/sdk/agenta/sdk/workflows/handlers.py
deleted file mode 100644
index 738392f345..0000000000
--- a/sdk/agenta/sdk/workflows/handlers.py
+++ /dev/null
@@ -1,1787 +0,0 @@
-from typing import List, Any, Optional, Dict, Union
-from json import dumps, loads
-import traceback
-import json
-import re
-import math
-
-import httpx
-
-import litellm
-
-from pydantic import BaseModel, Field
-from openai import AsyncOpenAI, OpenAIError
-from difflib import SequenceMatcher
-
-from agenta.sdk.utils.logging import get_module_logger
-
-from agenta.sdk.litellm import mockllm
-from agenta.sdk.types import PromptTemplate, Message
-from agenta.sdk.managers.secrets import SecretsManager
-
-from agenta.sdk.decorators.tracing import instrument
-
-from agenta.sdk.models.shared import Data
-from agenta.sdk.models.tracing import Trace
-from agenta.sdk.workflows.sandbox import execute_code_safely
-from agenta.sdk.workflows.errors import (
- InvalidConfigurationParametersV0Error,
- MissingConfigurationParameterV0Error,
- InvalidConfigurationParameterV0Error,
- InvalidInputsV0Error,
- MissingInputV0Error,
- InvalidInputV0Error,
- InvalidOutputsV0Error,
- MissingOutputV0Error,
- InvalidSecretsV0Error,
- JSONDiffV0Error,
- LevenshteinDistanceV0Error,
- SyntacticSimilarityV0Error,
- SemanticSimilarityV0Error,
- WebhookServerV0Error,
- WebhookClientV0Error,
- CustomCodeServerV0Error,
- RegexPatternV0Error,
- PromptFormattingV0Error,
- PromptCompletionV0Error,
-)
-
-from agenta.sdk.litellm import mockllm
-from agenta.sdk.litellm.litellm import litellm_handler
-
-litellm.logging = False
-litellm.set_verbose = False
-litellm.drop_params = True
-# litellm.turn_off_message_logging = True
-mockllm.litellm = litellm
-
-litellm.callbacks = [litellm_handler()]
-
-log = get_module_logger(__name__)
-
-
-async def _compute_embedding(openai: Any, model: str, input: str) -> List[float]:
- response = await openai.embeddings.create(model=model, input=input)
- # embeddings API already returns a list of floats
- return response.data[0].embedding
-
-
-def _compute_similarity(embedding_1: List[float], embedding_2: List[float]) -> float:
- # Cosine similarity
- dot = sum(a * b for a, b in zip(embedding_1, embedding_2))
- norm1 = math.sqrt(sum(a * a for a in embedding_1))
- norm2 = math.sqrt(sum(b * b for b in embedding_2))
- if norm1 == 0 or norm2 == 0:
- return 0.0
- return dot / (norm1 * norm2)
-
-
-import json
-import re
-from typing import Any, Dict, Iterable, Tuple, Optional
-
-try:
- import jsonpath # ✅ use module API
- from jsonpath import JSONPointer # pointer class is fine to use
-except Exception:
- jsonpath = None
- JSONPointer = None
-
-# ========= Scheme detection =========
-
-
-def detect_scheme(expr: str) -> str:
- """Return 'json-path', 'json-pointer', or 'dot-notation' based on the placeholder prefix."""
- if expr.startswith("$"):
- return "json-path"
- if expr.startswith("/"):
- return "json-pointer"
- return "dot-notation"
-
-
-# ========= Resolvers =========
-
-
-def resolve_dot_notation(expr: str, data: dict) -> object:
- if "[" in expr or "]" in expr:
- raise KeyError(f"Bracket syntax is not supported in dot-notation: {expr!r}")
-
- # First, check if the expression exists as a literal key (e.g., "topic.story" as a single key)
- # This allows users to use dots in their variable names without nested access
- if expr in data:
- return data[expr]
-
- # If not found as a literal key, try to parse as dot-notation path
- cur = data
- for token in (p for p in expr.split(".") if p):
- if isinstance(cur, list) and token.isdigit():
- cur = cur[int(token)]
- else:
- if not isinstance(cur, dict):
- raise KeyError(
- f"Cannot access key {token!r} on non-dict while resolving {expr!r}"
- )
- if token not in cur:
- raise KeyError(f"Missing key {token!r} while resolving {expr!r}")
- cur = cur[token]
- return cur
-
-
-def resolve_json_path(expr: str, data: dict) -> object:
- if jsonpath is None:
- raise ImportError("python-jsonpath is required for json-path ($...)")
-
- if not (expr == "$" or expr.startswith("$.") or expr.startswith("$[")):
- raise ValueError(
- f"Invalid json-path expression {expr!r}. "
- "Must start with '$', '$.' or '$[' (no implicit normalization)."
- )
-
-    # Use package-level API
- results = jsonpath.findall(expr, data) # always returns a list
- return results[0] if len(results) == 1 else results
-
-
-def resolve_json_pointer(expr: str, data: Dict[str, Any]) -> Any:
- """Resolve a JSON Pointer; returns a single value."""
- if JSONPointer is None:
- raise ImportError("python-jsonpath is required for json-pointer (/...)")
- return JSONPointer(expr).resolve(data)
-
-
-def resolve_any(expr: str, data: Dict[str, Any]) -> Any:
- """Dispatch to the right resolver based on detected scheme."""
- scheme = detect_scheme(expr)
- if scheme == "json-path":
- return resolve_json_path(expr, data)
- if scheme == "json-pointer":
- return resolve_json_pointer(expr, data)
- return resolve_dot_notation(expr, data)
-
-
-# ========= Placeholder & coercion helpers =========
-
-_PLACEHOLDER_RE = re.compile(r"\{\{\s*(.*?)\s*\}\}")
-
-
-def extract_placeholders(template: str) -> Iterable[str]:
- """Yield the inner text of all {{ ... }} occurrences (trimmed)."""
- for m in _PLACEHOLDER_RE.finditer(template):
- yield m.group(1).strip()
-
-
-def coerce_to_str(value: Any) -> str:
- """Pretty stringify values for embedding into templates."""
- if isinstance(value, (dict, list)):
- return json.dumps(value, ensure_ascii=False)
- return str(value)
-
-
-def build_replacements(
- placeholders: Iterable[str], data: Dict[str, Any]
-) -> Tuple[Dict[str, str], set]:
- """
- Resolve all placeholders against data.
- Returns (replacements, unresolved_placeholders).
- """
- replacements: Dict[str, str] = {}
- unresolved: set = set()
- for expr in set(placeholders):
- try:
- val = resolve_any(expr, data)
- # Escape backslashes to avoid regex replacement surprises
- replacements[expr] = coerce_to_str(val).replace("\\", "\\\\")
- except Exception:
- unresolved.add(expr)
- return replacements, unresolved
-
-
-def apply_replacements(template: str, replacements: Dict[str, str]) -> str:
- """Replace {{ expr }} using a callback to avoid regex-injection issues."""
-
- def _repl(m: re.Match) -> str:
- expr = m.group(1).strip()
- return replacements.get(expr, m.group(0))
-
- return _PLACEHOLDER_RE.sub(_repl, template)
-
-
-def compute_truly_unreplaced(original: set, rendered: str) -> set:
- """Only count placeholders that were in the original template and remain."""
- now = set(extract_placeholders(rendered))
- return original & now
-
-
-def missing_lib_hints(unreplaced: set) -> Optional[str]:
- """Suggest installing python-jsonpath if placeholders indicate json-path or json-pointer usage."""
- if any(expr.startswith("$") or expr.startswith("/") for expr in unreplaced) and (
- jsonpath is None or JSONPointer is None
- ):
- return (
- "Install python-jsonpath to enable json-path ($...) and json-pointer (/...)"
- )
- return None
-
-
-def _format_with_template(
- content: str,
- format: str,
- kwargs: Dict[str, Any],
-) -> str:
- """Internal method to format content based on template_format"""
- if format == "fstring":
- return content.format(**kwargs)
-
- elif format == "jinja2":
- from jinja2 import Template, TemplateError
-
- try:
- return Template(content).render(**kwargs)
- except TemplateError:
- return content
-
- elif format == "curly":
- original_placeholders = set(extract_placeholders(content))
-
- replacements, _unresolved = build_replacements(original_placeholders, kwargs)
-
- result = apply_replacements(content, replacements)
-
- truly_unreplaced = compute_truly_unreplaced(original_placeholders, result)
-
- if truly_unreplaced:
- hint = missing_lib_hints(truly_unreplaced)
- suffix = f" Hint: {hint}" if hint else ""
- raise ValueError(
- f"Template variables not found or unresolved: "
- f"{', '.join(sorted(truly_unreplaced))}.{suffix}"
- )
-
- return result
-
- return content
-
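
A usage sketch of the entry point above, assuming `_format_with_template` and the resolver helpers defined earlier are in scope (the `jinja2` branch additionally requires Jinja2 to be installed):

```python
# Usage sketch for _format_with_template (assumed in scope).
ctx = {"country": "France"}

print(_format_with_template("Capital of {country}?", "fstring", ctx))
# -> "Capital of France?"

print(_format_with_template("Capital of {{ country }}?", "curly", ctx))
# -> "Capital of France?"
```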
-
-def _flatten_json(json_obj: Union[list, dict]) -> Dict[str, Any]:
- """
-    Recursively flatten a (nested) JSON object into a single-level dictionary whose keys encode the path to each value in the original structure.
-
- Args:
- json_obj (Union[list, dict]): The (nested) JSON object to flatten. It can be either a dictionary or a list.
-
- Returns:
- Dict[str, Any]: The flattened JSON object as a dictionary, with keys representing the paths to the values in the original structure.
- """
-
- output = {}
-
- def flatten(obj: Union[list, dict], path: str = "") -> None:
- if isinstance(obj, dict):
- for key, value in obj.items():
- new_key = f"{path}.{key}" if path else key
- if isinstance(value, (dict, list)):
- flatten(value, new_key)
- else:
- output[new_key] = value
-
- elif isinstance(obj, list):
- for index, value in enumerate(obj):
- new_key = f"{path}.{index}" if path else str(index)
- if isinstance(value, (dict, list)):
- flatten(value, new_key)
- else:
- output[new_key] = value
-
- flatten(json_obj)
- return output
-
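
For example, assuming `_flatten_json` is in scope as defined above:

```python
# Usage sketch: nested dicts and lists flatten to dotted path keys.
nested = {"user": {"name": "Ada", "tags": ["x", "y"]}, "ok": True}

print(_flatten_json(nested))
# -> {"user.name": "Ada", "user.tags.0": "x", "user.tags.1": "y", "ok": True}
```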
-
-def _compare_jsons(
- ground_truth: Union[list, dict],
- app_output: Union[list, dict],
- settings_values: dict,
-):
- """
- This function takes two JSON objects (ground truth and application output), flattens them using the `_flatten_json` function, and then compares the fields.
-
- Args:
- ground_truth (list | dict): The ground truth
- app_output (list | dict): The application output
-        settings_values (dict): The advanced configuration of the evaluator
-
- Returns:
-        The average score across the compared fields of the two JSON objects.
- """
-
- def normalize_keys(d: Dict[str, Any], case_insensitive: bool) -> Dict[str, Any]:
- if not case_insensitive:
- return d
- return {k.lower(): v for k, v in d.items()}
-
- def diff(ground_truth: Any, app_output: Any, compare_schema_only: bool) -> float:
- gt_key, gt_value = next(iter(ground_truth.items()))
- ao_key, ao_value = next(iter(app_output.items()))
-
- if compare_schema_only:
- return (
- 1.0 if (gt_key == ao_key and type(gt_value) == type(ao_value)) else 0.0
- )
- return 1.0 if (gt_key == ao_key and gt_value == ao_value) else 0.0
-
- flattened_ground_truth = _flatten_json(ground_truth)
- flattened_app_output = _flatten_json(app_output)
-
- keys = set(flattened_ground_truth.keys())
- if settings_values.get("predict_keys", False):
- keys = keys.union(set(flattened_app_output.keys()))
-
- cumulated_score = 0.0
- no_of_keys = len(keys)
-
- case_insensitive_keys = settings_values.get("case_insensitive_keys", False)
- compare_schema_only = settings_values.get("compare_schema_only", False)
- flattened_ground_truth = normalize_keys(
- flattened_ground_truth, case_insensitive_keys
- )
- flattened_app_output = normalize_keys(flattened_app_output, case_insensitive_keys)
-
- for key in keys:
- ground_truth_value = flattened_ground_truth.get(key, None)
- llm_app_output_value = flattened_app_output.get(key, None)
-
- key_score = 0.0
- if ground_truth_value is not None and llm_app_output_value is not None:
- key_score = diff(
- {key: ground_truth_value},
- {key: llm_app_output_value},
- compare_schema_only,
- )
-
- cumulated_score += key_score
- try:
- average_score = cumulated_score / no_of_keys
- return average_score
- except ZeroDivisionError:
- return 0.0
-
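
A usage sketch, assuming `_compare_jsons` and `_flatten_json` are in scope as defined above; the score is the fraction of compared keys that match under the chosen settings:

```python
# Usage sketch for _compare_jsons (assumed in scope).
gt = {"name": "Ada", "age": 36}
out = {"name": "Ada", "age": 40}

print(_compare_jsons(gt, out, {}))
# -> 0.5 ("name" matches, "age" differs)

print(_compare_jsons(gt, out, {"compare_schema_only": True}))
# -> 1.0 (same keys, same value types)
```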
-
-@instrument()
-def echo_v0(aloha: Any):
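-    """Echo the input back unchanged."""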
- return {"got": aloha}
-
-
-@instrument(annotate=True)
-def auto_exact_match_v0(
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Exact match evaluator for comparing outputs against reference outputs.
-
-    Args:
-        inputs: Testcase data, which may contain reference outputs
-        outputs: Output from the workflow execution
-        parameters: Configuration for the evaluator
-
- Returns:
- Evaluation result with success flag (True for match, False for mismatch)
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "correct_answer_key" in parameters:
- raise MissingConfigurationParameterV0Error(path="correct_answer_key")
-
- correct_answer_key = str(parameters["correct_answer_key"])
-
- if inputs is None or not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
-    if correct_answer_key not in inputs:
- raise MissingInputV0Error(path=correct_answer_key)
-
- correct_answer = inputs[correct_answer_key]
-
- # --------------------------------------------------------------------------
- success = False
- if isinstance(outputs, str) and isinstance(correct_answer, str):
- success = outputs == correct_answer
- elif isinstance(outputs, dict) and isinstance(correct_answer, dict):
- outputs = dumps(outputs, sort_keys=True)
- correct_answer = dumps(correct_answer, sort_keys=True)
- success = outputs == correct_answer
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
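
A minimal invocation sketch, assuming the function is in scope; in a real run the `@instrument` decorator may additionally require an initialized Agenta SDK:

```python
# Usage sketch for auto_exact_match_v0 (assumed in scope).
result = auto_exact_match_v0(
    parameters={"correct_answer_key": "correct_answer"},
    inputs={"correct_answer": "Paris"},
    outputs="Paris",
)
print(result)  # -> {"success": True}
```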
-
-@instrument(annotate=True)
-def auto_regex_test_v0(
- parameters: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Regex test evaluator for checking if output matches a regex pattern.
-
- Args:
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with regex pattern and matching flag
-
- Returns:
- Evaluation result with success flag
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "regex_pattern" in parameters:
- raise MissingConfigurationParameterV0Error(path="regex_pattern")
-
- regex_pattern = parameters["regex_pattern"]
-
- if not isinstance(regex_pattern, str):
- raise InvalidConfigurationParameterV0Error(
- path="regex_pattern",
- expected="str",
- got=regex_pattern,
- )
-
- case_sensitive = parameters.get("case_sensitive", True) is True
-
- regex_should_match = parameters.get("regex_should_match", True) is True
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- # --------------------------------------------------------------------------
- try:
- pattern = re.compile(
- regex_pattern,
- flags=0 if case_sensitive else re.IGNORECASE,
- )
- except Exception as e:
- raise RegexPatternV0Error(pattern=regex_pattern) from e
-
- result = pattern.search(outputs_str)
-
- success = bool(result) == regex_should_match
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
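
A similar sketch for the regex evaluator, under the same assumptions:

```python
# Usage sketch for auto_regex_test_v0 (assumed in scope).
result = auto_regex_test_v0(
    parameters={"regex_pattern": r"^\d{3}-\d{4}$", "case_sensitive": False},
    outputs="555-0123",
)
print(result)  # -> {"success": True}
```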
-
-@instrument(annotate=True)
-def field_match_test_v0(
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Field match test evaluator for extracting and comparing a specific field from JSON output.
-
- Args:
- inputs: Testcase data with ground truth
- outputs: Output from the workflow execution (expected to be JSON string or dict)
- parameters: Configuration for the evaluator with json_field to extract
-
- Returns:
- Evaluation result with success flag
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "json_field" in parameters:
- raise MissingConfigurationParameterV0Error(path="json_field")
-
- json_field = str(parameters["json_field"])
-
- if not "correct_answer_key" in parameters:
- raise MissingConfigurationParameterV0Error(path="correct_answer_key")
-
- correct_answer_key = str(parameters["correct_answer_key"])
-
- if inputs is None or not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
-    if correct_answer_key not in inputs:
- raise MissingInputV0Error(path=correct_answer_key)
-
- correct_answer = inputs[correct_answer_key]
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_dict = outputs
- if isinstance(outputs, str):
- try:
- outputs_dict = loads(outputs)
- except json.JSONDecodeError as e:
- raise InvalidOutputsV0Error(expected="dict", got=outputs) from e
-
- if not isinstance(outputs_dict, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
-    if json_field not in outputs_dict:
- raise MissingOutputV0Error(path=json_field)
-
- # --------------------------------------------------------------------------
- success = outputs_dict[json_field] == correct_answer
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
-
-@instrument(annotate=True)
-async def auto_webhook_test_v0(
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Webhook test evaluator for sending output to an external service for evaluation.
-
- Args:
- inputs: Testcase data with ground truth
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with webhook_url
-
- Returns:
- Evaluation result with score from the webhook
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "webhook_url" in parameters:
- raise MissingConfigurationParameterV0Error(path="webhook_url")
-
- webhook_url = str(parameters["webhook_url"])
-
- if not "correct_answer_key" in parameters:
- raise MissingConfigurationParameterV0Error(path="correct_answer_key")
-
- correct_answer_key = str(parameters["correct_answer_key"])
-
- if inputs is None or not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
-    if correct_answer_key not in inputs:
- raise MissingInputV0Error(path=correct_answer_key)
-
- correct_answer = inputs[correct_answer_key]
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- threshold = parameters.get("threshold") or 0.5
-
- if not isinstance(threshold, float):
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float",
- got=threshold,
- )
-
- if not 0.0 < threshold <= 1.0:
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float[0.0, 1.0]",
- got=threshold,
- )
-
- _outputs = None
-
- # --------------------------------------------------------------------------
- json_payload = {
- "inputs": inputs,
- "output": outputs_str,
- "correct_answer": correct_answer,
- }
-
- async with httpx.AsyncClient() as client:
- try:
- response = await client.post(
- url=webhook_url,
- json=json_payload,
- )
- except Exception as e:
- raise WebhookClientV0Error(
- message=str(e),
- ) from e
-
- if response.status_code != 200:
- raise WebhookServerV0Error(
- code=response.status_code,
- message=response.json(),
- )
-
- try:
- _outputs = response.json()
- except Exception as e:
- raise WebhookClientV0Error(
- message=str(e),
- ) from e
- # --------------------------------------------------------------------------
-
-    # NOTE: bool is a subclass of int, so check it before the numeric branch
-    if isinstance(_outputs, bool):
-        return {"success": _outputs}
-
-    if isinstance(_outputs, (int, float)):
-        return {"score": _outputs, "success": _outputs >= threshold}
-
- if isinstance(_outputs, dict) or isinstance(_outputs, str):
- return _outputs
-
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=_outputs)
-
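
An illustrative call, assuming the coroutine is in scope, the SDK is initialized, and the URL (hypothetical here) points to a service that returns a number, boolean, dict, or string:

```python
# Illustrative only: driving auto_webhook_test_v0 (assumed in scope) from sync code.
import asyncio

result = asyncio.run(
    auto_webhook_test_v0(
        parameters={
            "webhook_url": "https://example.com/evaluate",  # hypothetical endpoint
            "correct_answer_key": "correct_answer",
            "threshold": 0.7,
        },
        inputs={"correct_answer": "Paris"},
        outputs="Paris",
    )
)
print(result)  # e.g. {"score": 0.9, "success": True}
```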
-
-@instrument(annotate=True)
-async def auto_custom_code_run_v0(
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Custom code execution evaluator for running arbitrary code to evaluate outputs.
-
- Args:
- inputs: Testcase data with ground truth
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with code to execute
-
- Returns:
- Evaluation result with score from the custom code
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "code" in parameters:
- raise MissingConfigurationParameterV0Error(path="code")
-
- code = str(parameters["code"])
-
- if not "correct_answer_key" in parameters:
- raise MissingConfigurationParameterV0Error(path="correct_answer_key")
-
- correct_answer_key = str(parameters["correct_answer_key"])
-
- if inputs is None or not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
-    if correct_answer_key not in inputs:
- raise MissingInputV0Error(path=correct_answer_key)
-
- correct_answer = inputs[correct_answer_key]
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- threshold = parameters.get("threshold") or 0.5
-
- if not isinstance(threshold, float):
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float",
- got=threshold,
- )
-
- if not 0.0 < threshold <= 1.0:
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float[0.0, 1.0]",
- got=threshold,
- )
-
- _outputs = None
-
- # --------------------------------------------------------------------------
- try:
- _outputs = execute_code_safely(
- app_params={},
- inputs=inputs,
- output=outputs,
- correct_answer=correct_answer,
- code=code,
- )
- except Exception as e:
- raise CustomCodeServerV0Error(
- message=str(e),
- stacktrace=traceback.format_exc(),
- ) from e
- # --------------------------------------------------------------------------
-
-    # NOTE: bool is a subclass of int, so check it before the numeric branch
-    if isinstance(_outputs, bool):
-        return {"success": _outputs}
-
-    if isinstance(_outputs, (int, float)):
-        return {"score": _outputs, "success": _outputs >= threshold}
-
- if isinstance(_outputs, dict) or isinstance(_outputs, str):
- return _outputs
-
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=_outputs)
-
-
-@instrument(annotate=True)
-async def auto_ai_critique_v0(
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- # return {"score": 0.75, "success": True}
-
- """
- AI critique evaluator for using an LLM to evaluate outputs.
-
- Args:
- inputs: Testcase data with ground truth
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with prompt_template and model
-
- Returns:
- Evaluation result with score from the AI
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- correct_answer_key = parameters.get("correct_answer_key")
-
- if not "prompt_template" in parameters:
- raise MissingConfigurationParameterV0Error(path="prompt_template")
-
- prompt_template = parameters.get("prompt_template")
-
- if not isinstance(prompt_template, list):
- raise InvalidConfigurationParameterV0Error(
- path="prompt_template",
- expected="list",
- got=prompt_template,
- )
-
- template_version = parameters.get("version") or "3"
-
- default_format = "fstring" if template_version == "2" else "curly"
-
- template_format = str(parameters.get("template_format") or default_format)
-
- model = parameters.get("model") or "gpt-3.5-turbo"
-
- if not isinstance(model, str):
- raise InvalidConfigurationParameterV0Error(
- path="model",
- expected="str",
- got=model,
- )
-
- response_type = parameters.get("response_type") or (
- "json_schema" if template_version == "4" else "text"
- )
-
-    if response_type not in ["text", "json_object", "json_schema"]:
- raise InvalidConfigurationParameterV0Error(
- path="response_type",
- expected=["text", "json_object", "json_schema"],
- got=response_type,
- )
-
- json_schema = parameters.get("json_schema") or None
-
- json_schema = json_schema if response_type == "json_schema" else None
-
- if response_type == "json_schema" and not isinstance(json_schema, dict):
- raise InvalidConfigurationParameterV0Error(
- path="json_schema",
- expected="dict",
- got=json_schema,
- )
-
- response_format: dict = dict(type=response_type)
-
- if response_type == "json_schema":
- response_format["json_schema"] = json_schema
-
- correct_answer = None
-
- if inputs:
- if not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
- if correct_answer_key:
- if correct_answer_key in inputs:
- correct_answer = inputs[correct_answer_key]
-
- secrets = await SecretsManager.retrieve_secrets()
-
- if secrets is None or not isinstance(secrets, list):
- raise InvalidSecretsV0Error(expected="list", got=secrets)
-
-    openai_api_key = None
-    anthropic_api_key = None
-    openrouter_api_key = None
-    cohere_api_key = None
-    azure_api_key = None
-    groq_api_key = None
-
- for secret in secrets:
- if secret.get("kind") == "provider_key":
- secret_data = secret.get("data", {})
- if secret_data.get("kind") == "openai":
- provider_data = secret_data.get("provider", {})
- openai_api_key = provider_data.get("key") or openai_api_key
- if secret_data.get("kind") == "anthropic":
- provider_data = secret_data.get("provider", {})
- anthropic_api_key = provider_data.get("key") or anthropic_api_key
- if secret_data.get("kind") == "openrouter":
- provider_data = secret_data.get("provider", {})
- openrouter_api_key = provider_data.get("key") or openrouter_api_key
- if secret_data.get("kind") == "cohere":
- provider_data = secret_data.get("provider", {})
- cohere_api_key = provider_data.get("key") or cohere_api_key
- if secret_data.get("kind") == "azure":
- provider_data = secret_data.get("provider", {})
- azure_api_key = provider_data.get("key") or azure_api_key
- if secret_data.get("kind") == "groq":
- provider_data = secret_data.get("provider", {})
- groq_api_key = provider_data.get("key") or groq_api_key
-
- threshold = parameters.get("threshold") or 0.5
-
- if not isinstance(threshold, float):
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float",
- got=threshold,
- )
-
- _outputs = None
-
- # --------------------------------------------------------------------------
- litellm.openai_key = openai_api_key
- litellm.anthropic_key = anthropic_api_key
- litellm.openrouter_key = openrouter_api_key
- litellm.cohere_key = cohere_api_key
- litellm.azure_key = azure_api_key
- litellm.groq_key = groq_api_key
-
- context: Dict[str, Any] = dict()
-
- if parameters:
- context.update(
- **{
- "parameters": parameters,
- }
- )
-
- if correct_answer:
- context.update(
- **{
- "ground_truth": correct_answer,
- "correct_answer": correct_answer,
- "reference": correct_answer,
- }
- )
-
- if outputs:
- context.update(
- **{
- "prediction": outputs,
- "outputs": outputs,
- }
- )
-
- if inputs:
- context.update(**inputs)
- context.update(
- **{
- "inputs": inputs,
- }
- )
-
- try:
- formatted_prompt_template = [
- {
- "role": message["role"],
- "content": _format_with_template(
- content=message["content"],
- format=template_format,
- kwargs=context,
- ),
- }
- for message in prompt_template
- ]
- except Exception as e:
- raise PromptFormattingV0Error(
- message=str(e),
- stacktrace=traceback.format_exc(),
- ) from e
-
- try:
- response = await litellm.acompletion(
- model=model,
- messages=formatted_prompt_template,
- temperature=0.01,
- response_format=response_format,
- )
-
- _outputs = response.choices[0].message.content.strip() # type: ignore
-
- except litellm.AuthenticationError as e: # type: ignore
- e.message = e.message.replace(
- "litellm.AuthenticationError: AuthenticationError: ", ""
- )
- raise e
-
- except Exception as e:
- raise PromptCompletionV0Error(
- message=str(e),
- stacktrace=traceback.format_exc(),
- ) from e
- # --------------------------------------------------------------------------
-
- try:
- _outputs = json.loads(_outputs)
-    except Exception:  # not valid JSON; keep the raw string
- pass
-
-    # NOTE: bool is a subclass of int, so check it before the numeric branch
-    if isinstance(_outputs, bool):
-        return {
-            "success": _outputs,
-        }
-
-    if isinstance(_outputs, (int, float)):
-        return {
-            "score": _outputs,
-            "success": _outputs >= threshold,
-        }
-
- if isinstance(_outputs, dict):
- return _outputs
-
- raise InvalidOutputsV0Error(expected=["dict", "str", "int", "float"], got=_outputs)
-
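
For reference, a hypothetical `parameters` payload for the evaluator above; the placeholder names (`inputs`, `prediction`, `reference`) are context keys populated in the function body:

```python
# Hypothetical configuration for auto_ai_critique_v0; all values are illustrative.
parameters = {
    "prompt_template": [
        {
            "role": "system",
            "content": "You are an evaluator. Reply with a score between 0 and 1.",
        },
        {
            "role": "user",
            "content": "Inputs: {{ inputs }}\nAnswer: {{ prediction }}\nReference: {{ reference }}",
        },
    ],
    "correct_answer_key": "correct_answer",
    "model": "gpt-3.5-turbo",
    "threshold": 0.5,
}
```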
-
-@instrument(annotate=True)
-def auto_starts_with_v0(
- parameters: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Starts with evaluator for checking if output starts with a specific prefix.
-
- Args:
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with prefix and case sensitivity setting
-
- Returns:
- Evaluation result with success flag
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "prefix" in parameters:
- raise MissingConfigurationParameterV0Error(path="prefix")
-
- prefix = parameters["prefix"]
-
- if not isinstance(prefix, str):
- raise InvalidConfigurationParameterV0Error(
- path="prefix",
- expected="str",
- got=prefix,
- )
-
- case_sensitive = parameters.get("case_sensitive", True) is True
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- # --------------------------------------------------------------------------
- if not case_sensitive:
- outputs_str = outputs_str.lower()
- prefix = prefix.lower()
-
- success = outputs_str.startswith(prefix)
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
-
-@instrument(annotate=True)
-def auto_ends_with_v0(
- parameters: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Ends with evaluator for checking if output ends with a specific suffix.
-
- Args:
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with suffix and case sensitivity setting
-
- Returns:
- Evaluation result with success flag
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "suffix" in parameters:
- raise MissingConfigurationParameterV0Error(path="suffix")
-
- suffix = parameters["suffix"]
-
- if not isinstance(suffix, str):
- raise InvalidConfigurationParameterV0Error(
- path="suffix",
- expected="str",
- got=suffix,
- )
-
- case_sensitive = parameters.get("case_sensitive", True) is True
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- # --------------------------------------------------------------------------
- if not case_sensitive:
- outputs_str = outputs_str.lower()
- suffix = suffix.lower()
-
- success = outputs_str.endswith(suffix)
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
-
-@instrument(annotate=True)
-def auto_contains_v0(
- parameters: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Contains evaluator for checking if output contains a specific substring.
-
- Args:
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with substring and case sensitivity setting
-
- Returns:
- Evaluation result with success flag
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "substring" in parameters:
- raise MissingConfigurationParameterV0Error(path="substring")
-
- substring = parameters["substring"]
-
- if not isinstance(substring, str):
- raise InvalidConfigurationParameterV0Error(
- path="substring",
- expected="str",
- got=substring,
- )
-
- case_sensitive = parameters.get("case_sensitive", True) is True
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- # --------------------------------------------------------------------------
- if not case_sensitive:
- outputs_str = outputs_str.lower()
- substring = substring.lower()
-
- success = substring in outputs_str
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
-
-@instrument(annotate=True)
-def auto_contains_any_v0(
- parameters: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Contains any evaluator for checking if output contains any of the specified substrings.
-
- Args:
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with substrings list and case sensitivity setting
-
- Returns:
- Evaluation result with success flag
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "substrings" in parameters:
- raise MissingConfigurationParameterV0Error(path="substrings")
-
- substrings = parameters["substrings"]
-
- if not isinstance(substrings, list):
- raise InvalidConfigurationParameterV0Error(
- path="substrings",
- expected="list",
- got=substrings,
- )
-
-    if not all(isinstance(s, str) for s in substrings):
-        raise InvalidConfigurationParameterV0Error(
-            path="substrings",
-            expected="list[str]",
-            got=substrings,
-        )
-
-    # validate before stripping so a non-str item raises the intended error
-    substrings = [s.strip() for s in substrings]
-
- case_sensitive = parameters.get("case_sensitive", True) is True
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- # --------------------------------------------------------------------------
- if not case_sensitive:
- outputs_str = outputs_str.lower()
- substrings = [s.lower() for s in substrings]
-
- success = any(substring in outputs_str for substring in substrings)
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
-
-@instrument(annotate=True)
-def auto_contains_all_v0(
- parameters: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Contains all evaluator for checking if output contains all of the specified substrings.
-
- Args:
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with substrings list and case sensitivity setting
-
- Returns:
- Evaluation result with success flag
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "substrings" in parameters:
- raise MissingConfigurationParameterV0Error(path="substrings")
-
- substrings = parameters["substrings"]
-
- if not isinstance(substrings, list):
- raise InvalidConfigurationParameterV0Error(
- path="substrings",
- expected="list",
- got=substrings,
- )
-
-    if not all(isinstance(s, str) for s in substrings):
-        raise InvalidConfigurationParameterV0Error(
-            path="substrings",
-            expected="list[str]",
-            got=substrings,
-        )
-
-    # validate before stripping so a non-str item raises the intended error
-    substrings = [s.strip() for s in substrings]
-
- case_sensitive = parameters.get("case_sensitive", True) is True
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- # --------------------------------------------------------------------------
- if not case_sensitive:
- outputs_str = outputs_str.lower()
- substrings = [s.lower() for s in substrings]
-
- success = all(substring in outputs_str for substring in substrings)
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
-
-@instrument(annotate=True)
-def auto_contains_json_v0(
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Contains JSON evaluator for checking if output contains valid JSON content.
-
-    Args:
-        outputs: Output from the workflow execution
-
- Returns:
- Evaluation result with success flag
- """
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- # --------------------------------------------------------------------------
- success = True
- potential_json = ""
-
- try:
- start_index = outputs_str.index("{")
- end_index = outputs_str.rindex("}") + 1
- potential_json = outputs_str[start_index:end_index]
- except Exception: # pylint: disable=broad-exception-caught
- success = False
-
- if success:
- try:
- json.loads(potential_json)
- except Exception: # pylint: disable=broad-exception-caught
- success = False
- # --------------------------------------------------------------------------
-
- return {"success": success}
-
-
-@instrument(annotate=True)
-def auto_json_diff_v0(
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- JSON diff evaluator for finding differences between JSON structures.
-
- Args:
- inputs: Testcase data with reference JSON
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator
-
- Returns:
- Evaluation result with score only (no diff explanation)
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "correct_answer_key" in parameters:
- raise MissingConfigurationParameterV0Error(path="correct_answer_key")
-
- correct_answer_key = str(parameters["correct_answer_key"])
-
- if inputs is None or not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
-    if correct_answer_key not in inputs:
- raise MissingInputV0Error(path=correct_answer_key)
-
- correct_answer = inputs[correct_answer_key]
-
- if not isinstance(correct_answer, str) and not isinstance(correct_answer, dict):
- raise InvalidInputV0Error(
- path=correct_answer_key, expected=["dict", "str"], got=correct_answer
- )
-
- correct_answer_dict = (
- correct_answer if isinstance(correct_answer, dict) else loads(correct_answer)
- )
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_dict = outputs
- if isinstance(outputs, str):
- try:
- outputs_dict = loads(outputs)
- except json.JSONDecodeError as e:
- raise InvalidOutputsV0Error(expected="dict", got=outputs) from e
-
- threshold = parameters.get("threshold") or 0.5
-
- if not isinstance(threshold, float):
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float",
- got=threshold,
- )
-
- if not 0.0 < threshold <= 1.0:
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float[0.0, 1.0]",
- got=threshold,
- )
-
- _outputs = None
-
- # --------------------------------------------------------------------------
- try:
- _outputs = _compare_jsons(
- ground_truth=correct_answer_dict,
- app_output=outputs_dict, # type: ignore
- settings_values=parameters,
- )
-
- except Exception as e:
- raise JSONDiffV0Error(message=str(e), stacktrace=traceback.format_exc()) from e
- # --------------------------------------------------------------------------
-
- if isinstance(_outputs, (int, float)):
- return {"score": _outputs, "success": _outputs >= threshold}
-
- raise JSONDiffV0Error(
- message=f"json-diff error: got ({type(_outputs)}) {_outputs}, expected (int, float)."
- )
-
-
-@instrument(annotate=True)
-def auto_levenshtein_distance_v0(
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Levenshtein distance evaluator using pure Python implementation.
- Measures edit distance and returns normalized similarity score.
-
- Args:
- inputs: Testcase data with reference string.
- outputs: Output from the workflow execution.
- parameters: Configuration for the evaluator.
-
- Returns:
- Dictionary with normalized similarity score (0 to 1),
- or error message if evaluation fails.
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "correct_answer_key" in parameters:
- raise MissingConfigurationParameterV0Error(path="correct_answer_key")
-
- correct_answer_key = str(parameters["correct_answer_key"])
-
- case_sensitive = parameters.get("case_sensitive", True) is True
-
- if inputs is None or not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
-    if correct_answer_key not in inputs:
- raise MissingInputV0Error(path=correct_answer_key)
-
- correct_answer = inputs[correct_answer_key]
-
- if not isinstance(correct_answer, str) and not isinstance(correct_answer, dict):
- raise InvalidInputV0Error(
- path=correct_answer_key, expected=["dict", "str"], got=correct_answer
- )
-
- correct_answer_str = (
- correct_answer if isinstance(correct_answer, str) else dumps(correct_answer)
- )
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- threshold = parameters.get("threshold") or 0.5
-
- if not isinstance(threshold, float):
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float",
- got=threshold,
- )
-
- if not 0.0 < threshold <= 1.0:
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float[0.0, 1.0]",
- got=threshold,
- )
-
- _outputs = None
-
- # --------------------------------------------------------------------------
- if not case_sensitive:
- outputs_str = outputs_str.lower()
- correct_answer_str = correct_answer_str.lower()
-
- try:
- # Compute Levenshtein distance
- if len(correct_answer_str) == 0:
- distance = len(outputs_str)
- else:
- previous_row = list(range(len(correct_answer_str) + 1))
- for i, c1 in enumerate(outputs_str):
- current_row = [i + 1]
- for j, c2 in enumerate(correct_answer_str):
- insert = previous_row[j + 1] + 1
- delete = current_row[j] + 1
- substitute = previous_row[j] + (c1 != c2)
- current_row.append(min(insert, delete, substitute))
- previous_row = current_row
- distance = previous_row[-1]
-
- # Normalize similarity score
- max_length = max(len(outputs_str), len(correct_answer_str))
- _outputs = 1.0 if max_length == 0 else 1.0 - (distance / max_length)
- except Exception as e:
- raise LevenshteinDistanceV0Error(
- message=str(e), stacktrace=traceback.format_exc()
- ) from e
- # --------------------------------------------------------------------------
-
- if isinstance(_outputs, (int, float)):
- return {"score": _outputs, "success": _outputs >= threshold}
-
- raise LevenshteinDistanceV0Error(
- message=f"levenshtein-distance error: got ({type(_outputs)}) {_outputs}, expected (int, float)."
- )
-
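
A standalone sketch of the same normalized scoring, handy for sanity-checking the loop above (the helper name is hypothetical, not SDK API):

```python
# Standalone sketch of the normalized Levenshtein scoring used above.
def levenshtein_score(output: str, reference: str) -> float:
    if len(reference) == 0:
        return 1.0 if len(output) == 0 else 0.0
    previous_row = list(range(len(reference) + 1))
    for i, c1 in enumerate(output):
        current_row = [i + 1]
        for j, c2 in enumerate(reference):
            insert = previous_row[j + 1] + 1
            delete = current_row[j] + 1
            substitute = previous_row[j] + (c1 != c2)
            current_row.append(min(insert, delete, substitute))
        previous_row = current_row
    distance = previous_row[-1]
    return 1.0 - distance / max(len(output), len(reference))

print(levenshtein_score("kitten", "sitting"))  # -> ~0.571 (distance 3, max length 7)
```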
-
-@instrument(annotate=True)
-def auto_similarity_match_v0(
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Similarity match evaluator for measuring string similarity between output and reference.
-
- Args:
- inputs: Testcase data with reference string
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator
-
- Returns:
- Evaluation result with similarity score
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "correct_answer_key" in parameters:
- raise MissingConfigurationParameterV0Error(path="correct_answer_key")
-
- correct_answer_key = str(parameters["correct_answer_key"])
-
- case_sensitive = parameters.get("case_sensitive", True) is True
-
- if inputs is None or not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
-    if correct_answer_key not in inputs:
- raise MissingInputV0Error(path=correct_answer_key)
-
- correct_answer = inputs[correct_answer_key]
-
- if not isinstance(correct_answer, str) and not isinstance(correct_answer, dict):
- raise InvalidInputV0Error(
- path=correct_answer_key, expected=["dict", "str"], got=correct_answer
- )
-
- correct_answer_str = (
- correct_answer if isinstance(correct_answer, str) else dumps(correct_answer)
- )
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- threshold = (
- parameters.get("threshold") or parameters.get("similarity_threshold") or 0.5
- )
-
- if not isinstance(threshold, float):
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float",
- got=threshold,
- )
-
- if not 0.0 < threshold <= 1.0:
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float[0.0, 1.0]",
- got=threshold,
- )
-
- _outputs = None
-
- # --------------------------------------------------------------------------
- if not case_sensitive:
- outputs_str = outputs_str.lower()
- correct_answer_str = correct_answer_str.lower()
-
- try:
- matcher = SequenceMatcher(None, outputs_str, correct_answer_str)
-
- _outputs = matcher.ratio()
- except Exception as e:
- raise SyntacticSimilarityV0Error(
- message=str(e), stacktrace=traceback.format_exc()
- ) from e
- # --------------------------------------------------------------------------
-
- if isinstance(_outputs, (int, float)):
- return {"score": _outputs, "success": _outputs >= threshold}
-
- raise SyntacticSimilarityV0Error(
- message=f"syntactic-similarity-match error: got ({type(_outputs)}) {_outputs}, expected (int, float)."
- )
-
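
The score here is `difflib.SequenceMatcher.ratio()`; a quick illustration:

```python
# Quick illustration of the difflib ratio backing auto_similarity_match_v0.
from difflib import SequenceMatcher

print(SequenceMatcher(None, "Paris", "Paris").ratio())  # -> 1.0
print(SequenceMatcher(None, "Paris", "paris").ratio())  # -> 0.8 (one character differs)
```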
-
-@instrument(annotate=True)
-async def auto_semantic_similarity_v0(
- *,
- parameters: Optional[Data] = None,
- inputs: Optional[Data] = None,
- outputs: Optional[Union[Data, str]] = None,
-) -> Any:
- """
- Semantic similarity evaluator for measuring semantic similarity between output and reference using embeddings.
-
- Args:
- inputs: Testcase data with reference string
- outputs: Output from the workflow execution
- parameters: Configuration for the evaluator with embedding model and credentials
-
- Returns:
- Evaluation result with cosine similarity score
- """
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "correct_answer_key" in parameters:
- raise MissingConfigurationParameterV0Error(path="correct_answer_key")
-
- correct_answer_key = str(parameters["correct_answer_key"])
-
- embedding_model = parameters.get("embedding_model", "text-embedding-3-small")
-
- if not isinstance(embedding_model, str):
-        raise InvalidConfigurationParameterV0Error(
-            path="embedding_model",
-            expected="str",
-            got=embedding_model,
-        )
-
- if inputs is None or not isinstance(inputs, dict):
- raise InvalidInputsV0Error(expected="dict", got=inputs)
-
-    if correct_answer_key not in inputs:
- raise MissingInputV0Error(path=correct_answer_key)
-
- correct_answer = inputs[correct_answer_key]
-
- if not isinstance(correct_answer, str) and not isinstance(correct_answer, dict):
- raise InvalidInputV0Error(
- path=correct_answer_key, expected=["dict", "str"], got=correct_answer
- )
-
- correct_answer_str = (
- correct_answer if isinstance(correct_answer, str) else dumps(correct_answer)
- )
-
- if not isinstance(outputs, str) and not isinstance(outputs, dict):
- raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-
- outputs_str = outputs if isinstance(outputs, str) else dumps(outputs)
-
- secrets = await SecretsManager.retrieve_secrets()
-
- if secrets is None or not isinstance(secrets, list):
- raise InvalidSecretsV0Error(expected="list", got=secrets)
-
-    openai_api_key = None
-
- for secret in secrets:
- if secret.get("kind") == "provider_key":
- secret_data = secret.get("data", {})
- if secret_data.get("kind") == "openai":
- provider_data = secret_data.get("provider", {})
- openai_api_key = provider_data.get("key") or openai_api_key
-
- threshold = parameters.get("threshold") or 0.5
-
- if not isinstance(threshold, float):
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float",
- got=threshold,
- )
-
- if not 0.0 < threshold <= 1.0:
- raise InvalidConfigurationParameterV0Error(
- path="threshold",
- expected="float[0.0, 1.0]",
- got=threshold,
- )
-
- _outputs = None
-
- # --------------------------------------------------------------------------
- try:
- openai = AsyncOpenAI(api_key=openai_api_key)
- except OpenAIError as e:
- raise OpenAIError("OpenAIException - " + e.args[0])
-
- output_embedding = await _compute_embedding(
- openai,
- embedding_model,
- outputs_str,
- )
-
- reference_embedding = await _compute_embedding(
- openai,
- embedding_model,
- correct_answer_str,
- )
-
- _outputs = float(
- _compute_similarity(
- output_embedding,
- reference_embedding,
- )
- )
- # --------------------------------------------------------------------------
-
- if isinstance(_outputs, (int, float)):
- return {"score": _outputs, "success": _outputs >= threshold}
-
- raise SemanticSimilarityV0Error(
- message=f"semantic-similarity error: got ({type(_outputs)}) {_outputs}, expected (int, float)."
- )
-
-
-class SinglePromptConfig(BaseModel):
- prompt: PromptTemplate = Field(
- default=PromptTemplate(
- system_prompt="You are an expert in geography",
- user_prompt="What is the capital of {{country}}?",
- )
- )
-
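
A usage sketch mirroring the defaults above; Pydantic coerces the nested dict into a `PromptTemplate`:

```python
# Usage sketch: building a SinglePromptConfig from a raw parameters dict (assumed in scope).
config = SinglePromptConfig(
    **{
        "prompt": {
            "system_prompt": "You are an expert in geography",
            "user_prompt": "What is the capital of {{country}}?",
        }
    }
)
print(config.prompt.user_prompt)  # -> "What is the capital of {{country}}?"
```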
-
-@instrument()
-async def completion_v0(
- parameters: Data,
- inputs: Dict[str, str],
-) -> Any:
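-    """Execute a single-prompt completion workflow using the configured provider."""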
- if parameters is None or not isinstance(parameters, dict):
- raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
-
- if not "prompt" in parameters:
- raise MissingConfigurationParameterV0Error(path="prompt")
-
- params: Dict[str, Any] = {**(parameters or {})}
-
- config = SinglePromptConfig(**params)
- if config.prompt.input_keys is not None:
- required_keys = set(config.prompt.input_keys)
- provided_keys = set(inputs.keys())
-
- if required_keys != provided_keys:
- raise InvalidInputsV0Error(
- expected=sorted(required_keys),
- got=sorted(provided_keys),
- )
-
- await SecretsManager.ensure_secrets_in_workflow()
-
- provider_settings = SecretsManager.get_provider_settings_from_workflow(
- config.prompt.llm_config.model
- )
-
- if not provider_settings:
- raise InvalidSecretsV0Error(expected="dict", got=provider_settings)
-
- with mockllm.user_aws_credentials_from(provider_settings):
- response = await mockllm.acompletion(
- **{
- k: v
- for k, v in config.prompt.format(**inputs).to_openai_kwargs().items()
- if k != "model"
- },
- **provider_settings,
- )
-
- message = response.choices[0].message # type: ignore
-
- if message.content is not None:
- return message.content
- if hasattr(message, "refusal") and message.refusal is not None: # type: ignore
- return message.refusal # type: ignore
- if hasattr(message, "parsed") and message.parsed is not None: # type: ignore
- return message.parsed # type: ignore
- if hasattr(message, "tool_calls") and message.tool_calls is not None:
- return [tool_call.dict() for tool_call in message.tool_calls]
-
-
-@instrument()
-async def chat_v0(
- parameters: Data,
- inputs: Optional[Dict[str, str]] = None,
- messages: Optional[List[Message]] = None,
-):
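-    """Execute a chat workflow: format the configured prompt, append any chat history, and call the provider."""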
- params: Dict[str, Any] = {**(parameters or {})}
-
- config = SinglePromptConfig(**params)
- if config.prompt.input_keys is not None:
- required_keys = set(config.prompt.input_keys)
- provided_keys = set(inputs.keys()) if inputs is not None else set()
-
- if required_keys != provided_keys:
- raise InvalidInputsV0Error(
- expected=sorted(required_keys),
- got=sorted(provided_keys),
- )
-
- if inputs is not None:
- formatted_prompt = config.prompt.format(**inputs)
- else:
- formatted_prompt = config.prompt
- openai_kwargs = formatted_prompt.to_openai_kwargs()
-
- if messages is not None:
- openai_kwargs["messages"].extend(messages)
-
- await SecretsManager.ensure_secrets_in_workflow()
-
- provider_settings = SecretsManager.get_provider_settings_from_workflow(
- config.prompt.llm_config.model
- )
-
- if not provider_settings:
- raise InvalidSecretsV0Error(expected="dict", got=provider_settings)
-
- with mockllm.user_aws_credentials_from(provider_settings):
- response = await mockllm.acompletion(
- **{
- k: v for k, v in openai_kwargs.items() if k != "model"
- }, # we should use the model_name from provider_settings
- **provider_settings,
- )
-
- return response.choices[0].message.model_dump(exclude_none=True) # type: ignore
diff --git a/sdk/agenta/sdk/workflows/interfaces.py b/sdk/agenta/sdk/workflows/interfaces.py
deleted file mode 100644
index 85334ab6cb..0000000000
--- a/sdk/agenta/sdk/workflows/interfaces.py
+++ /dev/null
@@ -1,948 +0,0 @@
-from agenta.sdk.models.workflows import WorkflowServiceInterface
-
-echo_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:echo:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Echo Parameters",
- "description": "No configuration parameters required.",
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Echo Input",
- "description": "Arbitrary input to be echoed back.",
- },
- outputs={
- "type": "object",
- "title": "Echo Output",
- "description": "The echoed response object.",
- "properties": {
- "got": {
- "type": "string",
- "title": "Echoed Value",
- "description": "The input value passed back unchanged.",
- }
- },
- "required": ["got"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_exact_match_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_exact_match:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Exact Match Parameters",
- "description": "Configuration for the Exact Match evaluator.",
- "properties": {
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "The name of the column in the test data that contains the correct answer.",
- "default": "correct_answer",
- }
- },
- "required": ["correct_answer_key"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Exact Match Inputs",
- "description": "Testcase data including the correct answer.",
- },
- outputs={
- "type": "object",
- "title": "Exact Match Outputs",
- "description": "Result indicating whether the output exactly matched the expected answer.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if the outputs exactly matched, False otherwise.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
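
These `parameters`/`inputs`/`outputs` entries are plain JSON Schema objects, so any validator can check payloads against them. An illustration with the third-party `jsonschema` package (not used by this module itself):

```python
# Illustration only: validating a parameters payload against the schema above.
from jsonschema import validate  # third-party: pip install jsonschema

parameters_schema = {
    "type": "object",
    "properties": {
        "correct_answer_key": {"type": "string", "default": "correct_answer"},
    },
    "required": ["correct_answer_key"],
    "additionalProperties": False,
}

validate(instance={"correct_answer_key": "correct_answer"}, schema=parameters_schema)
# raises jsonschema.ValidationError on mismatch; returns None when valid
```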
-auto_regex_test_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_regex_test:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Regex Test Parameters",
- "description": "Settings for evaluating whether output matches a regex pattern.",
- "properties": {
- "regex_pattern": {
- "type": "string",
- "title": "Regex Pattern",
- "description": "Pattern for regex testing (e.g., ^this_word\\d{3}$).",
- "default": "",
- },
- "regex_should_match": {
- "type": "boolean",
- "title": "Match or Mismatch",
- "description": "If True, regex must match; if False, regex must not match.",
- "default": True,
- },
- "case_sensitive": {
- "type": "boolean",
- "title": "Case Sensitive",
- "description": "If True, regex matching is case-sensitive.",
- "default": True,
- },
- },
- "required": ["regex_pattern"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Regex Test Inputs",
- "description": "Output from the workflow execution to be tested against the regex.",
- },
- outputs={
- "type": "object",
- "title": "Regex Test Outputs",
- "description": "Result indicating whether regex matched as configured.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if regex condition passed, False otherwise.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
-field_match_test_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:field_match_test:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Field Match Parameters",
- "description": "Settings for comparing a specific JSON field against the expected answer.",
- "properties": {
- "json_field": {
- "type": "string",
- "title": "JSON Field",
- "description": "The field in the JSON output to evaluate.",
- "default": "",
- },
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "Column in test data containing the correct answer.",
- "default": "correct_answer",
- },
- },
- "required": ["json_field", "correct_answer_key"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Field Match Inputs",
- "description": "Testcase data including the correct answer.",
- },
- outputs={
- "type": "object",
- "title": "Field Match Outputs",
- "description": "Result indicating whether the selected field matched the expected answer.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if the JSON field matched the expected answer.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_webhook_test_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_webhook_test:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Webhook Test Parameters",
- "description": "Settings for sending evaluation requests to a webhook service.",
- "properties": {
- "webhook_url": {
- "type": "string",
- "format": "uri",
- "title": "Webhook URL",
- "description": "The endpoint that will receive the evaluation payload.",
- },
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "Column in test data containing the correct answer.",
- "default": "correct_answer",
- },
- "threshold": {
- "type": "number",
- "title": "Threshold",
- "description": "Score threshold to determine success.",
- "minimum": 0,
- "maximum": 1,
- "default": 0.5,
- },
- },
- "required": ["webhook_url"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Webhook Test Inputs",
- "description": "Payload including inputs, output, and correct answer sent to the webhook.",
- },
- outputs={
- "type": "object",
- "title": "Webhook Test Outputs",
- "description": "Score and success flag returned by the webhook evaluation.",
- "properties": {
- "score": {
- "type": "number",
- "title": "Score",
- "description": "Numeric evaluation score returned by the webhook.",
- },
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if the score meets or exceeds the threshold.",
- },
- },
- "required": ["score", "success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_custom_code_run_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_custom_code_run:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Custom Code Evaluation Parameters",
- "description": "Settings for running custom Python code to evaluate workflow outputs.",
- "properties": {
- "code": {
- "type": "string",
- "title": "Evaluation Code",
- "description": "Python code snippet that will be executed to evaluate the output.",
- "default": (
- "from typing import Dict, Union, Any\n\n"
- "def evaluate(\n"
- " app_params: Dict[str, str],\n"
- " inputs: Dict[str, str],\n"
- " output: Union[str, Dict[str, Any]],\n"
- " correct_answer: str\n"
- ") -> float:\n"
- " if output in correct_answer:\n"
- " return 1.0\n"
- " return 0.0\n"
- ),
- },
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "Column in the test data containing the correct answer.",
- "default": "correct_answer",
- },
- "threshold": {
- "type": "number",
- "title": "Threshold",
- "description": "Score threshold used to determine success.",
- "minimum": 0,
- "maximum": 1,
- "default": 0.5,
- },
- },
- "required": ["code"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Custom Code Evaluation Inputs",
- "description": "Testcase data and workflow outputs available to the custom code.",
- },
- outputs={
- "type": "object",
- "title": "Custom Code Evaluation Outputs",
- "description": "Score and success flag returned by the custom evaluation code.",
- "properties": {
- "score": {
- "type": "number",
- "title": "Score",
- "description": "Numeric score computed by the custom code.",
- },
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if score meets or exceeds the threshold.",
- },
- },
- "required": ["score", "success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_ai_critique_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_ai_critique:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "LLM-as-a-Judge Parameters",
- "description": "Configuration for using an AI model to critique workflow outputs.",
- "properties": {
- "prompt_template": {
- "type": "array",
- "title": "Prompt Template",
- "description": "Template messages used by the LLM to evaluate outputs.",
- "items": {"type": "object"},
- },
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "Column in test data containing the correct answer.",
- "default": "correct_answer",
- },
- "model": {
- "type": "string",
- "title": "Model",
- "description": "The LLM model to use for evaluation.",
- "default": "gpt-3.5-turbo",
- },
- "threshold": {
- "type": "number",
- "title": "Threshold",
- "description": "Score threshold to determine success.",
- "minimum": 0,
- "maximum": 1,
- "default": 0.5,
- },
- "version": {
- "type": "string",
- "title": "Evaluator Version",
- "description": "Internal evaluator version identifier.",
- "default": "3",
- },
- },
- "required": ["prompt_template"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "AI Critique Inputs",
- "description": "Testcase data and workflow outputs provided to the LLM judge.",
- },
- outputs={
- "type": "object",
- "title": "AI Critique Outputs",
- "description": "Score and success flag returned by the AI critique evaluator.",
- "properties": {
- "score": {
- "type": "number",
- "title": "Score",
- "description": "Numeric evaluation score assigned by the AI.",
- },
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if the score meets or exceeds the threshold.",
- },
- },
- "required": ["score", "success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_starts_with_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_starts_with:v0",
-    schemas=dict(  # type: ignore
- parameters={
- "type": "object",
- "title": "Starts With Parameters",
- "description": "Configuration for checking if output starts with a specific prefix.",
- "properties": {
- "prefix": {
- "type": "string",
- "title": "Prefix",
- "description": "The string to match at the start of the output.",
- },
- "case_sensitive": {
- "type": "boolean",
- "title": "Case Sensitive",
- "description": "If True, matching is case-sensitive.",
- "default": True,
- },
- },
- "required": ["prefix"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Starts With Inputs",
- "description": "Workflow output to be checked against the prefix.",
- },
- outputs={
- "type": "object",
- "title": "Starts With Outputs",
- "description": "Result of the prefix check.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if output starts with the given prefix.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_ends_with_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_ends_with:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Ends With Parameters",
- "description": "Configuration for checking if output ends with a specific suffix.",
- "properties": {
- "suffix": {
- "type": "string",
- "title": "Suffix",
- "description": "The string to match at the end of the output.",
- },
- "case_sensitive": {
- "type": "boolean",
- "title": "Case Sensitive",
- "description": "If True, matching is case-sensitive.",
- "default": True,
- },
- },
- "required": ["suffix"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Ends With Inputs",
- "description": "Workflow output to be checked against the suffix.",
- },
- outputs={
- "type": "object",
- "title": "Ends With Outputs",
- "description": "Result of the suffix check.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if output ends with the given suffix.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_contains_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_contains:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Contains Parameters",
- "description": "Configuration for checking if output contains a given substring.",
- "properties": {
- "substring": {
- "type": "string",
- "title": "Substring",
- "description": "The string to check for in the output.",
- },
- "case_sensitive": {
- "type": "boolean",
- "title": "Case Sensitive",
- "description": "If True, substring search is case-sensitive.",
- "default": True,
- },
- },
- "required": ["substring"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Contains Inputs",
- "description": "Workflow output to be checked for substring presence.",
- },
- outputs={
- "type": "object",
- "title": "Contains Outputs",
- "description": "Result of the substring presence check.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if substring is found in the output.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_contains_any_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_contains_any:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Contains Any Parameters",
- "description": "Configuration for checking if output contains any of the specified substrings.",
- "properties": {
- "substrings": {
- "type": "array",
- "title": "Substrings",
- "description": "List of substrings to check for. The evaluation passes if any substring is found.",
- "items": {"type": "string"},
- },
- "case_sensitive": {
- "type": "boolean",
- "title": "Case Sensitive",
- "description": "If True, substring checks are case-sensitive.",
- "default": True,
- },
- },
- "required": ["substrings"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Contains Any Inputs",
- "description": "Workflow output to be checked for substrings.",
- },
- outputs={
- "type": "object",
- "title": "Contains Any Outputs",
- "description": "Result of the 'contains any' substring check.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if any substring is found in the output.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_contains_all_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_contains_all:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Contains All Parameters",
- "description": "Configuration for checking if output contains all of the specified substrings.",
- "properties": {
- "substrings": {
- "type": "array",
- "title": "Substrings",
- "description": "List of substrings to check for. The evaluation passes only if all substrings are found.",
- "items": {"type": "string"},
- },
- "case_sensitive": {
- "type": "boolean",
- "title": "Case Sensitive",
- "description": "If True, substring checks are case-sensitive.",
- "default": True,
- },
- },
- "required": ["substrings"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Contains All Inputs",
- "description": "Workflow output to be checked for substrings.",
- },
- outputs={
- "type": "object",
- "title": "Contains All Outputs",
- "description": "Result of the 'contains all' substring check.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if all substrings are found in the output.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_contains_json_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_contains_json:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Contains JSON Parameters",
- "description": "No configuration parameters required.",
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Contains JSON Inputs",
- "description": "Workflow output to be checked for valid JSON content.",
- },
- outputs={
- "type": "object",
- "title": "Contains JSON Outputs",
- "description": "Result of the JSON validity check.",
- "properties": {
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if valid JSON content was found in the output.",
- }
- },
- "required": ["success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_json_diff_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_json_diff:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "JSON Diff Parameters",
- "description": "Settings for comparing predicted JSON output against ground truth JSON.",
- "properties": {
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "Column in test data containing the correct JSON answer.",
- "default": "correct_answer",
- },
- "compare_schema_only": {
- "type": "boolean",
- "title": "Compare Schema Only",
- "description": "If True, only keys and their types are compared; values are ignored.",
- "default": False,
- },
- "predict_keys": {
- "type": "boolean",
- "title": "Include Prediction Keys",
- "description": "If True, prediction keys not present in ground truth are ignored.",
- "default": False,
- },
- "case_insensitive_keys": {
- "type": "boolean",
- "title": "Case-Insensitive Keys",
- "description": "If True, key comparisons are case-insensitive.",
- "default": False,
- },
- "threshold": {
- "type": "number",
- "title": "Threshold",
- "description": "Minimum similarity score required for success.",
- "minimum": 0,
- "maximum": 1,
- "default": 0.5,
- },
- },
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "JSON Diff Inputs",
- "description": "Workflow output and ground truth JSON to compare.",
- },
- outputs={
- "type": "object",
- "title": "JSON Diff Outputs",
- "description": "Score and success flag for the JSON comparison.",
- "properties": {
- "score": {
- "type": "number",
- "title": "Score",
- "description": "Similarity score between prediction and ground truth.",
- },
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if score meets or exceeds the threshold.",
- },
- },
- "required": ["score", "success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_levenshtein_distance_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_levenshtein_distance:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Levenshtein Distance Parameters",
- "description": "Settings for computing normalized Levenshtein similarity between outputs and ground truth.",
- "properties": {
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "Column in test data containing the correct answer.",
- "default": "correct_answer",
- },
- "threshold": {
- "type": "number",
- "title": "Threshold",
- "description": "Minimum similarity score required for success.",
- "minimum": 0,
- "maximum": 1,
- "default": 0.5,
- },
- "case_sensitive": {
- "type": "boolean",
- "title": "Case Sensitive",
- "description": "If True, comparison is case-sensitive.",
- "default": True,
- },
- },
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Levenshtein Inputs",
- "description": "Workflow output and ground truth string to compare.",
- },
- outputs={
- "type": "object",
- "title": "Levenshtein Outputs",
- "description": "Score and success flag for the Levenshtein similarity comparison.",
- "properties": {
- "score": {
- "type": "number",
- "title": "Score",
- "description": "Normalized Levenshtein similarity score (0–1).",
- },
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if score meets or exceeds the threshold.",
- },
- },
- "required": ["score", "success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_similarity_match_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_similarity_match:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Similarity Match Parameters",
- "description": "Settings for comparing predicted output against ground truth using string similarity.",
- "properties": {
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "Column in test data containing the correct answer.",
- "default": "correct_answer",
- },
- "threshold": {
- "type": "number",
- "title": "Threshold",
- "description": "Minimum similarity score required for success.",
- "minimum": 0,
- "maximum": 1,
- "default": 0.5,
- },
- "similarity_threshold": {
- "type": "number",
- "title": "Similarity Threshold (Alias)",
- "description": "Alternative field for threshold, retained for compatibility.",
- "minimum": 0,
- "maximum": 1,
- },
- "case_sensitive": {
- "type": "boolean",
- "title": "Case Sensitive",
- "description": "If True, similarity comparison is case-sensitive.",
- "default": True,
- },
- },
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Similarity Match Inputs",
- "description": "Workflow output and ground truth string to compare.",
- },
- outputs={
- "type": "object",
- "title": "Similarity Match Outputs",
- "description": "Score and success flag for the similarity comparison.",
- "properties": {
- "score": {
- "type": "number",
- "title": "Score",
- "description": "Similarity score (0–1).",
- },
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if score meets or exceeds the threshold.",
- },
- },
- "required": ["score", "success"],
- "additionalProperties": False,
- },
- ),
-)
-
-auto_semantic_similarity_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:auto_semantic_similarity:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Semantic Similarity Parameters",
- "description": "Settings for semantic similarity using embeddings.",
- "properties": {
- "correct_answer_key": {
- "type": "string",
- "title": "Expected Answer Column",
- "description": "Column in test data containing the correct answer.",
- "default": "correct_answer",
- },
- "embedding_model": {
- "type": "string",
- "title": "Embedding Model",
- "description": "The model used to generate embeddings.",
- "default": "text-embedding-3-small",
- },
- "threshold": {
- "type": "number",
- "title": "Threshold",
- "description": "Minimum semantic similarity score required for success.",
- "minimum": 0,
- "maximum": 1,
- "default": 0.5,
- },
- },
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Semantic Similarity Inputs",
- "description": "Workflow output and ground truth string to embed and compare.",
- },
- outputs={
- "type": "object",
- "title": "Semantic Similarity Outputs",
- "description": "Score and success flag for the semantic similarity comparison.",
- "properties": {
- "score": {
- "type": "number",
- "title": "Score",
- "description": "Cosine similarity score between output and ground truth embeddings.",
- },
- "success": {
- "type": "boolean",
- "title": "Success",
- "description": "True if score meets or exceeds the threshold.",
- },
- },
- "required": ["score", "success"],
- "additionalProperties": False,
- },
- ),
-)
-
-completion_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:completion:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Completion App Parameters",
- "description": "Configuration for running a completion workflow.",
- "properties": {
- "prompt": {
- "type": "object",
- "title": "Prompt Template",
- "description": "Prompt template configuration including system and user prompts.",
- }
- },
- "required": ["prompt"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Completion App Inputs",
- "description": "Inputs required by the completion workflow, matching the prompt template’s input keys.",
- },
- outputs={
- "type": ["string", "object", "array"],
- "title": "Completion App Outputs",
- "description": "Generated response, which may be text or structured data.",
- },
- ),
-)
-
-chat_v0_interface = WorkflowServiceInterface(
- uri="agenta:built-in:chat:v0",
- schemas=dict( # type: ignore
- parameters={
- "type": "object",
- "title": "Chat App Parameters",
- "description": "Configuration for running a chat-based workflow.",
- "properties": {
- "prompt": {
- "type": "object",
- "title": "Prompt Template",
- "description": "Prompt template configuration for initializing the chat.",
- }
- },
- "required": ["prompt"],
- "additionalProperties": False,
- },
- inputs={
- "type": "object",
- "title": "Chat App Inputs",
- "description": "Optional inputs provided to format the prompt.",
- },
- outputs={
- "type": "object",
- "title": "Chat App Outputs",
- "description": "Final chat message returned by the workflow.",
- "properties": {
- "role": {
- "type": "string",
- "description": "Role of the message sender.",
- },
- "content": {"type": "string", "description": "Content of the message."},
- },
- "required": ["role", "content"],
- "additionalProperties": True, # allows OpenAI-style message fields like tool_calls
- },
- ),
-)
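
The interfaces deleted above are plain JSON Schemas, so any JSON Schema validator can check an evaluator's parameter payload against them. A minimal sketch using the third-party `jsonschema` package (an assumption for illustration; the diff does not show the SDK depending on it), with a schema shaped like the Starts With parameters above:

```python
# Illustration only: validate evaluator parameters against a JSON Schema
# like the ones defined in the deleted interfaces module.
from jsonschema import ValidationError, validate

starts_with_parameters = {
    "type": "object",
    "properties": {
        "prefix": {"type": "string"},
        "case_sensitive": {"type": "boolean", "default": True},
    },
    "required": ["prefix"],
    "additionalProperties": False,
}

# A well-formed configuration passes silently.
validate({"prefix": "Answer:", "case_sensitive": False}, starts_with_parameters)

try:
    # A payload missing the required "prefix" raises ValidationError.
    validate({"case_sensitive": True}, starts_with_parameters)
except ValidationError as err:
    print(f"invalid evaluator parameters: {err.message}")
```
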
diff --git a/sdk/agenta/sdk/engines/running/registry.py b/sdk/agenta/sdk/workflows/registry.py
similarity index 100%
rename from sdk/agenta/sdk/engines/running/registry.py
rename to sdk/agenta/sdk/workflows/registry.py
diff --git a/sdk/agenta/sdk/workflows/sandbox.py b/sdk/agenta/sdk/workflows/sandbox.py
deleted file mode 100644
index fc9f5386e2..0000000000
--- a/sdk/agenta/sdk/workflows/sandbox.py
+++ /dev/null
@@ -1,118 +0,0 @@
-from typing import Union, Text, Dict, Any
-
-from RestrictedPython import safe_builtins, compile_restricted, utility_builtins
-from RestrictedPython.Eval import (
- default_guarded_getiter,
- default_guarded_getitem,
-)
-from RestrictedPython.Guards import (
- guarded_iter_unpack_sequence,
- full_write_guard,
-)
-
-
-def is_import_safe(python_code: Text) -> bool:
- """Checks if the imports in the python code contains a system-level import.
-
- Args:
- python_code (str): The Python code to be executed
-
- Returns:
- bool - module is secured or not
- """
-
- disallowed_imports = ["os", "subprocess", "threading", "multiprocessing"]
- for import_ in disallowed_imports:
- if import_ in python_code:
- return False
- return True
-
-
-def execute_code_safely(
- app_params: Dict[str, Any],
- inputs: Dict[str, Any],
- output: Union[dict, str],
- correct_answer: Any, # for backward compatibility reasons
- code: Text,
-) -> Union[float, None]:
- """
- Execute the provided Python code safely using RestrictedPython.
-
- Args:
- - app_params (Dict[str, str]): The parameters of the app variant.
- - inputs (dict): Inputs to be used during code execution.
- - output (str): The output of the app variant after being called.
- - correct_answer (str): The correct answer (or target) of the app variant.
- - code (Text): The Python code to be executed.
- - datapoint (Dict[str, str]): The test datapoint.
-
- Returns:
- - (float): Result of the execution if successful. Should be between 0 and 1.
- - None if execution fails or result is not a float between 0 and 1.
- """
- # Define the available built-ins
- local_builtins = safe_builtins.copy()
-
- # Add the __import__ built-in function to the local builtins
- local_builtins["__import__"] = __import__
-
- # Define supported packages
- allowed_imports = [
- "math",
- "random",
- "datetime",
- "json",
- "requests",
- "typing",
- ]
-
- # Create a dictionary to simulate allowed imports
- allowed_modules = {}
- for package_name in allowed_imports:
- allowed_modules[package_name] = __import__(package_name)
-
- # Add the allowed modules to the local built-ins
- local_builtins.update(allowed_modules)
- local_builtins.update(utility_builtins)
-
- # Define the environment for the code execution
- environment = {
- "_getiter_": default_guarded_getiter,
- "_getitem_": default_guarded_getitem,
- "_iter_unpack_sequence_": guarded_iter_unpack_sequence,
- "_write_": full_write_guard,
- "__builtins__": local_builtins,
- }
-
- # Compile the code in a restricted environment
- byte_code = compile_restricted(code, filename="
", mode="exec")
-
- # Call the evaluation function, extract the result if it exists
- # and is a float between 0 and 1
- try:
- # Execute the code
- exec(byte_code, environment)
-
- # Call the evaluation function, extract the result
- result = environment["evaluate"](app_params, inputs, output, correct_answer)
-
- # Attempt to convert result to float
- if isinstance(result, (float, int, str)):
- try:
- result = float(result)
- except ValueError as e:
- raise ValueError(f"Result cannot be converted to float: {e}")
-
- if not isinstance(result, float):
- raise TypeError(f"Result is not a float after conversion: {type(result)}")
-
- return result
-
- except KeyError as e:
- raise KeyError(f"Missing expected key in environment: {e}")
-
- except SyntaxError as e:
- raise SyntaxError(f"Syntax error in provided code: {e}")
-
- except Exception as e:
- raise RuntimeError(f"Error during code execution: {e}")
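
The deleted `sandbox.py` was a thin wrapper around RestrictedPython's compile/exec pattern. A minimal, self-contained sketch of that pattern: compile untrusted code with `compile_restricted()`, execute it against `safe_builtins`, then call the `evaluate()` function it defines.

```python
# Minimal RestrictedPython sketch of what the deleted sandbox did.
from RestrictedPython import compile_restricted, safe_builtins

untrusted = (
    "def evaluate(app_params, inputs, output, correct_answer):\n"
    "    return 1.0 if output in correct_answer else 0.0\n"
)

# Compile the untrusted source in a restricted environment.
byte_code = compile_restricted(untrusted, filename="<string>", mode="exec")

# Execute it with only the safe built-ins available.
environment = {"__builtins__": safe_builtins.copy()}
exec(byte_code, environment)  # defines evaluate() inside the sandbox env

score = environment["evaluate"]({}, {}, "yes", "yes indeed")
print(score)  # 1.0
```
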
diff --git a/sdk/agenta/sdk/workflows/types.py b/sdk/agenta/sdk/workflows/types.py
new file mode 100644
index 0000000000..4666296320
--- /dev/null
+++ b/sdk/agenta/sdk/workflows/types.py
@@ -0,0 +1,472 @@
+# - oss.src.core.shared.dtos ---------------------------------------------------
+
+
+from typing import Optional, Dict, List, Union
+from uuid import UUID
+from datetime import datetime
+
+from typing_extensions import TypeAliasType
+from pydantic import BaseModel
+
+
+BoolJson: TypeAliasType = TypeAliasType(
+ "BoolJson",
+ Union[bool, Dict[str, "BoolJson"]],
+)
+
+StringJson: TypeAliasType = TypeAliasType(
+ "StringJson",
+ Union[str, Dict[str, "StringJson"]],
+)
+
+FullJson: TypeAliasType = TypeAliasType(
+ "FullJson",
+ Union[str, int, float, bool, None, Dict[str, "FullJson"], List["FullJson"]],
+)
+
+NumericJson: TypeAliasType = TypeAliasType(
+ "NumericJson",
+ Union[int, float, Dict[str, "NumericJson"]],
+)
+
+NoListJson: TypeAliasType = TypeAliasType(
+ "NoListJson",
+ Union[str, int, float, bool, None, Dict[str, "NoListJson"]],
+)
+
+LabelJson: TypeAliasType = TypeAliasType(
+ "LabelJson",
+ Union[bool, str, Dict[str, "LabelJson"]],
+)
+
+Json = Dict[str, FullJson]
+
+Data = Dict[str, FullJson]
+
+Meta = Dict[str, FullJson]
+
+Tags = Dict[str, LabelJson]
+
+Flags = Dict[str, LabelJson]
+
+Hashes = Dict[str, StringJson]
+
+Metrics = Dict[str, NumericJson]
+
+
+class Metadata(BaseModel):
+ flags: Optional[Flags] = None
+ meta: Optional[Meta] = None
+ tags: Optional[Tags] = None
+
+
+class Lifecycle(BaseModel):
+ created_at: Optional[datetime] = None
+ updated_at: Optional[datetime] = None
+ deleted_at: Optional[datetime] = None
+
+ created_by_id: Optional[UUID] = None
+ updated_by_id: Optional[UUID] = None
+ deleted_by_id: Optional[UUID] = None
+
+
+class TraceID(BaseModel):
+ trace_id: Optional[str] = None
+
+
+class SpanID(BaseModel):
+ span_id: Optional[str] = None
+
+
+class Identifier(BaseModel):
+ id: Optional[UUID] = None
+
+
+class Slug(BaseModel):
+ slug: Optional[str] = None
+
+
+class Version(BaseModel):
+ version: Optional[str] = None
+
+
+class Header(BaseModel):
+ name: Optional[str] = None
+ description: Optional[str] = None
+
+
+class Reference(Identifier, Slug, Version):
+ pass
+
+
+class Link(TraceID, SpanID):
+ pass
+
+
+def sync_alias(primary: str, alias: str, instance: BaseModel) -> None:
+ primary_val = getattr(instance, primary)
+ alias_val = getattr(instance, alias)
+
+ if primary_val and alias_val is None:
+ object.__setattr__(instance, alias, primary_val)
+ elif alias_val and primary_val is None:
+ object.__setattr__(instance, primary, alias_val)
+
+
+class AliasConfig(BaseModel):
+ model_config = {
+ "populate_by_name": True,
+ "from_attributes": True,
+ }
+
+
+class Status(BaseModel):
+ code: Optional[int] = 500
+ message: Optional[str] = "Please try again later."
+
+
+Mappings = Dict[str, str]
+
+Schema = Dict[str, FullJson]
+
+
+# ------------------------------------------------------------------------------
+
+# - oss.src.core.git.dtos ------------------------------------------------------
+
+
+from typing import Optional, List
+from uuid import UUID
+from datetime import datetime
+
+from pydantic import BaseModel
+
+
+class Commit(BaseModel):
+ author: Optional[UUID] = None
+ date: Optional[datetime] = None
+ message: Optional[str] = None
+
+
+class Revision(Identifier, Slug, Version, Lifecycle, Header, Metadata, Commit):
+ data: Optional[Data] = None
+
+ artifact_id: Optional[UUID] = None
+ variant_id: Optional[UUID] = None
+
+
+# ------------------------------------------------------------------------------
+
+# - oss.src.core.tracing.dtos --------------------------------------------------
+
+import random
+import string
+from enum import Enum
+from datetime import datetime, timezone
+from typing import List, Dict, Any, Union, Optional
+
+from pydantic import BaseModel, model_validator, Field
+
+
+class TraceType(Enum):
+ INVOCATION = "invocation"
+ ANNOTATION = "annotation"
+ UNKNOWN = "unknown"
+
+
+class SpanType(Enum):
+ AGENT = "agent"
+ CHAIN = "chain"
+ WORKFLOW = "workflow"
+ TASK = "task"
+ TOOL = "tool"
+ EMBEDDING = "embedding"
+ QUERY = "query"
+ LLM = "llm"
+ COMPLETION = "completion"
+ CHAT = "chat"
+ RERANK = "rerank"
+ UNKNOWN = "unknown"
+
+
+class AgMetricEntryAttributes(BaseModel):
+ cumulative: Optional[Metrics] = None
+ incremental: Optional[Metrics] = None
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+class AgMetricsAttributes(BaseModel):
+ duration: Optional[AgMetricEntryAttributes] = None
+ errors: Optional[AgMetricEntryAttributes] = None
+ tokens: Optional[AgMetricEntryAttributes] = None
+ costs: Optional[AgMetricEntryAttributes] = None
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+class AgTypeAttributes(BaseModel):
+ trace: Optional[TraceType] = TraceType.INVOCATION
+ span: Optional[SpanType] = SpanType.TASK
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+class AgDataAttributes(BaseModel):
+ inputs: Optional[Dict[str, Any]] = None
+ outputs: Optional[Any] = None
+ internals: Optional[Dict[str, Any]] = None
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+class AgAttributes(BaseModel):
+ type: AgTypeAttributes = Field(default_factory=AgTypeAttributes)
+ data: AgDataAttributes = Field(default_factory=AgDataAttributes)
+
+ metrics: Optional[AgMetricsAttributes] = None
+ flags: Optional[Flags] = None
+ tags: Optional[Tags] = None
+ meta: Optional[Meta] = None
+ exception: Optional[Data] = None
+ references: Optional[Dict[str, "OTelReference"]] = None
+ unsupported: Optional[Data] = None
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+class OTelStatusCode(Enum):
+ STATUS_CODE_UNSET = "STATUS_CODE_UNSET"
+ STATUS_CODE_OK = "STATUS_CODE_OK"
+ STATUS_CODE_ERROR = "STATUS_CODE_ERROR"
+
+
+class OTelSpanKind(Enum):
+ SPAN_KIND_UNSPECIFIED = "SPAN_KIND_UNSPECIFIED"
+ SPAN_KIND_INTERNAL = "SPAN_KIND_INTERNAL"
+ SPAN_KIND_SERVER = "SPAN_KIND_SERVER"
+ SPAN_KIND_CLIENT = "SPAN_KIND_CLIENT"
+ SPAN_KIND_PRODUCER = "SPAN_KIND_PRODUCER"
+ SPAN_KIND_CONSUMER = "SPAN_KIND_CONSUMER"
+
+
+OTelAttributes = Json
+OTelMetrics = Metrics
+OTelTags = Tags
+
+
+class OTelEvent(BaseModel):
+ name: str
+ timestamp: Union[datetime, int]
+
+ attributes: Optional[OTelAttributes] = None
+
+
+OTelEvents = List[OTelEvent]
+
+
+class OTelHash(Identifier):
+ attributes: Optional[OTelAttributes] = None
+
+
+OTelHashes = List[OTelHash]
+
+
+class OTelLink(TraceID, SpanID):
+ attributes: Optional[OTelAttributes] = None
+
+
+OTelLinks = List[OTelLink]
+
+
+class OTelReference(Reference):
+ attributes: Optional[OTelAttributes] = None
+
+
+OTelReferences = List[OTelReference]
+
+
+class OTelSpansTree(BaseModel):
+ spans: Optional["OTelNestedSpans"] = None
+
+
+OTelSpansTrees = List[OTelSpansTree]
+
+
+class OTelFlatSpan(Lifecycle):
+ trace_id: str
+ span_id: str
+ parent_id: Optional[str] = None
+
+ trace_type: Optional[TraceType] = None
+ span_type: Optional[SpanType] = None
+
+ span_kind: Optional[OTelSpanKind] = None
+ span_name: Optional[str] = None
+
+ start_time: Optional[Union[datetime, int]] = None
+ end_time: Optional[Union[datetime, int]] = None
+
+ status_code: Optional[OTelStatusCode] = None
+ status_message: Optional[str] = None
+
+ attributes: Optional[OTelAttributes] = None
+ references: Optional[OTelReferences] = None
+ links: Optional[OTelLinks] = None
+ hashes: Optional[OTelHashes] = None
+
+ exception: Optional[Data] = None
+
+ events: Optional[OTelEvents] = None
+
+ @model_validator(mode="after")
+ def set_defaults(self):
+ if self.trace_type is None:
+ self.trace_type = TraceType.INVOCATION
+ if self.span_type is None:
+ self.span_type = SpanType.TASK
+ if self.span_kind is None:
+ self.span_kind = OTelSpanKind.SPAN_KIND_UNSPECIFIED
+ if self.status_code is None:
+ self.status_code = OTelStatusCode.STATUS_CODE_UNSET
+ if self.end_time is None and self.start_time is not None:
+ self.end_time = self.start_time
+ if self.start_time is None and self.end_time is not None:
+ self.start_time = self.end_time
+ if self.start_time is None and self.end_time is None:
+ now = datetime.now(timezone.utc)
+ self.start_time = now
+ self.end_time = now
+ if self.span_name is None:
+ self.span_name = "".join(
+ random.choices(string.ascii_letters + string.digits, k=8)
+ )
+ return self
+
+
+class OTelSpan(OTelFlatSpan, OTelSpansTree):
+ pass
+
+
+OTelFlatSpans = List[OTelFlatSpan]
+OTelNestedSpans = Dict[str, Union[OTelSpan, List[OTelSpan]]]
+OTelTraceTree = Dict[str, OTelSpansTree]
+OTelTraceTrees = List[OTelTraceTree]
+OTelSpans = List[OTelSpan]
+
+
+Attributes = OTelAttributes
+Trace = OTelTraceTree
+
+
+# ------------------------------------------------------------------------------
+
+# - oss.src.core.workflows.dtos ------------------------------------------------
+
+
+from typing import Optional, Dict
+from uuid import UUID
+
+from pydantic import BaseModel, Field, model_validator
+
+
+class WorkflowIdAlias(AliasConfig):
+ workflow_id: Optional[UUID] = None
+ artifact_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="workflow_id",
+ )
+
+
+class WorkflowVariantIdAlias(AliasConfig):
+ workflow_variant_id: Optional[UUID] = None
+ variant_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="workflow_variant_id",
+ )
+
+
+class WorkflowFlags(BaseModel):
+ is_custom: Optional[bool] = None
+ is_evaluator: Optional[bool] = None
+ is_human: Optional[bool] = None
+
+
+class WorkflowServiceVersion(BaseModel):
+ version: Optional[str] = None
+
+
+class WorkflowServiceInterface(WorkflowServiceVersion):
+ uri: Optional[str] = None # str (Enum) w/ validation
+ url: Optional[str] = None # str w/ validation
+ headers: Optional[
+ Dict[str, Union[Reference, str]] # either hardcoded or a secret
+ ] = None
+
+ schemas: Optional[Schema] = None # json-schema instead of pydantic
+ mappings: Optional[Mappings] = None # used in the workflow interface
+
+
+class WorkflowServiceConfiguration(WorkflowServiceInterface):
+ script: Optional[str] = None # str w/ validation
+ parameters: Optional[Data] = None # configuration values
+
+
+class WorkflowRevisionData(WorkflowServiceConfiguration):
+ pass
+
+
+class WorkflowRevision(
+ Revision,
+ WorkflowIdAlias,
+ WorkflowVariantIdAlias,
+):
+ flags: Optional[WorkflowFlags] = None
+
+ data: Optional[WorkflowRevisionData] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("workflow_id", "artifact_id", self)
+ sync_alias("workflow_variant_id", "variant_id", self)
+
+
+class WorkflowServiceData(BaseModel):
+ inputs: Optional[Data] = None
+ outputs: Optional[Union[str, Data]] = None
+ trace: Optional[Trace] = None
+ trace_outputs: Optional[Union[str, Data]] = None
+ traces: Optional[Dict[str, Trace]] = None
+ traces_outputs: Optional[Dict[str, Union[str, Data]]] = None
+
+
+class WorkflowServiceRequest(Version, Metadata):
+ data: Optional[WorkflowServiceData] = None
+
+ path: Optional[str] = "/"
+ method: Optional[str] = "invoke"
+
+ references: Optional[Dict[str, Reference]] = None
+ links: Optional[Dict[str, Link]] = None
+
+ # secrets: Optional[Dict[str, Secret]] = None
+ credentials: Optional[str] = None
+
+
+class WorkflowServiceResponse(Identifier, Version):
+ data: Optional[WorkflowServiceData] = None
+
+ links: Optional[Dict[str, Link]] = None
+
+ status: Optional[Status] = None # = Status()
+
+
+# ------------------------------------------------------------------------------
+
+from typing import Callable, Awaitable
+
+WorkflowServiceHandler = Callable[
+ [WorkflowServiceRequest, WorkflowRevision],
+ Awaitable[WorkflowServiceResponse],
+]
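
A usage sketch for the new types module, assuming it is importable as `agenta.sdk.workflows.types` (the path this diff creates): the recursive `FullJson` alias validates arbitrarily nested JSON values via a pydantic `TypeAdapter`, and `sync_alias()` mirrors whichever of two fields is set onto the other, which is how `WorkflowRevision` keeps `workflow_id` and the legacy `artifact_id` in sync after `model_post_init()`. The `Demo` model below is hypothetical, used only to exercise `sync_alias()`.

```python
from typing import Optional
from uuid import UUID, uuid4

from pydantic import BaseModel, TypeAdapter

from agenta.sdk.workflows.types import FullJson, sync_alias

# FullJson accepts arbitrarily nested JSON-compatible values.
TypeAdapter(FullJson).validate_python({"a": [1, 2.5, {"b": None}]})

class Demo(BaseModel):
    workflow_id: Optional[UUID] = None
    artifact_id: Optional[UUID] = None

demo = Demo(workflow_id=uuid4())
sync_alias("workflow_id", "artifact_id", demo)  # copies the set field over
assert demo.artifact_id == demo.workflow_id
```
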
diff --git a/sdk/agenta/sdk/workflows/utils.py b/sdk/agenta/sdk/workflows/utils.py
index d86f499da4..6f51847449 100644
--- a/sdk/agenta/sdk/workflows/utils.py
+++ b/sdk/agenta/sdk/workflows/utils.py
@@ -1,314 +1,17 @@
-# /agenta/sdk/workflows/utils.py
-from typing import Optional, Tuple, Callable
-from agenta.sdk.models.workflows import WorkflowServiceInterface
-
-from agenta.sdk.workflows.handlers import (
- echo_v0,
- auto_exact_match_v0,
- auto_regex_test_v0,
- field_match_test_v0,
- auto_webhook_test_v0,
- auto_custom_code_run_v0,
- auto_ai_critique_v0,
- auto_starts_with_v0,
- auto_ends_with_v0,
- auto_contains_v0,
- auto_contains_any_v0,
- auto_contains_all_v0,
- auto_contains_json_v0,
- auto_json_diff_v0,
- auto_levenshtein_distance_v0,
- auto_similarity_match_v0,
- auto_semantic_similarity_v0,
- completion_v0,
- chat_v0,
-)
-
-from agenta.sdk.workflows.interfaces import (
- echo_v0_interface,
- auto_exact_match_v0_interface,
- auto_regex_test_v0_interface,
- field_match_test_v0_interface,
- auto_webhook_test_v0_interface,
- auto_custom_code_run_v0_interface,
- auto_ai_critique_v0_interface,
- auto_starts_with_v0_interface,
- auto_ends_with_v0_interface,
- auto_contains_v0_interface,
- auto_contains_any_v0_interface,
- auto_contains_all_v0_interface,
- auto_contains_json_v0_interface,
- auto_json_diff_v0_interface,
- auto_levenshtein_distance_v0_interface,
- auto_similarity_match_v0_interface,
- auto_semantic_similarity_v0_interface,
- completion_v0_interface,
- chat_v0_interface,
-)
-
-
-from agenta.sdk.workflows.configurations import (
- echo_v0_configuration,
- auto_exact_match_v0_configuration,
- auto_regex_test_v0_configuration,
- field_match_test_v0_configuration,
- auto_webhook_test_v0_configuration,
- auto_custom_code_run_v0_configuration,
- auto_ai_critique_v0_configuration,
- auto_starts_with_v0_configuration,
- auto_ends_with_v0_configuration,
- auto_contains_v0_configuration,
- auto_contains_any_v0_configuration,
- auto_contains_all_v0_configuration,
- auto_contains_json_v0_configuration,
- auto_json_diff_v0_configuration,
- auto_levenshtein_distance_v0_configuration,
- auto_similarity_match_v0_configuration,
- auto_semantic_similarity_v0_configuration,
- completion_v0_configuration,
- chat_v0_configuration,
-)
-
-INTERFACE_REGISTRY: dict = dict(
- agenta={
- "built-in": dict(
- echo=dict(v0=echo_v0_interface),
- auto_exact_match=dict(v0=auto_exact_match_v0_interface),
- auto_regex_test=dict(v0=auto_regex_test_v0_interface),
- field_match_test=dict(v0=field_match_test_v0_interface),
- auto_webhook_test=dict(v0=auto_webhook_test_v0_interface),
- auto_custom_code_run=dict(v0=auto_custom_code_run_v0_interface),
- auto_ai_critique=dict(v0=auto_ai_critique_v0_interface),
- auto_starts_with=dict(v0=auto_starts_with_v0_interface),
- auto_ends_with=dict(v0=auto_ends_with_v0_interface),
- auto_contains=dict(v0=auto_contains_v0_interface),
- auto_contains_any=dict(v0=auto_contains_any_v0_interface),
- auto_contains_all=dict(v0=auto_contains_all_v0_interface),
- auto_contains_json=dict(v0=auto_contains_json_v0_interface),
- auto_json_diff=dict(v0=auto_json_diff_v0_interface),
- auto_levenshtein_distance=dict(v0=auto_levenshtein_distance_v0_interface),
- auto_similarity_match=dict(v0=auto_similarity_match_v0_interface),
- auto_semantic_similarity=dict(v0=auto_semantic_similarity_v0_interface),
- completion=dict(v0=completion_v0_interface),
- chat=dict(v0=chat_v0_interface),
- ),
- },
-)
-
-CONFIGURATION_REGISTRY: dict = dict(
- agenta={
- "built-in": dict(
- echo=dict(v0=echo_v0_configuration),
- auto_exact_match=dict(v0=auto_exact_match_v0_configuration),
- auto_regex_test=dict(v0=auto_regex_test_v0_configuration),
- field_match_test=dict(v0=field_match_test_v0_configuration),
- auto_webhook_test=dict(v0=auto_webhook_test_v0_configuration),
- auto_custom_code_run=dict(v0=auto_custom_code_run_v0_configuration),
- auto_ai_critique=dict(v0=auto_ai_critique_v0_configuration),
- auto_starts_with=dict(v0=auto_starts_with_v0_configuration),
- auto_ends_with=dict(v0=auto_ends_with_v0_configuration),
- auto_contains=dict(v0=auto_contains_v0_configuration),
- auto_contains_any=dict(v0=auto_contains_any_v0_configuration),
- auto_contains_all=dict(v0=auto_contains_all_v0_configuration),
- auto_contains_json=dict(v0=auto_contains_json_v0_configuration),
- auto_json_diff=dict(v0=auto_json_diff_v0_configuration),
- auto_levenshtein_distance=dict(
- v0=auto_levenshtein_distance_v0_configuration
- ),
- auto_similarity_match=dict(v0=auto_similarity_match_v0_configuration),
- auto_semantic_similarity=dict(v0=auto_semantic_similarity_v0_configuration),
- completion=dict(v0=completion_v0_configuration),
- chat=dict(v0=chat_v0_configuration),
- ),
- },
-)
-
-# Global registry for workflow handlers organized by URI structure.
-#
-# URI Format: provider:kind:key:version
-#
-# Structure:
-# HANDLER_REGISTRY[provider][kind][key][version] = handler_callable
-#
-# Components:
-# - provider: The source/namespace of the handler (e.g., "agenta", "user")
-# - kind: The category/type of handler (e.g., "built-in", "custom")
-# - key: The unique identifier for the handler (e.g., "echo", "auto_exact_match", "module.function_name")
-# - version: The version identifier (e.g., "v0", "v1", "latest")
-#
-# Examples:
-# - URI: "agenta:built-in:echo:v0"
-# Access: HANDLER_REGISTRY["agenta"]["built-in"]["echo"]["v0"]
-#
-# - URI: "user:custom:mymodule.my_workflow:latest"
-# Access: HANDLER_REGISTRY["user"]["custom"]["mymodule.my_workflow"]["latest"]
-#
-# Usage:
-# - register_handler(fn, uri) - Registers a new handler with the given URI
-# - retrieve_handler(uri) - Retrieves a handler by its URI
-# - retrieve_interface(uri) - Retrieves the interface configuration for a handler
-# - retrieve_configuration(uri) - Retrieves default parameters for a handler
-#
-# The registry supports automatic URI generation for user-defined workflows:
-# If no URI is provided, register_handler() generates: "user:custom:{module}.{name}:latest"
-HANDLER_REGISTRY: dict = dict(
- agenta={
- "built-in": dict(
- echo=dict(v0=echo_v0),
- auto_exact_match=dict(v0=auto_exact_match_v0),
- auto_regex_test=dict(v0=auto_regex_test_v0),
- field_match_test=dict(v0=field_match_test_v0),
- auto_webhook_test=dict(v0=auto_webhook_test_v0),
- auto_custom_code_run=dict(v0=auto_custom_code_run_v0),
- auto_ai_critique=dict(v0=auto_ai_critique_v0),
- auto_starts_with=dict(v0=auto_starts_with_v0),
- auto_ends_with=dict(v0=auto_ends_with_v0),
- auto_contains=dict(v0=auto_contains_v0),
- auto_contains_any=dict(v0=auto_contains_any_v0),
- auto_contains_all=dict(v0=auto_contains_all_v0),
- auto_contains_json=dict(v0=auto_contains_json_v0),
- auto_json_diff=dict(v0=auto_json_diff_v0),
- auto_levenshtein_distance=dict(v0=auto_levenshtein_distance_v0),
- auto_similarity_match=dict(v0=auto_similarity_match_v0),
- auto_semantic_similarity=dict(v0=auto_semantic_similarity_v0),
- completion=dict(v0=completion_v0),
- chat=dict(v0=chat_v0),
- ),
- },
-)
-
-
-def parse_uri(
+from typing import Optional, Tuple
+async def parse_service_uri(
uri: str,
) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
if not uri or not uri.strip():
return None, None, None, None
+ # uri ~ [<provider>|empty]:<kind>:<key>:[<version>|'latest'|empty]
+
parts = uri.split(":")
- # 1 → key
- # 2 → kind:key
- # 3 → provider:kind:key
- # 4 → provider:kind:key:version
- if len(parts) == 1:
- provider, kind, key, version = "agenta", "built-in", parts[0], "latest"
- elif len(parts) == 2:
- provider, kind, key, version = "agenta", parts[0], parts[1], "latest"
- elif len(parts) == 3:
- provider, kind, key, version = parts[0], parts[1], parts[2], "latest"
- elif len(parts) == 4:
- provider, kind, key, version = parts[0], parts[1], parts[2], parts[3]
- else:
+ if len(parts) != 4:
return None, None, None, None
- return provider, kind, key, version
-
-
-def register_handler(fn: Callable, uri: Optional[str] = None) -> str:
- """Register a handler function in the global handler registry.
-
- Stores a callable in the HANDLER_REGISTRY with a hierarchical URI structure
- of provider:kind:key:version. If no URI is provided, generates one automatically
- using the function's module and name (user:custom:module.name:latest).
-
- The URI is parsed into components and used to create nested dictionary entries
- in the registry for later retrieval by retrieve_handler().
-
- Args:
- fn: The callable function to register
- uri: Optional URI string in format "provider:kind:key:version".
- If None, auto-generates "user:custom:{module}.{name}:latest"
-
- Returns:
- The URI string used for registration
-
- Raises:
- ValueError: If the URI is invalid or missing required components
-
- Example:
- >>> def my_workflow(): pass
- >>> uri = register_handler(my_workflow, "user:custom:my_workflow:v1")
- >>> uri
- 'user:custom:my_workflow:v1'
- """
- if not uri:
- key = f"{fn.__module__}.{fn.__name__}"
- uri = f"user:custom:{key}:latest"
-
- provider, kind, key, version = parse_uri(uri) # type: ignore
-
- if not provider or not kind or not key or not version:
- raise ValueError(f"Invalid URI: {uri}")
-
- HANDLER_REGISTRY.setdefault(provider, {}).setdefault(kind, {}).setdefault(
- key, {}
- ).setdefault(version, fn)
-
- return uri
-
-
-def _get_with_latest(
- registry: dict,
- provider: Optional[str] = None,
- kind: Optional[str] = None,
- key: Optional[str] = None,
- version: Optional[str] = None,
-):
- kind_dict = registry.get(provider, {}).get(kind, {}).get(key, {})
-
- if not isinstance(kind_dict, dict) or not kind_dict:
- return None
-
- if version == "latest":
- # if "latest" explicitly exists, prefer it
- if "latest" in kind_dict:
- return kind_dict.get("latest")
-
- # collect keys of the form vN
- candidates = [
- (int(v[1:]), v)
- for v in kind_dict.keys()
- if isinstance(v, str) and v.startswith("v") and v[1:].isdigit()
- ]
- if not candidates:
- return None
- # get the highest int N
- _, best_key = max(candidates, key=lambda x: x[0])
- return kind_dict.get(best_key)
-
- return kind_dict.get(version)
-
-
-def retrieve_handler(uri: Optional[str] = None) -> Optional[Callable]:
- if not uri:
- return None
- provider, kind, key, version = parse_uri(uri)
-
- return _get_with_latest(HANDLER_REGISTRY, provider, kind, key, version)
-
-
-def retrieve_interface(uri: Optional[str] = None) -> Optional[WorkflowServiceInterface]:
- if not uri:
- return None
- provider, kind, key, version = parse_uri(uri)
-
- return _get_with_latest(INTERFACE_REGISTRY, provider, kind, key, version)
-
-
-def retrieve_configuration(uri: Optional[str] = None) -> Optional[dict]:
- if not uri:
- return None
- provider, kind, key, version = parse_uri(uri)
-
- return _get_with_latest(CONFIGURATION_REGISTRY, provider, kind, key, version)
-
-
-def is_custom_uri(uri: Optional[str] = None) -> bool:
- if not uri:
- return True
-
- provider, kind, key, version = parse_uri(uri)
-
- return provider == "user" and kind == "custom"
+ return tuple(parts)
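
A quick check of the slimmed-down parser (a sketch; assumes it is imported from `agenta.sdk.workflows.utils`). Only the full four-part `provider:kind:key:version` form survives; the shorthand forms that `parse_uri()` used to expand now come back as all-`None`:

```python
import asyncio

from agenta.sdk.workflows.utils import parse_service_uri

provider, kind, key, version = asyncio.run(
    parse_service_uri("agenta:built-in:echo:v0")
)
print(provider, kind, key, version)  # agenta built-in echo v0

# One-part shorthand is no longer expanded to defaults.
assert asyncio.run(parse_service_uri("echo")) == (None, None, None, None)
```
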
diff --git a/sdk/patches/litellm_vertex_ai_partner_models.py b/sdk/patches/litellm_vertex_ai_partner_models.py
deleted file mode 100644
index c68041f8f0..0000000000
--- a/sdk/patches/litellm_vertex_ai_partner_models.py
+++ /dev/null
@@ -1,248 +0,0 @@
-## litellm/llms/vertex_ai/vertex_ai_partner_models/main.py
-
-# What is this?
-## API Handler for calling Vertex AI Partner Models
-from enum import Enum
-from typing import Callable, Optional, Union
-
-import httpx # type: ignore
-
-import litellm
-from litellm import LlmProviders
-from litellm.types.llms.vertex_ai import VertexPartnerProvider
-from litellm.utils import ModelResponse
-
-from ...custom_httpx.llm_http_handler import BaseLLMHTTPHandler
-from ..vertex_llm_base import VertexBase
-
-base_llm_http_handler = BaseLLMHTTPHandler()
-
-
-class VertexAIError(Exception):
- def __init__(self, status_code, message):
- self.status_code = status_code
- self.message = message
- self.request = httpx.Request(
- method="POST", url=" https://cloud.google.com/vertex-ai/"
- )
- self.response = httpx.Response(status_code=status_code, request=self.request)
- super().__init__(
- self.message
- ) # Call the base class constructor with the parameters it needs
-
-
-class PartnerModelPrefixes(str, Enum):
- META_PREFIX = "meta/"
- DEEPSEEK_PREFIX = "deepseek-ai"
- MISTRAL_PREFIX = "mistral"
- CODERESTAL_PREFIX = "codestral"
- JAMBA_PREFIX = "jamba"
- CLAUDE_PREFIX = "claude"
- QWEN_PREFIX = "qwen"
- GPT_OSS_PREFIX = "openai/gpt-oss-"
-
-
-class VertexAIPartnerModels(VertexBase):
- @staticmethod
- def is_vertex_partner_model(model: str):
- """
- Check if the model string is a Vertex AI Partner Model
- Only use this once you have confirmed that custom_llm_provider is vertex_ai
-
- Returns:
- bool: True if the model string is a Vertex AI Partner Model, False otherwise
- """
- if (
- model.startswith(PartnerModelPrefixes.META_PREFIX)
- or model.startswith(PartnerModelPrefixes.DEEPSEEK_PREFIX)
- or model.startswith(PartnerModelPrefixes.MISTRAL_PREFIX)
- or model.startswith(PartnerModelPrefixes.CODERESTAL_PREFIX)
- or model.startswith(PartnerModelPrefixes.JAMBA_PREFIX)
- or model.startswith(PartnerModelPrefixes.CLAUDE_PREFIX)
- or model.startswith(PartnerModelPrefixes.QWEN_PREFIX)
- or model.startswith(PartnerModelPrefixes.GPT_OSS_PREFIX)
- ):
- return True
- return False
-
- @staticmethod
- def should_use_openai_handler(model: str):
- OPENAI_LIKE_VERTEX_PROVIDERS = [
- "llama",
- PartnerModelPrefixes.DEEPSEEK_PREFIX,
- PartnerModelPrefixes.QWEN_PREFIX,
- PartnerModelPrefixes.GPT_OSS_PREFIX,
- ]
- if any(provider in model for provider in OPENAI_LIKE_VERTEX_PROVIDERS):
- return True
- return False
-
- def completion(
- self,
- model: str,
- messages: list,
- model_response: ModelResponse,
- print_verbose: Callable,
- encoding,
- logging_obj,
- api_base: Optional[str],
- optional_params: dict,
- custom_prompt_dict: dict,
- headers: Optional[dict],
- timeout: Union[float, httpx.Timeout],
- litellm_params: dict,
- vertex_project=None,
- vertex_location=None,
- vertex_credentials=None,
- logger_fn=None,
- acompletion: bool = False,
- client=None,
- ):
- try:
- from litellm.llms.anthropic.chat import AnthropicChatCompletion
- from litellm.llms.codestral.completion.handler import (
- CodestralTextCompletion,
- )
- from litellm.llms.openai_like.chat.handler import OpenAILikeChatHandler
- from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
- VertexLLM,
- )
- except Exception as e:
- raise VertexAIError(
- status_code=400, message=f"Failed to import partner handlers: {e}"
- )
-
- try:
- access_token, project_id = self._ensure_access_token(
- credentials=vertex_credentials,
- project_id=vertex_project,
- custom_llm_provider="vertex_ai",
- )
-
- openai_like_chat_completions = OpenAILikeChatHandler()
- codestral_fim_completions = CodestralTextCompletion()
- anthropic_chat_completions = AnthropicChatCompletion()
-
- ## CONSTRUCT API BASE
- stream: bool = optional_params.get("stream", False) or False
-
- optional_params["stream"] = stream
-
- if self.should_use_openai_handler(model):
- partner = VertexPartnerProvider.llama
- elif "mistral" in model or "codestral" in model:
- partner = VertexPartnerProvider.mistralai
- elif "jamba" in model:
- partner = VertexPartnerProvider.ai21
- elif "claude" in model:
- partner = VertexPartnerProvider.claude
- else:
- raise ValueError(f"Unknown partner model: {model}")
-
- api_base = self.get_complete_vertex_url(
- custom_api_base=api_base,
- vertex_location=vertex_location,
- vertex_project=vertex_project,
- project_id=project_id,
- partner=partner,
- stream=stream,
- model=model,
- )
-
- if "codestral" in model or "mistral" in model:
- model = model.split("@")[0]
-
- if "codestral" in model and litellm_params.get("text_completion") is True:
- optional_params["model"] = model
- text_completion_model_response = litellm.TextCompletionResponse(
- stream=stream
- )
- return codestral_fim_completions.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- api_key=access_token,
- custom_prompt_dict=custom_prompt_dict,
- model_response=text_completion_model_response,
- print_verbose=print_verbose,
- logging_obj=logging_obj,
- optional_params=optional_params,
- acompletion=acompletion,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- timeout=timeout,
- encoding=encoding,
- )
- elif "claude" in model:
- if headers is None:
- headers = {}
- headers.update({"Authorization": "Bearer {}".format(access_token)})
-
- optional_params.update(
- {
- "anthropic_version": "vertex-2023-10-16",
- "is_vertex_request": True,
- }
- )
-
- return anthropic_chat_completions.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- acompletion=acompletion,
- custom_prompt_dict=litellm.custom_prompt_dict,
- model_response=model_response,
- print_verbose=print_verbose,
- optional_params=optional_params,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- encoding=encoding, # for calculating input/output tokens
- api_key=access_token,
- logging_obj=logging_obj,
- headers=headers,
- timeout=timeout,
- client=client,
- custom_llm_provider=LlmProviders.VERTEX_AI.value,
- )
- elif self.should_use_openai_handler(model):
- return base_llm_http_handler.completion(
- model=model,
- stream=stream,
- messages=messages,
- acompletion=acompletion,
- api_base=api_base,
- model_response=model_response,
- optional_params=optional_params,
- litellm_params=litellm_params,
- custom_llm_provider="vertex_ai",
- timeout=timeout,
- headers=headers,
- encoding=encoding,
- api_key=access_token,
- logging_obj=logging_obj, # model call logging done inside the class as we may need to modify I/O to fit aleph alpha's requirements
- client=client,
- )
- return openai_like_chat_completions.completion(
- model=model,
- messages=messages,
- api_base=api_base,
- api_key=access_token,
- custom_prompt_dict=custom_prompt_dict,
- model_response=model_response,
- print_verbose=print_verbose,
- logging_obj=logging_obj,
- optional_params=optional_params,
- acompletion=acompletion,
- litellm_params=litellm_params,
- logger_fn=logger_fn,
- client=client,
- timeout=timeout,
- encoding=encoding,
- custom_llm_provider="vertex_ai",
- custom_endpoint=True,
- )
-
- except Exception as e:
- if hasattr(e, "status_code"):
- raise e
- raise VertexAIError(status_code=500, message=str(e))
diff --git a/sdk/poetry.lock b/sdk/poetry.lock
index dce11a6848..d467156f9d 100644
--- a/sdk/poetry.lock
+++ b/sdk/poetry.lock
@@ -14,137 +14,138 @@ files = [
[[package]]
name = "aiohttp"
-version = "3.13.2"
+version = "3.13.0"
description = "Async http client/server framework (asyncio)"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2372b15a5f62ed37789a6b383ff7344fc5b9f243999b0cd9b629d8bc5f5b4155"},
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7f8659a48995edee7229522984bd1009c1213929c769c2daa80b40fe49a180c"},
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:939ced4a7add92296b0ad38892ce62b98c619288a081170695c6babe4f50e636"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6315fb6977f1d0dd41a107c527fee2ed5ab0550b7d885bc15fee20ccb17891da"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e7352512f763f760baaed2637055c49134fd1d35b37c2dedfac35bfe5cf8725"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e09a0a06348a2dd73e7213353c90d709502d9786219f69b731f6caa0efeb46f5"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a09a6d073fb5789456545bdee2474d14395792faa0527887f2f4ec1a486a59d3"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b59d13c443f8e049d9e94099c7e412e34610f1f49be0f230ec656a10692a5802"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:20db2d67985d71ca033443a1ba2001c4b5693fe09b0e29f6d9358a99d4d62a8a"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:960c2fc686ba27b535f9fd2b52d87ecd7e4fd1cf877f6a5cba8afb5b4a8bd204"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6c00dbcf5f0d88796151e264a8eab23de2997c9303dd7c0bf622e23b24d3ce22"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fed38a5edb7945f4d1bcabe2fcd05db4f6ec7e0e82560088b754f7e08d93772d"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:b395bbca716c38bef3c764f187860e88c724b342c26275bc03e906142fc5964f"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:204ffff2426c25dfda401ba08da85f9c59525cdc42bda26660463dd1cbcfec6f"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:05c4dd3c48fb5f15db31f57eb35374cb0c09afdde532e7fb70a75aede0ed30f6"},
- {file = "aiohttp-3.13.2-cp310-cp310-win32.whl", hash = "sha256:e574a7d61cf10351d734bcddabbe15ede0eaa8a02070d85446875dc11189a251"},
- {file = "aiohttp-3.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:364f55663085d658b8462a1c3f17b2b84a5c2e1ba858e1b79bff7b2e24ad1514"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8"},
- {file = "aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec"},
- {file = "aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248"},
- {file = "aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e"},
- {file = "aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23"},
- {file = "aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254"},
- {file = "aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a"},
- {file = "aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940"},
- {file = "aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c"},
- {file = "aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734"},
- {file = "aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7fbdf5ad6084f1940ce88933de34b62358d0f4a0b6ec097362dcd3e5a65a4989"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7c3a50345635a02db61792c85bb86daffac05330f6473d524f1a4e3ef9d0046d"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e87dff73f46e969af38ab3f7cb75316a7c944e2e574ff7c933bc01b10def7f5"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2adebd4577724dcae085665f294cc57c8701ddd4d26140504db622b8d566d7aa"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e036a3a645fe92309ec34b918394bb377950cbb43039a97edae6c08db64b23e2"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:23ad365e30108c422d0b4428cf271156dd56790f6dd50d770b8e360e6c5ab2e6"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1f9b2c2d4b9d958b1f9ae0c984ec1dd6b6689e15c75045be8ccb4011426268ca"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a92cf4b9bea33e15ecbaa5c59921be0f23222608143d025c989924f7e3e0c07"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:070599407f4954021509193404c4ac53153525a19531051661440644728ba9a7"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:29562998ec66f988d49fb83c9b01694fa927186b781463f376c5845c121e4e0b"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4dd3db9d0f4ebca1d887d76f7cdbcd1116ac0d05a9221b9dad82c64a62578c4d"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d7bc4b7f9c4921eba72677cd9fedd2308f4a4ca3e12fab58935295ad9ea98700"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dacd50501cd017f8cccb328da0c90823511d70d24a323196826d923aad865901"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8b2f1414f6a1e0683f212ec80e813f4abef94c739fd090b66c9adf9d2a05feac"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04c3971421576ed24c191f610052bcb2f059e395bc2489dd99e397f9bc466329"},
- {file = "aiohttp-3.13.2-cp39-cp39-win32.whl", hash = "sha256:9f377d0a924e5cc94dc620bc6366fc3e889586a7f18b748901cf016c916e2084"},
- {file = "aiohttp-3.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:9c705601e16c03466cb72011bd1af55d68fa65b045356d8f96c216e5f6db0fa5"},
- {file = "aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca"},
+ {file = "aiohttp-3.13.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ca69ec38adf5cadcc21d0b25e2144f6a25b7db7bea7e730bac25075bc305eff0"},
+ {file = "aiohttp-3.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:240f99f88a9a6beb53ebadac79a2e3417247aa756202ed234b1dbae13d248092"},
+ {file = "aiohttp-3.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a4676b978a9711531e7cea499d4cdc0794c617a1c0579310ab46c9fdf5877702"},
+ {file = "aiohttp-3.13.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48fcdd5bc771cbbab8ccc9588b8b6447f6a30f9fe00898b1a5107098e00d6793"},
+ {file = "aiohttp-3.13.0-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:eeea0cdd2f687e210c8f605f322d7b0300ba55145014a5dbe98bd4be6fff1f6c"},
+ {file = "aiohttp-3.13.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b3f01d5aeb632adaaf39c5e93f040a550464a768d54c514050c635adcbb9d0"},
+ {file = "aiohttp-3.13.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a4dc0b83e25267f42ef065ea57653de4365b56d7bc4e4cfc94fabe56998f8ee6"},
+ {file = "aiohttp-3.13.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:72714919ed9b90f030f761c20670e529c4af96c31bd000917dd0c9afd1afb731"},
+ {file = "aiohttp-3.13.0-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:564be41e85318403fdb176e9e5b3e852d528392f42f2c1d1efcbeeed481126d7"},
+ {file = "aiohttp-3.13.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:84912962071087286333f70569362e10793f73f45c48854e6859df11001eb2d3"},
+ {file = "aiohttp-3.13.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:90b570f1a146181c3d6ae8f755de66227ded49d30d050479b5ae07710f7894c5"},
+ {file = "aiohttp-3.13.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:2d71ca30257ce756e37a6078b1dff2d9475fee13609ad831eac9a6531bea903b"},
+ {file = "aiohttp-3.13.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:cd45eb70eca63f41bb156b7dffbe1a7760153b69892d923bdb79a74099e2ed90"},
+ {file = "aiohttp-3.13.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5ae3a19949a27982c7425a7a5a963c1268fdbabf0be15ab59448cbcf0f992519"},
+ {file = "aiohttp-3.13.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ea6df292013c9f050cbf3f93eee9953d6e5acd9e64a0bf4ca16404bfd7aa9bcc"},
+ {file = "aiohttp-3.13.0-cp310-cp310-win32.whl", hash = "sha256:3b64f22fbb6dcd5663de5ef2d847a5638646ef99112503e6f7704bdecb0d1c4d"},
+ {file = "aiohttp-3.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:f8d877aa60d80715b2afc565f0f1aea66565824c229a2d065b31670e09fed6d7"},
+ {file = "aiohttp-3.13.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:99eb94e97a42367fef5fc11e28cb2362809d3e70837f6e60557816c7106e2e20"},
+ {file = "aiohttp-3.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4696665b2713021c6eba3e2b882a86013763b442577fe5d2056a42111e732eca"},
+ {file = "aiohttp-3.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3e6a38366f7f0d0f6ed7a1198055150c52fda552b107dad4785c0852ad7685d1"},
+ {file = "aiohttp-3.13.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aab715b1a0c37f7f11f9f1f579c6fbaa51ef569e47e3c0a4644fba46077a9409"},
+ {file = "aiohttp-3.13.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7972c82bed87d7bd8e374b60a6b6e816d75ba4f7c2627c2d14eed216e62738e1"},
+ {file = "aiohttp-3.13.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca8313cb852af788c78d5afdea24c40172cbfff8b35e58b407467732fde20390"},
+ {file = "aiohttp-3.13.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c333a2385d2a6298265f4b3e960590f787311b87f6b5e6e21bb8375914ef504"},
+ {file = "aiohttp-3.13.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cc6d5fc5edbfb8041d9607f6a417997fa4d02de78284d386bea7ab767b5ea4f3"},
+ {file = "aiohttp-3.13.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7ddedba3d0043349edc79df3dc2da49c72b06d59a45a42c1c8d987e6b8d175b8"},
+ {file = "aiohttp-3.13.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23ca762140159417a6bbc959ca1927f6949711851e56f2181ddfe8d63512b5ad"},
+ {file = "aiohttp-3.13.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:bfe824d6707a5dc3c5676685f624bc0c63c40d79dc0239a7fd6c034b98c25ebe"},
+ {file = "aiohttp-3.13.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:3c11fa5dd2ef773a8a5a6daa40243d83b450915992eab021789498dc87acc114"},
+ {file = "aiohttp-3.13.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:00fdfe370cffede3163ba9d3f190b32c0cfc8c774f6f67395683d7b0e48cdb8a"},
+ {file = "aiohttp-3.13.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:6475e42ef92717a678bfbf50885a682bb360a6f9c8819fb1a388d98198fdcb80"},
+ {file = "aiohttp-3.13.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:77da5305a410910218b99f2a963092f4277d8a9c1f429c1ff1b026d1826bd0b6"},
+ {file = "aiohttp-3.13.0-cp311-cp311-win32.whl", hash = "sha256:2f9d9ea547618d907f2ee6670c9a951f059c5994e4b6de8dcf7d9747b420c820"},
+ {file = "aiohttp-3.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f19f7798996d4458c669bd770504f710014926e9970f4729cf55853ae200469"},
+ {file = "aiohttp-3.13.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1c272a9a18a5ecc48a7101882230046b83023bb2a662050ecb9bfcb28d9ab53a"},
+ {file = "aiohttp-3.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:97891a23d7fd4e1afe9c2f4473e04595e4acb18e4733b910b6577b74e7e21985"},
+ {file = "aiohttp-3.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:475bd56492ce5f4cffe32b5533c6533ee0c406d1d0e6924879f83adcf51da0ae"},
+ {file = "aiohttp-3.13.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c32ada0abb4bc94c30be2b681c42f058ab104d048da6f0148280a51ce98add8c"},
+ {file = "aiohttp-3.13.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4af1f8877ca46ecdd0bc0d4a6b66d4b2bddc84a79e2e8366bc0d5308e76bceb8"},
+ {file = "aiohttp-3.13.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e04ab827ec4f775817736b20cdc8350f40327f9b598dec4e18c9ffdcbea88a93"},
+ {file = "aiohttp-3.13.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a6d9487b9471ec36b0faedf52228cd732e89be0a2bbd649af890b5e2ce422353"},
+ {file = "aiohttp-3.13.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e66c57416352f36bf98f6641ddadd47c93740a22af7150d3e9a1ef6e983f9a8"},
+ {file = "aiohttp-3.13.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:469167d5372f5bb3aedff4fc53035d593884fff2617a75317740e885acd48b04"},
+ {file = "aiohttp-3.13.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a9f3546b503975a69b547c9fd1582cad10ede1ce6f3e313a2f547c73a3d7814f"},
+ {file = "aiohttp-3.13.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6b4174fcec98601f0cfdf308ee29a6ae53c55f14359e848dab4e94009112ee7d"},
+ {file = "aiohttp-3.13.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a533873a7a4ec2270fb362ee5a0d3b98752e4e1dc9042b257cd54545a96bd8ed"},
+ {file = "aiohttp-3.13.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:ce887c5e54411d607ee0959cac15bb31d506d86a9bcaddf0b7e9d63325a7a802"},
+ {file = "aiohttp-3.13.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d871f6a30d43e32fc9252dc7b9febe1a042b3ff3908aa83868d7cf7c9579a59b"},
+ {file = "aiohttp-3.13.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:222c828243b4789d79a706a876910f656fad4381661691220ba57b2ab4547865"},
+ {file = "aiohttp-3.13.0-cp312-cp312-win32.whl", hash = "sha256:682d2e434ff2f1108314ff7f056ce44e457f12dbed0249b24e106e385cf154b9"},
+ {file = "aiohttp-3.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:0a2be20eb23888df130214b91c262a90e2de1553d6fb7de9e9010cec994c0ff2"},
+ {file = "aiohttp-3.13.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:00243e51f16f6ec0fb021659d4af92f675f3cf9f9b39efd142aa3ad641d8d1e6"},
+ {file = "aiohttp-3.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:059978d2fddc462e9211362cbc8446747ecd930537fa559d3d25c256f032ff54"},
+ {file = "aiohttp-3.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:564b36512a7da3b386143c611867e3f7cfb249300a1bf60889bd9985da67ab77"},
+ {file = "aiohttp-3.13.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4aa995b9156ae499393d949a456a7ab0b994a8241a96db73a3b73c7a090eff6a"},
+ {file = "aiohttp-3.13.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:55ca0e95a3905f62f00900255ed807c580775174252999286f283e646d675a49"},
+ {file = "aiohttp-3.13.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:49ce7525853a981fc35d380aa2353536a01a9ec1b30979ea4e35966316cace7e"},
+ {file = "aiohttp-3.13.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2117be9883501eaf95503bd313eb4c7a23d567edd44014ba15835a1e9ec6d852"},
+ {file = "aiohttp-3.13.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d169c47e40c911f728439da853b6fd06da83761012e6e76f11cb62cddae7282b"},
+ {file = "aiohttp-3.13.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:703ad3f742fc81e543638a7bebddd35acadaa0004a5e00535e795f4b6f2c25ca"},
+ {file = "aiohttp-3.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5bf635c3476f4119b940cc8d94ad454cbe0c377e61b4527f0192aabeac1e9370"},
+ {file = "aiohttp-3.13.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cfe6285ef99e7ee51cef20609be2bc1dd0e8446462b71c9db8bb296ba632810a"},
+ {file = "aiohttp-3.13.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:34d8af6391c5f2e69749d7f037b614b8c5c42093c251f336bdbfa4b03c57d6c4"},
+ {file = "aiohttp-3.13.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:12f5d820fadc5848d4559ea838aef733cf37ed2a1103bba148ac2f5547c14c29"},
+ {file = "aiohttp-3.13.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f1338b61ea66f4757a0544ed8a02ccbf60e38d9cfb3225888888dd4475ebb96"},
+ {file = "aiohttp-3.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:582770f82513419512da096e8df21ca44f86a2e56e25dc93c5ab4df0fe065bf0"},
+ {file = "aiohttp-3.13.0-cp313-cp313-win32.whl", hash = "sha256:3194b8cab8dbc882f37c13ef1262e0a3d62064fa97533d3aa124771f7bf1ecee"},
+ {file = "aiohttp-3.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:7897298b3eedc790257fef8a6ec582ca04e9dbe568ba4a9a890913b925b8ea21"},
+ {file = "aiohttp-3.13.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:c417f8c2e1137775569297c584a8a7144e5d1237789eae56af4faf1894a0b861"},
+ {file = "aiohttp-3.13.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f84b53326abf8e56ebc28a35cebf4a0f396a13a76300f500ab11fe0573bf0b52"},
+ {file = "aiohttp-3.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:990a53b9d6a30b2878789e490758e568b12b4a7fb2527d0c89deb9650b0e5813"},
+ {file = "aiohttp-3.13.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c811612711e01b901e18964b3e5dec0d35525150f5f3f85d0aee2935f059910a"},
+ {file = "aiohttp-3.13.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ee433e594d7948e760b5c2a78cc06ac219df33b0848793cf9513d486a9f90a52"},
+ {file = "aiohttp-3.13.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:19bb08e56f57c215e9572cd65cb6f8097804412c54081d933997ddde3e5ac579"},
+ {file = "aiohttp-3.13.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f27b7488144eb5dd9151cf839b195edd1569629d90ace4c5b6b18e4e75d1e63a"},
+ {file = "aiohttp-3.13.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d812838c109757a11354a161c95708ae4199c4fd4d82b90959b20914c1d097f6"},
+ {file = "aiohttp-3.13.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7c20db99da682f9180fa5195c90b80b159632fb611e8dbccdd99ba0be0970620"},
+ {file = "aiohttp-3.13.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cf8b0870047900eb1f17f453b4b3953b8ffbf203ef56c2f346780ff930a4d430"},
+ {file = "aiohttp-3.13.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:5b8a5557d5af3f4e3add52a58c4cf2b8e6e59fc56b261768866f5337872d596d"},
+ {file = "aiohttp-3.13.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:052bcdd80c1c54b8a18a9ea0cd5e36f473dc8e38d51b804cea34841f677a9971"},
+ {file = "aiohttp-3.13.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:76484ba17b2832776581b7ab466d094e48eba74cb65a60aea20154dae485e8bd"},
+ {file = "aiohttp-3.13.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:62d8a0adcdaf62ee56bfb37737153251ac8e4b27845b3ca065862fb01d99e247"},
+ {file = "aiohttp-3.13.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5004d727499ecb95f7c9147dd0bfc5b5670f71d355f0bd26d7af2d3af8e07d2f"},
+ {file = "aiohttp-3.13.0-cp314-cp314-win32.whl", hash = "sha256:a1c20c26af48aea984f63f96e5d7af7567c32cb527e33b60a0ef0a6313cf8b03"},
+ {file = "aiohttp-3.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:56f7d230ec66e799fbfd8350e9544f8a45a4353f1cf40c1fea74c1780f555b8f"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:2fd35177dc483ae702f07b86c782f4f4b100a8ce4e7c5778cea016979023d9fd"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4df1984c8804ed336089e88ac81a9417b1fd0db7c6f867c50a9264488797e778"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e68c0076052dd911a81d3acc4ef2911cc4ef65bf7cadbfbc8ae762da24da858f"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc95c49853cd29613e4fe4ff96d73068ff89b89d61e53988442e127e8da8e7ba"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3b3bdc89413117b40cc39baae08fd09cbdeb839d421c4e7dce6a34f6b54b3ac1"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e77a729df23be2116acc4e9de2767d8e92445fbca68886dd991dc912f473755"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e88ab34826d6eeb6c67e6e92400b9ec653faf5092a35f07465f44c9f1c429f82"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:019dbef24fe28ce2301419dd63a2b97250d9760ca63ee2976c2da2e3f182f82e"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:2c4aeaedd20771b7b4bcdf0ae791904445df6d856c02fc51d809d12d17cffdc7"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b3a8e6a2058a0240cfde542b641d0e78b594311bc1a710cbcb2e1841417d5cb3"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:f8e38d55ca36c15f36d814ea414ecb2401d860de177c49f84a327a25b3ee752b"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a921edbe971aade1bf45bcbb3494e30ba6863a5c78f28be992c42de980fd9108"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:474cade59a447cb4019c0dce9f0434bf835fb558ea932f62c686fe07fe6db6a1"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:99a303ad960747c33b65b1cb65d01a62ac73fa39b72f08a2e1efa832529b01ed"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:bb34001fc1f05f6b323e02c278090c07a47645caae3aa77ed7ed8a3ce6abcce9"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-win32.whl", hash = "sha256:dea698b64235d053def7d2f08af9302a69fcd760d1c7bd9988fd5d3b6157e657"},
+ {file = "aiohttp-3.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:1f164699a060c0b3616459d13c1464a981fddf36f892f0a5027cbd45121fb14b"},
+ {file = "aiohttp-3.13.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fcc425fb6fd2a00c6d91c85d084c6b75a61bc8bc12159d08e17c5711df6c5ba4"},
+ {file = "aiohttp-3.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7c2c4c9ce834801651f81d6760d0a51035b8b239f58f298de25162fcf6f8bb64"},
+ {file = "aiohttp-3.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f91e8f9053a07177868e813656ec57599cd2a63238844393cd01bd69c2e40147"},
+ {file = "aiohttp-3.13.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df46d9a3d78ec19b495b1107bf26e4fcf97c900279901f4f4819ac5bb2a02a4c"},
+ {file = "aiohttp-3.13.0-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3b1eb9871cbe43b6ca6fac3544682971539d8a1d229e6babe43446279679609d"},
+ {file = "aiohttp-3.13.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:62a3cddf8d9a2eae1f79585fa81d32e13d0c509bb9e7ad47d33c83b45a944df7"},
+ {file = "aiohttp-3.13.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0f735e680c323ee7e9ef8e2ea26425c7dbc2ede0086fa83ce9d7ccab8a089f26"},
+ {file = "aiohttp-3.13.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6a51839f778b0e283b43cd82bb17f1835ee2cc1bf1101765e90ae886e53e751c"},
+ {file = "aiohttp-3.13.0-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac90cfab65bc281d6752f22db5fa90419e33220af4b4fa53b51f5948f414c0e7"},
+ {file = "aiohttp-3.13.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:62fd54f3e6f17976962ba67f911d62723c760a69d54f5d7b74c3ceb1a4e9ef8d"},
+ {file = "aiohttp-3.13.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:cf2b60b65df05b6b2fa0d887f2189991a0dbf44a0dd18359001dc8fcdb7f1163"},
+ {file = "aiohttp-3.13.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:1ccedfe280e804d9a9d7fe8b8c4309d28e364b77f40309c86596baa754af50b1"},
+ {file = "aiohttp-3.13.0-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:ea01ffbe23df53ece0c8732d1585b3d6079bb8c9ee14f3745daf000051415a31"},
+ {file = "aiohttp-3.13.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:19ba8625fa69523627b67f7e9901b587a4952470f68814d79cdc5bc460e9b885"},
+ {file = "aiohttp-3.13.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4b14bfae90598d331b5061fd15a7c290ea0c15b34aeb1cf620464bb5ec02a602"},
+ {file = "aiohttp-3.13.0-cp39-cp39-win32.whl", hash = "sha256:cf7a4b976da219e726d0043fc94ae8169c0dba1d3a059b3c1e2c964bafc5a77d"},
+ {file = "aiohttp-3.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b9697d15231aeaed4786f090c9c8bc3ab5f0e0a6da1e76c135a310def271020"},
+ {file = "aiohttp-3.13.0.tar.gz", hash = "sha256:378dbc57dd8cf341ce243f13fa1fa5394d68e2e02c15cd5f28eae35a70ec7f67"},
]
[package.dependencies]
aiohappyeyeballs = ">=2.5.0"
aiosignal = ">=1.4.0"
+async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""}
attrs = ">=17.3.0"
frozenlist = ">=1.1.1"
multidict = ">=4.5,<7.0"
@@ -152,7 +153,7 @@ propcache = ">=0.2.0"
yarl = ">=1.17.0,<2.0"
[package.extras]
-speedups = ["Brotli", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi"]
+speedups = ["Brotli", "aiodns (>=3.3.0)", "brotlicffi", "zstandard"]
[[package]]
name = "aiosignal"
@@ -195,6 +196,7 @@ files = [
]
[package.dependencies]
+exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
@@ -202,6 +204,19 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
trio = ["trio (>=0.31.0)"]
+[[package]]
+name = "async-timeout"
+version = "5.0.1"
+description = "Timeout context manager for asyncio programs"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+markers = "python_version < \"3.11\""
+files = [
+ {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"},
+ {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"},
+]
+
[[package]]
name = "attrs"
version = "25.4.0"
@@ -228,18 +243,18 @@ files = [
[[package]]
name = "boto3"
-version = "1.40.69"
+version = "1.40.46"
description = "The AWS SDK for Python"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "boto3-1.40.69-py3-none-any.whl", hash = "sha256:c3f710a1990c4be1c0db43b938743d4e404c7f1f06d5f1fa0c8e9b1cea4290b2"},
- {file = "boto3-1.40.69.tar.gz", hash = "sha256:5273f6bac347331a87db809dff97d8736c50c3be19f2bb36ad08c5131c408976"},
+ {file = "boto3-1.40.46-py3-none-any.whl", hash = "sha256:0dfdc13992ceac1ef36a3ab0ac281cd4a45210a53181dc9a71afabfc1db889fe"},
+ {file = "boto3-1.40.46.tar.gz", hash = "sha256:3676767a03d84544b01b3390a2bbdc3b98479223661e90f0ba0b22f4d3f0cb9f"},
]
[package.dependencies]
-botocore = ">=1.40.69,<1.41.0"
+botocore = ">=1.40.46,<1.41.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.14.0,<0.15.0"
@@ -248,36 +263,27 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
-version = "1.40.69"
+version = "1.40.46"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "botocore-1.40.69-py3-none-any.whl", hash = "sha256:5d810efeb9e18f91f32690642fa81ae60e482eefeea0d35ec72da2e3d924c1a5"},
- {file = "botocore-1.40.69.tar.gz", hash = "sha256:df310ddc4d2de5543ba3df4e4b5f9907a2951896d63a9fbae115c26ca0976951"},
+ {file = "botocore-1.40.46-py3-none-any.whl", hash = "sha256:d2c8e0d9ba804d6fd9b942db0aa3e6cfbdd9aab86581b472ee97809b6e5103e0"},
+ {file = "botocore-1.40.46.tar.gz", hash = "sha256:4b0c0efdba788117ef365bf930c0be7300fa052e5e195ea3ed53ab278fc6d7b1"},
]
[package.dependencies]
jmespath = ">=0.7.1,<2.0.0"
python-dateutil = ">=2.1,<3.0.0"
-urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""}
+urllib3 = [
+ {version = ">=1.25.4,<1.27", markers = "python_version < \"3.10\""},
+ {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""},
+]
[package.extras]
crt = ["awscrt (==0.27.6)"]
-[[package]]
-name = "cachetools"
-version = "6.2.1"
-description = "Extensible memoizing collections and decorators"
-optional = false
-python-versions = ">=3.9"
-groups = ["main"]
-files = [
- {file = "cachetools-6.2.1-py3-none-any.whl", hash = "sha256:09868944b6dde876dfd44e1d47e18484541eaf12f26f29b7af91b26cc892d701"},
- {file = "cachetools-6.2.1.tar.gz", hash = "sha256:3f391e4bd8f8bf0931169baf7456cc822705f4e2a31f840d218f445b9a854201"},
-]
-
[[package]]
name = "certifi"
version = "2025.10.5"
@@ -292,137 +298,103 @@ files = [
[[package]]
name = "charset-normalizer"
-version = "3.4.4"
+version = "3.4.3"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
groups = ["main", "dev"]
files = [
- {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d"},
- {file = "charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016"},
- {file = "charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525"},
- {file = "charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14"},
- {file = "charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c"},
- {file = "charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-win32.whl", hash = "sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa"},
- {file = "charset_normalizer-3.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966"},
- {file = "charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50"},
- {file = "charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f"},
- {file = "charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f"},
+ {file = "charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849"},
+ {file = "charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37"},
+ {file = "charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce"},
+ {file = "charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce"},
+ {file = "charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-win32.whl", hash = "sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557"},
+ {file = "charset_normalizer-3.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-win32.whl", hash = "sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432"},
+ {file = "charset_normalizer-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca"},
+ {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"},
+ {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"},
]
[[package]]
name = "click"
-version = "8.3.0"
+version = "8.1.8"
description = "Composable command line interface toolkit"
optional = false
-python-versions = ">=3.10"
+python-versions = ">=3.7"
groups = ["main", "dev"]
files = [
- {file = "click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc"},
- {file = "click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4"},
+ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
+ {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
]
[package.dependencies]
@@ -465,6 +437,25 @@ files = [
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
]
+[[package]]
+name = "exceptiongroup"
+version = "1.3.0"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+groups = ["main", "dev"]
+markers = "python_version < \"3.11\""
+files = [
+ {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
+ {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""}
+
+[package.extras]
+test = ["pytest (>=6)"]
+
[[package]]
name = "execnet"
version = "2.1.1"
@@ -502,104 +493,16 @@ all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
-[[package]]
-name = "fastuuid"
-version = "0.14.0"
-description = "Python bindings to Rust's UUID library."
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
- {file = "fastuuid-0.14.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:6e6243d40f6c793c3e2ee14c13769e341b90be5ef0c23c82fa6515a96145181a"},
- {file = "fastuuid-0.14.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:13ec4f2c3b04271f62be2e1ce7e95ad2dd1cf97e94503a3760db739afbd48f00"},
- {file = "fastuuid-0.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b2fdd48b5e4236df145a149d7125badb28e0a383372add3fbaac9a6b7a394470"},
- {file = "fastuuid-0.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f74631b8322d2780ebcf2d2d75d58045c3e9378625ec51865fe0b5620800c39d"},
- {file = "fastuuid-0.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83cffc144dc93eb604b87b179837f2ce2af44871a7b323f2bfed40e8acb40ba8"},
- {file = "fastuuid-0.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a771f135ab4523eb786e95493803942a5d1fc1610915f131b363f55af53b219"},
- {file = "fastuuid-0.14.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4edc56b877d960b4eda2c4232f953a61490c3134da94f3c28af129fb9c62a4f6"},
- {file = "fastuuid-0.14.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bcc96ee819c282e7c09b2eed2b9bd13084e3b749fdb2faf58c318d498df2efbe"},
- {file = "fastuuid-0.14.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7a3c0bca61eacc1843ea97b288d6789fbad7400d16db24e36a66c28c268cfe3d"},
- {file = "fastuuid-0.14.0-cp310-cp310-win32.whl", hash = "sha256:7f2f3efade4937fae4e77efae1af571902263de7b78a0aee1a1653795a093b2a"},
- {file = "fastuuid-0.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:ae64ba730d179f439b0736208b4c279b8bc9c089b102aec23f86512ea458c8a4"},
- {file = "fastuuid-0.14.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:73946cb950c8caf65127d4e9a325e2b6be0442a224fd51ba3b6ac44e1912ce34"},
- {file = "fastuuid-0.14.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:12ac85024637586a5b69645e7ed986f7535106ed3013640a393a03e461740cb7"},
- {file = "fastuuid-0.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:05a8dde1f395e0c9b4be515b7a521403d1e8349443e7641761af07c7ad1624b1"},
- {file = "fastuuid-0.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09378a05020e3e4883dfdab438926f31fea15fd17604908f3d39cbeb22a0b4dc"},
- {file = "fastuuid-0.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbb0c4b15d66b435d2538f3827f05e44e2baafcc003dd7d8472dc67807ab8fd8"},
- {file = "fastuuid-0.14.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cd5a7f648d4365b41dbf0e38fe8da4884e57bed4e77c83598e076ac0c93995e7"},
- {file = "fastuuid-0.14.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c0a94245afae4d7af8c43b3159d5e3934c53f47140be0be624b96acd672ceb73"},
- {file = "fastuuid-0.14.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2b29e23c97e77c3a9514d70ce343571e469098ac7f5a269320a0f0b3e193ab36"},
- {file = "fastuuid-0.14.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1e690d48f923c253f28151b3a6b4e335f2b06bf669c68a02665bc150b7839e94"},
- {file = "fastuuid-0.14.0-cp311-cp311-win32.whl", hash = "sha256:a6f46790d59ab38c6aa0e35c681c0484b50dc0acf9e2679c005d61e019313c24"},
- {file = "fastuuid-0.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:e150eab56c95dc9e3fefc234a0eedb342fac433dacc273cd4d150a5b0871e1fa"},
- {file = "fastuuid-0.14.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:77e94728324b63660ebf8adb27055e92d2e4611645bf12ed9d88d30486471d0a"},
- {file = "fastuuid-0.14.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:caa1f14d2102cb8d353096bc6ef6c13b2c81f347e6ab9d6fbd48b9dea41c153d"},
- {file = "fastuuid-0.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d23ef06f9e67163be38cece704170486715b177f6baae338110983f99a72c070"},
- {file = "fastuuid-0.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c9ec605ace243b6dbe3bd27ebdd5d33b00d8d1d3f580b39fdd15cd96fd71796"},
- {file = "fastuuid-0.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:808527f2407f58a76c916d6aa15d58692a4a019fdf8d4c32ac7ff303b7d7af09"},
- {file = "fastuuid-0.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fb3c0d7fef6674bbeacdd6dbd386924a7b60b26de849266d1ff6602937675c8"},
- {file = "fastuuid-0.14.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab3f5d36e4393e628a4df337c2c039069344db5f4b9d2a3c9cea48284f1dd741"},
- {file = "fastuuid-0.14.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b9a0ca4f03b7e0b01425281ffd44e99d360e15c895f1907ca105854ed85e2057"},
- {file = "fastuuid-0.14.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3acdf655684cc09e60fb7e4cf524e8f42ea760031945aa8086c7eae2eeeabeb8"},
- {file = "fastuuid-0.14.0-cp312-cp312-win32.whl", hash = "sha256:9579618be6280700ae36ac42c3efd157049fe4dd40ca49b021280481c78c3176"},
- {file = "fastuuid-0.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:d9e4332dc4ba054434a9594cbfaf7823b57993d7d8e7267831c3e059857cf397"},
- {file = "fastuuid-0.14.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:77a09cb7427e7af74c594e409f7731a0cf887221de2f698e1ca0ebf0f3139021"},
- {file = "fastuuid-0.14.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:9bd57289daf7b153bfa3e8013446aa144ce5e8c825e9e366d455155ede5ea2dc"},
- {file = "fastuuid-0.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ac60fc860cdf3c3f327374db87ab8e064c86566ca8c49d2e30df15eda1b0c2d5"},
- {file = "fastuuid-0.14.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab32f74bd56565b186f036e33129da77db8be09178cd2f5206a5d4035fb2a23f"},
- {file = "fastuuid-0.14.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33e678459cf4addaedd9936bbb038e35b3f6b2061330fd8f2f6a1d80414c0f87"},
- {file = "fastuuid-0.14.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1e3cc56742f76cd25ecb98e4b82a25f978ccffba02e4bdce8aba857b6d85d87b"},
- {file = "fastuuid-0.14.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:cb9a030f609194b679e1660f7e32733b7a0f332d519c5d5a6a0a580991290022"},
- {file = "fastuuid-0.14.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:09098762aad4f8da3a888eb9ae01c84430c907a297b97166b8abc07b640f2995"},
- {file = "fastuuid-0.14.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1383fff584fa249b16329a059c68ad45d030d5a4b70fb7c73a08d98fd53bcdab"},
- {file = "fastuuid-0.14.0-cp313-cp313-win32.whl", hash = "sha256:a0809f8cc5731c066c909047f9a314d5f536c871a7a22e815cc4967c110ac9ad"},
- {file = "fastuuid-0.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:0df14e92e7ad3276327631c9e7cec09e32572ce82089c55cb1bb8df71cf394ed"},
- {file = "fastuuid-0.14.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:b852a870a61cfc26c884af205d502881a2e59cc07076b60ab4a951cc0c94d1ad"},
- {file = "fastuuid-0.14.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c7502d6f54cd08024c3ea9b3514e2d6f190feb2f46e6dbcd3747882264bb5f7b"},
- {file = "fastuuid-0.14.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1ca61b592120cf314cfd66e662a5b54a578c5a15b26305e1b8b618a6f22df714"},
- {file = "fastuuid-0.14.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa75b6657ec129d0abded3bec745e6f7ab642e6dba3a5272a68247e85f5f316f"},
- {file = "fastuuid-0.14.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8a0dfea3972200f72d4c7df02c8ac70bad1bb4c58d7e0ec1e6f341679073a7f"},
- {file = "fastuuid-0.14.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1bf539a7a95f35b419f9ad105d5a8a35036df35fdafae48fb2fd2e5f318f0d75"},
- {file = "fastuuid-0.14.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:9a133bf9cc78fdbd1179cb58a59ad0100aa32d8675508150f3658814aeefeaa4"},
- {file = "fastuuid-0.14.0-cp314-cp314-musllinux_1_1_i686.whl", hash = "sha256:f54d5b36c56a2d5e1a31e73b950b28a0d83eb0c37b91d10408875a5a29494bad"},
- {file = "fastuuid-0.14.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:ec27778c6ca3393ef662e2762dba8af13f4ec1aaa32d08d77f71f2a70ae9feb8"},
- {file = "fastuuid-0.14.0-cp314-cp314-win32.whl", hash = "sha256:e23fc6a83f112de4be0cc1990e5b127c27663ae43f866353166f87df58e73d06"},
- {file = "fastuuid-0.14.0-cp314-cp314-win_amd64.whl", hash = "sha256:df61342889d0f5e7a32f7284e55ef95103f2110fee433c2ae7c2c0956d76ac8a"},
- {file = "fastuuid-0.14.0-cp38-cp38-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:47c821f2dfe95909ead0085d4cb18d5149bca704a2b03e03fb3f81a5202d8cea"},
- {file = "fastuuid-0.14.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3964bab460c528692c70ab6b2e469dd7a7b152fbe8c18616c58d34c93a6cf8d4"},
- {file = "fastuuid-0.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c501561e025b7aea3508719c5801c360c711d5218fc4ad5d77bf1c37c1a75779"},
- {file = "fastuuid-0.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dce5d0756f046fa792a40763f36accd7e466525c5710d2195a038f93ff96346"},
- {file = "fastuuid-0.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193ca10ff553cf3cc461572da83b5780fc0e3eea28659c16f89ae5202f3958d4"},
- {file = "fastuuid-0.14.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0737606764b29785566f968bd8005eace73d3666bd0862f33a760796e26d1ede"},
- {file = "fastuuid-0.14.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e0976c0dff7e222513d206e06341503f07423aceb1db0b83ff6851c008ceee06"},
- {file = "fastuuid-0.14.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6fbc49a86173e7f074b1a9ec8cf12ca0d54d8070a85a06ebf0e76c309b84f0d0"},
- {file = "fastuuid-0.14.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:de01280eabcd82f7542828ecd67ebf1551d37203ecdfd7ab1f2e534edb78d505"},
- {file = "fastuuid-0.14.0-cp38-cp38-win32.whl", hash = "sha256:af5967c666b7d6a377098849b07f83462c4fedbafcf8eb8bc8ff05dcbe8aa209"},
- {file = "fastuuid-0.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3091e63acf42f56a6f74dc65cfdb6f99bfc79b5913c8a9ac498eb7ca09770a8"},
- {file = "fastuuid-0.14.0-cp39-cp39-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2ec3d94e13712a133137b2805073b65ecef4a47217d5bac15d8ac62376cefdb4"},
- {file = "fastuuid-0.14.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:139d7ff12bb400b4a0c76be64c28cbe2e2edf60b09826cbfd85f33ed3d0bbe8b"},
- {file = "fastuuid-0.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d55b7e96531216fc4f071909e33e35e5bfa47962ae67d9e84b00a04d6e8b7173"},
- {file = "fastuuid-0.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0eb25f0fd935e376ac4334927a59e7c823b36062080e2e13acbaf2af15db836"},
- {file = "fastuuid-0.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:089c18018fdbdda88a6dafd7d139f8703a1e7c799618e33ea25eb52503d28a11"},
- {file = "fastuuid-0.14.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fc37479517d4d70c08696960fad85494a8a7a0af4e93e9a00af04d74c59f9e3"},
- {file = "fastuuid-0.14.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:73657c9f778aba530bc96a943d30e1a7c80edb8278df77894fe9457540df4f85"},
- {file = "fastuuid-0.14.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d31f8c257046b5617fc6af9c69be066d2412bdef1edaa4bdf6a214cf57806105"},
- {file = "fastuuid-0.14.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5816d41f81782b209843e52fdef757a361b448d782452d96abedc53d545da722"},
- {file = "fastuuid-0.14.0-cp39-cp39-win32.whl", hash = "sha256:448aa6833f7a84bfe37dd47e33df83250f404d591eb83527fa2cac8d1e57d7f3"},
- {file = "fastuuid-0.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:84b0779c5abbdec2a9511d5ffbfcd2e53079bf889824b32be170c0d8ef5fc74c"},
- {file = "fastuuid-0.14.0.tar.gz", hash = "sha256:178947fc2f995b38497a74172adee64fdeb8b7ec18f2a5934d037641ba265d26"},
-]
-
[[package]]
name = "filelock"
-version = "3.20.0"
+version = "3.19.1"
description = "A platform independent file lock."
optional = false
-python-versions = ">=3.10"
+python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2"},
- {file = "filelock-3.20.0.tar.gz", hash = "sha256:711e943b4ec6be42e1d4e6690b48dc175c822967466bb31c0c293f34334c13f4"},
+ {file = "filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d"},
+ {file = "filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58"},
]
[[package]]
@@ -744,14 +647,14 @@ files = [
[[package]]
name = "fsspec"
-version = "2025.10.0"
+version = "2025.9.0"
description = "File-system specification"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "fsspec-2025.10.0-py3-none-any.whl", hash = "sha256:7c7712353ae7d875407f97715f0e1ffcc21e33d5b24556cb1e090ae9409ec61d"},
- {file = "fsspec-2025.10.0.tar.gz", hash = "sha256:b6789427626f068f9a83ca4e8a3cc050850b6c0f71f99ddb4f542b8266a26a59"},
+ {file = "fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7"},
+ {file = "fsspec-2025.9.0.tar.gz", hash = "sha256:19fd429483d25d28b65ec68f9f4adc16c17ea2c7c7bf54ec61360d478fb19c19"},
]
[package.extras]
@@ -782,43 +685,16 @@ test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto
test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"]
tqdm = ["tqdm"]
-[[package]]
-name = "google-auth"
-version = "2.43.0"
-description = "Google Authentication Library"
-optional = false
-python-versions = ">=3.7"
-groups = ["main"]
-files = [
- {file = "google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16"},
- {file = "google_auth-2.43.0.tar.gz", hash = "sha256:88228eee5fc21b62a1b5fe773ca15e67778cb07dc8363adcb4a8827b52d81483"},
-]
-
-[package.dependencies]
-cachetools = ">=2.0.0,<7.0"
-pyasn1-modules = ">=0.2.1"
-rsa = ">=3.1.4,<5"
-
-[package.extras]
-aiohttp = ["aiohttp (>=3.6.2,<4.0.0)", "requests (>=2.20.0,<3.0.0)"]
-enterprise-cert = ["cryptography", "pyopenssl"]
-pyjwt = ["cryptography (<39.0.0)", "cryptography (>=38.0.3)", "pyjwt (>=2.0)"]
-pyopenssl = ["cryptography (<39.0.0)", "cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"]
-reauth = ["pyu2f (>=0.1.5)"]
-requests = ["requests (>=2.20.0,<3.0.0)"]
-testing = ["aiohttp (<3.10.0)", "aiohttp (>=3.6.2,<4.0.0)", "aioresponses", "cryptography (<39.0.0)", "cryptography (<39.0.0)", "cryptography (>=38.0.3)", "cryptography (>=38.0.3)", "flask", "freezegun", "grpcio", "mock", "oauth2client", "packaging", "pyjwt (>=2.0)", "pyopenssl (<24.3.0)", "pyopenssl (>=20.0.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-localserver", "pyu2f (>=0.1.5)", "requests (>=2.20.0,<3.0.0)", "responses", "urllib3"]
-urllib3 = ["packaging", "urllib3"]
-
[[package]]
name = "googleapis-common-protos"
-version = "1.72.0"
+version = "1.70.0"
description = "Common protobufs used in Google APIs"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
- {file = "googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038"},
- {file = "googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5"},
+ {file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"},
+ {file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"},
]
[package.dependencies]
@@ -924,14 +800,14 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t
[[package]]
name = "idna"
-version = "3.11"
+version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.6"
groups = ["main", "dev"]
files = [
- {file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"},
- {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"},
+ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
+ {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
[package.extras]
@@ -963,14 +839,14 @@ type = ["pytest-mypy"]
[[package]]
name = "iniconfig"
-version = "2.3.0"
+version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
-python-versions = ">=3.10"
+python-versions = ">=3.8"
groups = ["dev"]
files = [
- {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"},
- {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"},
+ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
+ {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
]
[[package]]
@@ -993,114 +869,90 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jiter"
-version = "0.12.0"
+version = "0.11.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "jiter-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e7acbaba9703d5de82a2c98ae6a0f59ab9770ab5af5fa35e43a303aee962cf65"},
- {file = "jiter-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:364f1a7294c91281260364222f535bc427f56d4de1d8ffd718162d21fbbd602e"},
- {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ee4d25805d4fb23f0a5167a962ef8e002dbfb29c0989378488e32cf2744b62"},
- {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:796f466b7942107eb889c08433b6e31b9a7ed31daceaecf8af1be26fb26c0ca8"},
- {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35506cb71f47dba416694e67af996bbdefb8e3608f1f78799c2e1f9058b01ceb"},
- {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:726c764a90c9218ec9e4f99a33d6bf5ec169163f2ca0fc21b654e88c2abc0abc"},
- {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa47810c5565274810b726b0dc86d18dce5fd17b190ebdc3890851d7b2a0e74"},
- {file = "jiter-0.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ec0259d3f26c62aed4d73b198c53e316ae11f0f69c8fbe6682c6dcfa0fcce2"},
- {file = "jiter-0.12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:79307d74ea83465b0152fa23e5e297149506435535282f979f18b9033c0bb025"},
- {file = "jiter-0.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cf6e6dd18927121fec86739f1a8906944703941d000f0639f3eb6281cc601dca"},
- {file = "jiter-0.12.0-cp310-cp310-win32.whl", hash = "sha256:b6ae2aec8217327d872cbfb2c1694489057b9433afce447955763e6ab015b4c4"},
- {file = "jiter-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c7f49ce90a71e44f7e1aa9e7ec415b9686bbc6a5961e57eab511015e6759bc11"},
- {file = "jiter-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8f8a7e317190b2c2d60eb2e8aa835270b008139562d70fe732e1c0020ec53c9"},
- {file = "jiter-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2218228a077e784c6c8f1a8e5d6b8cb1dea62ce25811c356364848554b2056cd"},
- {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9354ccaa2982bf2188fd5f57f79f800ef622ec67beb8329903abf6b10da7d423"},
- {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f2607185ea89b4af9a604d4c7ec40e45d3ad03ee66998b031134bc510232bb7"},
- {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a585a5e42d25f2e71db5f10b171f5e5ea641d3aa44f7df745aa965606111cc2"},
- {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd9e21d34edff5a663c631f850edcb786719c960ce887a5661e9c828a53a95d9"},
- {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a612534770470686cd5431478dc5a1b660eceb410abade6b1b74e320ca98de6"},
- {file = "jiter-0.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3985aea37d40a908f887b34d05111e0aae822943796ebf8338877fee2ab67725"},
- {file = "jiter-0.12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b1207af186495f48f72529f8d86671903c8c10127cac6381b11dddc4aaa52df6"},
- {file = "jiter-0.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef2fb241de583934c9915a33120ecc06d94aa3381a134570f59eed784e87001e"},
- {file = "jiter-0.12.0-cp311-cp311-win32.whl", hash = "sha256:453b6035672fecce8007465896a25b28a6b59cfe8fbc974b2563a92f5a92a67c"},
- {file = "jiter-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:ca264b9603973c2ad9435c71a8ec8b49f8f715ab5ba421c85a51cde9887e421f"},
- {file = "jiter-0.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:cb00ef392e7d684f2754598c02c409f376ddcef857aae796d559e6cacc2d78a5"},
- {file = "jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37"},
- {file = "jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274"},
- {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3"},
- {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf"},
- {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1"},
- {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df"},
- {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403"},
- {file = "jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126"},
- {file = "jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9"},
- {file = "jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86"},
- {file = "jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44"},
- {file = "jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb"},
- {file = "jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789"},
- {file = "jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e"},
- {file = "jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1"},
- {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf"},
- {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44"},
- {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45"},
- {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87"},
- {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed"},
- {file = "jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9"},
- {file = "jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626"},
- {file = "jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c"},
- {file = "jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de"},
- {file = "jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a"},
- {file = "jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60"},
- {file = "jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6"},
- {file = "jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4"},
- {file = "jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb"},
- {file = "jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7"},
- {file = "jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3"},
- {file = "jiter-0.12.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c46d927acd09c67a9fb1416df45c5a04c27e83aae969267e98fba35b74e99525"},
- {file = "jiter-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:774ff60b27a84a85b27b88cd5583899c59940bcc126caca97eb2a9df6aa00c49"},
- {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5433fab222fb072237df3f637d01b81f040a07dcac1cb4a5c75c7aa9ed0bef1"},
- {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8c593c6e71c07866ec6bfb790e202a833eeec885022296aff6b9e0b92d6a70e"},
- {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90d32894d4c6877a87ae00c6b915b609406819dce8bc0d4e962e4de2784e567e"},
- {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:798e46eed9eb10c3adbbacbd3bdb5ecd4cf7064e453d00dbef08802dae6937ff"},
- {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3f1368f0a6719ea80013a4eb90ba72e75d7ea67cfc7846db2ca504f3df0169a"},
- {file = "jiter-0.12.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65f04a9d0b4406f7e51279710b27484af411896246200e461d80d3ba0caa901a"},
- {file = "jiter-0.12.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:fd990541982a24281d12b67a335e44f117e4c6cbad3c3b75c7dea68bf4ce3a67"},
- {file = "jiter-0.12.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:b111b0e9152fa7df870ecaebb0bd30240d9f7fff1f2003bcb4ed0f519941820b"},
- {file = "jiter-0.12.0-cp314-cp314-win32.whl", hash = "sha256:a78befb9cc0a45b5a5a0d537b06f8544c2ebb60d19d02c41ff15da28a9e22d42"},
- {file = "jiter-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:e1fe01c082f6aafbe5c8faf0ff074f38dfb911d53f07ec333ca03f8f6226debf"},
- {file = "jiter-0.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:d72f3b5a432a4c546ea4bedc84cce0c3404874f1d1676260b9c7f048a9855451"},
- {file = "jiter-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e6ded41aeba3603f9728ed2b6196e4df875348ab97b28fc8afff115ed42ba7a7"},
- {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a947920902420a6ada6ad51892082521978e9dd44a802663b001436e4b771684"},
- {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:add5e227e0554d3a52cf390a7635edaffdf4f8fce4fdbcef3cc2055bb396a30c"},
- {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9b1cda8fcb736250d7e8711d4580ebf004a46771432be0ae4796944b5dfa5d"},
- {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb12a2223fe0135c7ff1356a143d57f95bbf1f4a66584f1fc74df21d86b993"},
- {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c596cc0f4cb574877550ce4ecd51f8037469146addd676d7c1a30ebe6391923f"},
- {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ab4c823b216a4aeab3fdbf579c5843165756bd9ad87cc6b1c65919c4715f783"},
- {file = "jiter-0.12.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e427eee51149edf962203ff8db75a7514ab89be5cb623fb9cea1f20b54f1107b"},
- {file = "jiter-0.12.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:edb868841f84c111255ba5e80339d386d937ec1fdce419518ce1bd9370fac5b6"},
- {file = "jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183"},
- {file = "jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873"},
- {file = "jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473"},
- {file = "jiter-0.12.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c9d28b218d5f9e5f69a0787a196322a5056540cb378cac8ff542b4fa7219966c"},
- {file = "jiter-0.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0ee12028daf8cfcf880dd492349a122a64f42c059b6c62a2b0c96a83a8da820"},
- {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b135ebe757a82d67ed2821526e72d0acf87dd61f6013e20d3c45b8048af927b"},
- {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15d7fafb81af8a9e3039fc305529a61cd933eecee33b4251878a1c89859552a3"},
- {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92d1f41211d8a8fe412faad962d424d334764c01dac6691c44691c2e4d3eedaf"},
- {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a64a48d7c917b8f32f25c176df8749ecf08cec17c466114727efe7441e17f6d"},
- {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:122046f3b3710b85de99d9aa2f3f0492a8233a2f54a64902b096efc27ea747b5"},
- {file = "jiter-0.12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:27ec39225e03c32c6b863ba879deb427882f243ae46f0d82d68b695fa5b48b40"},
- {file = "jiter-0.12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26b9e155ddc132225a39b1995b3b9f0fe0f79a6d5cbbeacf103271e7d309b404"},
- {file = "jiter-0.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ab05b7c58e29bb9e60b70c2e0094c98df79a1e42e397b9bb6eaa989b7a66dd0"},
- {file = "jiter-0.12.0-cp39-cp39-win32.whl", hash = "sha256:59f9f9df87ed499136db1c2b6c9efb902f964bed42a582ab7af413b6a293e7b0"},
- {file = "jiter-0.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:d3719596a1ebe7a48a498e8d5d0c4bf7553321d4c3eee1d620628d51351a3928"},
- {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:4739a4657179ebf08f85914ce50332495811004cc1747852e8b2041ed2aab9b8"},
- {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:41da8def934bf7bec16cb24bd33c0ca62126d2d45d81d17b864bd5ad721393c3"},
- {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c44ee814f499c082e69872d426b624987dbc5943ab06e9bbaa4f81989fdb79e"},
- {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2097de91cf03eaa27b3cbdb969addf83f0179c6afc41bbc4513705e013c65d"},
- {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb"},
- {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b"},
- {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f"},
- {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c"},
- {file = "jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b"},
+ {file = "jiter-0.11.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3893ce831e1c0094a83eeaf56c635a167d6fa8cc14393cc14298fd6fdc2a2449"},
+ {file = "jiter-0.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:25c625b9b61b5a8725267fdf867ef2e51b429687f6a4eef211f4612e95607179"},
+ {file = "jiter-0.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd4ca85fb6a62cf72e1c7f5e34ddef1b660ce4ed0886ec94a1ef9777d35eaa1f"},
+ {file = "jiter-0.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:572208127034725e79c28437b82414028c3562335f2b4f451d98136d0fc5f9cd"},
+ {file = "jiter-0.11.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494ba627c7f550ad3dabb21862864b8f2216098dc18ff62f37b37796f2f7c325"},
+ {file = "jiter-0.11.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8da18a99f58bca3ecc2d2bba99cac000a924e115b6c4f0a2b98f752b6fbf39a"},
+ {file = "jiter-0.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ffd3b0fff3fabbb02cc09910c08144db6bb5697a98d227a074401e01ee63dd"},
+ {file = "jiter-0.11.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8fe6530aa738a4f7d4e4702aa8f9581425d04036a5f9e25af65ebe1f708f23be"},
+ {file = "jiter-0.11.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e35d66681c133a03d7e974e7eedae89720fe8ca3bd09f01a4909b86a8adf31f5"},
+ {file = "jiter-0.11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c59459beca2fbc9718b6f1acb7bfb59ebc3eb4294fa4d40e9cb679dafdcc6c60"},
+ {file = "jiter-0.11.0-cp310-cp310-win32.whl", hash = "sha256:b7b0178417b0dcfc5f259edbc6db2b1f5896093ed9035ee7bab0f2be8854726d"},
+ {file = "jiter-0.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:11df2bf99fb4754abddd7f5d940a48e51f9d11624d6313ca4314145fcad347f0"},
+ {file = "jiter-0.11.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:cb5d9db02979c3f49071fce51a48f4b4e4cf574175fb2b11c7a535fa4867b222"},
+ {file = "jiter-0.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1dc6a123f3471c4730db7ca8ba75f1bb3dcb6faeb8d46dd781083e7dee88b32d"},
+ {file = "jiter-0.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09858f8d230f031c7b8e557429102bf050eea29c77ad9c34c8fe253c5329acb7"},
+ {file = "jiter-0.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dbe2196c4a0ce760925a74ab4456bf644748ab0979762139626ad138f6dac72d"},
+ {file = "jiter-0.11.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5beb56d22b63647bafd0b74979216fdee80c580c0c63410be8c11053860ffd09"},
+ {file = "jiter-0.11.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97025d09ef549795d8dc720a824312cee3253c890ac73c621721ddfc75066789"},
+ {file = "jiter-0.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d50880a6da65d8c23a2cf53c412847d9757e74cc9a3b95c5704a1d1a24667347"},
+ {file = "jiter-0.11.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:452d80a1c86c095a242007bd9fc5d21b8a8442307193378f891cb8727e469648"},
+ {file = "jiter-0.11.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e84e58198d4894668eec2da660ffff60e0f3e60afa790ecc50cb12b0e02ca1d4"},
+ {file = "jiter-0.11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df64edcfc5dd5279a791eea52aa113d432c933119a025b0b5739f90d2e4e75f1"},
+ {file = "jiter-0.11.0-cp311-cp311-win32.whl", hash = "sha256:144fc21337d21b1d048f7f44bf70881e1586401d405ed3a98c95a114a9994982"},
+ {file = "jiter-0.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:b0f32e644d241293b892b1a6dd8f0b9cc029bfd94c97376b2681c36548aabab7"},
+ {file = "jiter-0.11.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:2fb7b377688cc3850bbe5c192a6bd493562a0bc50cbc8b047316428fbae00ada"},
+ {file = "jiter-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a1b7cbe3f25bd0d8abb468ba4302a5d45617ee61b2a7a638f63fee1dc086be99"},
+ {file = "jiter-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0a7f0ec81d5b7588c5cade1eb1925b91436ae6726dc2df2348524aeabad5de6"},
+ {file = "jiter-0.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07630bb46ea2a6b9c6ed986c6e17e35b26148cce2c535454b26ee3f0e8dcaba1"},
+ {file = "jiter-0.11.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7764f27d28cd4a9cbc61704dfcd80c903ce3aad106a37902d3270cd6673d17f4"},
+ {file = "jiter-0.11.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4a6c4a737d486f77f842aeb22807edecb4a9417e6700c7b981e16d34ba7c72"},
+ {file = "jiter-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf408d2a0abd919b60de8c2e7bc5eeab72d4dafd18784152acc7c9adc3291591"},
+ {file = "jiter-0.11.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cdef53eda7d18e799625023e1e250dbc18fbc275153039b873ec74d7e8883e09"},
+ {file = "jiter-0.11.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:53933a38ef7b551dd9c7f1064f9d7bb235bb3168d0fa5f14f0798d1b7ea0d9c5"},
+ {file = "jiter-0.11.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11840d2324c9ab5162fc1abba23bc922124fedcff0d7b7f85fffa291e2f69206"},
+ {file = "jiter-0.11.0-cp312-cp312-win32.whl", hash = "sha256:4f01a744d24a5f2bb4a11657a1b27b61dc038ae2e674621a74020406e08f749b"},
+ {file = "jiter-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:29fff31190ab3a26de026da2f187814f4b9c6695361e20a9ac2123e4d4378a4c"},
+ {file = "jiter-0.11.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:4441a91b80a80249f9a6452c14b2c24708f139f64de959943dfeaa6cb915e8eb"},
+ {file = "jiter-0.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ff85fc6d2a431251ad82dbd1ea953affb5a60376b62e7d6809c5cd058bb39471"},
+ {file = "jiter-0.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5e86126d64706fd28dfc46f910d496923c6f95b395138c02d0e252947f452bd"},
+ {file = "jiter-0.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad8bd82165961867a10f52010590ce0b7a8c53da5ddd8bbb62fef68c181b921"},
+ {file = "jiter-0.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b42c2cd74273455ce439fd9528db0c6e84b5623cb74572305bdd9f2f2961d3df"},
+ {file = "jiter-0.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0062dab98172dd0599fcdbf90214d0dcde070b1ff38a00cc1b90e111f071982"},
+ {file = "jiter-0.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb948402821bc76d1f6ef0f9e19b816f9b09f8577844ba7140f0b6afe994bc64"},
+ {file = "jiter-0.11.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25a5b1110cca7329fd0daf5060faa1234be5c11e988948e4f1a1923b6a457fe1"},
+ {file = "jiter-0.11.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bf11807e802a214daf6c485037778843fadd3e2ec29377ae17e0706ec1a25758"},
+ {file = "jiter-0.11.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:dbb57da40631c267861dd0090461222060960012d70fd6e4c799b0f62d0ba166"},
+ {file = "jiter-0.11.0-cp313-cp313-win32.whl", hash = "sha256:8e36924dad32c48d3c5e188d169e71dc6e84d6cb8dedefea089de5739d1d2f80"},
+ {file = "jiter-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:452d13e4fd59698408087235259cebe67d9d49173b4dacb3e8d35ce4acf385d6"},
+ {file = "jiter-0.11.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:089f9df9f69532d1339e83142438668f52c97cd22ee2d1195551c2b1a9e6cf33"},
+ {file = "jiter-0.11.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29ed1fe69a8c69bf0f2a962d8d706c7b89b50f1332cd6b9fbda014f60bd03a03"},
+ {file = "jiter-0.11.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a4d71d7ea6ea8786291423fe209acf6f8d398a0759d03e7f24094acb8ab686ba"},
+ {file = "jiter-0.11.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:9a6dff27eca70930bdbe4cbb7c1a4ba8526e13b63dc808c0670083d2d51a4a72"},
+ {file = "jiter-0.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b1ae2a7593a62132c7d4c2abbee80bbbb94fdc6d157e2c6cc966250c564ef774"},
+ {file = "jiter-0.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b13a431dba4b059e9e43019d3022346d009baf5066c24dcdea321a303cde9f0"},
+ {file = "jiter-0.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:af62e84ca3889604ebb645df3b0a3f3bcf6b92babbff642bd214616f57abb93a"},
+ {file = "jiter-0.11.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6f3b32bb723246e6b351aecace52aba78adb8eeb4b2391630322dc30ff6c773"},
+ {file = "jiter-0.11.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:adcab442f4a099a358a7f562eaa54ed6456fb866e922c6545a717be51dbed7d7"},
+ {file = "jiter-0.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9967c2ab338ee2b2c0102fd379ec2693c496abf71ffd47e4d791d1f593b68e2"},
+ {file = "jiter-0.11.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e7d0bed3b187af8b47a981d9742ddfc1d9b252a7235471ad6078e7e4e5fe75c2"},
+ {file = "jiter-0.11.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:f6fe0283e903ebc55f1a6cc569b8c1f3bf4abd026fed85e3ff8598a9e6f982f0"},
+ {file = "jiter-0.11.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:4ee5821e3d66606b29ae5b497230b304f1376f38137d69e35f8d2bd5f310ff73"},
+ {file = "jiter-0.11.0-cp314-cp314-win32.whl", hash = "sha256:c2d13ba7567ca8799f17c76ed56b1d49be30df996eb7fa33e46b62800562a5e2"},
+ {file = "jiter-0.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fb4790497369d134a07fc763cc88888c46f734abdd66f9fdf7865038bf3a8f40"},
+ {file = "jiter-0.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e2bbf24f16ba5ad4441a9845e40e4ea0cb9eed00e76ba94050664ef53ef4406"},
+ {file = "jiter-0.11.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:719891c2fb7628a41adff4f2f54c19380a27e6fdfdb743c24680ef1a54c67bd0"},
+ {file = "jiter-0.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:df7f1927cbdf34cb91262a5418ca06920fd42f1cf733936d863aeb29b45a14ef"},
+ {file = "jiter-0.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e71ae6d969d0c9bab336c5e9e2fabad31e74d823f19e3604eaf96d9a97f463df"},
+ {file = "jiter-0.11.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5661469a7b2be25ade3a4bb6c21ffd1e142e13351a0759f264dfdd3ad99af1ab"},
+ {file = "jiter-0.11.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76c15ef0d3d02f8b389066fa4c410a0b89e9cc6468a1f0674c5925d2f3c3e890"},
+ {file = "jiter-0.11.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63782a1350917a27817030716566ed3d5b3c731500fd42d483cbd7094e2c5b25"},
+ {file = "jiter-0.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a7092b699646a1ddc03a7b112622d9c066172627c7382659befb0d2996f1659"},
+ {file = "jiter-0.11.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f637b8e818f6d75540f350a6011ce21252573c0998ea1b4365ee54b7672c23c5"},
+ {file = "jiter-0.11.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a624d87719e1b5d09c15286eaee7e1532a40c692a096ea7ca791121365f548c1"},
+ {file = "jiter-0.11.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9d0146d8d9b3995821bb586fc8256636258947c2f39da5bab709f3a28fb1a0b"},
+ {file = "jiter-0.11.0-cp39-cp39-win32.whl", hash = "sha256:d067655a7cf0831eb8ec3e39cbd752995e9b69a2206df3535b3a067fac23b032"},
+ {file = "jiter-0.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:f05d03775a11aaf132c447436983169958439f1219069abf24662a672851f94e"},
+ {file = "jiter-0.11.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:902b43386c04739229076bd1c4c69de5d115553d982ab442a8ae82947c72ede7"},
+ {file = "jiter-0.11.0.tar.gz", hash = "sha256:1d9637eaf8c1d6a63d6562f2a6e5ab3af946c66037eb1b894e8fad75422266e4"},
]
[[package]]
@@ -1154,20 +1006,19 @@ referencing = ">=0.31.0"
[[package]]
name = "litellm"
-version = "1.78.7"
+version = "1.76.0"
description = "Library to easily interface with LLM API providers"
optional = false
python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
groups = ["main"]
files = [
- {file = "litellm-1.78.7-py3-none-any.whl", hash = "sha256:aa93ae1fefe02fb00b2a78eba3c95002f9ef478bade3e22e63508830182e2dfe"},
- {file = "litellm-1.78.7.tar.gz", hash = "sha256:6b10f5c7dc217bde3481fa4f70b5c37edbfa617bec7149276833d311f76a6783"},
+ {file = "litellm-1.76.0-py3-none-any.whl", hash = "sha256:357464242fc1eeda384810c9e334e48ad67a50ecd30cf61e86c15f89e2f2e0b4"},
+ {file = "litellm-1.76.0.tar.gz", hash = "sha256:d26d12333135edd72af60e0e310284dac3b079f4d7c47c79dfbb2430b9b4b421"},
]
[package.dependencies]
aiohttp = ">=3.10"
click = "*"
-fastuuid = ">=0.13.0"
httpx = ">=0.23.0"
importlib-metadata = ">=6.8.0"
jinja2 = ">=3.1.2,<4.0.0"
@@ -1182,7 +1033,7 @@ tokenizers = "*"
caching = ["diskcache (>=5.6.1,<6.0.0)"]
extra-proxy = ["azure-identity (>=1.15.0,<2.0.0)", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-iam (>=2.19.1,<3.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "redisvl (>=0.4.1,<0.5.0)", "resend (>=0.8.0,<0.9.0)"]
mlflow = ["mlflow (>3.1.4)"]
-proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-storage-blob (>=12.25.1,<13.0.0)", "backoff", "boto3 (==1.36.0)", "cryptography", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=23.0.0,<24.0.0)", "litellm-enterprise (==0.1.20)", "litellm-proxy-extras (==0.2.27)", "mcp (>=1.10.0,<2.0.0)", "orjson (>=3.9.7,<4.0.0)", "polars (>=1.31.0,<2.0.0)", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rich (==13.7.1)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0)", "websockets (>=13.1.0,<14.0.0)"]
+proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-storage-blob (>=12.25.1,<13.0.0)", "backoff", "boto3 (==1.36.0)", "cryptography (>=43.0.1,<44.0.0)", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=23.0.0,<24.0.0)", "litellm-enterprise (==0.1.19)", "litellm-proxy-extras (==0.2.18)", "mcp (>=1.10.0,<2.0.0)", "orjson (>=3.9.7,<4.0.0)", "polars (>=1.31.0,<2.0.0)", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rich (==13.7.1)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0)", "websockets (>=13.1.0,<14.0.0)"]
semantic-router = ["semantic-router"]
utils = ["numpydoc"]
@@ -1453,6 +1304,9 @@ files = [
{file = "multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5"},
]
+[package.dependencies]
+typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""}
+
[[package]]
name = "mypy"
version = "1.18.2"
@@ -1504,6 +1358,7 @@ files = [
[package.dependencies]
mypy_extensions = ">=1.0.0"
pathspec = ">=0.9.0"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing_extensions = ">=4.6.0"
[package.extras]
@@ -1527,42 +1382,42 @@ files = [
[[package]]
name = "openai"
-version = "2.7.1"
+version = "1.109.1"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "openai-2.7.1-py3-none-any.whl", hash = "sha256:2f2530354d94c59c614645a4662b9dab0a5b881c5cd767a8587398feac0c9021"},
- {file = "openai-2.7.1.tar.gz", hash = "sha256:df4d4a3622b2df3475ead8eb0fbb3c27fd1c070fa2e55d778ca4f40e0186c726"},
+ {file = "openai-1.109.1-py3-none-any.whl", hash = "sha256:6bcaf57086cf59159b8e27447e4e7dd019db5d29a438072fbd49c290c7e65315"},
+ {file = "openai-1.109.1.tar.gz", hash = "sha256:d173ed8dbca665892a6db099b4a2dfac624f94d20a93f46eb0b56aae940ed869"},
]
[package.dependencies]
anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
httpx = ">=0.23.0,<1"
-jiter = ">=0.10.0,<1"
+jiter = ">=0.4.0,<1"
pydantic = ">=1.9.0,<3"
sniffio = "*"
tqdm = ">4"
typing-extensions = ">=4.11,<5"
[package.extras]
-aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.9)"]
+aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
realtime = ["websockets (>=13,<16)"]
voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]]
name = "opentelemetry-api"
-version = "1.38.0"
+version = "1.37.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582"},
- {file = "opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12"},
+ {file = "opentelemetry_api-1.37.0-py3-none-any.whl", hash = "sha256:accf2024d3e89faec14302213bc39550ec0f4095d1cf5ca688e1bfb1c8612f47"},
+ {file = "opentelemetry_api-1.37.0.tar.gz", hash = "sha256:540735b120355bd5112738ea53621f8d5edb35ebcd6fe21ada3ab1c61d1cd9a7"},
]
[package.dependencies]
@@ -1571,68 +1426,68 @@ typing-extensions = ">=4.5.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
-version = "1.38.0"
+version = "1.37.0"
description = "OpenTelemetry Protobuf encoding"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a"},
- {file = "opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c"},
+ {file = "opentelemetry_exporter_otlp_proto_common-1.37.0-py3-none-any.whl", hash = "sha256:53038428449c559b0c564b8d718df3314da387109c4d36bd1b94c9a641b0292e"},
+ {file = "opentelemetry_exporter_otlp_proto_common-1.37.0.tar.gz", hash = "sha256:c87a1bdd9f41fdc408d9cc9367bb53f8d2602829659f2b90be9f9d79d0bfe62c"},
]
[package.dependencies]
-opentelemetry-proto = "1.38.0"
+opentelemetry-proto = "1.37.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
-version = "1.38.0"
+version = "1.37.0"
description = "OpenTelemetry Collector Protobuf over HTTP Exporter"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b"},
- {file = "opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b"},
+ {file = "opentelemetry_exporter_otlp_proto_http-1.37.0-py3-none-any.whl", hash = "sha256:54c42b39945a6cc9d9a2a33decb876eabb9547e0dcb49df090122773447f1aef"},
+ {file = "opentelemetry_exporter_otlp_proto_http-1.37.0.tar.gz", hash = "sha256:e52e8600f1720d6de298419a802108a8f5afa63c96809ff83becb03f874e44ac"},
]
[package.dependencies]
googleapis-common-protos = ">=1.52,<2.0"
opentelemetry-api = ">=1.15,<2.0"
-opentelemetry-exporter-otlp-proto-common = "1.38.0"
-opentelemetry-proto = "1.38.0"
-opentelemetry-sdk = ">=1.38.0,<1.39.0"
+opentelemetry-exporter-otlp-proto-common = "1.37.0"
+opentelemetry-proto = "1.37.0"
+opentelemetry-sdk = ">=1.37.0,<1.38.0"
requests = ">=2.7,<3.0"
typing-extensions = ">=4.5.0"
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.59b0"
+version = "0.58b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation-0.59b0-py3-none-any.whl", hash = "sha256:44082cc8fe56b0186e87ee8f7c17c327c4c2ce93bdbe86496e600985d74368ee"},
- {file = "opentelemetry_instrumentation-0.59b0.tar.gz", hash = "sha256:6010f0faaacdaf7c4dff8aac84e226d23437b331dcda7e70367f6d73a7db1adc"},
+ {file = "opentelemetry_instrumentation-0.58b0-py3-none-any.whl", hash = "sha256:50f97ac03100676c9f7fc28197f8240c7290ca1baa12da8bfbb9a1de4f34cc45"},
+ {file = "opentelemetry_instrumentation-0.58b0.tar.gz", hash = "sha256:df640f3ac715a3e05af145c18f527f4422c6ab6c467e40bd24d2ad75a00cb705"},
]
[package.dependencies]
opentelemetry-api = ">=1.4,<2.0"
-opentelemetry-semantic-conventions = "0.59b0"
+opentelemetry-semantic-conventions = "0.58b0"
packaging = ">=18.0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-proto"
-version = "1.38.0"
+version = "1.37.0"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18"},
- {file = "opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468"},
+ {file = "opentelemetry_proto-1.37.0-py3-none-any.whl", hash = "sha256:8ed8c066ae8828bbf0c39229979bdf583a126981142378a9cbe9d6fd5701c6e2"},
+ {file = "opentelemetry_proto-1.37.0.tar.gz", hash = "sha256:30f5c494faf66f77faeaefa35ed4443c5edb3b0aa46dad073ed7210e1a789538"},
]
[package.dependencies]
@@ -1640,35 +1495,35 @@ protobuf = ">=5.0,<7.0"
[[package]]
name = "opentelemetry-sdk"
-version = "1.38.0"
+version = "1.37.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b"},
- {file = "opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe"},
+ {file = "opentelemetry_sdk-1.37.0-py3-none-any.whl", hash = "sha256:8f3c3c22063e52475c5dbced7209495c2c16723d016d39287dfc215d1771257c"},
+ {file = "opentelemetry_sdk-1.37.0.tar.gz", hash = "sha256:cc8e089c10953ded765b5ab5669b198bbe0af1b3f89f1007d19acd32dc46dda5"},
]
[package.dependencies]
-opentelemetry-api = "1.38.0"
-opentelemetry-semantic-conventions = "0.59b0"
+opentelemetry-api = "1.37.0"
+opentelemetry-semantic-conventions = "0.58b0"
typing-extensions = ">=4.5.0"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.59b0"
+version = "0.58b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed"},
- {file = "opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0"},
+ {file = "opentelemetry_semantic_conventions-0.58b0-py3-none-any.whl", hash = "sha256:5564905ab1458b96684db1340232729fce3b5375a06e140e8904c78e4f815b28"},
+ {file = "opentelemetry_semantic_conventions-0.58b0.tar.gz", hash = "sha256:6bd46f51264279c433755767bb44ad00f1c9e2367e1b42af563372c5a6fa0c25"},
]
[package.dependencies]
-opentelemetry-api = "1.38.0"
+opentelemetry-api = "1.37.0"
typing-extensions = ">=4.5.0"
[[package]]
@@ -1769,154 +1624,153 @@ wcwidth = "*"
[[package]]
name = "propcache"
-version = "0.4.1"
+version = "0.4.0"
description = "Accelerated property cache"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "propcache-0.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c2d1fa3201efaf55d730400d945b5b3ab6e672e100ba0f9a409d950ab25d7db"},
- {file = "propcache-0.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1eb2994229cc8ce7fe9b3db88f5465f5fd8651672840b2e426b88cdb1a30aac8"},
- {file = "propcache-0.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:66c1f011f45a3b33d7bcb22daed4b29c0c9e2224758b6be00686731e1b46f925"},
- {file = "propcache-0.4.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9a52009f2adffe195d0b605c25ec929d26b36ef986ba85244891dee3b294df21"},
- {file = "propcache-0.4.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5d4e2366a9c7b837555cf02fb9be2e3167d333aff716332ef1b7c3a142ec40c5"},
- {file = "propcache-0.4.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:9d2b6caef873b4f09e26ea7e33d65f42b944837563a47a94719cc3544319a0db"},
- {file = "propcache-0.4.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b16ec437a8c8a965ecf95739448dd938b5c7f56e67ea009f4300d8df05f32b7"},
- {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:296f4c8ed03ca7476813fe666c9ea97869a8d7aec972618671b33a38a5182ef4"},
- {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:1f0978529a418ebd1f49dad413a2b68af33f85d5c5ca5c6ca2a3bed375a7ac60"},
- {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fd138803047fb4c062b1c1dd95462f5209456bfab55c734458f15d11da288f8f"},
- {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8c9b3cbe4584636d72ff556d9036e0c9317fa27b3ac1f0f558e7e84d1c9c5900"},
- {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f93243fdc5657247533273ac4f86ae106cc6445a0efacb9a1bfe982fcfefd90c"},
- {file = "propcache-0.4.1-cp310-cp310-win32.whl", hash = "sha256:a0ee98db9c5f80785b266eb805016e36058ac72c51a064040f2bc43b61101cdb"},
- {file = "propcache-0.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:1cdb7988c4e5ac7f6d175a28a9aa0c94cb6f2ebe52756a3c0cda98d2809a9e37"},
- {file = "propcache-0.4.1-cp310-cp310-win_arm64.whl", hash = "sha256:d82ad62b19645419fe79dd63b3f9253e15b30e955c0170e5cebc350c1844e581"},
- {file = "propcache-0.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:60a8fda9644b7dfd5dece8c61d8a85e271cb958075bfc4e01083c148b61a7caf"},
- {file = "propcache-0.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c30b53e7e6bda1d547cabb47c825f3843a0a1a42b0496087bb58d8fedf9f41b5"},
- {file = "propcache-0.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6918ecbd897443087a3b7cd978d56546a812517dcaaca51b49526720571fa93e"},
- {file = "propcache-0.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d902a36df4e5989763425a8ab9e98cd8ad5c52c823b34ee7ef307fd50582566"},
- {file = "propcache-0.4.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a9695397f85973bb40427dedddf70d8dc4a44b22f1650dd4af9eedf443d45165"},
- {file = "propcache-0.4.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2bb07ffd7eaad486576430c89f9b215f9e4be68c4866a96e97db9e97fead85dc"},
- {file = "propcache-0.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd6f30fdcf9ae2a70abd34da54f18da086160e4d7d9251f81f3da0ff84fc5a48"},
- {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fc38cba02d1acba4e2869eef1a57a43dfbd3d49a59bf90dda7444ec2be6a5570"},
- {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:67fad6162281e80e882fb3ec355398cf72864a54069d060321f6cd0ade95fe85"},
- {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f10207adf04d08bec185bae14d9606a1444715bc99180f9331c9c02093e1959e"},
- {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e9b0d8d0845bbc4cfcdcbcdbf5086886bc8157aa963c31c777ceff7846c77757"},
- {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:981333cb2f4c1896a12f4ab92a9cc8f09ea664e9b7dbdc4eff74627af3a11c0f"},
- {file = "propcache-0.4.1-cp311-cp311-win32.whl", hash = "sha256:f1d2f90aeec838a52f1c1a32fe9a619fefd5e411721a9117fbf82aea638fe8a1"},
- {file = "propcache-0.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:364426a62660f3f699949ac8c621aad6977be7126c5807ce48c0aeb8e7333ea6"},
- {file = "propcache-0.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:e53f3a38d3510c11953f3e6a33f205c6d1b001129f972805ca9b42fc308bc239"},
- {file = "propcache-0.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e153e9cd40cc8945138822807139367f256f89c6810c2634a4f6902b52d3b4e2"},
- {file = "propcache-0.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cd547953428f7abb73c5ad82cbb32109566204260d98e41e5dfdc682eb7f8403"},
- {file = "propcache-0.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f048da1b4f243fc44f205dfd320933a951b8d89e0afd4c7cacc762a8b9165207"},
- {file = "propcache-0.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec17c65562a827bba85e3872ead335f95405ea1674860d96483a02f5c698fa72"},
- {file = "propcache-0.4.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:405aac25c6394ef275dee4c709be43745d36674b223ba4eb7144bf4d691b7367"},
- {file = "propcache-0.4.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0013cb6f8dde4b2a2f66903b8ba740bdfe378c943c4377a200551ceb27f379e4"},
- {file = "propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15932ab57837c3368b024473a525e25d316d8353016e7cc0e5ba9eb343fbb1cf"},
- {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:031dce78b9dc099f4c29785d9cf5577a3faf9ebf74ecbd3c856a7b92768c3df3"},
- {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ab08df6c9a035bee56e31af99be621526bd237bea9f32def431c656b29e41778"},
- {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4d7af63f9f93fe593afbf104c21b3b15868efb2c21d07d8732c0c4287e66b6a6"},
- {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cfc27c945f422e8b5071b6e93169679e4eb5bf73bbcbf1ba3ae3a83d2f78ebd9"},
- {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:35c3277624a080cc6ec6f847cbbbb5b49affa3598c4535a0a4682a697aaa5c75"},
- {file = "propcache-0.4.1-cp312-cp312-win32.whl", hash = "sha256:671538c2262dadb5ba6395e26c1731e1d52534bfe9ae56d0b5573ce539266aa8"},
- {file = "propcache-0.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:cb2d222e72399fcf5890d1d5cc1060857b9b236adff2792ff48ca2dfd46c81db"},
- {file = "propcache-0.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:204483131fb222bdaaeeea9f9e6c6ed0cac32731f75dfc1d4a567fc1926477c1"},
- {file = "propcache-0.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43eedf29202c08550aac1d14e0ee619b0430aaef78f85864c1a892294fbc28cf"},
- {file = "propcache-0.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d62cdfcfd89ccb8de04e0eda998535c406bf5e060ffd56be6c586cbcc05b3311"},
- {file = "propcache-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cae65ad55793da34db5f54e4029b89d3b9b9490d8abe1b4c7ab5d4b8ec7ebf74"},
- {file = "propcache-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:333ddb9031d2704a301ee3e506dc46b1fe5f294ec198ed6435ad5b6a085facfe"},
- {file = "propcache-0.4.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd0858c20f078a32cf55f7e81473d96dcf3b93fd2ccdb3d40fdf54b8573df3af"},
- {file = "propcache-0.4.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:678ae89ebc632c5c204c794f8dab2837c5f159aeb59e6ed0539500400577298c"},
- {file = "propcache-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d472aeb4fbf9865e0c6d622d7f4d54a4e101a89715d8904282bb5f9a2f476c3f"},
- {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4d3df5fa7e36b3225954fba85589da77a0fe6a53e3976de39caf04a0db4c36f1"},
- {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ee17f18d2498f2673e432faaa71698032b0127ebf23ae5974eeaf806c279df24"},
- {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:580e97762b950f993ae618e167e7be9256b8353c2dcd8b99ec100eb50f5286aa"},
- {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:501d20b891688eb8e7aa903021f0b72d5a55db40ffaab27edefd1027caaafa61"},
- {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a0bd56e5b100aef69bd8562b74b46254e7c8812918d3baa700c8a8009b0af66"},
- {file = "propcache-0.4.1-cp313-cp313-win32.whl", hash = "sha256:bcc9aaa5d80322bc2fb24bb7accb4a30f81e90ab8d6ba187aec0744bc302ad81"},
- {file = "propcache-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:381914df18634f5494334d201e98245c0596067504b9372d8cf93f4bb23e025e"},
- {file = "propcache-0.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:8873eb4460fd55333ea49b7d189749ecf6e55bf85080f11b1c4530ed3034cba1"},
- {file = "propcache-0.4.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:92d1935ee1f8d7442da9c0c4fa7ac20d07e94064184811b685f5c4fada64553b"},
- {file = "propcache-0.4.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:473c61b39e1460d386479b9b2f337da492042447c9b685f28be4f74d3529e566"},
- {file = "propcache-0.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c0ef0aaafc66fbd87842a3fe3902fd889825646bc21149eafe47be6072725835"},
- {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95393b4d66bfae908c3ca8d169d5f79cd65636ae15b5e7a4f6e67af675adb0e"},
- {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c07fda85708bc48578467e85099645167a955ba093be0a2dcba962195676e859"},
- {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:af223b406d6d000830c6f65f1e6431783fc3f713ba3e6cc8c024d5ee96170a4b"},
- {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a78372c932c90ee474559c5ddfffd718238e8673c340dc21fe45c5b8b54559a0"},
- {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:564d9f0d4d9509e1a870c920a89b2fec951b44bf5ba7d537a9e7c1ccec2c18af"},
- {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:17612831fda0138059cc5546f4d12a2aacfb9e47068c06af35c400ba58ba7393"},
- {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:41a89040cb10bd345b3c1a873b2bf36413d48da1def52f268a055f7398514874"},
- {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e35b88984e7fa64aacecea39236cee32dd9bd8c55f57ba8a75cf2399553f9bd7"},
- {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f8b465489f927b0df505cbe26ffbeed4d6d8a2bbc61ce90eb074ff129ef0ab1"},
- {file = "propcache-0.4.1-cp313-cp313t-win32.whl", hash = "sha256:2ad890caa1d928c7c2965b48f3a3815c853180831d0e5503d35cf00c472f4717"},
- {file = "propcache-0.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f7ee0e597f495cf415bcbd3da3caa3bd7e816b74d0d52b8145954c5e6fd3ff37"},
- {file = "propcache-0.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:929d7cbe1f01bb7baffb33dc14eb5691c95831450a26354cd210a8155170c93a"},
- {file = "propcache-0.4.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3f7124c9d820ba5548d431afb4632301acf965db49e666aa21c305cbe8c6de12"},
- {file = "propcache-0.4.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c0d4b719b7da33599dfe3b22d3db1ef789210a0597bc650b7cee9c77c2be8c5c"},
- {file = "propcache-0.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9f302f4783709a78240ebc311b793f123328716a60911d667e0c036bc5dcbded"},
- {file = "propcache-0.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c80ee5802e3fb9ea37938e7eecc307fb984837091d5fd262bb37238b1ae97641"},
- {file = "propcache-0.4.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ed5a841e8bb29a55fb8159ed526b26adc5bdd7e8bd7bf793ce647cb08656cdf4"},
- {file = "propcache-0.4.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:55c72fd6ea2da4c318e74ffdf93c4fe4e926051133657459131a95c846d16d44"},
- {file = "propcache-0.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8326e144341460402713f91df60ade3c999d601e7eb5ff8f6f7862d54de0610d"},
- {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:060b16ae65bc098da7f6d25bf359f1f31f688384858204fe5d652979e0015e5b"},
- {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:89eb3fa9524f7bec9de6e83cf3faed9d79bffa560672c118a96a171a6f55831e"},
- {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:dee69d7015dc235f526fe80a9c90d65eb0039103fe565776250881731f06349f"},
- {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5558992a00dfd54ccbc64a32726a3357ec93825a418a401f5cc67df0ac5d9e49"},
- {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c9b822a577f560fbd9554812526831712c1436d2c046cedee4c3796d3543b144"},
- {file = "propcache-0.4.1-cp314-cp314-win32.whl", hash = "sha256:ab4c29b49d560fe48b696cdcb127dd36e0bc2472548f3bf56cc5cb3da2b2984f"},
- {file = "propcache-0.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:5a103c3eb905fcea0ab98be99c3a9a5ab2de60228aa5aceedc614c0281cf6153"},
- {file = "propcache-0.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:74c1fb26515153e482e00177a1ad654721bf9207da8a494a0c05e797ad27b992"},
- {file = "propcache-0.4.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:824e908bce90fb2743bd6b59db36eb4f45cd350a39637c9f73b1c1ea66f5b75f"},
- {file = "propcache-0.4.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2b5e7db5328427c57c8e8831abda175421b709672f6cfc3d630c3b7e2146393"},
- {file = "propcache-0.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6f6ff873ed40292cd4969ef5310179afd5db59fdf055897e282485043fc80ad0"},
- {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49a2dc67c154db2c1463013594c458881a069fcf98940e61a0569016a583020a"},
- {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:005f08e6a0529984491e37d8dbc3dd86f84bd78a8ceb5fa9a021f4c48d4984be"},
- {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c3310452e0d31390da9035c348633b43d7e7feb2e37be252be6da45abd1abcc"},
- {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c3c70630930447f9ef1caac7728c8ad1c56bc5015338b20fed0d08ea2480b3a"},
- {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e57061305815dfc910a3634dcf584f08168a8836e6999983569f51a8544cd89"},
- {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:521a463429ef54143092c11a77e04056dd00636f72e8c45b70aaa3140d639726"},
- {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:120c964da3fdc75e3731aa392527136d4ad35868cc556fd09bb6d09172d9a367"},
- {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:d8f353eb14ee3441ee844ade4277d560cdd68288838673273b978e3d6d2c8f36"},
- {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ab2943be7c652f09638800905ee1bab2c544e537edb57d527997a24c13dc1455"},
- {file = "propcache-0.4.1-cp314-cp314t-win32.whl", hash = "sha256:05674a162469f31358c30bcaa8883cb7829fa3110bf9c0991fe27d7896c42d85"},
- {file = "propcache-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:990f6b3e2a27d683cb7602ed6c86f15ee6b43b1194736f9baaeb93d0016633b1"},
- {file = "propcache-0.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:ecef2343af4cc68e05131e45024ba34f6095821988a9d0a02aa7c73fcc448aa9"},
- {file = "propcache-0.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3d233076ccf9e450c8b3bc6720af226b898ef5d051a2d145f7d765e6e9f9bcff"},
- {file = "propcache-0.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:357f5bb5c377a82e105e44bd3d52ba22b616f7b9773714bff93573988ef0a5fb"},
- {file = "propcache-0.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbc3b6dfc728105b2a57c06791eb07a94229202ea75c59db644d7d496b698cac"},
- {file = "propcache-0.4.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:182b51b421f0501952d938dc0b0eb45246a5b5153c50d42b495ad5fb7517c888"},
- {file = "propcache-0.4.1-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4b536b39c5199b96fc6245eb5fb796c497381d3942f169e44e8e392b29c9ebcc"},
- {file = "propcache-0.4.1-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:db65d2af507bbfbdcedb254a11149f894169d90488dd3e7190f7cdcb2d6cd57a"},
- {file = "propcache-0.4.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd2dbc472da1f772a4dae4fa24be938a6c544671a912e30529984dd80400cd88"},
- {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:daede9cd44e0f8bdd9e6cc9a607fc81feb80fae7a5fc6cecaff0e0bb32e42d00"},
- {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:71b749281b816793678ae7f3d0d84bd36e694953822eaad408d682efc5ca18e0"},
- {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:0002004213ee1f36cfb3f9a42b5066100c44276b9b72b4e1504cddd3d692e86e"},
- {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:fe49d0a85038f36ba9e3ffafa1103e61170b28e95b16622e11be0a0ea07c6781"},
- {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:99d43339c83aaf4d32bda60928231848eee470c6bda8d02599cc4cebe872d183"},
- {file = "propcache-0.4.1-cp39-cp39-win32.whl", hash = "sha256:a129e76735bc792794d5177069691c3217898b9f5cee2b2661471e52ffe13f19"},
- {file = "propcache-0.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:948dab269721ae9a87fd16c514a0a2c2a1bdb23a9a61b969b0f9d9ee2968546f"},
- {file = "propcache-0.4.1-cp39-cp39-win_arm64.whl", hash = "sha256:5fd37c406dd6dc85aa743e214cef35dc54bbdd1419baac4f6ae5e5b1a2976938"},
- {file = "propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237"},
- {file = "propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d"},
+ {file = "propcache-0.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:779aaae64089e2f4992e993faea801925395d26bb5de4a47df7ef7f942c14f80"},
+ {file = "propcache-0.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:566552ed9b003030745e5bc7b402b83cf3cecae1bade95262d78543741786db5"},
+ {file = "propcache-0.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:944de70384c62d16d4a00c686b422aa75efbc67c4addaebefbb56475d1c16034"},
+ {file = "propcache-0.4.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e878553543ece1f8006d0ba4d096b40290580db173bfb18e16158045b9371335"},
+ {file = "propcache-0.4.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8659f995b19185179474b18de8755689e1f71e1334d05c14e1895caa4e409cf7"},
+ {file = "propcache-0.4.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7aa8cc5c94e682dce91cb4d12d7b81c01641f4ef5b3b3dc53325d43f0e3b9f2e"},
+ {file = "propcache-0.4.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da584d917a1a17f690fc726617fd2c3f3006ea959dae5bb07a5630f7b16f9f5f"},
+ {file = "propcache-0.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:892a072e5b19c3f324a4f8543c9f7e8fc2b0aa08579e46f69bdf0cfc1b440454"},
+ {file = "propcache-0.4.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c20d796210720455086ef3f85adc413d1e41d374742f9b439354f122bbc3b528"},
+ {file = "propcache-0.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:df7107a91126a495880576610ae989f19106e1900dd5218d08498391fa43b31d"},
+ {file = "propcache-0.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0b04ac2120c161416c866d0b6a4259e47e92231ff166b518cc0efb95777367c3"},
+ {file = "propcache-0.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1e7fa29c71ffa8d6a37324258737d09475f84715a6e8c350f67f0bc8e5e44993"},
+ {file = "propcache-0.4.0-cp310-cp310-win32.whl", hash = "sha256:01c0ebc172ca28e9d62876832befbf7f36080eee6ed9c9e00243de2a8089ad57"},
+ {file = "propcache-0.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:84f847e64f4d1a232e50460eebc1196642ee9b4c983612f41cd2d44fd2fe7c71"},
+ {file = "propcache-0.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:2166466a666a5bebc332cd209cad77d996fad925ca7e8a2a6310ba9e851ae641"},
+ {file = "propcache-0.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6a6a36b94c09711d6397d79006ca47901539fbc602c853d794c39abd6a326549"},
+ {file = "propcache-0.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:da47070e1340a1639aca6b1c18fe1f1f3d8d64d3a1f9ddc67b94475f44cd40f3"},
+ {file = "propcache-0.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:de536cf796abc5b58d11c0ad56580215d231d9554ea4bb6b8b1b3bed80aa3234"},
+ {file = "propcache-0.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5c82af8e329c3cdc3e717dd3c7b2ff1a218b6de611f6ce76ee34967570a9de9"},
+ {file = "propcache-0.4.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:abe04e7aa5ab2e4056fcf3255ebee2071e4a427681f76d4729519e292c46ecc1"},
+ {file = "propcache-0.4.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:075ca32384294434344760fdcb95f7833e1d7cf7c4e55f0e726358140179da35"},
+ {file = "propcache-0.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:626ec13592928b677f48ff5861040b604b635e93d8e2162fb638397ea83d07e8"},
+ {file = "propcache-0.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:02e071548b6a376e173b0102c3f55dc16e7d055b5307d487e844c320e38cacf2"},
+ {file = "propcache-0.4.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:2af6de831a26f42a3f94592964becd8d7f238551786d7525807f02e53defbd13"},
+ {file = "propcache-0.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bd6c6dba1a3b8949e08c4280071c86e38cb602f02e0ed6659234108c7a7cd710"},
+ {file = "propcache-0.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:783e91595cf9b66c2deda17f2e8748ae8591aa9f7c65dcab038872bfe83c5bb1"},
+ {file = "propcache-0.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c3f4b125285d354a627eb37f3ea7c13b8842c7c0d47783581d0df0e272dbf5f0"},
+ {file = "propcache-0.4.0-cp311-cp311-win32.whl", hash = "sha256:71c45f02ffbb8a21040ae816ceff7f6cd749ffac29fc0f9daa42dc1a9652d577"},
+ {file = "propcache-0.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:7d51f70f77950f8efafed4383865d3533eeee52d8a0dd1c35b65f24de41de4e0"},
+ {file = "propcache-0.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:858eaabd2191dd0da5272993ad08a748b5d3ae1aefabea8aee619b45c2af4a64"},
+ {file = "propcache-0.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:381c84a445efb8c9168f1393a5a7c566de22edc42bfe207a142fff919b37f5d9"},
+ {file = "propcache-0.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5a531d29d7b873b12730972237c48b1a4e5980b98cf21b3f09fa4710abd3a8c3"},
+ {file = "propcache-0.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cd6e22255ed73efeaaeb1765505a66a48a9ec9ebc919fce5ad490fe5e33b1555"},
+ {file = "propcache-0.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9a8d277dc218ddf04ec243a53ac309b1afcebe297c0526a8f82320139b56289"},
+ {file = "propcache-0.4.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:399c73201d88c856a994916200d7cba41d7687096f8eb5139eb68f02785dc3f7"},
+ {file = "propcache-0.4.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a1d5e474d43c238035b74ecf997f655afa67f979bae591ac838bb3fbe3076392"},
+ {file = "propcache-0.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22f589652ee38de96aa58dd219335604e09666092bc250c1d9c26a55bcef9932"},
+ {file = "propcache-0.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5227da556b2939da6125cda1d5eecf9e412e58bc97b41e2f192605c3ccbb7c2"},
+ {file = "propcache-0.4.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:92bc43a1ab852310721ce856f40a3a352254aa6f5e26f0fad870b31be45bba2e"},
+ {file = "propcache-0.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:83ae2f5343f6f06f4c91ae530d95f56b415f768f9c401a5ee2a10459cf74370b"},
+ {file = "propcache-0.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:077a32977399dc05299b16e793210341a0b511eb0a86d1796873e83ce47334cc"},
+ {file = "propcache-0.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:94a278c45e6463031b5a8278e40a07edf2bcc3b5379510e22b6c1a6e6498c194"},
+ {file = "propcache-0.4.0-cp312-cp312-win32.whl", hash = "sha256:4c491462e1dc80f9deb93f428aad8d83bb286de212837f58eb48e75606e7726c"},
+ {file = "propcache-0.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cdb0cecafb528ab15ed89cdfed183074d15912d046d3e304955513b50a34b907"},
+ {file = "propcache-0.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:b2f29697d1110e8cdf7a39cc630498df0082d7898b79b731c1c863f77c6e8cfc"},
+ {file = "propcache-0.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e2d01fd53e89cb3d71d20b8c225a8c70d84660f2d223afc7ed7851a4086afe6d"},
+ {file = "propcache-0.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7dfa60953169d2531dd8ae306e9c27c5d4e5efe7a2ba77049e8afdaece062937"},
+ {file = "propcache-0.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:227892597953611fce2601d49f1d1f39786a6aebc2f253c2de775407f725a3f6"},
+ {file = "propcache-0.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e0a5bc019014531308fb67d86066d235daa7551baf2e00e1ea7b00531f6ea85"},
+ {file = "propcache-0.4.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6ebc6e2e65c31356310ddb6519420eaa6bb8c30fbd809d0919129c89dcd70f4c"},
+ {file = "propcache-0.4.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1927b78dd75fc31a7fdc76cc7039e39f3170cb1d0d9a271e60f0566ecb25211a"},
+ {file = "propcache-0.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5b113feeda47f908562d9a6d0e05798ad2f83d4473c0777dafa2bc7756473218"},
+ {file = "propcache-0.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4596c12aa7e3bb2abf158ea8f79eb0fb4851606695d04ab846b2bb386f5690a1"},
+ {file = "propcache-0.4.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:6d1f67dad8cc36e8abc2207a77f3f952ac80be7404177830a7af4635a34cbc16"},
+ {file = "propcache-0.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e6229ad15366cd8b6d6b4185c55dd48debf9ca546f91416ba2e5921ad6e210a6"},
+ {file = "propcache-0.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2a4bf309d057327f1f227a22ac6baf34a66f9af75e08c613e47c4d775b06d6c7"},
+ {file = "propcache-0.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c2e274f3d1cbb2ddcc7a55ce3739af0f8510edc68a7f37981b2258fa1eedc833"},
+ {file = "propcache-0.4.0-cp313-cp313-win32.whl", hash = "sha256:f114a3e1f8034e2957d34043b7a317a8a05d97dfe8fddb36d9a2252c0117dbbc"},
+ {file = "propcache-0.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:9ba68c57cde9c667f6b65b98bc342dfa7240b1272ffb2c24b32172ee61b6d281"},
+ {file = "propcache-0.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:eb77a85253174bf73e52c968b689d64be62d71e8ac33cabef4ca77b03fb4ef92"},
+ {file = "propcache-0.4.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:c0e1c218fff95a66ad9f2f83ad41a67cf4d0a3f527efe820f57bde5fda616de4"},
+ {file = "propcache-0.4.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:5710b1c01472542bb024366803812ca13e8774d21381bcfc1f7ae738eeb38acc"},
+ {file = "propcache-0.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d7f008799682e8826ce98f25e8bc43532d2cd26c187a1462499fa8d123ae054f"},
+ {file = "propcache-0.4.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0596d2ae99d74ca436553eb9ce11fe4163dc742fcf8724ebe07d7cb0db679bb1"},
+ {file = "propcache-0.4.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ab9c1bd95ebd1689f0e24f2946c495808777e9e8df7bb3c1dfe3e9eb7f47fe0d"},
+ {file = "propcache-0.4.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a8ef2ea819549ae2e8698d2ec229ae948d7272feea1cb2878289f767b6c585a4"},
+ {file = "propcache-0.4.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:71a400b2f0b079438cc24f9a27f02eff24d8ef78f2943f949abc518b844ade3d"},
+ {file = "propcache-0.4.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4c2735d3305e6cecab6e53546909edf407ad3da5b9eeaf483f4cf80142bb21be"},
+ {file = "propcache-0.4.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:72b51340047ac43b3cf388eebd362d052632260c9f73a50882edbb66e589fd44"},
+ {file = "propcache-0.4.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:184c779363740d6664982ad05699f378f7694220e2041996f12b7c2a4acdcad0"},
+ {file = "propcache-0.4.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:a60634a9de41f363923c6adfb83105d39e49f7a3058511563ed3de6748661af6"},
+ {file = "propcache-0.4.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c9b8119244d122241a9c4566bce49bb20408a6827044155856735cf14189a7da"},
+ {file = "propcache-0.4.0-cp313-cp313t-win32.whl", hash = "sha256:515b610a364c8cdd2b72c734cc97dece85c416892ea8d5c305624ac8734e81db"},
+ {file = "propcache-0.4.0-cp313-cp313t-win_amd64.whl", hash = "sha256:7ea86eb32e74f9902df57e8608e8ac66f1e1e1d24d1ed2ddeb849888413b924d"},
+ {file = "propcache-0.4.0-cp313-cp313t-win_arm64.whl", hash = "sha256:c1443fa4bb306461a3a8a52b7de0932a2515b100ecb0ebc630cc3f87d451e0a9"},
+ {file = "propcache-0.4.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:de8e310d24b5a61de08812dd70d5234da1458d41b059038ee7895a9e4c8cae79"},
+ {file = "propcache-0.4.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:55a54de5266bc44aa274915cdf388584fa052db8748a869e5500ab5993bac3f4"},
+ {file = "propcache-0.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:88d50d662c917ec2c9d3858920aa7b9d5bfb74ab9c51424b775ccbe683cb1b4e"},
+ {file = "propcache-0.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae3adf88a66f5863cf79394bc359da523bb27a2ed6ba9898525a6a02b723bfc5"},
+ {file = "propcache-0.4.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7f088e21d15b3abdb9047e4b7b7a0acd79bf166893ac2b34a72ab1062feb219e"},
+ {file = "propcache-0.4.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a4efbaf10793fd574c76a5732c75452f19d93df6e0f758c67dd60552ebd8614b"},
+ {file = "propcache-0.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:681a168d06284602d56e97f09978057aa88bcc4177352b875b3d781df4efd4cb"},
+ {file = "propcache-0.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a7f06f077fc4ef37e8a37ca6bbb491b29e29db9fb28e29cf3896aad10dbd4137"},
+ {file = "propcache-0.4.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:082a643479f49a6778dcd68a80262fc324b14fd8e9b1a5380331fe41adde1738"},
+ {file = "propcache-0.4.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:26692850120241a99bb4a4eec675cd7b4fdc431144f0d15ef69f7f8599f6165f"},
+ {file = "propcache-0.4.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:33ad7d37b9a386f97582f5d042cc7b8d4b3591bb384cf50866b749a17e4dba90"},
+ {file = "propcache-0.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1e7fd82d4a5b7583588f103b0771e43948532f1292105f13ee6f3b300933c4ca"},
+ {file = "propcache-0.4.0-cp314-cp314-win32.whl", hash = "sha256:213eb0d3bc695a70cffffe11a1c2e1c2698d89ffd8dba35a49bc44a035d45c93"},
+ {file = "propcache-0.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:087e2d3d7613e1b59b2ffca0daabd500c1a032d189c65625ee05ea114afcad0b"},
+ {file = "propcache-0.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:94b0f7407d18001dbdcbb239512e753b1b36725a6e08a4983be1c948f5435f79"},
+ {file = "propcache-0.4.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:b730048ae8b875e2c0af1a09ca31b303fc7b5ed27652beec03fa22b29545aec9"},
+ {file = "propcache-0.4.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:f495007ada16a4e16312b502636fafff42a9003adf1d4fb7541e0a0870bc056f"},
+ {file = "propcache-0.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:659a0ea6d9017558ed7af00fb4028186f64d0ba9adfc70a4d2c85fcd3d026321"},
+ {file = "propcache-0.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d74aa60b1ec076d4d5dcde27c9a535fc0ebb12613f599681c438ca3daa68acac"},
+ {file = "propcache-0.4.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:34000e31795bdcda9826e0e70e783847a42e3dcd0d6416c5d3cb717905ebaec0"},
+ {file = "propcache-0.4.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bcb5bfac5b9635e6fc520c8af6efc7a0a56f12a1fe9e9d3eb4328537e316dd6a"},
+ {file = "propcache-0.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ea11fceb31fa95b0fa2007037f19e922e2caceb7dc6c6cac4cb56e2d291f1a2"},
+ {file = "propcache-0.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:cd8684f628fe285ea5c86f88e1c30716239dc9d6ac55e7851a4b7f555b628da3"},
+ {file = "propcache-0.4.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:790286d3d542c0ef9f6d0280d1049378e5e776dcba780d169298f664c39394db"},
+ {file = "propcache-0.4.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:009093c9b5dbae114a5958e6a649f8a5d94dd6866b0f82b60395eb92c58002d4"},
+ {file = "propcache-0.4.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:728d98179e92d77096937fdfecd2c555a3d613abe56c9909165c24196a3b5012"},
+ {file = "propcache-0.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a9725d96a81e17e48a0fe82d0c3de2f5e623d7163fec70a6c7df90753edd1bec"},
+ {file = "propcache-0.4.0-cp314-cp314t-win32.whl", hash = "sha256:0964c55c95625193defeb4fd85f8f28a9a754ed012cab71127d10e3dc66b1373"},
+ {file = "propcache-0.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:24403152e41abf09488d3ae9c0c3bf7ff93e2fb12b435390718f21810353db28"},
+ {file = "propcache-0.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0363a696a9f24b37a04ed5e34c2e07ccbe92798c998d37729551120a1bb744c4"},
+ {file = "propcache-0.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0cd30341142c68377cf3c4e2d9f0581e6e528694b2d57c62c786be441053d2fc"},
+ {file = "propcache-0.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2c46d37955820dd883cf9156ceb7825b8903e910bdd869902e20a5ac4ecd2c8b"},
+ {file = "propcache-0.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0b12df77eb19266efd146627a65b8ad414f9d15672d253699a50c8205661a820"},
+ {file = "propcache-0.4.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1cdabd60e109506462e6a7b37008e57979e737dc6e7dfbe1437adcfe354d1a0a"},
+ {file = "propcache-0.4.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65ff56a31f25925ef030b494fe63289bf07ef0febe6da181b8219146c590e185"},
+ {file = "propcache-0.4.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:96153e037ae065bb71cae889f23c933190d81ae183f3696a030b47352fd8655d"},
+ {file = "propcache-0.4.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4bf95be277fbb51513895c2cecc81ab12a421cdbd8837f159828a919a0167f96"},
+ {file = "propcache-0.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8d18d796ffecdc8253742fd53a94ceee2e77ad149eb9ed5960c2856b5f692f71"},
+ {file = "propcache-0.4.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4a52c25a51d5894ba60c567b0dbcf73de2f3cd642cf5343679e07ca3a768b085"},
+ {file = "propcache-0.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:e0ce7f3d1faf7ad58652ed758cc9753049af5308b38f89948aa71793282419c5"},
+ {file = "propcache-0.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:545987971b2aded25ba4698135ea0ae128836e7deb6e18c29a581076aaef44aa"},
+ {file = "propcache-0.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7da5c4c72ae40fd3ce87213ab057db66df53e55600d0b9e72e2b7f5a470a2cc4"},
+ {file = "propcache-0.4.0-cp39-cp39-win32.whl", hash = "sha256:2015218812ee8f13bbaebc9f52b1e424cc130b68d4857bef018e65e3834e1c4d"},
+ {file = "propcache-0.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:39f0f6a3b56e82dc91d84c763b783c5c33720a33c70ee48a1c13ba800ac1fa69"},
+ {file = "propcache-0.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:236c8da353ea7c22a8e963ab78cddb1126f700ae9538e2c4c6ef471e5545494b"},
+ {file = "propcache-0.4.0-py3-none-any.whl", hash = "sha256:015b2ca2f98ea9e08ac06eecc409d5d988f78c5fd5821b2ad42bc9afcd6b1557"},
+ {file = "propcache-0.4.0.tar.gz", hash = "sha256:c1ad731253eb738f9cadd9fa1844e019576c70bca6a534252e97cf33a57da529"},
]
[[package]]
name = "protobuf"
-version = "6.33.0"
+version = "6.32.1"
description = ""
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "protobuf-6.33.0-cp310-abi3-win32.whl", hash = "sha256:d6101ded078042a8f17959eccd9236fb7a9ca20d3b0098bbcb91533a5680d035"},
- {file = "protobuf-6.33.0-cp310-abi3-win_amd64.whl", hash = "sha256:9a031d10f703f03768f2743a1c403af050b6ae1f3480e9c140f39c45f81b13ee"},
- {file = "protobuf-6.33.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:905b07a65f1a4b72412314082c7dbfae91a9e8b68a0cc1577515f8df58ecf455"},
- {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:e0697ece353e6239b90ee43a9231318302ad8353c70e6e45499fa52396debf90"},
- {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:e0a1715e4f27355afd9570f3ea369735afc853a6c3951a6afe1f80d8569ad298"},
- {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:35be49fd3f4fefa4e6e2aacc35e8b837d6703c37a2168a55ac21e9b1bc7559ef"},
- {file = "protobuf-6.33.0-cp39-cp39-win32.whl", hash = "sha256:cd33a8e38ea3e39df66e1bbc462b076d6e5ba3a4ebbde58219d777223a7873d3"},
- {file = "protobuf-6.33.0-cp39-cp39-win_amd64.whl", hash = "sha256:c963e86c3655af3a917962c9619e1a6b9670540351d7af9439d06064e3317cc9"},
- {file = "protobuf-6.33.0-py3-none-any.whl", hash = "sha256:25c9e1963c6734448ea2d308cfa610e692b801304ba0908d7bfa564ac5132995"},
- {file = "protobuf-6.33.0.tar.gz", hash = "sha256:140303d5c8d2037730c548f8c7b93b20bb1dc301be280c378b82b8894589c954"},
+ {file = "protobuf-6.32.1-cp310-abi3-win32.whl", hash = "sha256:a8a32a84bc9f2aad712041b8b366190f71dde248926da517bde9e832e4412085"},
+ {file = "protobuf-6.32.1-cp310-abi3-win_amd64.whl", hash = "sha256:b00a7d8c25fa471f16bc8153d0e53d6c9e827f0953f3c09aaa4331c718cae5e1"},
+ {file = "protobuf-6.32.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d8c7e6eb619ffdf105ee4ab76af5a68b60a9d0f66da3ea12d1640e6d8dab7281"},
+ {file = "protobuf-6.32.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:2f5b80a49e1eb7b86d85fcd23fe92df154b9730a725c3b38c4e43b9d77018bf4"},
+ {file = "protobuf-6.32.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:b1864818300c297265c83a4982fd3169f97122c299f56a56e2445c3698d34710"},
+ {file = "protobuf-6.32.1-cp39-cp39-win32.whl", hash = "sha256:68ff170bac18c8178f130d1ccb94700cf72852298e016a2443bdb9502279e5f1"},
+ {file = "protobuf-6.32.1-cp39-cp39-win_amd64.whl", hash = "sha256:d0975d0b2f3e6957111aa3935d08a0eb7e006b1505d825f862a1fffc8348e122"},
+ {file = "protobuf-6.32.1-py3-none-any.whl", hash = "sha256:2601b779fc7d32a866c6b4404f9d42a3f67c5b9f3f15b4db3cccabe06b95c346"},
+ {file = "protobuf-6.32.1.tar.gz", hash = "sha256:ee2469e4a021474ab9baafea6cd070e5bf27c7d29433504ddea1a4ee5850f68d"},
]
[[package]]
@@ -1931,50 +1785,23 @@ files = [
{file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
]
-[[package]]
-name = "pyasn1"
-version = "0.6.1"
-description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
- {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"},
- {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
-]
-
-[[package]]
-name = "pyasn1-modules"
-version = "0.4.2"
-description = "A collection of ASN.1-based protocols modules"
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
- {file = "pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a"},
- {file = "pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6"},
-]
-
-[package.dependencies]
-pyasn1 = ">=0.6.1,<0.7.0"
-
[[package]]
name = "pydantic"
-version = "2.12.4"
+version = "2.11.10"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"},
- {file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"},
+ {file = "pydantic-2.11.10-py3-none-any.whl", hash = "sha256:802a655709d49bd004c31e865ef37da30b540786a46bfce02333e0e24b5fe29a"},
+ {file = "pydantic-2.11.10.tar.gz", hash = "sha256:dc280f0982fbda6c38fada4e476dc0a4f3aeaf9c6ad4c28df68a666ec3c61423"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
-pydantic-core = "2.41.5"
-typing-extensions = ">=4.14.1"
-typing-inspection = ">=0.4.2"
+pydantic-core = "2.33.2"
+typing-extensions = ">=4.12.2"
+typing-inspection = ">=0.4.0"
[package.extras]
email = ["email-validator (>=2.0.0)"]
@@ -1982,137 +1809,115 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.41.5"
+version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"},
- {file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"},
- {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"},
- {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"},
- {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"},
- {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"},
- {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"},
- {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"},
- {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"},
- {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"},
- {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"},
- {file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"},
- {file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"},
- {file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"},
- {file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"},
- {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"},
- {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"},
- {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"},
- {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"},
- {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"},
- {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"},
- {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"},
- {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"},
- {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"},
- {file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"},
- {file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"},
- {file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"},
- {file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"},
- {file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"},
- {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"},
- {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"},
- {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"},
- {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"},
- {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"},
- {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"},
- {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"},
- {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"},
- {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"},
- {file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"},
- {file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"},
- {file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"},
- {file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"},
- {file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"},
- {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"},
- {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"},
- {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"},
- {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"},
- {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"},
- {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"},
- {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"},
- {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"},
- {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"},
- {file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"},
- {file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"},
- {file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"},
- {file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"},
- {file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"},
- {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"},
- {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"},
- {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"},
- {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"},
- {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"},
- {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"},
- {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"},
- {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"},
- {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"},
- {file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"},
- {file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"},
- {file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"},
- {file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"},
- {file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"},
- {file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"},
- {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"},
- {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"},
- {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"},
- {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"},
- {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"},
- {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"},
- {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"},
- {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"},
- {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"},
- {file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"},
- {file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"},
- {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"},
- {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"},
- {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"},
- {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"},
- {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"},
- {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"},
- {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"},
- {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"},
- {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"},
- {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"},
- {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"},
- {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"},
- {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"},
- {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"},
- {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"},
- {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"},
- {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"},
- {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"},
- {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"},
- {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"},
- {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"},
- {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"},
- {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"},
- {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"},
- {file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"},
+ {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"},
]
[package.dependencies]
-typing-extensions = ">=4.14.1"
+typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pygments"
@@ -2143,10 +1948,12 @@ files = [
[package.dependencies]
colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""}
iniconfig = ">=1"
packaging = ">=20"
pluggy = ">=1.5,<2"
pygments = ">=2.7.2"
+tomli = {version = ">=1", markers = "python_version < \"3.11\""}
[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"]
@@ -2208,34 +2015,19 @@ six = ">=1.5"
[[package]]
name = "python-dotenv"
-version = "1.2.1"
+version = "1.1.1"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61"},
- {file = "python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6"},
+ {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"},
+ {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"},
]
[package.extras]
cli = ["click (>=5.0)"]
-[[package]]
-name = "python-jsonpath"
-version = "2.0.1"
-description = "JSONPath, JSON Pointer and JSON Patch for Python."
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
- {file = "python_jsonpath-2.0.1-py3-none-any.whl", hash = "sha256:ebd518b7c883acc5b976518d76b6c96288405edec7d9ef838641869c1e1a5eb7"},
- {file = "python_jsonpath-2.0.1.tar.gz", hash = "sha256:32a84ebb2dc0ec1b42a6e165b0f9174aef8310bad29154ad9aee31ac37cca18f"},
-]
-
-[package.extras]
-strict = ["iregexp-check (>=0.1.4)", "regex"]
-
[[package]]
name = "pyyaml"
version = "6.0.3"
@@ -2329,14 +2121,14 @@ prompt_toolkit = ">=2.0,<4.0"
[[package]]
name = "referencing"
-version = "0.37.0"
+version = "0.36.2"
description = "JSON Referencing + Python"
optional = false
-python-versions = ">=3.10"
+python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231"},
- {file = "referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8"},
+ {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"},
+ {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"},
]
[package.dependencies]
@@ -2346,127 +2138,127 @@ typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""}
[[package]]
name = "regex"
-version = "2025.11.3"
+version = "2025.9.18"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2b441a4ae2c8049106e8b39973bfbddfb25a179dda2bdb99b0eeb60c40a6a3af"},
- {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2fa2eed3f76677777345d2f81ee89f5de2f5745910e805f7af7386a920fa7313"},
- {file = "regex-2025.11.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d8b4a27eebd684319bdf473d39f1d79eed36bf2cd34bd4465cdb4618d82b3d56"},
- {file = "regex-2025.11.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cf77eac15bd264986c4a2c63353212c095b40f3affb2bc6b4ef80c4776c1a28"},
- {file = "regex-2025.11.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b7f9ee819f94c6abfa56ec7b1dbab586f41ebbdc0a57e6524bd5e7f487a878c7"},
- {file = "regex-2025.11.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:838441333bc90b829406d4a03cb4b8bf7656231b84358628b0406d803931ef32"},
- {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cfe6d3f0c9e3b7e8c0c694b24d25e677776f5ca26dce46fd6b0489f9c8339391"},
- {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2ab815eb8a96379a27c3b6157fcb127c8f59c36f043c1678110cea492868f1d5"},
- {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:728a9d2d173a65b62bdc380b7932dd8e74ed4295279a8fe1021204ce210803e7"},
- {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:509dc827f89c15c66a0c216331260d777dd6c81e9a4e4f830e662b0bb296c313"},
- {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:849202cd789e5f3cf5dcc7822c34b502181b4824a65ff20ce82da5524e45e8e9"},
- {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b6f78f98741dcc89607c16b1e9426ee46ce4bf31ac5e6b0d40e81c89f3481ea5"},
- {file = "regex-2025.11.3-cp310-cp310-win32.whl", hash = "sha256:149eb0bba95231fb4f6d37c8f760ec9fa6fabf65bab555e128dde5f2475193ec"},
- {file = "regex-2025.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:ee3a83ce492074c35a74cc76cf8235d49e77b757193a5365ff86e3f2f93db9fd"},
- {file = "regex-2025.11.3-cp310-cp310-win_arm64.whl", hash = "sha256:38af559ad934a7b35147716655d4a2f79fcef2d695ddfe06a06ba40ae631fa7e"},
- {file = "regex-2025.11.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eadade04221641516fa25139273505a1c19f9bf97589a05bc4cfcd8b4a618031"},
- {file = "regex-2025.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feff9e54ec0dd3833d659257f5c3f5322a12eee58ffa360984b716f8b92983f4"},
- {file = "regex-2025.11.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3b30bc921d50365775c09a7ed446359e5c0179e9e2512beec4a60cbcef6ddd50"},
- {file = "regex-2025.11.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f99be08cfead2020c7ca6e396c13543baea32343b7a9a5780c462e323bd8872f"},
- {file = "regex-2025.11.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6dd329a1b61c0ee95ba95385fb0c07ea0d3fe1a21e1349fa2bec272636217118"},
- {file = "regex-2025.11.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4c5238d32f3c5269d9e87be0cf096437b7622b6920f5eac4fd202468aaeb34d2"},
- {file = "regex-2025.11.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10483eefbfb0adb18ee9474498c9a32fcf4e594fbca0543bb94c48bac6183e2e"},
- {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:78c2d02bb6e1da0720eedc0bad578049cad3f71050ef8cd065ecc87691bed2b0"},
- {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b49cd2aad93a1790ce9cffb18964f6d3a4b0b3dbdbd5de094b65296fce6e58"},
- {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:885b26aa3ee56433b630502dc3d36ba78d186a00cc535d3806e6bfd9ed3c70ab"},
- {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd76a9f58e6a00f8772e72cff8ebcff78e022be95edf018766707c730593e1e"},
- {file = "regex-2025.11.3-cp311-cp311-win32.whl", hash = "sha256:3e816cc9aac1cd3cc9a4ec4d860f06d40f994b5c7b4d03b93345f44e08cc68bf"},
- {file = "regex-2025.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:087511f5c8b7dfbe3a03f5d5ad0c2a33861b1fc387f21f6f60825a44865a385a"},
- {file = "regex-2025.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:1ff0d190c7f68ae7769cd0313fe45820ba07ffebfddfaa89cc1eb70827ba0ddc"},
- {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bc8ab71e2e31b16e40868a40a69007bc305e1109bd4658eb6cad007e0bf67c41"},
- {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22b29dda7e1f7062a52359fca6e58e548e28c6686f205e780b02ad8ef710de36"},
- {file = "regex-2025.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3a91e4a29938bc1a082cc28fdea44be420bf2bebe2665343029723892eb073e1"},
- {file = "regex-2025.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08b884f4226602ad40c5d55f52bf91a9df30f513864e0054bad40c0e9cf1afb7"},
- {file = "regex-2025.11.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e0b11b2b2433d1c39c7c7a30e3f3d0aeeea44c2a8d0bae28f6b95f639927a69"},
- {file = "regex-2025.11.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:87eb52a81ef58c7ba4d45c3ca74e12aa4b4e77816f72ca25258a85b3ea96cb48"},
- {file = "regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c"},
- {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7521684c8c7c4f6e88e35ec89680ee1aa8358d3f09d27dfbdf62c446f5d4c695"},
- {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7fe6e5440584e94cc4b3f5f4d98a25e29ca12dccf8873679a635638349831b98"},
- {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8e026094aa12b43f4fd74576714e987803a315c76edb6b098b9809db5de58f74"},
- {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:435bbad13e57eb5606a68443af62bed3556de2f46deb9f7d4237bc2f1c9fb3a0"},
- {file = "regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204"},
- {file = "regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9"},
- {file = "regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26"},
- {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c1e448051717a334891f2b9a620fe36776ebf3dd8ec46a0b877c8ae69575feb4"},
- {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b5aca4d5dfd7fbfbfbdaf44850fcc7709a01146a797536a8f84952e940cca76"},
- {file = "regex-2025.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:04d2765516395cf7dda331a244a3282c0f5ae96075f728629287dfa6f76ba70a"},
- {file = "regex-2025.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d9903ca42bfeec4cebedba8022a7c97ad2aab22e09573ce9976ba01b65e4361"},
- {file = "regex-2025.11.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:639431bdc89d6429f6721625e8129413980ccd62e9d3f496be618a41d205f160"},
- {file = "regex-2025.11.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f117efad42068f9715677c8523ed2be1518116d1c49b1dd17987716695181efe"},
- {file = "regex-2025.11.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4aecb6f461316adf9f1f0f6a4a1a3d79e045f9b71ec76055a791affa3b285850"},
- {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3b3a5f320136873cc5561098dfab677eea139521cb9a9e8db98b7e64aef44cbc"},
- {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75fa6f0056e7efb1f42a1c34e58be24072cb9e61a601340cc1196ae92326a4f9"},
- {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:dbe6095001465294f13f1adcd3311e50dd84e5a71525f20a10bd16689c61ce0b"},
- {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:454d9b4ae7881afbc25015b8627c16d88a597479b9dea82b8c6e7e2e07240dc7"},
- {file = "regex-2025.11.3-cp313-cp313-win32.whl", hash = "sha256:28ba4d69171fc6e9896337d4fc63a43660002b7da53fc15ac992abcf3410917c"},
- {file = "regex-2025.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:bac4200befe50c670c405dc33af26dad5a3b6b255dd6c000d92fe4629f9ed6a5"},
- {file = "regex-2025.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:2292cd5a90dab247f9abe892ac584cb24f0f54680c73fcb4a7493c66c2bf2467"},
- {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1eb1ebf6822b756c723e09f5186473d93236c06c579d2cc0671a722d2ab14281"},
- {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1e00ec2970aab10dc5db34af535f21fcf32b4a31d99e34963419636e2f85ae39"},
- {file = "regex-2025.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a4cb042b615245d5ff9b3794f56be4138b5adc35a4166014d31d1814744148c7"},
- {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44f264d4bf02f3176467d90b294d59bf1db9fe53c141ff772f27a8b456b2a9ed"},
- {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7be0277469bf3bd7a34a9c57c1b6a724532a0d235cd0dc4e7f4316f982c28b19"},
- {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0d31e08426ff4b5b650f68839f5af51a92a5b51abd8554a60c2fbc7c71f25d0b"},
- {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e43586ce5bd28f9f285a6e729466841368c4a0353f6fd08d4ce4630843d3648a"},
- {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0f9397d561a4c16829d4e6ff75202c1c08b68a3bdbfe29dbfcdb31c9830907c6"},
- {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:dd16e78eb18ffdb25ee33a0682d17912e8cc8a770e885aeee95020046128f1ce"},
- {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:ffcca5b9efe948ba0661e9df0fa50d2bc4b097c70b9810212d6b62f05d83b2dd"},
- {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c56b4d162ca2b43318ac671c65bd4d563e841a694ac70e1a976ac38fcf4ca1d2"},
- {file = "regex-2025.11.3-cp313-cp313t-win32.whl", hash = "sha256:9ddc42e68114e161e51e272f667d640f97e84a2b9ef14b7477c53aac20c2d59a"},
- {file = "regex-2025.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7a7c7fdf755032ffdd72c77e3d8096bdcb0eb92e89e17571a196f03d88b11b3c"},
- {file = "regex-2025.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:df9eb838c44f570283712e7cff14c16329a9f0fb19ca492d21d4b7528ee6821e"},
- {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9697a52e57576c83139d7c6f213d64485d3df5bf84807c35fa409e6c970801c6"},
- {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e18bc3f73bd41243c9b38a6d9f2366cd0e0137a9aebe2d8ff76c5b67d4c0a3f4"},
- {file = "regex-2025.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:61a08bcb0ec14ff4e0ed2044aad948d0659604f824cbd50b55e30b0ec6f09c73"},
- {file = "regex-2025.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9c30003b9347c24bcc210958c5d167b9e4f9be786cb380a7d32f14f9b84674f"},
- {file = "regex-2025.11.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4e1e592789704459900728d88d41a46fe3969b82ab62945560a31732ffc19a6d"},
- {file = "regex-2025.11.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6538241f45eb5a25aa575dbba1069ad786f68a4f2773a29a2bd3dd1f9de787be"},
- {file = "regex-2025.11.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce22519c989bb72a7e6b36a199384c53db7722fe669ba891da75907fe3587db"},
- {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:66d559b21d3640203ab9075797a55165d79017520685fb407b9234d72ab63c62"},
- {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:669dcfb2e38f9e8c69507bace46f4889e3abbfd9b0c29719202883c0a603598f"},
- {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:32f74f35ff0f25a5021373ac61442edcb150731fbaa28286bbc8bb1582c89d02"},
- {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e6c7a21dffba883234baefe91bc3388e629779582038f75d2a5be918e250f0ed"},
- {file = "regex-2025.11.3-cp314-cp314-win32.whl", hash = "sha256:795ea137b1d809eb6836b43748b12634291c0ed55ad50a7d72d21edf1cd565c4"},
- {file = "regex-2025.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f95fbaa0ee1610ec0fc6b26668e9917a582ba80c52cc6d9ada15e30aa9ab9ad"},
- {file = "regex-2025.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:dfec44d532be4c07088c3de2876130ff0fbeeacaa89a137decbbb5f665855a0f"},
- {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ba0d8a5d7f04f73ee7d01d974d47c5834f8a1b0224390e4fe7c12a3a92a78ecc"},
- {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:442d86cf1cfe4faabf97db7d901ef58347efd004934da045c745e7b5bd57ac49"},
- {file = "regex-2025.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fd0a5e563c756de210bb964789b5abe4f114dacae9104a47e1a649b910361536"},
- {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf3490bcbb985a1ae97b2ce9ad1c0f06a852d5b19dde9b07bdf25bf224248c95"},
- {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3809988f0a8b8c9dcc0f92478d6501fac7200b9ec56aecf0ec21f4a2ec4b6009"},
- {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f4ff94e58e84aedb9c9fce66d4ef9f27a190285b451420f297c9a09f2b9abee9"},
- {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7eb542fd347ce61e1321b0a6b945d5701528dca0cd9759c2e3bb8bd57e47964d"},
- {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2d5919075a1f2e413c00b056ea0c2f065b3f5fe83c3d07d325ab92dce51d6"},
- {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3f8bf11a4827cc7ce5a53d4ef6cddd5ad25595d3c1435ef08f76825851343154"},
- {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:22c12d837298651e5550ac1d964e4ff57c3f56965fc1812c90c9fb2028eaf267"},
- {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:62ba394a3dda9ad41c7c780f60f6e4a70988741415ae96f6d1bf6c239cf01379"},
- {file = "regex-2025.11.3-cp314-cp314t-win32.whl", hash = "sha256:4bf146dca15cdd53224a1bf46d628bd7590e4a07fbb69e720d561aea43a32b38"},
- {file = "regex-2025.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:adad1a1bcf1c9e76346e091d22d23ac54ef28e1365117d99521631078dfec9de"},
- {file = "regex-2025.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:c54f768482cef41e219720013cd05933b6f971d9562544d691c68699bf2b6801"},
- {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:81519e25707fc076978c6143b81ea3dc853f176895af05bf7ec51effe818aeec"},
- {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3bf28b1873a8af8bbb58c26cc56ea6e534d80053b41fb511a35795b6de507e6a"},
- {file = "regex-2025.11.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:856a25c73b697f2ce2a24e7968285579e62577a048526161a2c0f53090bea9f9"},
- {file = "regex-2025.11.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a3d571bd95fade53c86c0517f859477ff3a93c3fde10c9e669086f038e0f207"},
- {file = "regex-2025.11.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:732aea6de26051af97b94bc98ed86448821f839d058e5d259c72bf6d73ad0fc0"},
- {file = "regex-2025.11.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:51c1c1847128238f54930edb8805b660305dca164645a9fd29243f5610beea34"},
- {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22dd622a402aad4558277305350699b2be14bc59f64d64ae1d928ce7d072dced"},
- {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f3b5a391c7597ffa96b41bd5cbd2ed0305f515fcbb367dfa72735679d5502364"},
- {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cc4076a5b4f36d849fd709284b4a3b112326652f3b0466f04002a6c15a0c96c1"},
- {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a295ca2bba5c1c885826ce3125fa0b9f702a1be547d821c01d65f199e10c01e2"},
- {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b4774ff32f18e0504bfc4e59a3e71e18d83bc1e171a3c8ed75013958a03b2f14"},
- {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e7d1cdfa88ef33a2ae6aa0d707f9255eb286ffbd90045f1088246833223aee"},
- {file = "regex-2025.11.3-cp39-cp39-win32.whl", hash = "sha256:74d04244852ff73b32eeede4f76f51c5bcf44bc3c207bc3e6cf1c5c45b890708"},
- {file = "regex-2025.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:7a50cd39f73faa34ec18d6720ee25ef10c4c1839514186fcda658a06c06057a2"},
- {file = "regex-2025.11.3-cp39-cp39-win_arm64.whl", hash = "sha256:43b4fb020e779ca81c1b5255015fe2b82816c76ec982354534ad9ec09ad7c9e3"},
- {file = "regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01"},
+ {file = "regex-2025.9.18-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:12296202480c201c98a84aecc4d210592b2f55e200a1d193235c4db92b9f6788"},
+ {file = "regex-2025.9.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:220381f1464a581f2ea988f2220cf2a67927adcef107d47d6897ba5a2f6d51a4"},
+ {file = "regex-2025.9.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:87f681bfca84ebd265278b5daa1dcb57f4db315da3b5d044add7c30c10442e61"},
+ {file = "regex-2025.9.18-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34d674cbba70c9398074c8a1fcc1a79739d65d1105de2a3c695e2b05ea728251"},
+ {file = "regex-2025.9.18-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:385c9b769655cb65ea40b6eea6ff763cbb6d69b3ffef0b0db8208e1833d4e746"},
+ {file = "regex-2025.9.18-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8900b3208e022570ae34328712bef6696de0804c122933414014bae791437ab2"},
+ {file = "regex-2025.9.18-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c204e93bf32cd7a77151d44b05eb36f469d0898e3fba141c026a26b79d9914a0"},
+ {file = "regex-2025.9.18-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3acc471d1dd7e5ff82e6cacb3b286750decd949ecd4ae258696d04f019817ef8"},
+ {file = "regex-2025.9.18-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6479d5555122433728760e5f29edb4c2b79655a8deb681a141beb5c8a025baea"},
+ {file = "regex-2025.9.18-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:431bd2a8726b000eb6f12429c9b438a24062a535d06783a93d2bcbad3698f8a8"},
+ {file = "regex-2025.9.18-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0cc3521060162d02bd36927e20690129200e5ac9d2c6d32b70368870b122db25"},
+ {file = "regex-2025.9.18-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a021217b01be2d51632ce056d7a837d3fa37c543ede36e39d14063176a26ae29"},
+ {file = "regex-2025.9.18-cp310-cp310-win32.whl", hash = "sha256:4a12a06c268a629cb67cc1d009b7bb0be43e289d00d5111f86a2efd3b1949444"},
+ {file = "regex-2025.9.18-cp310-cp310-win_amd64.whl", hash = "sha256:47acd811589301298c49db2c56bde4f9308d6396da92daf99cba781fa74aa450"},
+ {file = "regex-2025.9.18-cp310-cp310-win_arm64.whl", hash = "sha256:16bd2944e77522275e5ee36f867e19995bcaa533dcb516753a26726ac7285442"},
+ {file = "regex-2025.9.18-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:51076980cd08cd13c88eb7365427ae27f0d94e7cebe9ceb2bb9ffdae8fc4d82a"},
+ {file = "regex-2025.9.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:828446870bd7dee4e0cbeed767f07961aa07f0ea3129f38b3ccecebc9742e0b8"},
+ {file = "regex-2025.9.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c28821d5637866479ec4cc23b8c990f5bc6dd24e5e4384ba4a11d38a526e1414"},
+ {file = "regex-2025.9.18-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:726177ade8e481db669e76bf99de0b278783be8acd11cef71165327abd1f170a"},
+ {file = "regex-2025.9.18-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f5cca697da89b9f8ea44115ce3130f6c54c22f541943ac8e9900461edc2b8bd4"},
+ {file = "regex-2025.9.18-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dfbde38f38004703c35666a1e1c088b778e35d55348da2b7b278914491698d6a"},
+ {file = "regex-2025.9.18-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f2f422214a03fab16bfa495cfec72bee4aaa5731843b771860a471282f1bf74f"},
+ {file = "regex-2025.9.18-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a295916890f4df0902e4286bc7223ee7f9e925daa6dcdec4192364255b70561a"},
+ {file = "regex-2025.9.18-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:5db95ff632dbabc8c38c4e82bf545ab78d902e81160e6e455598014f0abe66b9"},
+ {file = "regex-2025.9.18-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fb967eb441b0f15ae610b7069bdb760b929f267efbf522e814bbbfffdf125ce2"},
+ {file = "regex-2025.9.18-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f04d2f20da4053d96c08f7fde6e1419b7ec9dbcee89c96e3d731fca77f411b95"},
+ {file = "regex-2025.9.18-cp311-cp311-win32.whl", hash = "sha256:895197241fccf18c0cea7550c80e75f185b8bd55b6924fcae269a1a92c614a07"},
+ {file = "regex-2025.9.18-cp311-cp311-win_amd64.whl", hash = "sha256:7e2b414deae99166e22c005e154a5513ac31493db178d8aec92b3269c9cce8c9"},
+ {file = "regex-2025.9.18-cp311-cp311-win_arm64.whl", hash = "sha256:fb137ec7c5c54f34a25ff9b31f6b7b0c2757be80176435bf367111e3f71d72df"},
+ {file = "regex-2025.9.18-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:436e1b31d7efd4dcd52091d076482031c611dde58bf9c46ca6d0a26e33053a7e"},
+ {file = "regex-2025.9.18-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c190af81e5576b9c5fdc708f781a52ff20f8b96386c6e2e0557a78402b029f4a"},
+ {file = "regex-2025.9.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e4121f1ce2b2b5eec4b397cc1b277686e577e658d8f5870b7eb2d726bd2300ab"},
+ {file = "regex-2025.9.18-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:300e25dbbf8299d87205e821a201057f2ef9aa3deb29caa01cd2cac669e508d5"},
+ {file = "regex-2025.9.18-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7b47fcf9f5316c0bdaf449e879407e1b9937a23c3b369135ca94ebc8d74b1742"},
+ {file = "regex-2025.9.18-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:57a161bd3acaa4b513220b49949b07e252165e6b6dc910ee7617a37ff4f5b425"},
+ {file = "regex-2025.9.18-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f130c3a7845ba42de42f380fff3c8aebe89a810747d91bcf56d40a069f15352"},
+ {file = "regex-2025.9.18-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f96fa342b6f54dcba928dd452e8d8cb9f0d63e711d1721cd765bb9f73bb048d"},
+ {file = "regex-2025.9.18-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0f0d676522d68c207828dcd01fb6f214f63f238c283d9f01d85fc664c7c85b56"},
+ {file = "regex-2025.9.18-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:40532bff8a1a0621e7903ae57fce88feb2e8a9a9116d341701302c9302aef06e"},
+ {file = "regex-2025.9.18-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:039f11b618ce8d71a1c364fdee37da1012f5a3e79b1b2819a9f389cd82fd6282"},
+ {file = "regex-2025.9.18-cp312-cp312-win32.whl", hash = "sha256:e1dd06f981eb226edf87c55d523131ade7285137fbde837c34dc9d1bf309f459"},
+ {file = "regex-2025.9.18-cp312-cp312-win_amd64.whl", hash = "sha256:3d86b5247bf25fa3715e385aa9ff272c307e0636ce0c9595f64568b41f0a9c77"},
+ {file = "regex-2025.9.18-cp312-cp312-win_arm64.whl", hash = "sha256:032720248cbeeae6444c269b78cb15664458b7bb9ed02401d3da59fe4d68c3a5"},
+ {file = "regex-2025.9.18-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2a40f929cd907c7e8ac7566ac76225a77701a6221bca937bdb70d56cb61f57b2"},
+ {file = "regex-2025.9.18-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c90471671c2cdf914e58b6af62420ea9ecd06d1554d7474d50133ff26ae88feb"},
+ {file = "regex-2025.9.18-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a351aff9e07a2dabb5022ead6380cff17a4f10e4feb15f9100ee56c4d6d06af"},
+ {file = "regex-2025.9.18-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc4b8e9d16e20ddfe16430c23468a8707ccad3365b06d4536142e71823f3ca29"},
+ {file = "regex-2025.9.18-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4b8cdbddf2db1c5e80338ba2daa3cfa3dec73a46fff2a7dda087c8efbf12d62f"},
+ {file = "regex-2025.9.18-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a276937d9d75085b2c91fb48244349c6954f05ee97bba0963ce24a9d915b8b68"},
+ {file = "regex-2025.9.18-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:92a8e375ccdc1256401c90e9dc02b8642894443d549ff5e25e36d7cf8a80c783"},
+ {file = "regex-2025.9.18-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0dc6893b1f502d73037cf807a321cdc9be29ef3d6219f7970f842475873712ac"},
+ {file = "regex-2025.9.18-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a61e85bfc63d232ac14b015af1261f826260c8deb19401c0597dbb87a864361e"},
+ {file = "regex-2025.9.18-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1ef86a9ebc53f379d921fb9a7e42b92059ad3ee800fcd9e0fe6181090e9f6c23"},
+ {file = "regex-2025.9.18-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d3bc882119764ba3a119fbf2bd4f1b47bc56c1da5d42df4ed54ae1e8e66fdf8f"},
+ {file = "regex-2025.9.18-cp313-cp313-win32.whl", hash = "sha256:3810a65675845c3bdfa58c3c7d88624356dd6ee2fc186628295e0969005f928d"},
+ {file = "regex-2025.9.18-cp313-cp313-win_amd64.whl", hash = "sha256:16eaf74b3c4180ede88f620f299e474913ab6924d5c4b89b3833bc2345d83b3d"},
+ {file = "regex-2025.9.18-cp313-cp313-win_arm64.whl", hash = "sha256:4dc98ba7dd66bd1261927a9f49bd5ee2bcb3660f7962f1ec02617280fc00f5eb"},
+ {file = "regex-2025.9.18-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:fe5d50572bc885a0a799410a717c42b1a6b50e2f45872e2b40f4f288f9bce8a2"},
+ {file = "regex-2025.9.18-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1b9d9a2d6cda6621551ca8cf7a06f103adf72831153f3c0d982386110870c4d3"},
+ {file = "regex-2025.9.18-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:13202e4c4ac0ef9a317fff817674b293c8f7e8c68d3190377d8d8b749f566e12"},
+ {file = "regex-2025.9.18-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:874ff523b0fecffb090f80ae53dc93538f8db954c8bb5505f05b7787ab3402a0"},
+ {file = "regex-2025.9.18-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d13ab0490128f2bb45d596f754148cd750411afc97e813e4b3a61cf278a23bb6"},
+ {file = "regex-2025.9.18-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:05440bc172bc4b4b37fb9667e796597419404dbba62e171e1f826d7d2a9ebcef"},
+ {file = "regex-2025.9.18-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5514b8e4031fdfaa3d27e92c75719cbe7f379e28cacd939807289bce76d0e35a"},
+ {file = "regex-2025.9.18-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:65d3c38c39efce73e0d9dc019697b39903ba25b1ad45ebbd730d2cf32741f40d"},
+ {file = "regex-2025.9.18-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:ae77e447ebc144d5a26d50055c6ddba1d6ad4a865a560ec7200b8b06bc529368"},
+ {file = "regex-2025.9.18-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e3ef8cf53dc8df49d7e28a356cf824e3623764e9833348b655cfed4524ab8a90"},
+ {file = "regex-2025.9.18-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9feb29817df349c976da9a0debf775c5c33fc1c8ad7b9f025825da99374770b7"},
+ {file = "regex-2025.9.18-cp313-cp313t-win32.whl", hash = "sha256:168be0d2f9b9d13076940b1ed774f98595b4e3c7fc54584bba81b3cc4181742e"},
+ {file = "regex-2025.9.18-cp313-cp313t-win_amd64.whl", hash = "sha256:d59ecf3bb549e491c8104fea7313f3563c7b048e01287db0a90485734a70a730"},
+ {file = "regex-2025.9.18-cp313-cp313t-win_arm64.whl", hash = "sha256:dbef80defe9fb21310948a2595420b36c6d641d9bea4c991175829b2cc4bc06a"},
+ {file = "regex-2025.9.18-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:c6db75b51acf277997f3adcd0ad89045d856190d13359f15ab5dda21581d9129"},
+ {file = "regex-2025.9.18-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8f9698b6f6895d6db810e0bda5364f9ceb9e5b11328700a90cae573574f61eea"},
+ {file = "regex-2025.9.18-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:29cd86aa7cb13a37d0f0d7c21d8d949fe402ffa0ea697e635afedd97ab4b69f1"},
+ {file = "regex-2025.9.18-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7c9f285a071ee55cd9583ba24dde006e53e17780bb309baa8e4289cd472bcc47"},
+ {file = "regex-2025.9.18-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5adf266f730431e3be9021d3e5b8d5ee65e563fec2883ea8093944d21863b379"},
+ {file = "regex-2025.9.18-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1137cabc0f38807de79e28d3f6e3e3f2cc8cfb26bead754d02e6d1de5f679203"},
+ {file = "regex-2025.9.18-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7cc9e5525cada99699ca9223cce2d52e88c52a3d2a0e842bd53de5497c604164"},
+ {file = "regex-2025.9.18-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:bbb9246568f72dce29bcd433517c2be22c7791784b223a810225af3b50d1aafb"},
+ {file = "regex-2025.9.18-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:6a52219a93dd3d92c675383efff6ae18c982e2d7651c792b1e6d121055808743"},
+ {file = "regex-2025.9.18-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:ae9b3840c5bd456780e3ddf2f737ab55a79b790f6409182012718a35c6d43282"},
+ {file = "regex-2025.9.18-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d488c236ac497c46a5ac2005a952c1a0e22a07be9f10c3e735bc7d1209a34773"},
+ {file = "regex-2025.9.18-cp314-cp314-win32.whl", hash = "sha256:0c3506682ea19beefe627a38872d8da65cc01ffa25ed3f2e422dffa1474f0788"},
+ {file = "regex-2025.9.18-cp314-cp314-win_amd64.whl", hash = "sha256:57929d0f92bebb2d1a83af372cd0ffba2263f13f376e19b1e4fa32aec4efddc3"},
+ {file = "regex-2025.9.18-cp314-cp314-win_arm64.whl", hash = "sha256:6a4b44df31d34fa51aa5c995d3aa3c999cec4d69b9bd414a8be51984d859f06d"},
+ {file = "regex-2025.9.18-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:b176326bcd544b5e9b17d6943f807697c0cb7351f6cfb45bf5637c95ff7e6306"},
+ {file = "regex-2025.9.18-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:0ffd9e230b826b15b369391bec167baed57c7ce39efc35835448618860995946"},
+ {file = "regex-2025.9.18-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ec46332c41add73f2b57e2f5b642f991f6b15e50e9f86285e08ffe3a512ac39f"},
+ {file = "regex-2025.9.18-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b80fa342ed1ea095168a3f116637bd1030d39c9ff38dc04e54ef7c521e01fc95"},
+ {file = "regex-2025.9.18-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f4d97071c0ba40f0cf2a93ed76e660654c399a0a04ab7d85472239460f3da84b"},
+ {file = "regex-2025.9.18-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0ac936537ad87cef9e0e66c5144484206c1354224ee811ab1519a32373e411f3"},
+ {file = "regex-2025.9.18-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dec57f96d4def58c422d212d414efe28218d58537b5445cf0c33afb1b4768571"},
+ {file = "regex-2025.9.18-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:48317233294648bf7cd068857f248e3a57222259a5304d32c7552e2284a1b2ad"},
+ {file = "regex-2025.9.18-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:274687e62ea3cf54846a9b25fc48a04459de50af30a7bd0b61a9e38015983494"},
+ {file = "regex-2025.9.18-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:a78722c86a3e7e6aadf9579e3b0ad78d955f2d1f1a8ca4f67d7ca258e8719d4b"},
+ {file = "regex-2025.9.18-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:06104cd203cdef3ade989a1c45b6215bf42f8b9dd705ecc220c173233f7cba41"},
+ {file = "regex-2025.9.18-cp314-cp314t-win32.whl", hash = "sha256:2e1eddc06eeaffd249c0adb6fafc19e2118e6308c60df9db27919e96b5656096"},
+ {file = "regex-2025.9.18-cp314-cp314t-win_amd64.whl", hash = "sha256:8620d247fb8c0683ade51217b459cb4a1081c0405a3072235ba43a40d355c09a"},
+ {file = "regex-2025.9.18-cp314-cp314t-win_arm64.whl", hash = "sha256:b7531a8ef61de2c647cdf68b3229b071e46ec326b3138b2180acb4275f470b01"},
+ {file = "regex-2025.9.18-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3dbcfcaa18e9480669030d07371713c10b4f1a41f791ffa5cb1a99f24e777f40"},
+ {file = "regex-2025.9.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1e85f73ef7095f0380208269055ae20524bfde3f27c5384126ddccf20382a638"},
+ {file = "regex-2025.9.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9098e29b3ea4ffffeade423f6779665e2a4f8db64e699c0ed737ef0db6ba7b12"},
+ {file = "regex-2025.9.18-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90b6b7a2d0f45b7ecaaee1aec6b362184d6596ba2092dd583ffba1b78dd0231c"},
+ {file = "regex-2025.9.18-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c81b892af4a38286101502eae7aec69f7cd749a893d9987a92776954f3943408"},
+ {file = "regex-2025.9.18-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3b524d010973f2e1929aeb635418d468d869a5f77b52084d9f74c272189c251d"},
+ {file = "regex-2025.9.18-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6b498437c026a3d5d0be0020023ff76d70ae4d77118e92f6f26c9d0423452446"},
+ {file = "regex-2025.9.18-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0716e4d6e58853d83f6563f3cf25c281ff46cf7107e5f11879e32cb0b59797d9"},
+ {file = "regex-2025.9.18-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:065b6956749379d41db2625f880b637d4acc14c0a4de0d25d609a62850e96d36"},
+ {file = "regex-2025.9.18-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d4a691494439287c08ddb9b5793da605ee80299dd31e95fa3f323fac3c33d9d4"},
+ {file = "regex-2025.9.18-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ef8d10cc0989565bcbe45fb4439f044594d5c2b8919d3d229ea2c4238f1d55b0"},
+ {file = "regex-2025.9.18-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4baeb1b16735ac969a7eeecc216f1f8b7caf60431f38a2671ae601f716a32d25"},
+ {file = "regex-2025.9.18-cp39-cp39-win32.whl", hash = "sha256:8e5f41ad24a1e0b5dfcf4c4e5d9f5bd54c895feb5708dd0c1d0d35693b24d478"},
+ {file = "regex-2025.9.18-cp39-cp39-win_amd64.whl", hash = "sha256:50e8290707f2fb8e314ab3831e594da71e062f1d623b05266f8cfe4db4949afd"},
+ {file = "regex-2025.9.18-cp39-cp39-win_arm64.whl", hash = "sha256:039a9d7195fd88c943d7c777d4941e8ef736731947becce773c31a1009cb3c35"},
+ {file = "regex-2025.9.18.tar.gz", hash = "sha256:c5ba23274c61c6fef447ba6a39333297d0c247f53059dba0bca415cac511edc4"},
]
[[package]]
@@ -2491,163 +2283,171 @@ urllib3 = ">=1.21.1,<3"
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
-[[package]]
-name = "restrictedpython"
-version = "8.1"
-description = "RestrictedPython is a defined subset of the Python language which allows to provide a program input into a trusted environment."
-optional = false
-python-versions = "<3.15,>=3.9"
-groups = ["main"]
-markers = "python_version < \"3.14\""
-files = [
- {file = "restrictedpython-8.1-py3-none-any.whl", hash = "sha256:4769449c6cdb10f2071649ba386902befff0eff2a8fd6217989fa7b16aeae926"},
- {file = "restrictedpython-8.1.tar.gz", hash = "sha256:4a69304aceacf6bee74bdf153c728221d4e3109b39acbfe00b3494927080d898"},
-]
-
-[package.extras]
-docs = ["Sphinx", "furo"]
-test = ["pytest", "pytest-mock"]
-
[[package]]
name = "rpds-py"
-version = "0.28.0"
+version = "0.27.1"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
-python-versions = ">=3.10"
-groups = ["main"]
-files = [
- {file = "rpds_py-0.28.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7b6013db815417eeb56b2d9d7324e64fcd4fa289caeee6e7a78b2e11fc9b438a"},
- {file = "rpds_py-0.28.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a4c6b05c685c0c03f80dabaeb73e74218c49deea965ca63f76a752807397207"},
- {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4794c6c3fbe8f9ac87699b131a1f26e7b4abcf6d828da46a3a52648c7930eba"},
- {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e8456b6ee5527112ff2354dd9087b030e3429e43a74f480d4a5ca79d269fd85"},
- {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:beb880a9ca0a117415f241f66d56025c02037f7c4efc6fe59b5b8454f1eaa50d"},
- {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6897bebb118c44b38c9cb62a178e09f1593c949391b9a1a6fe777ccab5934ee7"},
- {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b553dd06e875249fd43efd727785efb57a53180e0fde321468222eabbeaafa"},
- {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:f0b2044fdddeea5b05df832e50d2a06fe61023acb44d76978e1b060206a8a476"},
- {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05cf1e74900e8da73fa08cc76c74a03345e5a3e37691d07cfe2092d7d8e27b04"},
- {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:efd489fec7c311dae25e94fe7eeda4b3d06be71c68f2cf2e8ef990ffcd2cd7e8"},
- {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ada7754a10faacd4f26067e62de52d6af93b6d9542f0df73c57b9771eb3ba9c4"},
- {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c2a34fd26588949e1e7977cfcbb17a9a42c948c100cab890c6d8d823f0586457"},
- {file = "rpds_py-0.28.0-cp310-cp310-win32.whl", hash = "sha256:f9174471d6920cbc5e82a7822de8dfd4dcea86eb828b04fc8c6519a77b0ee51e"},
- {file = "rpds_py-0.28.0-cp310-cp310-win_amd64.whl", hash = "sha256:6e32dd207e2c4f8475257a3540ab8a93eff997abfa0a3fdb287cae0d6cd874b8"},
- {file = "rpds_py-0.28.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:03065002fd2e287725d95fbc69688e0c6daf6c6314ba38bdbaa3895418e09296"},
- {file = "rpds_py-0.28.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28ea02215f262b6d078daec0b45344c89e161eab9526b0d898221d96fdda5f27"},
- {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25dbade8fbf30bcc551cb352376c0ad64b067e4fc56f90e22ba70c3ce205988c"},
- {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c03002f54cc855860bfdc3442928ffdca9081e73b5b382ed0b9e8efe6e5e205"},
- {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9699fa7990368b22032baf2b2dce1f634388e4ffc03dfefaaac79f4695edc95"},
- {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9b06fe1a75e05e0713f06ea0c89ecb6452210fd60e2f1b6ddc1067b990e08d9"},
- {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9f83e7b326a3f9ec3ef84cda98fb0a74c7159f33e692032233046e7fd15da2"},
- {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:0d3259ea9ad8743a75a43eb7819324cdab393263c91be86e2d1901ee65c314e0"},
- {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a7548b345f66f6695943b4ef6afe33ccd3f1b638bd9afd0f730dd255c249c9e"},
- {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9a40040aa388b037eb39416710fbcce9443498d2eaab0b9b45ae988b53f5c67"},
- {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8f60c7ea34e78c199acd0d3cda37a99be2c861dd2b8cf67399784f70c9f8e57d"},
- {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1571ae4292649100d743b26d5f9c63503bb1fedf538a8f29a98dce2d5ba6b4e6"},
- {file = "rpds_py-0.28.0-cp311-cp311-win32.whl", hash = "sha256:5cfa9af45e7c1140af7321fa0bef25b386ee9faa8928c80dc3a5360971a29e8c"},
- {file = "rpds_py-0.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:dd8d86b5d29d1b74100982424ba53e56033dc47720a6de9ba0259cf81d7cecaa"},
- {file = "rpds_py-0.28.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e27d3a5709cc2b3e013bf93679a849213c79ae0573f9b894b284b55e729e120"},
- {file = "rpds_py-0.28.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6b4f28583a4f247ff60cd7bdda83db8c3f5b05a7a82ff20dd4b078571747708f"},
- {file = "rpds_py-0.28.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d678e91b610c29c4b3d52a2c148b641df2b4676ffe47c59f6388d58b99cdc424"},
- {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e819e0e37a44a78e1383bf1970076e2ccc4dc8c2bbaa2f9bd1dc987e9afff628"},
- {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5ee514e0f0523db5d3fb171f397c54875dbbd69760a414dccf9d4d7ad628b5bd"},
- {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f3fa06d27fdcee47f07a39e02862da0100cb4982508f5ead53ec533cd5fe55e"},
- {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46959ef2e64f9e4a41fc89aa20dbca2b85531f9a72c21099a3360f35d10b0d5a"},
- {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8455933b4bcd6e83fde3fefc987a023389c4b13f9a58c8d23e4b3f6d13f78c84"},
- {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:ad50614a02c8c2962feebe6012b52f9802deec4263946cddea37aaf28dd25a66"},
- {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5deca01b271492553fdb6c7fd974659dce736a15bae5dad7ab8b93555bceb28"},
- {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:735f8495a13159ce6a0d533f01e8674cec0c57038c920495f87dcb20b3ddb48a"},
- {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:961ca621ff10d198bbe6ba4957decca61aa2a0c56695384c1d6b79bf61436df5"},
- {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2374e16cc9131022e7d9a8f8d65d261d9ba55048c78f3b6e017971a4f5e6353c"},
- {file = "rpds_py-0.28.0-cp312-cp312-win32.whl", hash = "sha256:d15431e334fba488b081d47f30f091e5d03c18527c325386091f31718952fe08"},
- {file = "rpds_py-0.28.0-cp312-cp312-win_amd64.whl", hash = "sha256:a410542d61fc54710f750d3764380b53bf09e8c4edbf2f9141a82aa774a04f7c"},
- {file = "rpds_py-0.28.0-cp312-cp312-win_arm64.whl", hash = "sha256:1f0cfd1c69e2d14f8c892b893997fa9a60d890a0c8a603e88dca4955f26d1edd"},
- {file = "rpds_py-0.28.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e9e184408a0297086f880556b6168fa927d677716f83d3472ea333b42171ee3b"},
- {file = "rpds_py-0.28.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:edd267266a9b0448f33dc465a97cfc5d467594b600fe28e7fa2f36450e03053a"},
- {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85beb8b3f45e4e32f6802fb6cd6b17f615ef6c6a52f265371fb916fae02814aa"},
- {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d2412be8d00a1b895f8ad827cc2116455196e20ed994bb704bf138fe91a42724"},
- {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cf128350d384b777da0e68796afdcebc2e9f63f0e9f242217754e647f6d32491"},
- {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2036d09b363aa36695d1cc1a97b36865597f4478470b0697b5ee9403f4fe399"},
- {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8e1e9be4fa6305a16be628959188e4fd5cd6f1b0e724d63c6d8b2a8adf74ea6"},
- {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:0a403460c9dd91a7f23fc3188de6d8977f1d9603a351d5db6cf20aaea95b538d"},
- {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d7366b6553cdc805abcc512b849a519167db8f5e5c3472010cd1228b224265cb"},
- {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b43c6a3726efd50f18d8120ec0551241c38785b68952d240c45ea553912ac41"},
- {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0cb7203c7bc69d7c1585ebb33a2e6074492d2fc21ad28a7b9d40457ac2a51ab7"},
- {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7a52a5169c664dfb495882adc75c304ae1d50df552fbd68e100fdc719dee4ff9"},
- {file = "rpds_py-0.28.0-cp313-cp313-win32.whl", hash = "sha256:2e42456917b6687215b3e606ab46aa6bca040c77af7df9a08a6dcfe8a4d10ca5"},
- {file = "rpds_py-0.28.0-cp313-cp313-win_amd64.whl", hash = "sha256:e0a0311caedc8069d68fc2bf4c9019b58a2d5ce3cd7cb656c845f1615b577e1e"},
- {file = "rpds_py-0.28.0-cp313-cp313-win_arm64.whl", hash = "sha256:04c1b207ab8b581108801528d59ad80aa83bb170b35b0ddffb29c20e411acdc1"},
- {file = "rpds_py-0.28.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f296ea3054e11fc58ad42e850e8b75c62d9a93a9f981ad04b2e5ae7d2186ff9c"},
- {file = "rpds_py-0.28.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5a7306c19b19005ad98468fcefeb7100b19c79fc23a5f24a12e06d91181193fa"},
- {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5d9b86aa501fed9862a443c5c3116f6ead8bc9296185f369277c42542bd646b"},
- {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5bbc701eff140ba0e872691d573b3d5d30059ea26e5785acba9132d10c8c31d"},
- {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5690671cd672a45aa8616d7374fdf334a1b9c04a0cac3c854b1136e92374fe"},
- {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9f1d92ecea4fa12f978a367c32a5375a1982834649cdb96539dcdc12e609ab1a"},
- {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d252db6b1a78d0a3928b6190156042d54c93660ce4d98290d7b16b5296fb7cc"},
- {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d61b355c3275acb825f8777d6c4505f42b5007e357af500939d4a35b19177259"},
- {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:acbe5e8b1026c0c580d0321c8aae4b0a1e1676861d48d6e8c6586625055b606a"},
- {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8aa23b6f0fc59b85b4c7d89ba2965af274346f738e8d9fc2455763602e62fd5f"},
- {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7b14b0c680286958817c22d76fcbca4800ddacef6f678f3a7c79a1fe7067fe37"},
- {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bcf1d210dfee61a6c86551d67ee1031899c0fdbae88b2d44a569995d43797712"},
- {file = "rpds_py-0.28.0-cp313-cp313t-win32.whl", hash = "sha256:3aa4dc0fdab4a7029ac63959a3ccf4ed605fee048ba67ce89ca3168da34a1342"},
- {file = "rpds_py-0.28.0-cp313-cp313t-win_amd64.whl", hash = "sha256:7b7d9d83c942855e4fdcfa75d4f96f6b9e272d42fffcb72cd4bb2577db2e2907"},
- {file = "rpds_py-0.28.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:dcdcb890b3ada98a03f9f2bb108489cdc7580176cb73b4f2d789e9a1dac1d472"},
- {file = "rpds_py-0.28.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f274f56a926ba2dc02976ca5b11c32855cbd5925534e57cfe1fda64e04d1add2"},
- {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fe0438ac4a29a520ea94c8c7f1754cdd8feb1bc490dfda1bfd990072363d527"},
- {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a358a32dd3ae50e933347889b6af9a1bdf207ba5d1a3f34e1a38cd3540e6733"},
- {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e80848a71c78aa328fefaba9c244d588a342c8e03bda518447b624ea64d1ff56"},
- {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f586db2e209d54fe177e58e0bc4946bea5fb0102f150b1b2f13de03e1f0976f8"},
- {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ae8ee156d6b586e4292491e885d41483136ab994e719a13458055bec14cf370"},
- {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:a805e9b3973f7e27f7cab63a6b4f61d90f2e5557cff73b6e97cd5b8540276d3d"},
- {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5d3fd16b6dc89c73a4da0b4ac8b12a7ecc75b2864b95c9e5afed8003cb50a728"},
- {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:6796079e5d24fdaba6d49bda28e2c47347e89834678f2bc2c1b4fc1489c0fb01"},
- {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:76500820c2af232435cbe215e3324c75b950a027134e044423f59f5b9a1ba515"},
- {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bbdc5640900a7dbf9dd707fe6388972f5bbd883633eb68b76591044cfe346f7e"},
- {file = "rpds_py-0.28.0-cp314-cp314-win32.whl", hash = "sha256:adc8aa88486857d2b35d75f0640b949759f79dc105f50aa2c27816b2e0dd749f"},
- {file = "rpds_py-0.28.0-cp314-cp314-win_amd64.whl", hash = "sha256:66e6fa8e075b58946e76a78e69e1a124a21d9a48a5b4766d15ba5b06869d1fa1"},
- {file = "rpds_py-0.28.0-cp314-cp314-win_arm64.whl", hash = "sha256:a6fe887c2c5c59413353b7c0caff25d0e566623501ccfff88957fa438a69377d"},
- {file = "rpds_py-0.28.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7a69df082db13c7070f7b8b1f155fa9e687f1d6aefb7b0e3f7231653b79a067b"},
- {file = "rpds_py-0.28.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b1cde22f2c30ebb049a9e74c5374994157b9b70a16147d332f89c99c5960737a"},
- {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5338742f6ba7a51012ea470bd4dc600a8c713c0c72adaa0977a1b1f4327d6592"},
- {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1460ebde1bcf6d496d80b191d854adedcc619f84ff17dc1c6d550f58c9efbba"},
- {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e3eb248f2feba84c692579257a043a7699e28a77d86c77b032c1d9fbb3f0219c"},
- {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3bbba5def70b16cd1c1d7255666aad3b290fbf8d0fe7f9f91abafb73611a91"},
- {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3114f4db69ac5a1f32e7e4d1cbbe7c8f9cf8217f78e6e002cedf2d54c2a548ed"},
- {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:4b0cb8a906b1a0196b863d460c0222fb8ad0f34041568da5620f9799b83ccf0b"},
- {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf681ac76a60b667106141e11a92a3330890257e6f559ca995fbb5265160b56e"},
- {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1e8ee6413cfc677ce8898d9cde18cc3a60fc2ba756b0dec5b71eb6eb21c49fa1"},
- {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b3072b16904d0b5572a15eb9d31c1954e0d3227a585fc1351aa9878729099d6c"},
- {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b670c30fd87a6aec281c3c9896d3bae4b205fd75d79d06dc87c2503717e46092"},
- {file = "rpds_py-0.28.0-cp314-cp314t-win32.whl", hash = "sha256:8014045a15b4d2b3476f0a287fcc93d4f823472d7d1308d47884ecac9e612be3"},
- {file = "rpds_py-0.28.0-cp314-cp314t-win_amd64.whl", hash = "sha256:7a4e59c90d9c27c561eb3160323634a9ff50b04e4f7820600a2beb0ac90db578"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f5e7101145427087e493b9c9b959da68d357c28c562792300dd21a095118ed16"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:31eb671150b9c62409a888850aaa8e6533635704fe2b78335f9aaf7ff81eec4d"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b55c1f64482f7d8bd39942f376bfdf2f6aec637ee8c805b5041e14eeb771db"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:24743a7b372e9a76171f6b69c01aedf927e8ac3e16c474d9fe20d552a8cb45c7"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:389c29045ee8bbb1627ea190b4976a310a295559eaf9f1464a1a6f2bf84dde78"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23690b5827e643150cf7b49569679ec13fe9a610a15949ed48b85eb7f98f34ec"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f0c9266c26580e7243ad0d72fc3e01d6b33866cfab5084a6da7576bcf1c4f72"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4c6c4db5d73d179746951486df97fd25e92396be07fc29ee8ff9a8f5afbdfb27"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3b695a8fa799dd2cfdb4804b37096c5f6dba1ac7f48a7fbf6d0485bcd060316"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:6aa1bfce3f83baf00d9c5fcdbba93a3ab79958b4c7d7d1f55e7fe68c20e63912"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b0f9dceb221792b3ee6acb5438eb1f02b0cb2c247796a72b016dcc92c6de829"},
- {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5d0145edba8abd3db0ab22b5300c99dc152f5c9021fab861be0f0544dc3cbc5f"},
- {file = "rpds_py-0.28.0.tar.gz", hash = "sha256:abd4df20485a0983e2ca334a216249b6186d6e3c1627e106651943dbdb791aea"},
-]
-
-[[package]]
-name = "rsa"
-version = "4.9.1"
-description = "Pure-Python RSA implementation"
-optional = false
-python-versions = "<4,>=3.6"
+python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762"},
- {file = "rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75"},
+ {file = "rpds_py-0.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:68afeec26d42ab3b47e541b272166a0b4400313946871cba3ed3a4fc0cab1cef"},
+ {file = "rpds_py-0.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74e5b2f7bb6fa38b1b10546d27acbacf2a022a8b5543efb06cfebc72a59c85be"},
+ {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9024de74731df54546fab0bfbcdb49fae19159ecaecfc8f37c18d2c7e2c0bd61"},
+ {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:31d3ebadefcd73b73928ed0b2fd696f7fefda8629229f81929ac9c1854d0cffb"},
+ {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2e7f8f169d775dd9092a1743768d771f1d1300453ddfe6325ae3ab5332b4657"},
+ {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d905d16f77eb6ab2e324e09bfa277b4c8e5e6b8a78a3e7ff8f3cdf773b4c013"},
+ {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50c946f048209e6362e22576baea09193809f87687a95a8db24e5fbdb307b93a"},
+ {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:3deab27804d65cd8289eb814c2c0e807c4b9d9916c9225e363cb0cf875eb67c1"},
+ {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8b61097f7488de4be8244c89915da8ed212832ccf1e7c7753a25a394bf9b1f10"},
+ {file = "rpds_py-0.27.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8a3f29aba6e2d7d90528d3c792555a93497fe6538aa65eb675b44505be747808"},
+ {file = "rpds_py-0.27.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dd6cd0485b7d347304067153a6dc1d73f7d4fd995a396ef32a24d24b8ac63ac8"},
+ {file = "rpds_py-0.27.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f4461bf931108c9fa226ffb0e257c1b18dc2d44cd72b125bec50ee0ab1248a9"},
+ {file = "rpds_py-0.27.1-cp310-cp310-win32.whl", hash = "sha256:ee5422d7fb21f6a00c1901bf6559c49fee13a5159d0288320737bbf6585bd3e4"},
+ {file = "rpds_py-0.27.1-cp310-cp310-win_amd64.whl", hash = "sha256:3e039aabf6d5f83c745d5f9a0a381d031e9ed871967c0a5c38d201aca41f3ba1"},
+ {file = "rpds_py-0.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:be898f271f851f68b318872ce6ebebbc62f303b654e43bf72683dbdc25b7c881"},
+ {file = "rpds_py-0.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62ac3d4e3e07b58ee0ddecd71d6ce3b1637de2d373501412df395a0ec5f9beb5"},
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4708c5c0ceb2d034f9991623631d3d23cb16e65c83736ea020cdbe28d57c0a0e"},
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:abfa1171a9952d2e0002aba2ad3780820b00cc3d9c98c6630f2e93271501f66c"},
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b507d19f817ebaca79574b16eb2ae412e5c0835542c93fe9983f1e432aca195"},
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168b025f8fd8d8d10957405f3fdcef3dc20f5982d398f90851f4abc58c566c52"},
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb56c6210ef77caa58e16e8c17d35c63fe3f5b60fd9ba9d424470c3400bcf9ed"},
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:d252f2d8ca0195faa707f8eb9368955760880b2b42a8ee16d382bf5dd807f89a"},
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6e5e54da1e74b91dbc7996b56640f79b195d5925c2b78efaa8c5d53e1d88edde"},
+ {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ffce0481cc6e95e5b3f0a47ee17ffbd234399e6d532f394c8dce320c3b089c21"},
+ {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a205fdfe55c90c2cd8e540ca9ceba65cbe6629b443bc05db1f590a3db8189ff9"},
+ {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:689fb5200a749db0415b092972e8eba85847c23885c8543a8b0f5c009b1a5948"},
+ {file = "rpds_py-0.27.1-cp311-cp311-win32.whl", hash = "sha256:3182af66048c00a075010bc7f4860f33913528a4b6fc09094a6e7598e462fe39"},
+ {file = "rpds_py-0.27.1-cp311-cp311-win_amd64.whl", hash = "sha256:b4938466c6b257b2f5c4ff98acd8128ec36b5059e5c8f8372d79316b1c36bb15"},
+ {file = "rpds_py-0.27.1-cp311-cp311-win_arm64.whl", hash = "sha256:2f57af9b4d0793e53266ee4325535a31ba48e2f875da81a9177c9926dfa60746"},
+ {file = "rpds_py-0.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ae2775c1973e3c30316892737b91f9283f9908e3cc7625b9331271eaaed7dc90"},
+ {file = "rpds_py-0.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2643400120f55c8a96f7c9d858f7be0c88d383cd4653ae2cf0d0c88f668073e5"},
+ {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16323f674c089b0360674a4abd28d5042947d54ba620f72514d69be4ff64845e"},
+ {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a1f4814b65eacac94a00fc9a526e3fdafd78e439469644032032d0d63de4881"},
+ {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ba32c16b064267b22f1850a34051121d423b6f7338a12b9459550eb2096e7ec"},
+ {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5c20f33fd10485b80f65e800bbe5f6785af510b9f4056c5a3c612ebc83ba6cb"},
+ {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:466bfe65bd932da36ff279ddd92de56b042f2266d752719beb97b08526268ec5"},
+ {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:41e532bbdcb57c92ba3be62c42e9f096431b4cf478da9bc3bc6ce5c38ab7ba7a"},
+ {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f149826d742b406579466283769a8ea448eed82a789af0ed17b0cd5770433444"},
+ {file = "rpds_py-0.27.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:80c60cfb5310677bd67cb1e85a1e8eb52e12529545441b43e6f14d90b878775a"},
+ {file = "rpds_py-0.27.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7ee6521b9baf06085f62ba9c7a3e5becffbc32480d2f1b351559c001c38ce4c1"},
+ {file = "rpds_py-0.27.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a512c8263249a9d68cac08b05dd59d2b3f2061d99b322813cbcc14c3c7421998"},
+ {file = "rpds_py-0.27.1-cp312-cp312-win32.whl", hash = "sha256:819064fa048ba01b6dadc5116f3ac48610435ac9a0058bbde98e569f9e785c39"},
+ {file = "rpds_py-0.27.1-cp312-cp312-win_amd64.whl", hash = "sha256:d9199717881f13c32c4046a15f024971a3b78ad4ea029e8da6b86e5aa9cf4594"},
+ {file = "rpds_py-0.27.1-cp312-cp312-win_arm64.whl", hash = "sha256:33aa65b97826a0e885ef6e278fbd934e98cdcfed80b63946025f01e2f5b29502"},
+ {file = "rpds_py-0.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e4b9fcfbc021633863a37e92571d6f91851fa656f0180246e84cbd8b3f6b329b"},
+ {file = "rpds_py-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1441811a96eadca93c517d08df75de45e5ffe68aa3089924f963c782c4b898cf"},
+ {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55266dafa22e672f5a4f65019015f90336ed31c6383bd53f5e7826d21a0e0b83"},
+ {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78827d7ac08627ea2c8e02c9e5b41180ea5ea1f747e9db0915e3adf36b62dcf"},
+ {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae92443798a40a92dc5f0b01d8a7c93adde0c4dc965310a29ae7c64d72b9fad2"},
+ {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c46c9dd2403b66a2a3b9720ec4b74d4ab49d4fabf9f03dfdce2d42af913fe8d0"},
+ {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efe4eb1d01b7f5f1939f4ef30ecea6c6b3521eec451fb93191bf84b2a522418"},
+ {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:15d3b4d83582d10c601f481eca29c3f138d44c92187d197aff663a269197c02d"},
+ {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4ed2e16abbc982a169d30d1a420274a709949e2cbdef119fe2ec9d870b42f274"},
+ {file = "rpds_py-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a75f305c9b013289121ec0f1181931975df78738cdf650093e6b86d74aa7d8dd"},
+ {file = "rpds_py-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67ce7620704745881a3d4b0ada80ab4d99df390838839921f99e63c474f82cf2"},
+ {file = "rpds_py-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d992ac10eb86d9b6f369647b6a3f412fc0075cfd5d799530e84d335e440a002"},
+ {file = "rpds_py-0.27.1-cp313-cp313-win32.whl", hash = "sha256:4f75e4bd8ab8db624e02c8e2fc4063021b58becdbe6df793a8111d9343aec1e3"},
+ {file = "rpds_py-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9025faafc62ed0b75a53e541895ca272815bec18abe2249ff6501c8f2e12b83"},
+ {file = "rpds_py-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:ed10dc32829e7d222b7d3b93136d25a406ba9788f6a7ebf6809092da1f4d279d"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:92022bbbad0d4426e616815b16bc4127f83c9a74940e1ccf3cfe0b387aba0228"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47162fdab9407ec3f160805ac3e154df042e577dd53341745fc7fb3f625e6d92"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb89bec23fddc489e5d78b550a7b773557c9ab58b7946154a10a6f7a214a48b2"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e48af21883ded2b3e9eb48cb7880ad8598b31ab752ff3be6457001d78f416723"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f5b7bd8e219ed50299e58551a410b64daafb5017d54bbe822e003856f06a802"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08f1e20bccf73b08d12d804d6e1c22ca5530e71659e6673bce31a6bb71c1e73f"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dc5dceeaefcc96dc192e3a80bbe1d6c410c469e97bdd47494a7d930987f18b2"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d76f9cc8665acdc0c9177043746775aa7babbf479b5520b78ae4002d889f5c21"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134fae0e36022edad8290a6661edf40c023562964efea0cc0ec7f5d392d2aaef"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb11a4f1b2b63337cfd3b4d110af778a59aae51c81d195768e353d8b52f88081"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:13e608ac9f50a0ed4faec0e90ece76ae33b34c0e8656e3dceb9a7db994c692cd"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dd2135527aa40f061350c3f8f89da2644de26cd73e4de458e79606384f4f68e7"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-win32.whl", hash = "sha256:3020724ade63fe320a972e2ffd93b5623227e684315adce194941167fee02688"},
+ {file = "rpds_py-0.27.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8ee50c3e41739886606388ba3ab3ee2aae9f35fb23f833091833255a31740797"},
+ {file = "rpds_py-0.27.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:acb9aafccaae278f449d9c713b64a9e68662e7799dbd5859e2c6b3c67b56d334"},
+ {file = "rpds_py-0.27.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b7fb801aa7f845ddf601c49630deeeccde7ce10065561d92729bfe81bd21fb33"},
+ {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0dd05afb46597b9a2e11c351e5e4283c741237e7f617ffb3252780cca9336a"},
+ {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6dfb0e058adb12d8b1d1b25f686e94ffa65d9995a5157afe99743bf7369d62b"},
+ {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed090ccd235f6fa8bb5861684567f0a83e04f52dfc2e5c05f2e4b1309fcf85e7"},
+ {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf876e79763eecf3e7356f157540d6a093cef395b65514f17a356f62af6cc136"},
+ {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12ed005216a51b1d6e2b02a7bd31885fe317e45897de81d86dcce7d74618ffff"},
+ {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:ee4308f409a40e50593c7e3bb8cbe0b4d4c66d1674a316324f0c2f5383b486f9"},
+ {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b08d152555acf1f455154d498ca855618c1378ec810646fcd7c76416ac6dc60"},
+ {file = "rpds_py-0.27.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:dce51c828941973a5684d458214d3a36fcd28da3e1875d659388f4f9f12cc33e"},
+ {file = "rpds_py-0.27.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c1476d6f29eb81aa4151c9a31219b03f1f798dc43d8af1250a870735516a1212"},
+ {file = "rpds_py-0.27.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3ce0cac322b0d69b63c9cdb895ee1b65805ec9ffad37639f291dd79467bee675"},
+ {file = "rpds_py-0.27.1-cp314-cp314-win32.whl", hash = "sha256:dfbfac137d2a3d0725758cd141f878bf4329ba25e34979797c89474a89a8a3a3"},
+ {file = "rpds_py-0.27.1-cp314-cp314-win_amd64.whl", hash = "sha256:a6e57b0abfe7cc513450fcf529eb486b6e4d3f8aee83e92eb5f1ef848218d456"},
+ {file = "rpds_py-0.27.1-cp314-cp314-win_arm64.whl", hash = "sha256:faf8d146f3d476abfee026c4ae3bdd9ca14236ae4e4c310cbd1cf75ba33d24a3"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:ba81d2b56b6d4911ce735aad0a1d4495e808b8ee4dc58715998741a26874e7c2"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:84f7d509870098de0e864cad0102711c1e24e9b1a50ee713b65928adb22269e4"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e960fc78fecd1100539f14132425e1d5fe44ecb9239f8f27f079962021523e"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:62f85b665cedab1a503747617393573995dac4600ff51869d69ad2f39eb5e817"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fed467af29776f6556250c9ed85ea5a4dd121ab56a5f8b206e3e7a4c551e48ec"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2729615f9d430af0ae6b36cf042cb55c0936408d543fb691e1a9e36648fd35a"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b207d881a9aef7ba753d69c123a35d96ca7cb808056998f6b9e8747321f03b8"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:639fd5efec029f99b79ae47e5d7e00ad8a773da899b6309f6786ecaf22948c48"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fecc80cb2a90e28af8a9b366edacf33d7a91cbfe4c2c4544ea1246e949cfebeb"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42a89282d711711d0a62d6f57d81aa43a1368686c45bc1c46b7f079d55692734"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:cf9931f14223de59551ab9d38ed18d92f14f055a5f78c1d8ad6493f735021bbb"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f39f58a27cc6e59f432b568ed8429c7e1641324fbe38131de852cd77b2d534b0"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-win32.whl", hash = "sha256:d5fa0ee122dc09e23607a28e6d7b150da16c662e66409bbe85230e4c85bb528a"},
+ {file = "rpds_py-0.27.1-cp314-cp314t-win_amd64.whl", hash = "sha256:6567d2bb951e21232c2f660c24cf3470bb96de56cdcb3f071a83feeaff8a2772"},
+ {file = "rpds_py-0.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c918c65ec2e42c2a78d19f18c553d77319119bf43aa9e2edf7fb78d624355527"},
+ {file = "rpds_py-0.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1fea2b1a922c47c51fd07d656324531adc787e415c8b116530a1d29c0516c62d"},
+ {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbf94c58e8e0cd6b6f38d8de67acae41b3a515c26169366ab58bdca4a6883bb8"},
+ {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2a8fed130ce946d5c585eddc7c8eeef0051f58ac80a8ee43bd17835c144c2cc"},
+ {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:037a2361db72ee98d829bc2c5b7cc55598ae0a5e0ec1823a56ea99374cfd73c1"},
+ {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5281ed1cc1d49882f9997981c88df1a22e140ab41df19071222f7e5fc4e72125"},
+ {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fd50659a069c15eef8aa3d64bbef0d69fd27bb4a50c9ab4f17f83a16cbf8905"},
+ {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_31_riscv64.whl", hash = "sha256:c4b676c4ae3921649a15d28ed10025548e9b561ded473aa413af749503c6737e"},
+ {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:079bc583a26db831a985c5257797b2b5d3affb0386e7ff886256762f82113b5e"},
+ {file = "rpds_py-0.27.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4e44099bd522cba71a2c6b97f68e19f40e7d85399de899d66cdb67b32d7cb786"},
+ {file = "rpds_py-0.27.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e202e6d4188e53c6661af813b46c37ca2c45e497fc558bacc1a7630ec2695aec"},
+ {file = "rpds_py-0.27.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f41f814b8eaa48768d1bb551591f6ba45f87ac76899453e8ccd41dba1289b04b"},
+ {file = "rpds_py-0.27.1-cp39-cp39-win32.whl", hash = "sha256:9e71f5a087ead99563c11fdaceee83ee982fd39cf67601f4fd66cb386336ee52"},
+ {file = "rpds_py-0.27.1-cp39-cp39-win_amd64.whl", hash = "sha256:71108900c9c3c8590697244b9519017a400d9ba26a36c48381b3f64743a44aab"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7ba22cb9693df986033b91ae1d7a979bc399237d45fccf875b76f62bb9e52ddf"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b640501be9288c77738b5492b3fd3abc4ba95c50c2e41273c8a1459f08298d3"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb08b65b93e0c6dd70aac7f7890a9c0938d5ec71d5cb32d45cf844fb8ae47636"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d7ff07d696a7a38152ebdb8212ca9e5baab56656749f3d6004b34ab726b550b8"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb7c72262deae25366e3b6c0c0ba46007967aea15d1eea746e44ddba8ec58dcc"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b002cab05d6339716b03a4a3a2ce26737f6231d7b523f339fa061d53368c9d8"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23f6b69d1c26c4704fec01311963a41d7de3ee0570a84ebde4d544e5a1859ffc"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:530064db9146b247351f2a0250b8f00b289accea4596a033e94be2389977de71"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b90b0496570bd6b0321724a330d8b545827c4df2034b6ddfc5f5275f55da2ad"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:879b0e14a2da6a1102a3fc8af580fc1ead37e6d6692a781bd8c83da37429b5ab"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:0d807710df3b5faa66c731afa162ea29717ab3be17bdc15f90f2d9f183da4059"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:3adc388fc3afb6540aec081fa59e6e0d3908722771aa1e37ffe22b220a436f0b"},
+ {file = "rpds_py-0.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c796c0c1cc68cb08b0284db4229f5af76168172670c74908fdbd4b7d7f515819"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdfe4bb2f9fe7458b7453ad3c33e726d6d1c7c0a72960bcc23800d77384e42df"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8fabb8fd848a5f75a2324e4a84501ee3a5e3c78d8603f83475441866e60b94a3"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda8719d598f2f7f3e0f885cba8646644b55a187762bec091fa14a2b819746a9"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c64d07e95606ec402a0a1c511fe003873fa6af630bda59bac77fac8b4318ebc"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93a2ed40de81bcff59aabebb626562d48332f3d028ca2036f1d23cbb52750be4"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:387ce8c44ae94e0ec50532d9cb0edce17311024c9794eb196b90e1058aadeb66"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf94f812c95b5e60ebaf8bfb1898a7d7cb9c1af5744d4a67fa47796e0465d4e"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4848ca84d6ded9b58e474dfdbad4b8bfb450344c0551ddc8d958bf4b36aa837c"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2bde09cbcf2248b73c7c323be49b280180ff39fadcfe04e7b6f54a678d02a7cf"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:94c44ee01fd21c9058f124d2d4f0c9dc7634bec93cd4b38eefc385dabe71acbf"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:df8b74962e35c9249425d90144e721eed198e6555a0e22a563d29fe4486b51f6"},
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:dc23e6820e3b40847e2f4a7726462ba0cf53089512abe9ee16318c366494c17a"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:aa8933159edc50be265ed22b401125c9eebff3171f570258854dbce3ecd55475"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a50431bf02583e21bf273c71b89d710e7a710ad5e39c725b14e685610555926f"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78af06ddc7fe5cc0e967085a9115accee665fb912c22a3f54bad70cc65b05fe6"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70d0738ef8fee13c003b100c2fbd667ec4f133468109b3472d249231108283a3"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2f6fd8a1cea5bbe599b6e78a6e5ee08db434fc8ffea51ff201c8765679698b3"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8177002868d1426305bb5de1e138161c2ec9eb2d939be38291d7c431c4712df8"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:008b839781d6c9bf3b6a8984d1d8e56f0ec46dc56df61fd669c49b58ae800400"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:a55b9132bb1ade6c734ddd2759c8dc132aa63687d259e725221f106b83a0e485"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a46fdec0083a26415f11d5f236b79fa1291c32aaa4a17684d82f7017a1f818b1"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8a63b640a7845f2bdd232eb0d0a4a2dd939bcdd6c57e6bb134526487f3160ec5"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:7e32721e5d4922deaaf963469d795d5bde6093207c52fec719bd22e5d1bedbc4"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:2c426b99a068601b5f4623573df7a7c3d72e87533a2dd2253353a03e7502566c"},
+ {file = "rpds_py-0.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4fc9b7fe29478824361ead6e14e4f5aed570d477e06088826537e202d25fe859"},
+ {file = "rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8"},
]
-[package.dependencies]
-pyasn1 = ">=0.1.3"
-
[[package]]
name = "s3transfer"
version = "0.14.0"
@@ -2732,16 +2532,19 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
[[package]]
name = "structlog"
-version = "25.5.0"
+version = "25.4.0"
description = "Structured Logging for Python"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "structlog-25.5.0-py3-none-any.whl", hash = "sha256:a8453e9b9e636ec59bd9e79bbd4a72f025981b3ba0f5837aebf48f02f37a7f9f"},
- {file = "structlog-25.5.0.tar.gz", hash = "sha256:098522a3bebed9153d4570c6d0288abf80a031dfdb2048d59a49e9dc2190fc98"},
+ {file = "structlog-25.4.0-py3-none-any.whl", hash = "sha256:fe809ff5c27e557d14e613f45ca441aabda051d119ee5a0102aaba6ce40eed2c"},
+ {file = "structlog-25.4.0.tar.gz", hash = "sha256:186cd1b0a8ae762e29417095664adf1d6a31702160a46dacb7796ea82f7409e4"},
]
+[package.dependencies]
+typing-extensions = {version = "*", markers = "python_version < \"3.11\""}
+
[[package]]
name = "tiktoken"
version = "0.11.0"
@@ -2835,6 +2638,49 @@ files = [
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
]
+[[package]]
+name = "tomli"
+version = "2.2.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.8"
+groups = ["dev"]
+markers = "python_version < \"3.11\""
+files = [
+ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
+ {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
+ {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
+ {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
+ {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
+ {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
+ {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
+ {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
+ {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
+ {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
+ {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
+ {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
+ {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
+ {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
+ {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
+ {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
+ {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
+ {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
+]
+
[[package]]
name = "tqdm"
version = "4.67.1"
@@ -2884,6 +2730,24 @@ files = [
[package.dependencies]
typing-extensions = ">=4.12.0"
+[[package]]
+name = "urllib3"
+version = "1.26.20"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
+groups = ["main", "dev"]
+markers = "python_version < \"3.10\""
+files = [
+ {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"},
+ {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"},
+]
+
+[package.extras]
+brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
[[package]]
name = "urllib3"
version = "2.5.0"
@@ -2891,6 +2755,7 @@ description = "HTTP library with thread-safe connection pooling, file post, and
optional = false
python-versions = ">=3.9"
groups = ["main", "dev"]
+markers = "python_version >= \"3.10\""
files = [
{file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"},
{file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"},
@@ -2917,6 +2782,7 @@ files = [
[package.dependencies]
click = ">=7.0"
h11 = ">=0.8"
+typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
[package.extras]
standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
@@ -3191,5 +3057,5 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
-python-versions = "^3.11"
-content-hash = "f2b3913ac70cfb499bdfaab2f11ad61438db4b24d6c8d757d4838f62ed9d6bb0"
+python-versions = "^3.9"
+content-hash = "7ffcadeaaf02192474b0e6a6a9e4492603d7a97391e3bcbd166f7b7b684c22d3"
diff --git a/sdk/pyproject.toml b/sdk/pyproject.toml
index 47db074d71..fd19935298 100644
--- a/sdk/pyproject.toml
+++ b/sdk/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.62.0"
+version = "0.59.3"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = [
@@ -23,7 +23,7 @@ exclude = [
]
[tool.poetry.dependencies]
-python = "^3.11"
+python = "^3.9"
starlette = "^0.47.0"
fastapi = "^0.116.0"
pydantic = "^2"
@@ -32,24 +32,20 @@ importlib-metadata = ">=8.0.0,<9.0"
httpx = "^0.28.0"
pyyaml = "^6.0.2"
toml = "^0.10.2"
-litellm = "==1.78.7"
+litellm = "==1.76.0"
jinja2 = "^3.1.6"
-python-jsonpath = "^2.0.0"
opentelemetry-api = "^1.27.0"
opentelemetry-sdk = "^1.27.0"
opentelemetry-instrumentation = ">=0.56b0"
opentelemetry-exporter-otlp-proto-http ="^1.27.0"
structlog = "^25.2.0"
huggingface-hub = "<0.31.0"
-restrictedpython = { version = "^8.0", python = ">=3.11,<3.14" }
# audit fixes
h11 = "^0.16.0"
decorator = "^5.2.1"
-openai = ">=1.106.0"
+openai = "^1.106.0"
tiktoken = "0.11.0"
-google-auth = ">=2.23,<3"
-
[tool.poetry.group.dev.dependencies]
posthog = "^3.1.0"
diff --git a/sdk/tests/legacy/annotations/agenta_instrumented.py b/sdk/tests/legacy/annotations/agenta_instrumented.py
index 52d3e331e9..f4573a7c9e 100644
--- a/sdk/tests/legacy/annotations/agenta_instrumented.py
+++ b/sdk/tests/legacy/annotations/agenta_instrumented.py
@@ -2,7 +2,7 @@
from typing import List, Dict
import litellm
from agenta.sdk.litellm import mockllm
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
# Set up mockllm to use litellm
mockllm.litellm = litellm
@@ -97,9 +97,9 @@ def search_docs(
@ag.instrument()
async def llm(query: str, results: List[Dict]):
- # Set the mock in the routing context to use the 'hello' mock
+ # Set the mock in the serving context to use the 'hello' mock
# You can replace 'hello' with any mock defined in the MOCKS dictionary
- ctx = RoutingContext.get()
+ ctx = serving_context.get()
ctx.mock = "hello"
config = Config()
@@ -133,7 +133,7 @@ async def generate(query: str):
# import uvicorn
# uvicorn.run(
- # "agenta.sdk.decorators.routing:app", host="0.0.0.0", port=803, reload=True
+ # "agenta.sdk.decorators.serving:app", host="0.0.0.0", port=803, reload=True
# )
import asyncio
diff --git a/sdk/tests/legacy/baggage/config.toml b/sdk/tests/legacy/baggage/config.toml
index ff48c26621..d5a5f01895 100644
--- a/sdk/tests/legacy/baggage/config.toml
+++ b/sdk/tests/legacy/baggage/config.toml
@@ -1,4 +1,4 @@
app_name = "baggage"
app_id = "0193b67a-b673-7919-85c2-0b5b0a2183d3"
backend_host = "http://localhost"
-api_key = "..."
+api_key = "XELnjVve.xxxx"
diff --git a/sdk/tests/legacy/custom_workflows/chat_custom.py b/sdk/tests/legacy/custom_workflows/chat_custom.py
index 45ae1c1089..2a4019f5a7 100644
--- a/sdk/tests/legacy/custom_workflows/chat_custom.py
+++ b/sdk/tests/legacy/custom_workflows/chat_custom.py
@@ -2,7 +2,7 @@
from typing import List, Dict
import litellm
from agenta.sdk.litellm import mockllm
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
# Set up mockllm to use litellm
mockllm.litellm = litellm
@@ -93,9 +93,9 @@ def search_docs(
@ag.instrument()
async def llm(query: str, results: List[Dict]):
- # Set the mock in the routing context to use the 'hello' mock
+ # Set the mock in the serving context to use the 'hello' mock
# You can replace 'hello' with any mock defined in the MOCKS dictionary
- ctx = RoutingContext.get()
+ ctx = serving_context.get()
ctx.mock = "hello"
config = ag.ConfigManager.get_from_route(Config)
@@ -127,5 +127,5 @@ async def generate(context: str, messages: List[Message]):
import uvicorn
uvicorn.run(
- "agenta.sdk.decorators.routing:app", host="0.0.0.0", port=804, reload=True
+ "agenta.sdk.decorators.serving:app", host="0.0.0.0", port=804, reload=True
)
diff --git a/sdk/tests/legacy/custom_workflows/completion_custom.py b/sdk/tests/legacy/custom_workflows/completion_custom.py
index efeefbbca1..c03349d247 100644
--- a/sdk/tests/legacy/custom_workflows/completion_custom.py
+++ b/sdk/tests/legacy/custom_workflows/completion_custom.py
@@ -2,7 +2,7 @@
from typing import List, Dict
import litellm
from agenta.sdk.litellm import mockllm
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
# Set up mockllm to use litellm
mockllm.litellm = litellm
@@ -93,9 +93,9 @@ def search_docs(
@ag.instrument()
async def llm(query: str, results: List[Dict]):
- # Set the mock in the routing context to use the 'hello' mock
+ # Set the mock in the serving context to use the 'hello' mock
# You can replace 'hello' with any mock defined in the MOCKS dictionary
- ctx = RoutingContext.get()
+ ctx = serving_context.get()
ctx.mock = "hello"
config = ag.ConfigManager.get_from_route(Config)
@@ -126,5 +126,5 @@ async def generate(query: str):
import uvicorn
uvicorn.run(
- "agenta.sdk.decorators.routing:app", host="0.0.0.0", port=803, reload=True
+ "agenta.sdk.decorators.serving:app", host="0.0.0.0", port=803, reload=True
)
diff --git a/sdk/tests/legacy/custom_workflows/exception_workflow.py b/sdk/tests/legacy/custom_workflows/exception_workflow.py
index 78c876afc1..8563f16296 100644
--- a/sdk/tests/legacy/custom_workflows/exception_workflow.py
+++ b/sdk/tests/legacy/custom_workflows/exception_workflow.py
@@ -2,7 +2,7 @@
from typing import List, Dict
import litellm
from agenta.sdk.litellm import mockllm
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
# Set up mockllm to use litellm
mockllm.litellm = litellm
@@ -91,9 +91,9 @@ def search_docs(
async def llm(query: str, results: List[Dict]):
- # Set the mock in the routing context to use the 'hello' mock
+ # Set the mock in the serving context to use the 'hello' mock
# You can replace 'hello' with any mock defined in the MOCKS dictionary
- ctx = RoutingContext.get()
+ ctx = serving_context.get()
ctx.mock = "hello"
config = ag.ConfigManager.get_from_route(Config)
@@ -123,5 +123,5 @@ async def generate(query: str):
import uvicorn
uvicorn.run(
- "agenta.sdk.decorators.routing:app", host="0.0.0.0", port=803, reload=True
+ "agenta.sdk.decorators.serving:app", host="0.0.0.0", port=803, reload=True
)
diff --git a/sdk/tests/legacy/custom_workflows/noinstrument_custom.py b/sdk/tests/legacy/custom_workflows/noinstrument_custom.py
index ffdeeb3a6c..c090db89af 100644
--- a/sdk/tests/legacy/custom_workflows/noinstrument_custom.py
+++ b/sdk/tests/legacy/custom_workflows/noinstrument_custom.py
@@ -2,7 +2,7 @@
from typing import List, Dict
import litellm
from agenta.sdk.litellm import mockllm
-from agenta.sdk.contexts.routing import RoutingContext
+from agenta.sdk.context.serving import serving_context
# Set up mockllm to use litellm
mockllm.litellm = litellm
@@ -91,9 +91,9 @@ def search_docs(
async def llm(query: str, results: List[Dict]):
- # Set the mock in the routing context to use the 'hello' mock
+ # Set the mock in the serving context to use the 'hello' mock
# You can replace 'hello' with any mock defined in the MOCKS dictionary
- ctx = RoutingContext.get()
+ ctx = serving_context.get()
ctx.mock = "hello"
config = ag.ConfigManager.get_from_route(Config)
@@ -132,5 +132,5 @@ async def generate(query: str):
import uvicorn
uvicorn.run(
- "agenta.sdk.decorators.routing:app", host="0.0.0.0", port=803, reload=True
+ "agenta.sdk.decorators.serving:app", host="0.0.0.0", port=803, reload=True
)
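
The five test-file diffs above apply one mechanical rename: `agenta.sdk.contexts.routing.RoutingContext` becomes `agenta.sdk.context.serving.serving_context`, and the uvicorn entrypoint moves from `agenta.sdk.decorators.routing:app` to `agenta.sdk.decorators.serving:app`. A minimal sketch of the new-style mock selection, using only the imports and calls that appear in these diffs (the `"hello"` mock name is the one the tests use; calling `serving_context.get()` outside a live serving request is an assumption here):

```python
import litellm

from agenta.sdk.litellm import mockllm
from agenta.sdk.context.serving import serving_context

# Route the mock LLM through litellm, exactly as the test workflows do.
mockllm.litellm = litellm


def select_mock() -> None:
    # Fetch the current serving context and pick the 'hello' mock;
    # the tests call this inside an @ag.instrument()-ed coroutine.
    ctx = serving_context.get()
    ctx.mock = "hello"
```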
diff --git a/sdk/tests/legacy/debugging/simple-app/config.toml b/sdk/tests/legacy/debugging/simple-app/config.toml
index 4fe4679ba2..7c2a204758 100644
--- a/sdk/tests/legacy/debugging/simple-app/config.toml
+++ b/sdk/tests/legacy/debugging/simple-app/config.toml
@@ -1,6 +1,6 @@
app_name = "asdf"
app_id = "0193bbaa-4f2b-7510-9170-9bdf95249ca0"
backend_host = "https://cloud.agenta.ai"
-api_key = "..."
+api_key = "dWdKluoL.xxxx"
variants = []
variant_ids = []
diff --git a/sdk/tests/legacy/management/conftest.py b/sdk/tests/legacy/management/conftest.py
index 295627dbb6..a5074791a6 100644
--- a/sdk/tests/legacy/management/conftest.py
+++ b/sdk/tests/legacy/management/conftest.py
@@ -67,7 +67,7 @@ async def fetch_completion_template(fetch_templates):
def get_random_name():
- return f"completion_{uuid.uuid4().hex[:8]}"
+ return f"completion_app_{uuid.uuid4().hex[:8]}"
@pytest_asyncio.fixture(scope="session")
diff --git a/sdk/tests/legacy/new_tests/conftest.py b/sdk/tests/legacy/new_tests/conftest.py
index 7a3782a6bd..2c10a62443 100644
--- a/sdk/tests/legacy/new_tests/conftest.py
+++ b/sdk/tests/legacy/new_tests/conftest.py
@@ -937,20 +937,7 @@ def get_all_supported_models():
"gpt-4o-mini",
"gpt-4-1106-preview",
],
- "Gemini": [
- "gemini/gemini-2.5-flash-preview-05-20",
- "gemini/gemini-2.5-flash-preview-04-17",
- "gemini/gemini-2.0-flash-001",
- "gemini/gemini-2.5-pro-preview-05-06",
- "gemini/gemini-2.0-flash-lite-preview-02-05",
- "gemini/gemini-2.5-pro",
- "gemini/gemini-2.5-flash",
- "gemini/gemini-2.5-flash-preview-09-2025",
- "gemini/gemini-2.5-flash-lite",
- "gemini/gemini-2.5-flash-lite-preview-09-2025",
- "gemini/gemini-2.0-flash",
- "gemini/gemini-2.0-flash-lite",
- ],
+ "Gemini": ["gemini/gemini-1.5-pro-latest", "gemini/gemini-1.5-flash"],
"Cohere": [
"cohere/command-light",
"cohere/command-r-plus",
diff --git a/sdk/tests/manual/workflows/interface.py b/sdk/tests/manual/workflows/interface.py
index 4630426e2c..8b14d1dfc3 100644
--- a/sdk/tests/manual/workflows/interface.py
+++ b/sdk/tests/manual/workflows/interface.py
@@ -1,4 +1,4 @@
-from typing import Callable, Union
+from typing import Callable
from asyncio import run as run_async
from time import time_ns
from uuid import uuid4
@@ -9,21 +9,19 @@
from aiohttp import ClientSession
from fastapi import FastAPI
-from agenta.sdk.models.workflows import (
+from agenta.sdk.workflows.types import (
WorkflowRevision,
WorkflowRevisionData,
WorkflowServiceRequest,
WorkflowServiceResponse,
WorkflowServiceInterface,
- WorkflowServiceRequestData,
- WorkflowServiceRequestData,
- WorkflowServiceResponseData,
+ WorkflowServiceData,
Status,
Data,
)
from agenta.sdk.workflows.utils import parse_service_uri
-from agenta.sdk.workflows.handlers import exact_match_v1
+from agenta.sdk.workflows.registry import exact_match_v1
import agenta as ag
@@ -77,12 +75,10 @@ async def wrapper(*args, **kwargs):
from agenta.sdk.workflows.types import (
WorkflowRevision,
WorkflowRevisionData,
- WorkflowServiceRequestData,
- WorkflowServiceRequestData,
- WorkflowServiceResponseData,
WorkflowServiceRequest,
WorkflowServiceResponse,
WorkflowServiceInterface,
+ WorkflowServiceData,
Status,
Data,
)
@@ -117,7 +113,7 @@ async def local_call(
code=200,
message="Success",
),
- data=WorkflowServiceRequestData(
+ data=WorkflowServiceData(
outputs=outputs,
# trace=
),
@@ -229,7 +225,7 @@ async def run_script_locally(
*,
workflow_service_request: WorkflowServiceRequest,
workflow_revision_data: WorkflowRevisionData,
-) -> Union[Data, str]:
+) -> Data | str:
actual_script = (
TEST_SCRIPT
+ "\n"
@@ -294,7 +290,7 @@ async def local_call(
code=200,
message="Success",
),
- data=WorkflowServiceRequestData(
+ data=WorkflowServiceData(
outputs=outputs,
# trace=
),
@@ -319,7 +315,7 @@ async def workflow_decorator_wrapper(
return workflow_decorator_wrapper
-HANDLER_REGISTRY = {
+REGISTRY = {
"agenta": {
"function": {
"exact_match": {
@@ -388,7 +384,7 @@ async def invoke(
)
handler = (
- HANDLER_REGISTRY.get(service_provider, {})
+ REGISTRY.get(service_provider, {})
.get(service_kind, {})
.get(service_key, {})
.get(service_version, None)
@@ -469,7 +465,7 @@ async def test_local_function_workflow_by_value():
# create the workflow request
workflow_service_request = WorkflowServiceRequest(
- data=WorkflowServiceRequestData(
+ data=WorkflowServiceData(
inputs=TEST_INPUTS,
outputs=TEST_OUTPUTS,
)
@@ -516,7 +512,7 @@ async def test_remote_function_workflow_by_value():
# create the workflow request
workflow_service_request = WorkflowServiceRequest(
- data=WorkflowServiceRequestData(
+ data=WorkflowServiceData(
inputs=TEST_INPUTS,
outputs=TEST_OUTPUTS,
)
@@ -571,7 +567,7 @@ async def test_code_workflow_by_value():
# create the workflow request
workflow_service_request = WorkflowServiceRequest(
- data=WorkflowServiceRequestData(
+ data=WorkflowServiceData(
inputs=TEST_INPUTS,
outputs=TEST_OUTPUTS,
)
@@ -617,7 +613,7 @@ async def test_hook_workflow_by_value_direct():
# create the workflow request
workflow_service_request = WorkflowServiceRequest(
- data=WorkflowServiceRequestData(
+ data=WorkflowServiceData(
inputs=TEST_INPUTS,
outputs=TEST_OUTPUTS,
)
diff --git a/sdk/tests/manual/workflows/sdk_test.py b/sdk/tests/manual/workflows/sdk_test.py
index eec99d3879..6b7ddac476 100644
--- a/sdk/tests/manual/workflows/sdk_test.py
+++ b/sdk/tests/manual/workflows/sdk_test.py
@@ -79,13 +79,12 @@ async def exact_match_v1(
from asyncio import run as run_async
-from agenta.sdk.models.workflows import (
+from agenta.sdk.workflows.types import (
WorkflowRevision,
WorkflowRevisionData,
- WorkflowServiceRequestData,
- WorkflowServiceResponseData,
WorkflowServiceRequest,
WorkflowServiceResponse,
+ WorkflowServiceData,
Status,
Data,
)
@@ -119,7 +118,7 @@ async def main():
# create the workflow request
workflow_service_request = WorkflowServiceRequest(
flags={"is_annotation": True},
- data=WorkflowServiceRequestData(
+ data=WorkflowServiceData(
inputs=TEST_INPUTS,
outputs=TEST_OUTPUTS,
),
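
The import churn in `interface.py` and `sdk_test.py` reflects two renames: the workflow models move from `agenta.sdk.models.workflows` to `agenta.sdk.workflows.types`, and the separate `WorkflowServiceRequestData`/`WorkflowServiceResponseData` types collapse into a single `WorkflowServiceData` used for both directions. A minimal sketch of building a request under the new names, assuming only the constructor arguments shown in the diffs (the input/output payloads here are placeholders standing in for the tests' `TEST_INPUTS`/`TEST_OUTPUTS`):

```python
from agenta.sdk.workflows.types import (
    WorkflowServiceRequest,
    WorkflowServiceData,
)

# Placeholder payloads; the real tests define TEST_INPUTS / TEST_OUTPUTS.
inputs = {"query": "What is the capital of France?"}
outputs = {"answer": "Paris"}

# flags={"is_annotation": True} is taken verbatim from the diff above.
request = WorkflowServiceRequest(
    flags={"is_annotation": True},
    data=WorkflowServiceData(
        inputs=inputs,
        outputs=outputs,
    ),
)
```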
diff --git a/sdk/tests/unit/README.md b/sdk/tests/unit/README.md
index 9ff5d12981..8ffcf22e83 100644
--- a/sdk/tests/unit/README.md
+++ b/sdk/tests/unit/README.md
@@ -18,7 +18,8 @@ poetry run pytest tests/unit/test_tracing_decorators.py::TestGeneratorTracing -v
## Test Organization
- **`conftest.py`** - Shared fixtures and test configuration
-- **`test_*.py`** - Individual test modules
+- **`test_tracing_decorators.py`** - Tests for @instrument() decorator functionality
+- **`test_prompt_template.py`** - Tests for PromptTemplate class template formatting
- **`TESTING_PATTERNS.md`** - Common testing approaches and patterns
## Prerequisites
diff --git a/sdk/tests/unit/TESTING_PATTERNS.md b/sdk/tests/unit/TESTING_PATTERNS.md
index ce14f1f467..7c9dea3fed 100644
--- a/sdk/tests/unit/TESTING_PATTERNS.md
+++ b/sdk/tests/unit/TESTING_PATTERNS.md
@@ -81,7 +81,7 @@ poetry run pytest tests/unit/test_tracing_decorators.py::TestGeneratorTracing -v
## Test Data Patterns
-### Simple Testcases
+### Simple Test Cases
```python
# Basic generator
def simple_generator():
@@ -92,7 +92,7 @@ def simple_generator():
# Expected result: ["first", "second", "third"]
```
-### Complex Testcases
+### Complex Test Cases
```python
# Generator with return value
def generator_with_return():
@@ -158,7 +158,7 @@ poetry run pytest tests/unit/ -v
## Extending Tests
-### Adding New Testcases
+### Adding New Test Cases
1. **Choose appropriate test class**:
- `TestExistingFunctionality`: For regression tests
diff --git a/sdk/tests/unit/test_prompt_template.py b/sdk/tests/unit/test_prompt_template.py
new file mode 100644
index 0000000000..a2c4d071b9
--- /dev/null
+++ b/sdk/tests/unit/test_prompt_template.py
@@ -0,0 +1,442 @@
+"""
+Unit tests for PromptTemplate class template formatting functionality.
+
+This module tests the _format_with_template method in the PromptTemplate class
+from agenta.sdk.types. The PromptTemplate is a core SDK type used for managing
+LLM prompts with variable substitution.
+
+Test Coverage:
+--------------
+✅ Curly format basic replacement
+✅ Multiple variable replacement
+✅ Missing variables raise TemplateFormatError
+✅ User input containing {{}} is preserved (AGE-2946 fix)
+✅ Self-referential values work ({{x}} = "{{x}}")
+✅ Cross-referential values (single-pass replacement)
+✅ Backslash sequences are preserved
+✅ Regex metacharacters in variable names
+✅ Complete format() method workflow
+✅ Input validation with input_keys
+✅ Message content formatting
+✅ F-string format basic tests
+✅ Jinja2 format basic tests
+
+Why These Tests Matter:
+-----------------------
+The PromptTemplate class is used by SDK users to create reusable prompt
+templates. These edge cases were discovered through production bugs:
+
+1. LLM responses with backslash sequences caused regex errors
+2. Users couldn't ask questions about template syntax (AGE-2946)
+3. Variable names with special characters failed to match
+4. Self-referential values caused false positive errors
+
+The tests ensure SDK users have a robust template system.
+
+Test Architecture:
+------------------
+Tests are organized into classes by functionality:
+- TestPromptTemplateFormatWithTemplate: Core formatting logic
+- TestPromptTemplateFormat: Complete format() method workflow
+- TestPromptTemplateEdgeCases: Special scenarios and edge cases
+"""
+
+import pytest
+from agenta.sdk.types import (
+ PromptTemplate,
+ Message,
+ TemplateFormatError,
+ InputValidationError,
+)
+
+
+class TestPromptTemplateFormatWithTemplate:
+ """Tests for the _format_with_template method (curly format)."""
+
+ def setup_method(self):
+ """Set up test fixtures."""
+ self.template = PromptTemplate(
+ messages=[Message(role="user", content="{{input}}")],
+ template_format="curly",
+ )
+
+ def test_simple_variable_replacement(self):
+ """Single variable is replaced correctly."""
+ result = self.template._format_with_template("Hello {{name}}!", {"name": "Alice"})
+ assert result == "Hello Alice!"
+
+ def test_multiple_variable_replacement(self):
+ """Multiple variables are replaced correctly."""
+ result = self.template._format_with_template(
+ "{{greeting}} {{name}}, you are {{age}} years old",
+ {"greeting": "Hi", "name": "Bob", "age": "30"},
+ )
+ assert result == "Hi Bob, you are 30 years old"
+
+ def test_missing_variable_raises_template_error(self):
+ """Missing template variable raises TemplateFormatError."""
+ with pytest.raises(TemplateFormatError, match="Unreplaced variables"):
+ self.template._format_with_template(
+ "Hello {{name}}, age {{age}}", {"name": "Charlie"}
+ )
+
+ def test_user_input_with_curly_braces(self):
+ """User input containing {{}} is preserved (AGE-2946 fix)."""
+ result = self.template._format_with_template(
+ "Answer: {{question}}",
+ {"question": "What does {{variable}} mean?"},
+ )
+ assert result == "Answer: What does {{variable}} mean?"
+ assert "{{variable}}" in result
+
+ def test_self_referential_value(self):
+ """Self-referential value works ({{x}} = '{{x}}')."""
+ result = self.template._format_with_template("Value: {{x}}", {"x": "{{x}}"})
+ assert result == "Value: {{x}}"
+
+ def test_cross_referential_values(self):
+ """Cross-referential values handled by single-pass replacement."""
+ result = self.template._format_with_template(
+ "{{a}} and {{b}}", {"a": "{{b}}", "b": "{{a}}"}
+ )
+ assert result == "{{b}} and {{a}}"
+
+ def test_user_discussing_template_variable_name(self):
+ """User input can contain same variable name as template."""
+ result = self.template._format_with_template(
+ "User {{name}} said: {{message}}",
+ {"name": "Alice", "message": "I use {{name}} in my templates"},
+ )
+ assert result == "User Alice said: I use {{name}} in my templates"
+
+ def test_backslash_sequences_preserved(self):
+ """Backslash sequences in values are preserved."""
+ result = self.template._format_with_template(
+ "Path: {{path}}", {"path": "C:\\Users\\Documents"}
+ )
+ assert result == "Path: C:\\Users\\Documents"
+
+ def test_hex_escape_sequences(self):
+ """Hex escape sequences are preserved (original bug)."""
+ result = self.template._format_with_template(
+ "Color: {{color}}", {"color": "\\x1b[31mRed\\x1b[0m"}
+ )
+ assert result == "Color: \\x1b[31mRed\\x1b[0m"
+
+ def test_newline_tab_sequences(self):
+ """Newline and tab sequences are preserved."""
+ result = self.template._format_with_template(
+ "Text: {{text}}", {"text": "Line1\\nLine2\\tTabbed"}
+ )
+ assert result == "Text: Line1\\nLine2\\tTabbed"
+
+ def test_regex_metacharacters_in_variable_name(self):
+ """Variable names with regex special characters work."""
+ result = self.template._format_with_template(
+ "Value: {{my.var}}", {"my.var": "test"}
+ )
+ assert result == "Value: test"
+
+ def test_variable_with_brackets(self):
+ """Variable names with brackets work."""
+ result = self.template._format_with_template(
+ "First: {{data[0]}}", {"data[0]": "first"}
+ )
+ assert result == "First: first"
+
+ def test_variable_with_special_chars(self):
+ """Variable names with various special characters work."""
+ result = self.template._format_with_template(
+ "Price: {{price$}}", {"price$": "99.99"}
+ )
+ assert result == "Price: 99.99"
+
+ def test_empty_template(self):
+ """Template with no variables works."""
+ result = self.template._format_with_template("Just plain text", {})
+ assert result == "Just plain text"
+
+ def test_extra_inputs_ignored(self):
+ """Extra inputs that aren't in template are ignored."""
+ result = self.template._format_with_template(
+ "Hello {{name}}",
+ {"name": "Alice", "age": "25", "city": "NYC"},
+ )
+ assert result == "Hello Alice"
+
+ def test_same_variable_multiple_times(self):
+ """Same variable used multiple times is replaced consistently."""
+ result = self.template._format_with_template(
+ "{{name}} and {{name}} and {{name}}", {"name": "Bob"}
+ )
+ assert result == "Bob and Bob and Bob"
+
+ def test_unc_path_double_backslash(self):
+ """UNC paths with double backslashes work."""
+ result = self.template._format_with_template(
+ "Server: {{server}}", {"server": "\\\\Server\\Share"}
+ )
+ assert result == "Server: \\\\Server\\Share"
+
+ def test_json_with_escapes(self):
+ """JSON strings with escape sequences work."""
+ json_str = '{"message": "Hello\\nWorld", "code": "\\t\\tindented"}'
+ result = self.template._format_with_template("JSON: {{json}}", {"json": json_str})
+ assert result == f"JSON: {json_str}"
+
+
+class TestPromptTemplateFormat:
+ """Tests for the complete format() method workflow."""
+
+ def test_format_simple_message(self):
+ """Basic format() workflow with single message."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="Hello {{name}}!")],
+ template_format="curly",
+ )
+
+ formatted = template.format(name="Alice")
+
+ assert len(formatted.messages) == 1
+ assert formatted.messages[0].content == "Hello Alice!"
+ assert formatted.messages[0].role == "user"
+
+ def test_format_multiple_messages(self):
+ """Format() works with multiple messages."""
+ template = PromptTemplate(
+ messages=[
+ Message(role="system", content="You are {{role}}"),
+ Message(role="user", content="{{question}}"),
+ ],
+ template_format="curly",
+ )
+
+ formatted = template.format(role="helpful assistant", question="What is AI?")
+
+ assert len(formatted.messages) == 2
+ assert formatted.messages[0].content == "You are helpful assistant"
+ assert formatted.messages[1].content == "What is AI?"
+
+ def test_format_with_input_keys_validation(self):
+ """Input validation works when input_keys is set."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="{{input}}")],
+ template_format="curly",
+ input_keys=["input"],
+ )
+
+ # Valid input
+ formatted = template.format(input="test")
+ assert formatted.messages[0].content == "test"
+
+ # Missing input
+ with pytest.raises(InputValidationError, match="Missing required inputs"):
+ template.format()
+
+ # Extra input
+ with pytest.raises(InputValidationError, match="Unexpected inputs"):
+ template.format(input="test", extra="value")
+
+ def test_format_without_input_keys_accepts_any(self):
+ """Without input_keys set, any inputs are accepted."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="{{input}}")],
+ template_format="curly",
+ # input_keys not set
+ )
+
+ # Extra inputs are fine when input_keys is None
+ formatted = template.format(input="test", extra="ignored")
+ assert formatted.messages[0].content == "test"
+
+ def test_format_preserves_message_attributes(self):
+ """Format preserves all message attributes."""
+ template = PromptTemplate(
+ messages=[
+ Message(
+ role="user",
+ content="{{input}}",
+ name="test_user",
+ )
+ ],
+ template_format="curly",
+ )
+
+ formatted = template.format(input="hello")
+ assert formatted.messages[0].role == "user"
+ assert formatted.messages[0].name == "test_user"
+ assert formatted.messages[0].content == "hello"
+
+ def test_format_with_user_input_containing_curly_braces(self):
+ """Complete format() workflow with user input containing {{}}."""
+ template = PromptTemplate(
+ messages=[
+ Message(role="system", content="You are a helpful assistant"),
+ Message(role="user", content="{{question}}"),
+ ],
+ template_format="curly",
+ )
+
+ formatted = template.format(question="How do I use {{variable}} in templates?")
+
+ assert formatted.messages[1].content == "How do I use {{variable}} in templates?"
+ assert "{{variable}}" in formatted.messages[1].content
+
+ def test_format_error_includes_message_index(self):
+ """Format error includes which message failed."""
+ template = PromptTemplate(
+ messages=[
+ Message(role="system", content="OK"),
+ Message(role="user", content="{{missing}}"),
+ ],
+ template_format="curly",
+ )
+
+ with pytest.raises(TemplateFormatError, match="Error in message 1"):
+ template.format()
+
+
+class TestPromptTemplateFStringFormat:
+ """Tests for f-string template format."""
+
+ def test_fstring_simple_replacement(self):
+ """F-string format works for simple replacement."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="Hello {name}!")],
+ template_format="fstring",
+ )
+
+ formatted = template.format(name="Alice")
+ assert formatted.messages[0].content == "Hello Alice!"
+
+ def test_fstring_missing_key_raises_error(self):
+ """F-string format raises error for missing keys."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="Hello {name}!")],
+ template_format="fstring",
+ )
+
+ with pytest.raises(TemplateFormatError):
+ template.format(other="value")
+
+
+class TestPromptTemplateJinja2Format:
+ """Tests for Jinja2 template format."""
+
+ def test_jinja2_simple_replacement(self):
+ """Jinja2 format works for simple replacement."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="Hello {{ name }}!")],
+ template_format="jinja2",
+ )
+
+ formatted = template.format(name="Alice")
+ assert formatted.messages[0].content == "Hello Alice!"
+
+ def test_jinja2_with_filter(self):
+ """Jinja2 format works with filters."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="Hello {{ name|upper }}!")],
+ template_format="jinja2",
+ )
+
+ formatted = template.format(name="alice")
+ assert formatted.messages[0].content == "Hello ALICE!"
+
+
+class TestPromptTemplateEdgeCases:
+ """Edge cases and special scenarios."""
+
+ def test_numeric_values(self):
+ """Numeric values are converted to strings."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="Age: {{age}}, Score: {{score}}")],
+ template_format="curly",
+ )
+
+ formatted = template.format(age=25, score=95.5)
+ assert formatted.messages[0].content == "Age: 25, Score: 95.5"
+
+ def test_boolean_values(self):
+ """Boolean values are converted to strings."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="Active: {{active}}")],
+ template_format="curly",
+ )
+
+ formatted = template.format(active=True)
+ assert formatted.messages[0].content == "Active: True"
+
+ def test_none_value(self):
+ """None value is converted to string 'None'."""
+ template = PromptTemplate(
+ messages=[Message(role="user", content="Value: {{val}}")],
+ template_format="curly",
+ )
+
+ formatted = template.format(val=None)
+ assert formatted.messages[0].content == "Value: None"
+
+ def test_multiline_content(self):
+ """Multiline message content works correctly."""
+ template = PromptTemplate(
+ messages=[
+ Message(
+ role="system",
+ content="""You are a helpful assistant.
+Please answer {{question}}.""",
+ )
+ ],
+ template_format="curly",
+ )
+
+ formatted = template.format(question="carefully")
+ assert "carefully" in formatted.messages[0].content
+ assert "\\n" not in formatted.messages[0].content # Real newline, not escaped
+
+ def test_empty_message_content(self):
+ """Empty message content is handled."""
+ template = PromptTemplate(
+ messages=[
+ Message(role="system", content=None),
+ Message(role="user", content="{{input}}"),
+ ],
+ template_format="curly",
+ )
+
+ formatted = template.format(input="test")
+ assert formatted.messages[0].content is None
+ assert formatted.messages[1].content == "test"
+
+ def test_realistic_llm_prompt_template(self):
+ """Realistic LLM prompt template with multiple variables."""
+ template = PromptTemplate(
+ messages=[
+ Message(
+ role="system",
+ content="You are a {{role}}. Be {{tone}} in your responses.",
+ ),
+ Message(
+ role="user",
+ content="""Context: {{context}}
+
+Question: {{question}}
+
+Please provide a detailed answer.""",
+ ),
+ ],
+ template_format="curly",
+ input_keys=["role", "tone", "context", "question"],
+ )
+
+ formatted = template.format(
+ role="helpful assistant",
+ tone="friendly and concise",
+ context="The user is learning about Python",
+ question="What are decorators?",
+ )
+
+ assert "helpful assistant" in formatted.messages[0].content
+ assert "friendly and concise" in formatted.messages[0].content
+ assert "Python" in formatted.messages[1].content
+ assert "decorators" in formatted.messages[1].content
diff --git a/sdk/tests/unit/test_tracing_decorators.py b/sdk/tests/unit/test_tracing_decorators.py
index 67ffc59da5..9f49597216 100644
--- a/sdk/tests/unit/test_tracing_decorators.py
+++ b/sdk/tests/unit/test_tracing_decorators.py
@@ -8,7 +8,7 @@
-----------------
The tests are organized into two main classes:
-1. TestExistingFunctionality: Regression tests ensuring that existing sync/async
+1. TestExistingFunctionality: Regression tests ensuring that existing sync/async
function tracing continues to work without issues after generator support was added.
2. TestGeneratorTracing: Comprehensive tests for the new generator tracing functionality,
@@ -32,7 +32,7 @@
Coverage:
---------
✅ Sync function tracing (regression)
-✅ Async function tracing (regression)
+✅ Async function tracing (regression)
✅ Exception handling for sync/async functions (regression)
✅ Parameter handling and complex return types (regression)
✅ Sync generator tracing
diff --git a/services/chat/ee/docker/Dockerfile.dev b/services/chat/ee/docker/Dockerfile.dev
deleted file mode 100644
index 40a0e7981e..0000000000
--- a/services/chat/ee/docker/Dockerfile.dev
+++ /dev/null
@@ -1,18 +0,0 @@
-FROM python:3.11-slim
-
-ARG ROOT_PATH=/
-ENV ROOT_PATH=${ROOT_PATH}
-
-WORKDIR /app/
-
-RUN pip install --upgrade pip
-
-COPY ./requirements.txt /app/requirements.txt
-
-RUN pip install -r requirements.txt
-
-COPY ./oss /app/oss/
-
-ENV PYTHONPATH=/sdk:$PYTHONPATH
-
-EXPOSE 80
diff --git a/services/chat/ee/docker/Dockerfile.gh b/services/chat/ee/docker/Dockerfile.gh
index 5cc58a9410..7e2351a555 100644
--- a/services/chat/ee/docker/Dockerfile.gh
+++ b/services/chat/ee/docker/Dockerfile.gh
@@ -1,4 +1,4 @@
-FROM python:3.11-slim
+FROM python:3.10-slim
ARG ROOT_PATH=/
ENV ROOT_PATH=${ROOT_PATH}
@@ -13,6 +13,6 @@ RUN pip install -r requirements.txt
COPY ./oss /app/oss/
-#
+ENV PYTHONPATH=/sdk:$PYTHONPATH
EXPOSE 80
diff --git a/services/chat/oss/docker/Dockerfile.gh b/services/chat/oss/docker/Dockerfile.gh
index 5cc58a9410..7e2351a555 100644
--- a/services/chat/oss/docker/Dockerfile.gh
+++ b/services/chat/oss/docker/Dockerfile.gh
@@ -1,4 +1,4 @@
-FROM python:3.11-slim
+FROM python:3.10-slim
ARG ROOT_PATH=/
ENV ROOT_PATH=${ROOT_PATH}
@@ -13,6 +13,6 @@ RUN pip install -r requirements.txt
COPY ./oss /app/oss/
-#
+ENV PYTHONPATH=/sdk:$PYTHONPATH
EXPOSE 80
diff --git a/services/chat/requirements.txt b/services/chat/requirements.txt
index 2d1c54bf2e..1ae14a1dd8 100644
--- a/services/chat/requirements.txt
+++ b/services/chat/requirements.txt
@@ -1,12 +1,9 @@
agenta
uvicorn
boto3
+litellm==1.76.0
structlog
-
-# test
-google-auth>=2.23,<3
-google-cloud-aiplatform>=1.38
+huggingface-hub<0.31.0
# temporary
-decorator
-restrictedpython
\ No newline at end of file
+decorator
\ No newline at end of file
diff --git a/services/completion/ee/docker/Dockerfile.dev b/services/completion/ee/docker/Dockerfile.dev
deleted file mode 100644
index 40a0e7981e..0000000000
--- a/services/completion/ee/docker/Dockerfile.dev
+++ /dev/null
@@ -1,18 +0,0 @@
-FROM python:3.11-slim
-
-ARG ROOT_PATH=/
-ENV ROOT_PATH=${ROOT_PATH}
-
-WORKDIR /app/
-
-RUN pip install --upgrade pip
-
-COPY ./requirements.txt /app/requirements.txt
-
-RUN pip install -r requirements.txt
-
-COPY ./oss /app/oss/
-
-ENV PYTHONPATH=/sdk:$PYTHONPATH
-
-EXPOSE 80
diff --git a/services/completion/ee/docker/Dockerfile.gh b/services/completion/ee/docker/Dockerfile.gh
index 5cc58a9410..7e2351a555 100644
--- a/services/completion/ee/docker/Dockerfile.gh
+++ b/services/completion/ee/docker/Dockerfile.gh
@@ -1,4 +1,4 @@
-FROM python:3.11-slim
+FROM python:3.10-slim
ARG ROOT_PATH=/
ENV ROOT_PATH=${ROOT_PATH}
@@ -13,6 +13,6 @@ RUN pip install -r requirements.txt
COPY ./oss /app/oss/
-#
+ENV PYTHONPATH=/sdk:$PYTHONPATH
EXPOSE 80
diff --git a/services/completion/oss/docker/Dockerfile.gh b/services/completion/oss/docker/Dockerfile.gh
index 5cc58a9410..7e2351a555 100644
--- a/services/completion/oss/docker/Dockerfile.gh
+++ b/services/completion/oss/docker/Dockerfile.gh
@@ -1,4 +1,4 @@
-FROM python:3.11-slim
+FROM python:3.10-slim
ARG ROOT_PATH=/
ENV ROOT_PATH=${ROOT_PATH}
@@ -13,6 +13,6 @@ RUN pip install -r requirements.txt
COPY ./oss /app/oss/
-#
+ENV PYTHONPATH=/sdk:$PYTHONPATH
EXPOSE 80
diff --git a/services/completion/requirements.txt b/services/completion/requirements.txt
index 2d1c54bf2e..1ae14a1dd8 100644
--- a/services/completion/requirements.txt
+++ b/services/completion/requirements.txt
@@ -1,12 +1,9 @@
agenta
uvicorn
boto3
+litellm==1.76.0
structlog
-
-# test
-google-auth>=2.23,<3
-google-cloud-aiplatform>=1.38
+huggingface-hub<0.31.0
# temporary
-decorator
-restrictedpython
\ No newline at end of file
+decorator
\ No newline at end of file
diff --git a/web/ee/public/assets/Agenta-logo-full-dark-accent.png b/web/ee/public/assets/Agenta-logo-full-dark-accent.png
deleted file mode 100644
index c14833dab1..0000000000
Binary files a/web/ee/public/assets/Agenta-logo-full-dark-accent.png and /dev/null differ
diff --git a/web/ee/public/assets/Agenta-logo-full-light.png b/web/ee/public/assets/Agenta-logo-full-light.png
deleted file mode 100644
index 4c9b31a813..0000000000
Binary files a/web/ee/public/assets/Agenta-logo-full-light.png and /dev/null differ
diff --git a/web/ee/public/assets/dark-complete-transparent-CROPPED.png b/web/ee/public/assets/dark-complete-transparent-CROPPED.png
new file mode 100644
index 0000000000..7d134ac59a
Binary files /dev/null and b/web/ee/public/assets/dark-complete-transparent-CROPPED.png differ
diff --git a/web/ee/public/assets/dark-complete-transparent_white_logo.png b/web/ee/public/assets/dark-complete-transparent_white_logo.png
new file mode 100644
index 0000000000..8685bbf981
Binary files /dev/null and b/web/ee/public/assets/dark-complete-transparent_white_logo.png differ
diff --git a/web/ee/public/assets/dark-logo.svg b/web/ee/public/assets/dark-logo.svg
new file mode 100644
index 0000000000..6cb8ef3330
--- /dev/null
+++ b/web/ee/public/assets/dark-logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/web/ee/public/assets/favicon.ico b/web/ee/public/assets/favicon.ico
index dad02fe072..4dc8619b1d 100644
Binary files a/web/ee/public/assets/favicon.ico and b/web/ee/public/assets/favicon.ico differ
diff --git a/web/ee/public/assets/light-complete-transparent-CROPPED.png b/web/ee/public/assets/light-complete-transparent-CROPPED.png
new file mode 100644
index 0000000000..6be2e99e08
Binary files /dev/null and b/web/ee/public/assets/light-complete-transparent-CROPPED.png differ
diff --git a/web/ee/public/assets/light-logo.svg b/web/ee/public/assets/light-logo.svg
new file mode 100644
index 0000000000..9c795f8e88
--- /dev/null
+++ b/web/ee/public/assets/light-logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/web/oss/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx b/web/ee/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx
similarity index 100%
rename from web/oss/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx
rename to web/ee/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx
diff --git a/web/oss/src/components/DeleteEvaluationModal/types.ts b/web/ee/src/components/DeleteEvaluationModal/types.ts
similarity index 100%
rename from web/oss/src/components/DeleteEvaluationModal/types.ts
rename to web/ee/src/components/DeleteEvaluationModal/types.ts
diff --git a/web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx b/web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx
index 3b6f9cdb32..d596e2bc42 100644
--- a/web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx
+++ b/web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx
@@ -15,7 +15,7 @@ import {
fetchAllDeploymentRevisions,
} from "@/oss/services/deploymentVersioning/api"
-import {DeploymentRevisionConfig, DeploymentRevisions} from "@agenta/oss/src/lib/types_ee"
+import {DeploymentRevisionConfig, DeploymentRevisions} from "../../lib/types_ee"
dayjs.extend(relativeTime)
dayjs.extend(duration)
diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx
similarity index 81%
rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx
rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx
index 06f82f0a14..748597c436 100644
--- a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx
+++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx
@@ -5,7 +5,7 @@ import {useRouter} from "next/router"
import EvalRunOverviewViewerSkeleton from "../../components/EvalRunOverviewViewer/assets/EvalRunOverviewViewerSkeleton"
import EvalRunHeaderSkeleton from "../components/EvalRunHeader/assets/EvalRunHeaderSkeleton"
import EvalRunPromptConfigViewerSkeleton from "../components/EvalRunPromptConfigViewer/assets/EvalRunPromptConfigViewerSkeleton"
-import EvalRunTestcaseViewerSkeleton from "../components/EvalRunTestcaseViewer/assets/EvalRunTestcaseViewerSkeleton"
+import EvalRunTestCaseViewerSkeleton from "../components/EvalRunTestCaseViewer/assets/EvalRunTestCaseViewerSkeleton"
const AutoEvalRunSkeleton = () => {
const router = useRouter()
@@ -14,8 +14,8 @@ const AutoEvalRunSkeleton = () => {
return (
- {viewType === "testcases" ? (
-
+ {viewType === "test-cases" ? (
+
) : viewType === "prompt" ? (
) : (
diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx
similarity index 55%
rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx
rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx
index 9c964e467b..c67397dbce 100644
--- a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx
+++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx
@@ -31,13 +31,6 @@ interface EvalNameTagProps extends TagProps {
onlyShowBasePin?: boolean
popoverProps?: PopoverProps
allowVariantNavigation?: boolean
- appContext?: {
- appId?: string
- appName?: string
- variantName?: string
- revisionLabel?: string
- isOnlineEval?: boolean
- }
}
const EvalNameTag = ({
run,
@@ -48,7 +41,6 @@ const EvalNameTag = ({
className,
popoverProps,
allowVariantNavigation = true,
- appContext,
...props
}: EvalNameTagProps) => {
const router = useRouter()
@@ -151,107 +143,87 @@ const EvalNameTag = ({
bordered={false}
className="bg-[#0517290F] hover:bg-[#05172916]"
>
- {run?.id
- ? run?.id.split("-")[run?.id.split("-").length - 1]
- : ""}
+ {run?.id.split("-")[run?.id.split("-").length - 1]}
- {appContext?.isOnlineEval ? null : (
-