diff --git a/.gitignore b/.gitignore index 0c4b3800f5..d70de45982 100644 --- a/.gitignore +++ b/.gitignore @@ -47,4 +47,5 @@ sdk/agenta/templates/agenta.py web/ee/public/__env.js web/oss/public/__env.js -web/oss/tests/datalayer/results \ No newline at end of file +web/oss/tests/datalayer/results +.* \ No newline at end of file diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py index a704ca9826..2c5a241acc 100644 --- a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py +++ b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py @@ -56,8 +56,7 @@ def create_default_project_for_workspaces(session: Connection): for workspace in workspaces: # Create a new default project for each workspace get_or_create_workspace_default_project( - session=session, - workspace=workspace, # type: ignore + session=session, workspace=workspace # type: ignore ) # Commit the changes for the current batch diff --git a/api/ee/databases/postgres/migrations/core/utils.py b/api/ee/databases/postgres/migrations/core/utils.py index 4691a38ec0..206e46db64 100644 --- a/api/ee/databases/postgres/migrations/core/utils.py +++ b/api/ee/databases/postgres/migrations/core/utils.py @@ -73,9 +73,7 @@ async def get_current_migration_head_from_db(engine: AsyncEngine): async with engine.connect() as connection: try: - result = await connection.execute( - text("SELECT version_num FROM alembic_version") - ) # type: ignore + result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore except (asyncpg.exceptions.UndefinedTableError, ProgrammingError): # Note: If the alembic_version table does not exist, it will result in raising an UndefinedTableError exception. # We need to suppress the error and return a list with the alembic_version table name to inform the user that there is a pending migration \ @@ -85,9 +83,9 @@ async def get_current_migration_head_from_db(engine: AsyncEngine): return "alembic_version" migration_heads = [row[0] for row in result.fetchall()] - assert len(migration_heads) == 1, ( - "There can only be one migration head stored in the database." - ) + assert ( + len(migration_heads) == 1 + ), "There can only be one migration head stored in the database." return migration_heads[0] diff --git a/api/ee/databases/postgres/migrations/tracing/utils.py b/api/ee/databases/postgres/migrations/tracing/utils.py index f0d62a3c3d..15f3e66b5f 100644 --- a/api/ee/databases/postgres/migrations/tracing/utils.py +++ b/api/ee/databases/postgres/migrations/tracing/utils.py @@ -66,9 +66,7 @@ async def get_current_migration_head_from_db(engine: AsyncEngine): async with engine.connect() as connection: try: - result = await connection.execute( - text("SELECT version_num FROM alembic_version") - ) # type: ignore + result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore except (asyncpg.exceptions.UndefinedTableError, ProgrammingError): # Note: If the alembic_version table does not exist, it will result in raising an UndefinedTableError exception. 
# We need to suppress the error and return a list with the alembic_version table name to inform the user that there is a pending migration \ @@ -78,9 +76,9 @@ async def get_current_migration_head_from_db(engine: AsyncEngine): return "alembic_version" migration_heads = [row[0] for row in result.fetchall()] - assert len(migration_heads) == 1, ( - "There can only be one migration head stored in the database." - ) + assert ( + len(migration_heads) == 1 + ), "There can only be one migration head stored in the database." return migration_heads[0] diff --git a/api/ee/docker/Dockerfile.dev b/api/ee/docker/Dockerfile.dev index 2074141a18..814cbec3ff 100644 --- a/api/ee/docker/Dockerfile.dev +++ b/api/ee/docker/Dockerfile.dev @@ -34,8 +34,8 @@ RUN cat -A /etc/cron.d/meters-cron RUN chmod +x /meters.sh \ && chmod 0644 /etc/cron.d/meters-cron -COPY ./oss/src/crons/queries.sh /queries.sh -COPY ./oss/src/crons/queries.txt /etc/cron.d/queries-cron +COPY ./ee/src/crons/queries.sh /queries.sh +COPY ./ee/src/crons/queries.txt /etc/cron.d/queries-cron RUN sed -i -e '$a\' /etc/cron.d/queries-cron RUN cat -A /etc/cron.d/queries-cron diff --git a/api/ee/docker/Dockerfile.gh b/api/ee/docker/Dockerfile.gh index ab3a06b2ff..c3652a59df 100644 --- a/api/ee/docker/Dockerfile.gh +++ b/api/ee/docker/Dockerfile.gh @@ -34,8 +34,8 @@ RUN cat -A /etc/cron.d/meters-cron RUN chmod +x /meters.sh \ && chmod 0644 /etc/cron.d/meters-cron -COPY ./oss/src/crons/queries.sh /queries.sh -COPY ./oss/src/crons/queries.txt /etc/cron.d/queries-cron +COPY ./ee/src/crons/queries.sh /queries.sh +COPY ./ee/src/crons/queries.txt /etc/cron.d/queries-cron RUN sed -i -e '$a\' /etc/cron.d/queries-cron RUN cat -A /etc/cron.d/queries-cron diff --git a/api/ee/src/apis/fastapi/billing/router.py b/api/ee/src/apis/fastapi/billing/router.py index ff8dbaf476..7ac23142c5 100644 --- a/api/ee/src/apis/fastapi/billing/router.py +++ b/api/ee/src/apis/fastapi/billing/router.py @@ -35,7 +35,7 @@ stripe.api_key = environ.get("STRIPE_API_KEY") -MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xFF:02x}" for ele in range(40, -1, -8)) +MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xff:02x}" for ele in range(40, -1, -8)) STRIPE_WEBHOOK_SECRET = environ.get("STRIPE_WEBHOOK_SECRET") STRIPE_TARGET = environ.get("STRIPE_TARGET") or MAC_ADDRESS AGENTA_PRICING = loads(environ.get("AGENTA_PRICING") or "{}") @@ -824,13 +824,12 @@ async def create_portal_user_route( self, request: Request, ): - if is_ee(): - if not await check_action_access( - user_uid=request.state.user_id, - project_id=request.state.project_id, - permission=Permission.EDIT_BILLING, - ): - return FORBIDDEN_RESPONSE + if not await check_action_access( + user_uid=request.state.user_id, + project_id=request.state.project_id, + permission=Permission.EDIT_BILLING, + ): + return FORBIDDEN_RESPONSE return await self.create_portal( organization_id=request.state.organization_id, @@ -852,13 +851,12 @@ async def create_checkout_user_route( plan: Plan = Query(...), success_url: str = Query(...), # find a way to make this optional or moot ): - if is_ee(): - if not await check_action_access( - user_uid=request.state.user_id, - project_id=request.state.project_id, - permission=Permission.EDIT_BILLING, - ): - return FORBIDDEN_RESPONSE + if not await check_action_access( + user_uid=request.state.user_id, + project_id=request.state.project_id, + permission=Permission.EDIT_BILLING, + ): + return FORBIDDEN_RESPONSE return await self.create_checkout( organization_id=request.state.organization_id, @@ -884,13 +882,12 @@ async def 
fetch_plan_user_route( self, request: Request, ): - if is_ee(): - if not await check_action_access( - user_uid=request.state.user_id, - project_id=request.state.project_id, - permission=Permission.VIEW_BILLING, - ): - return FORBIDDEN_RESPONSE + if not await check_action_access( + user_uid=request.state.user_id, + project_id=request.state.project_id, + permission=Permission.VIEW_BILLING, + ): + return FORBIDDEN_RESPONSE return await self.fetch_plans( organization_id=request.state.organization_id, @@ -902,13 +899,12 @@ async def switch_plans_user_route( request: Request, plan: Plan = Query(...), ): - if is_ee(): - if not await check_action_access( - user_uid=request.state.user_id, - project_id=request.state.project_id, - permission=Permission.EDIT_BILLING, - ): - return FORBIDDEN_RESPONSE + if not await check_action_access( + user_uid=request.state.user_id, + project_id=request.state.project_id, + permission=Permission.EDIT_BILLING, + ): + return FORBIDDEN_RESPONSE return await self.switch_plans( organization_id=request.state.organization_id, @@ -931,13 +927,12 @@ async def fetch_subscription_user_route( self, request: Request, ): - if is_ee(): - if not await check_action_access( - user_uid=request.state.user_id, - project_id=request.state.project_id, - permission=Permission.VIEW_BILLING, - ): - return FORBIDDEN_RESPONSE + if not await check_action_access( + user_uid=request.state.user_id, + project_id=request.state.project_id, + permission=Permission.VIEW_BILLING, + ): + return FORBIDDEN_RESPONSE return await self.fetch_subscription( organization_id=request.state.organization_id, @@ -948,13 +943,12 @@ async def cancel_subscription_user_route( self, request: Request, ): - if is_ee(): - if not await check_action_access( - user_uid=request.state.user_id, - project_id=request.state.project_id, - permission=Permission.EDIT_BILLING, - ): - return FORBIDDEN_RESPONSE + if not await check_action_access( + user_uid=request.state.user_id, + project_id=request.state.project_id, + permission=Permission.EDIT_BILLING, + ): + return FORBIDDEN_RESPONSE return await self.cancel_subscription( organization_id=request.state.organization_id, @@ -974,13 +968,12 @@ async def fetch_usage_user_route( self, request: Request, ): - if is_ee(): - if not await check_action_access( - user_uid=request.state.user_id, - project_id=request.state.project_id, - permission=Permission.VIEW_BILLING, - ): - return FORBIDDEN_RESPONSE + if not await check_action_access( + user_uid=request.state.user_id, + project_id=request.state.project_id, + permission=Permission.VIEW_BILLING, + ): + return FORBIDDEN_RESPONSE return await self.fetch_usage( organization_id=request.state.organization_id, diff --git a/api/ee/src/core/subscriptions/service.py b/api/ee/src/core/subscriptions/service.py index 30e026eded..f69adcbd74 100644 --- a/api/ee/src/core/subscriptions/service.py +++ b/api/ee/src/core/subscriptions/service.py @@ -25,7 +25,7 @@ stripe.api_key = environ.get("STRIPE_SECRET_KEY") -MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xFF:02x}" for ele in range(40, -1, -8)) +MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xff:02x}" for ele in range(40, -1, -8)) STRIPE_TARGET = environ.get("STRIPE_TARGET") or MAC_ADDRESS AGENTA_PRICING = loads(environ.get("AGENTA_PRICING") or "{}") diff --git a/api/oss/src/crons/queries.sh b/api/ee/src/crons/queries.sh similarity index 100% rename from api/oss/src/crons/queries.sh rename to api/ee/src/crons/queries.sh diff --git a/api/oss/src/crons/queries.txt b/api/ee/src/crons/queries.txt similarity index 
100% rename from api/oss/src/crons/queries.txt rename to api/ee/src/crons/queries.txt diff --git a/api/oss/src/tasks/__init__.py b/api/ee/src/dbs/postgres/shared/__init__.py similarity index 100% rename from api/oss/src/tasks/__init__.py rename to api/ee/src/dbs/postgres/shared/__init__.py diff --git a/api/ee/src/main.py b/api/ee/src/main.py index 036bda6f0f..86d8ecf618 100644 --- a/api/ee/src/main.py +++ b/api/ee/src/main.py @@ -2,7 +2,12 @@ from oss.src.utils.logging import get_module_logger -from ee.src.routers import workspace_router, organization_router +from ee.src.routers import ( + workspace_router, + organization_router, + evaluation_router, + human_evaluation_router, +) from ee.src.dbs.postgres.meters.dao import MetersDAO from ee.src.dbs.postgres.subscriptions.dao import SubscriptionsDAO @@ -66,11 +71,29 @@ def extend_main(app: FastAPI): prefix="/workspaces", ) + app.include_router( + evaluation_router.router, + prefix="/evaluations", + tags=["Evaluations"], + ) + + app.include_router( + human_evaluation_router.router, + prefix="/human-evaluations", + tags=["Human-Evaluations"], + ) + # -------------------------------------------------------------------------- return app +def load_tasks(): + import ee.src.tasks.evaluations.live + import ee.src.tasks.evaluations.legacy + import ee.src.tasks.evaluations.batch + + def extend_app_schema(app: FastAPI): app.openapi()["info"]["title"] = "Agenta API" app.openapi()["info"]["description"] = "Agenta API" diff --git a/api/ee/src/models/db_models.py b/api/ee/src/models/db_models.py index b05b633659..b5a4c194da 100644 --- a/api/ee/src/models/db_models.py +++ b/api/ee/src/models/db_models.py @@ -252,3 +252,267 @@ class ProjectMemberDB(Base): class DeploymentDB(OssDeploymentDB): pass + + +class HumanEvaluationVariantDB(Base): + __tablename__ = "human_evaluation_variants" + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + human_evaluation_id = Column( + UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE") + ) + variant_id = Column( + UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL") + ) + variant_revision_id = Column( + UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL") + ) + + variant = relationship("AppVariantDB", backref="evaluation_variant") + variant_revision = relationship( + "AppVariantRevisionsDB", backref="evaluation_variant_revision" + ) + + +class HumanEvaluationDB(Base): + __tablename__ = "human_evaluations" + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE")) + project_id = Column( + UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE") + ) + status = Column(String) + evaluation_type = Column(String) + testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id")) + created_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + updated_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + + testset = relationship("TestsetDB") + evaluation_variant = relationship( + "HumanEvaluationVariantDB", + cascade=CASCADE_ALL_DELETE, + backref="human_evaluation", + ) + evaluation_scenario = relationship( + "HumanEvaluationScenarioDB", + cascade=CASCADE_ALL_DELETE, + backref="evaluation_scenario", + ) + + +class HumanEvaluationScenarioDB(Base): + __tablename__ = 
"human_evaluations_scenarios" + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + project_id = Column( + UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE") + ) + evaluation_id = Column( + UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE") + ) + inputs = Column( + mutable_json_type(dbtype=JSONB, nested=True) + ) # List of HumanEvaluationScenarioInput + outputs = Column( + mutable_json_type(dbtype=JSONB, nested=True) + ) # List of HumanEvaluationScenarioOutput + vote = Column(String) + score = Column(String) + correct_answer = Column(String) + created_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + updated_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + is_pinned = Column(Boolean) + note = Column(String) + + +class EvaluationAggregatedResultDB(Base): + __tablename__ = "auto_evaluation_aggregated_results" + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + evaluation_id = Column( + UUID(as_uuid=True), ForeignKey("auto_evaluations.id", ondelete="CASCADE") + ) + evaluator_config_id = Column( + UUID(as_uuid=True), + ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"), + ) + result = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result + + evaluator_config = relationship("EvaluatorConfigDB", backref="evaluator_config") + + +class EvaluationScenarioResultDB(Base): + __tablename__ = "auto_evaluation_scenario_results" + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + evaluation_scenario_id = Column( + UUID(as_uuid=True), + ForeignKey("auto_evaluation_scenarios.id", ondelete="CASCADE"), + ) + evaluator_config_id = Column( + UUID(as_uuid=True), + ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"), + ) + result = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result + + +class EvaluationDB(Base): + __tablename__ = "auto_evaluations" + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE")) + project_id = Column( + UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE") + ) + status = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result + testset_id = Column( + UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL") + ) + variant_id = Column( + UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL") + ) + variant_revision_id = Column( + UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL") + ) + average_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result + total_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result + average_latency = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result + created_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + updated_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + + project = relationship("ee.src.models.db_models.ProjectDB") + testset = relationship("TestsetDB") + variant = relationship("AppVariantDB") + variant_revision = relationship("AppVariantRevisionsDB") + aggregated_results = relationship( + "EvaluationAggregatedResultDB", + cascade=CASCADE_ALL_DELETE, + backref="evaluation", 
+ ) + evaluation_scenarios = relationship( + "EvaluationScenarioDB", cascade=CASCADE_ALL_DELETE, backref="evaluation" + ) + evaluator_configs = relationship( + "EvaluationEvaluatorConfigDB", + cascade=CASCADE_ALL_DELETE, + backref="evaluation", + ) + + +class EvaluationEvaluatorConfigDB(Base): + __tablename__ = "auto_evaluation_evaluator_configs" + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + evaluation_id = Column( + UUID(as_uuid=True), + ForeignKey("auto_evaluations.id", ondelete="CASCADE"), + primary_key=True, + ) + evaluator_config_id = Column( + UUID(as_uuid=True), + ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"), + primary_key=True, + ) + + +class EvaluationScenarioDB(Base): + __tablename__ = "auto_evaluation_scenarios" + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + project_id = Column( + UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE") + ) + evaluation_id = Column( + UUID(as_uuid=True), ForeignKey("auto_evaluations.id", ondelete="CASCADE") + ) + variant_id = Column( + UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL") + ) + inputs = Column( + mutable_json_type(dbtype=JSONB, nested=True) + ) # List of EvaluationScenarioInput + outputs = Column( + mutable_json_type(dbtype=JSONB, nested=True) + ) # List of EvaluationScenarioOutput + correct_answers = Column( + mutable_json_type(dbtype=JSONB, nested=True) + ) # List of CorrectAnswer + is_pinned = Column(Boolean) + note = Column(String) + latency = Column(Integer) + cost = Column(Integer) + created_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + updated_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + + project = relationship("ee.src.models.db_models.ProjectDB") + variant = relationship("AppVariantDB") + results = relationship( + "EvaluationScenarioResultDB", + cascade=CASCADE_ALL_DELETE, + backref="evaluation_scenario", + ) diff --git a/api/oss/src/routers/evaluation_router.py b/api/ee/src/routers/evaluation_router.py similarity index 96% rename from api/oss/src/routers/evaluation_router.py rename to api/ee/src/routers/evaluation_router.py index cac2b06523..a01679d161 100644 --- a/api/oss/src/routers/evaluation_router.py +++ b/api/ee/src/routers/evaluation_router.py @@ -7,10 +7,10 @@ from oss.src.utils.logging import get_module_logger from oss.src.utils.caching import get_cache, set_cache -from oss.src.services import converters -from oss.src.services import evaluation_service +from ee.src.services import converters +from ee.src.services import evaluation_service -from oss.src.tasks.evaluations.legacy import ( +from ee.src.tasks.evaluations.legacy import ( setup_evaluation, annotate, ) @@ -21,6 +21,7 @@ NewEvaluation, DeleteEvaluation, ) +from ee.src.services import db_manager_ee from oss.src.services import app_manager, db_manager if is_ee(): @@ -81,7 +82,7 @@ async def fetch_evaluation_ids( {"detail": error_msg}, status_code=403, ) - evaluations = await db_manager.fetch_evaluations_by_resource( + evaluations = await db_manager_ee.fetch_evaluations_by_resource( resource_type, request.state.project_id, resource_ids, @@ -135,7 +136,7 @@ async def fetch_evaluation_status( status_code=403, ) - evaluation_status = await db_manager.fetch_evaluation_status_by_id( + evaluation_status = await db_manager_ee.fetch_evaluation_status_by_id( 
project_id=request.state.project_id, evaluation_id=evaluation_id, ) @@ -169,7 +170,7 @@ async def fetch_evaluation_results( _type_: _description_ """ - evaluation = await db_manager.fetch_evaluation_by_id( + evaluation = await db_manager_ee.fetch_evaluation_by_id( project_id=request.state.project_id, evaluation_id=evaluation_id, ) @@ -214,7 +215,7 @@ async def fetch_evaluation_scenarios( List[EvaluationScenario]: A list of evaluation scenarios. """ - evaluation = await db_manager.fetch_evaluation_by_id( + evaluation = await db_manager_ee.fetch_evaluation_by_id( project_id=request.state.project_id, evaluation_id=evaluation_id, ) @@ -297,7 +298,7 @@ async def fetch_evaluation( Evaluation: The fetched evaluation. """ - evaluation = await db_manager.fetch_evaluation_by_id( + evaluation = await db_manager_ee.fetch_evaluation_by_id( project_id=request.state.project_id, evaluation_id=evaluation_id, ) @@ -342,7 +343,7 @@ async def delete_evaluations( A list of the deleted comparison tables' IDs. """ - evaluation = await db_manager.fetch_evaluation_by_id( + evaluation = await db_manager_ee.fetch_evaluation_by_id( project_id=request.state.project_id, evaluation_id=payload.evaluations_ids[0], ) @@ -394,7 +395,7 @@ async def fetch_evaluation_scenarios_comparison_results( """ evaluations_ids_list = evaluations_ids.split(",") - evaluation = await db_manager.fetch_evaluation_by_id( + evaluation = await db_manager_ee.fetch_evaluation_by_id( project_id=request.state.project_id, evaluation_id=evaluations_ids_list[0], ) diff --git a/api/oss/src/routers/human_evaluation_router.py b/api/ee/src/routers/human_evaluation_router.py similarity index 93% rename from api/oss/src/routers/human_evaluation_router.py rename to api/ee/src/routers/human_evaluation_router.py index 681f7588bf..eb8e7e27f8 100644 --- a/api/oss/src/routers/human_evaluation_router.py +++ b/api/ee/src/routers/human_evaluation_router.py @@ -2,10 +2,11 @@ from fastapi import HTTPException, Body, Request, status, Response from oss.src.utils.logging import get_module_logger -from oss.src.services import converters +from ee.src.services import converters from oss.src.services import db_manager -from oss.src.services import results_service -from oss.src.services import evaluation_service +from ee.src.services import db_manager_ee +from ee.src.services import results_service +from ee.src.services import evaluation_service from oss.src.utils.common import APIRouter, is_ee from oss.src.models.api.evaluation_model import ( DeleteEvaluation, @@ -18,7 +19,7 @@ NewHumanEvaluation, SimpleEvaluationOutput, ) -from oss.src.services.evaluation_service import ( +from ee.src.services.evaluation_service import ( update_human_evaluation_scenario, update_human_evaluation_service, ) @@ -129,7 +130,7 @@ async def fetch_human_evaluation( HumanEvaluation: The fetched evaluation. """ - human_evaluation = await db_manager.fetch_human_evaluation_by_id(evaluation_id) + human_evaluation = await db_manager_ee.fetch_human_evaluation_by_id(evaluation_id) if not human_evaluation: raise HTTPException(status_code=404, detail="Evaluation not found") @@ -170,7 +171,7 @@ async def fetch_human_evaluation_scenarios( List[EvaluationScenario]: A list of evaluation scenarios. 
""" - human_evaluation = await db_manager.fetch_human_evaluation_by_id(evaluation_id) + human_evaluation = await db_manager_ee.fetch_human_evaluation_by_id(evaluation_id) if human_evaluation is None: raise HTTPException( status_code=404, @@ -215,7 +216,9 @@ async def update_human_evaluation( """ try: - human_evaluation = await db_manager.fetch_human_evaluation_by_id(evaluation_id) + human_evaluation = await db_manager_ee.fetch_human_evaluation_by_id( + evaluation_id + ) if not human_evaluation: raise HTTPException(status_code=404, detail="Evaluation not found") @@ -261,7 +264,7 @@ async def update_evaluation_scenario_router( None: 204 No Content status code upon successful update. """ - evaluation_scenario_db = await db_manager.fetch_human_evaluation_scenario_by_id( + evaluation_scenario_db = await db_manager_ee.fetch_human_evaluation_scenario_by_id( evaluation_scenario_id ) if evaluation_scenario_db is None: @@ -306,7 +309,7 @@ async def get_evaluation_scenario_score_router( Dictionary containing the scenario ID and its score. """ - evaluation_scenario = db_manager.fetch_evaluation_scenario_by_id( + evaluation_scenario = db_manager_ee.fetch_evaluation_scenario_by_id( evaluation_scenario_id ) if evaluation_scenario is None: @@ -349,7 +352,7 @@ async def update_evaluation_scenario_score_router( None: 204 No Content status code upon successful update. """ - evaluation_scenario = await db_manager.fetch_evaluation_scenario_by_id( + evaluation_scenario = await db_manager_ee.fetch_evaluation_scenario_by_id( evaluation_scenario_id ) if evaluation_scenario is None: @@ -392,7 +395,7 @@ async def fetch_results( _description_ """ - evaluation = await db_manager.fetch_human_evaluation_by_id(evaluation_id) + evaluation = await db_manager_ee.fetch_human_evaluation_by_id(evaluation_id) if evaluation is None: raise HTTPException( status_code=404, @@ -437,7 +440,7 @@ async def delete_evaluations( A list of the deleted comparison tables' IDs. """ - evaluation = await db_manager.fetch_human_evaluation_by_id( + evaluation = await db_manager_ee.fetch_human_evaluation_by_id( payload.evaluations_ids[0] ) if is_ee(): diff --git a/api/ee/src/services/aggregation_service.py b/api/ee/src/services/aggregation_service.py new file mode 100644 index 0000000000..55a14e5f8f --- /dev/null +++ b/api/ee/src/services/aggregation_service.py @@ -0,0 +1,135 @@ +import re +import traceback +from typing import List, Optional + +from oss.src.models.shared_models import InvokationResult, Result, Error + + +def aggregate_ai_critique(results: List[Result]) -> Result: + """Aggregates the results for the ai critique evaluation. + + Args: + results (List[Result]): list of result objects + + Returns: + Result: aggregated result + """ + + try: + numeric_scores = [] + for result in results: + # Extract the first number found in the result value + match = re.search(r"\d+", result.value) + if match: + try: + score = int(match.group()) + numeric_scores.append(score) + except ValueError: + # Ignore if the extracted value is not an integer + continue + + # Calculate the average of numeric scores if any are present + average_value = ( + sum(numeric_scores) / len(numeric_scores) if numeric_scores else None + ) + return Result( + type="number", + value=average_value, + ) + except Exception as exc: + return Result( + type="error", + value=None, + error=Error(message=str(exc), stacktrace=str(traceback.format_exc())), + ) + + +def aggregate_binary(results: List[Result]) -> Result: + """Aggregates the results for the binary (auto regex) evaluation. 
+ + Args: + results (List[Result]): list of result objects + + Returns: + Result: aggregated result + """ + + if all(isinstance(result.value, bool) for result in results): + average_value = sum(int(result.value) for result in results) / len(results) + else: + average_value = None + return Result(type="number", value=average_value) + + +def aggregate_float(results: List[Result]) -> Result: + """Aggregates the results for evaluations aside from auto regex and ai critique. + + Args: + results (List[Result]): list of result objects + + Returns: + Result: aggregated result + """ + + try: + average_value = sum(result.value for result in results) / len(results) + return Result(type="number", value=average_value) + except Exception as exc: + return Result( + type="error", + value=None, + error=Error(message=str(exc), stacktrace=str(traceback.format_exc())), + ) + + +def aggregate_float_from_llm_app_response( + invocation_results: List[InvokationResult], key: Optional[str] +) -> Result: + try: + if not key: + raise ValueError("Key is required to aggregate InvokationResult objects.") + + values = [ + getattr(inv_result, key) + for inv_result in invocation_results + if hasattr(inv_result, key) and getattr(inv_result, key) is not None + ] + + if not values: + return Result(type=key, value=None) + + average_value = sum(values) / len(values) + return Result(type=key, value=average_value) + except Exception as exc: + return Result( + type="error", + value=None, + error=Error(message=str(exc), stacktrace=str(traceback.format_exc())), + ) + + +def sum_float_from_llm_app_response( + invocation_results: List[InvokationResult], key: Optional[str] +) -> Result: + try: + if not key: + raise ValueError("Key is required to aggregate InvokationResult objects.") + + values = [ + getattr(inv_result, key) + for inv_result in invocation_results + if hasattr(inv_result, key) and getattr(inv_result, key) is not None + ] + + if not values: + return Result(type=key, value=None) + + total_value = sum(values) + + return Result(type=key, value=total_value) + except Exception as exc: + return Result( + type="error", + value=None, + error=Error(message=str(exc), stacktrace=str(traceback.format_exc())), + ) diff --git a/api/ee/src/services/converters.py b/api/ee/src/services/converters.py index 2bfc1d330b..5b120899fc 100644 --- a/api/ee/src/services/converters.py +++ b/api/ee/src/services/converters.py @@ -3,9 +3,28 @@ from datetime import datetime, timezone from oss.src.services import db_manager +from oss.src.models.api.evaluation_model import ( + CorrectAnswer, + Evaluation, + HumanEvaluation, + EvaluationScenario, + SimpleEvaluationOutput, + EvaluationScenarioInput, + HumanEvaluationScenario, + EvaluationScenarioOutput, +) from ee.src.services import db_manager_ee -from ee.src.models.api.workspace_models import WorkspaceRole, WorkspaceResponse +from ee.src.models.api.workspace_models import ( + WorkspaceRole, + WorkspaceResponse, +) from ee.src.models.shared_models import Permission +from ee.src.models.db_models import ( + EvaluationDB, + HumanEvaluationDB, + EvaluationScenarioDB, + HumanEvaluationScenarioDB, +) from oss.src.models.db_models import WorkspaceDB @@ -130,3 +149,173 @@ def get_all_workspace_permissions_by_role(role_name: str) -> Dict[str, List[Any] getattr(WorkspaceRole, role_name.upper()) ) return workspace_permissions + + +async def human_evaluation_db_to_simple_evaluation_output( + human_evaluation_db: HumanEvaluationDB, +) -> SimpleEvaluationOutput: + evaluation_variants = await 
db_manager_ee.fetch_human_evaluation_variants( + human_evaluation_id=str(human_evaluation_db.id) + ) + return SimpleEvaluationOutput( + id=str(human_evaluation_db.id), + app_id=str(human_evaluation_db.app_id), + project_id=str(human_evaluation_db.project_id), + status=human_evaluation_db.status, # type: ignore + evaluation_type=human_evaluation_db.evaluation_type, # type: ignore + variant_ids=[ + str(evaluation_variant.variant_id) + for evaluation_variant in evaluation_variants + ], + ) + + +async def evaluation_db_to_pydantic( + evaluation_db: EvaluationDB, +) -> Evaluation: + variant_name = ( + evaluation_db.variant.variant_name + if evaluation_db.variant.variant_name + else str(evaluation_db.variant_id) + ) + aggregated_results = aggregated_result_of_evaluation_to_pydantic( + evaluation_db.aggregated_results + ) + + return Evaluation( + id=str(evaluation_db.id), + app_id=str(evaluation_db.app_id), + project_id=str(evaluation_db.project_id), + status=evaluation_db.status, + variant_ids=[str(evaluation_db.variant_id)], + variant_revision_ids=[str(evaluation_db.variant_revision_id)], + revisions=[str(evaluation_db.variant_revision.revision)], + variant_names=[variant_name], + testset_id=str(evaluation_db.testset_id), + testset_name=evaluation_db.testset.name, + aggregated_results=aggregated_results, + created_at=str(evaluation_db.created_at), + updated_at=str(evaluation_db.updated_at), + average_cost=evaluation_db.average_cost, + total_cost=evaluation_db.total_cost, + average_latency=evaluation_db.average_latency, + ) + + +async def human_evaluation_db_to_pydantic( + evaluation_db: HumanEvaluationDB, +) -> HumanEvaluation: + evaluation_variants = await db_manager_ee.fetch_human_evaluation_variants( + human_evaluation_id=str(evaluation_db.id) # type: ignore + ) + + revisions = [] + variants_ids = [] + variants_names = [] + variants_revision_ids = [] + for evaluation_variant in evaluation_variants: + variant_name = ( + evaluation_variant.variant.variant_name + if isinstance(evaluation_variant.variant_id, uuid.UUID) + else str(evaluation_variant.variant_id) + ) + variants_names.append(str(variant_name)) + variants_ids.append(str(evaluation_variant.variant_id)) + variant_revision = ( + str(evaluation_variant.variant_revision.revision) + if isinstance(evaluation_variant.variant_revision_id, uuid.UUID) + else "None" + ) + revisions.append(variant_revision) + variants_revision_ids.append(str(evaluation_variant.variant_revision_id)) + + return HumanEvaluation( + id=str(evaluation_db.id), + app_id=str(evaluation_db.app_id), + project_id=str(evaluation_db.project_id), + status=evaluation_db.status, # type: ignore + evaluation_type=evaluation_db.evaluation_type, # type: ignore + variant_ids=variants_ids, + variant_names=variants_names, + testset_id=str(evaluation_db.testset_id), + testset_name=evaluation_db.testset.name, + variants_revision_ids=variants_revision_ids, + revisions=revisions, + created_at=str(evaluation_db.created_at), # type: ignore + updated_at=str(evaluation_db.updated_at), # type: ignore + ) + + +def human_evaluation_scenario_db_to_pydantic( + evaluation_scenario_db: HumanEvaluationScenarioDB, evaluation_id: str +) -> HumanEvaluationScenario: + return HumanEvaluationScenario( + id=str(evaluation_scenario_db.id), + evaluation_id=evaluation_id, + inputs=evaluation_scenario_db.inputs, # type: ignore + outputs=evaluation_scenario_db.outputs, # type: ignore + vote=evaluation_scenario_db.vote, # type: ignore + score=evaluation_scenario_db.score, # type: ignore +
correct_answer=evaluation_scenario_db.correct_answer, # type: ignore + is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore + note=evaluation_scenario_db.note or "", # type: ignore + ) + + +def aggregated_result_of_evaluation_to_pydantic( + evaluation_aggregated_results: List, +) -> List[dict]: + transformed_results = [] + for aggregated_result in evaluation_aggregated_results: + evaluator_config_dict = ( + { + "id": str(aggregated_result.evaluator_config.id), + "name": aggregated_result.evaluator_config.name, + "evaluator_key": aggregated_result.evaluator_config.evaluator_key, + "settings_values": aggregated_result.evaluator_config.settings_values, + "created_at": str(aggregated_result.evaluator_config.created_at), + "updated_at": str(aggregated_result.evaluator_config.updated_at), + } + if isinstance(aggregated_result.evaluator_config_id, uuid.UUID) + else None + ) + transformed_results.append( + { + "evaluator_config": ( + {} if evaluator_config_dict is None else evaluator_config_dict + ), + "result": aggregated_result.result, + } + ) + return transformed_results + + +async def evaluation_scenario_db_to_pydantic( + evaluation_scenario_db: EvaluationScenarioDB, evaluation_id: str +) -> EvaluationScenario: + scenario_results = [ + { + "evaluator_config": str(scenario_result.evaluator_config_id), + "result": scenario_result.result, + } + for scenario_result in evaluation_scenario_db.results + ] + return EvaluationScenario( + id=str(evaluation_scenario_db.id), + evaluation_id=evaluation_id, + inputs=[ + EvaluationScenarioInput(**scenario_input) # type: ignore + for scenario_input in evaluation_scenario_db.inputs + ], + outputs=[ + EvaluationScenarioOutput(**scenario_output) # type: ignore + for scenario_output in evaluation_scenario_db.outputs + ], + correct_answers=[ + CorrectAnswer(**correct_answer) # type: ignore + for correct_answer in evaluation_scenario_db.correct_answers + ], + is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore + note=evaluation_scenario_db.note or "", # type: ignore + results=scenario_results, # type: ignore + ) diff --git a/api/ee/src/services/db_manager.py b/api/ee/src/services/db_manager.py index 0d8e36c384..1091c4f736 100644 --- a/api/ee/src/services/db_manager.py +++ b/api/ee/src/services/db_manager.py @@ -1,7 +1,7 @@ import uuid from oss.src.dbs.postgres.shared.engine import engine -from ee.src.models.db_models import DeploymentDB +from ee.src.models.db_models import DeploymentDB_ as DeploymentDB async def create_deployment( diff --git a/api/ee/src/services/db_manager_ee.py b/api/ee/src/services/db_manager_ee.py index f7cb117e65..b101f7b68d 100644 --- a/api/ee/src/services/db_manager_ee.py +++ b/api/ee/src/services/db_manager_ee.py @@ -1,12 +1,14 @@ import uuid -from typing import List, Union, NoReturn, Optional, Tuple +from typing import List, Dict, Union, Any, NoReturn, Optional, Tuple import sendgrid from fastapi import HTTPException +from sendgrid.helpers.mail import Mail +from sqlalchemy import func, asc from sqlalchemy.future import select from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import joinedload, load_only +from sqlalchemy.orm import joinedload, load_only, aliased from sqlalchemy.exc import NoResultFound, MultipleResultsFound from oss.src.utils.logging import get_module_logger @@ -29,14 +31,36 @@ from ee.src.models.db_models import ( ProjectDB, WorkspaceDB, + EvaluationDB, OrganizationDB, ProjectMemberDB, WorkspaceMemberDB, + HumanEvaluationDB, OrganizationMemberDB, + EvaluationScenarioDB, + 
HumanEvaluationScenarioDB, + HumanEvaluationVariantDB, + EvaluationScenarioResultDB, + EvaluationEvaluatorConfigDB, + EvaluationAggregatedResultDB, ) from oss.src.models.db_models import ( + AppVariantDB, UserDB, + AppDB, + TestsetDB, InvitationDB, + EvaluatorConfigDB, + AppVariantRevisionsDB, +) +from oss.src.models.shared_models import ( + Result, + CorrectAnswer, + AggregatedResult, + EvaluationScenarioResult, + EvaluationScenarioInput, + EvaluationScenarioOutput, + HumanEvaluationScenarioInput, ) from ee.src.services.converters import get_workspace_in_format from ee.src.services.selectors import get_org_default_workspace @@ -645,7 +669,9 @@ async def remove_user_from_workspace( project = await db_manager.get_project_by_id(project_id=project_id) async with engine.core_session() as session: - if not user: # User is an invited user who has not yet created an account and therefore does not have a user object + if ( + not user + ): # User is an invited user who has not yet created an account and therefore does not have a user object pass else: # Ensure that a user can not remove the owner of the workspace @@ -1181,6 +1207,85 @@ async def get_all_workspace_roles() -> List[WorkspaceRole]: return workspace_roles +# async def get_project_id_from_db_entity( +# object_id: str, type: str, project_id: str +# ) -> dict: +# """ +# Get the project id of the object. + +# Args: +# object_id (str): The ID of the object. +# type (str): The type of the object. + +# Returns: +# dict: The project_id of the object. + +# Raises: +# ValueError: If the object type is unknown. +# Exception: If there is an error retrieving the project_id. +# """ +# try: +# if type == "app": +# app = await db_manager.fetch_app_by_id(object_id) +# project_id = app.project_id + +# elif type == "app_variant": +# app_variant = await db_manager.fetch_app_variant_by_id(object_id) +# project_id = app_variant.project_id + +# elif type == "base": +# base = await db_manager.fetch_base_by_id(object_id) +# project_id = base.project_id + +# elif type == "deployment": +# deployment = await db_manager.get_deployment_by_id(object_id) +# project_id = deployment.project_id + +# elif type == "testset": +# testset = await db_manager.fetch_testset_by_id(object_id) +# project_id = testset.project_id + +# elif type == "evaluation": +# evaluation = await db_manager.fetch_evaluation_by_id(object_id) +# project_id = evaluation.project_id + +# elif type == "evaluation_scenario": +# evaluation_scenario = await db_manager.fetch_evaluation_scenario_by_id( +# object_id +# ) +# project_id = evaluation_scenario.project_id + +# elif type == "evaluator_config": +# evaluator_config = await db_manager.fetch_evaluator_config(object_id) +# project_id = evaluator_config.project_id + +# elif type == "human_evaluation": +# human_evaluation = await db_manager.fetch_human_evaluation_by_id(object_id) +# project_id = human_evaluation.project_id + +# elif type == "human_evaluation_scenario": +# human_evaluation_scenario = ( +# await db_manager.fetch_human_evaluation_scenario_by_id(object_id) +# ) +# project_id = human_evaluation_scenario.project_id + +# elif type == "human_evaluation_scenario_by_evaluation_id": +# human_evaluation_scenario_by_evaluation = ( +# await db_manager.fetch_human_evaluation_scenario_by_evaluation_id( +# object_id +# ) +# ) +# project_id = human_evaluation_scenario_by_evaluation.project_id + +# else: +# raise ValueError(f"Unknown object type: {type}") + +# return str(project_id) + +# except Exception as e: +# raise e + + async def 
add_user_to_organization( organization_id: str, user_id: str, @@ -1272,3 +1377,755 @@ async def add_user_to_project( ) await session.commit() + + +async def fetch_evaluation_status_by_id( + project_id: str, + evaluation_id: str, +) -> Optional[str]: + """Fetch only the status of an evaluation by its ID.""" + assert evaluation_id is not None, "evaluation_id cannot be None" + + async with engine.core_session() as session: + query = ( + select(EvaluationDB) + .filter_by(project_id=project_id, id=uuid.UUID(evaluation_id)) + .options(load_only(EvaluationDB.status)) + ) + + result = await session.execute(query) + evaluation = result.scalars().first() + return evaluation.status if evaluation else None + + +async def fetch_evaluation_by_id( + project_id: str, + evaluation_id: str, +) -> Optional[EvaluationDB]: + """Fetches an evaluation by its ID. + + Args: + evaluation_id (str): The ID of the evaluation to fetch. + + Returns: + EvaluationDB: The fetched evaluation, or None if no evaluation was found. + """ + + assert evaluation_id is not None, "evaluation_id cannot be None" + async with engine.core_session() as session: + base_query = select(EvaluationDB).filter_by( + project_id=project_id, + id=uuid.UUID(evaluation_id), + ) + query = base_query.options( + joinedload(EvaluationDB.testset.of_type(TestsetDB)).load_only(TestsetDB.id, TestsetDB.name), # type: ignore + ) + + result = await session.execute( + query.options( + joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore + joinedload(EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.revision), # type: ignore + joinedload( + EvaluationDB.aggregated_results.of_type( + EvaluationAggregatedResultDB + ) + ).joinedload(EvaluationAggregatedResultDB.evaluator_config), + ) + ) + evaluation = result.unique().scalars().first() + return evaluation + + +async def list_human_evaluations(app_id: str, project_id: str): + """ + Fetches human evaluations belonging to an App. + + Args: + app_id (str): The application identifier + project_id (str): The project identifier + """ + + async with engine.core_session() as session: + base_query = ( + select(HumanEvaluationDB) + .filter_by(app_id=uuid.UUID(app_id), project_id=uuid.UUID(project_id)) + .filter(HumanEvaluationDB.testset_id.isnot(None)) + ) + query = base_query.options( + joinedload(HumanEvaluationDB.testset.of_type(TestsetDB)).load_only(TestsetDB.id, TestsetDB.name), # type: ignore + ) + + result = await session.execute(query) + human_evaluations = result.scalars().all() + return human_evaluations + + +async def create_human_evaluation( + app: AppDB, + status: str, + evaluation_type: str, + testset_id: str, + variants_ids: List[str], +): + """ + Creates a human evaluation.
+ + Args: + app (AppDB): The app object + status (str): The status of the evaluation + evaluation_type (str): The evaluation type + testset_id (str): The ID of the evaluation testset + variants_ids (List[str]): The IDs of the variants for the evaluation + """ + + async with engine.core_session() as session: + human_evaluation = HumanEvaluationDB( + app_id=app.id, + project_id=app.project_id, + status=status, + evaluation_type=evaluation_type, + testset_id=testset_id, + ) + + session.add(human_evaluation) + await session.commit() + await session.refresh(human_evaluation, attribute_names=["testset"]) + + # create variants for human evaluation + await create_human_evaluation_variants( + human_evaluation_id=str(human_evaluation.id), + variants_ids=variants_ids, + ) + return human_evaluation + + +async def fetch_human_evaluation_variants(human_evaluation_id: str): + """ + Fetches human evaluation variants. + + Args: + human_evaluation_id (str): The human evaluation ID + + Returns: + The human evaluation variants. + """ + + async with engine.core_session() as session: + base_query = select(HumanEvaluationVariantDB).filter_by( + human_evaluation_id=uuid.UUID(human_evaluation_id) + ) + query = base_query.options( + joinedload(HumanEvaluationVariantDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore + joinedload(HumanEvaluationVariantDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.id, AppVariantRevisionsDB.revision), # type: ignore + ) + + result = await session.execute(query) + evaluation_variants = result.scalars().all() + return evaluation_variants + + +async def create_human_evaluation_variants( + human_evaluation_id: str, variants_ids: List[str] +): + """ + Creates human evaluation variants. + + Args: + human_evaluation_id (str): The human evaluation identifier + variants_ids (List[str]): The variant identifiers + """ + + variants_dict = {} + for variant_id in variants_ids: + variant = await db_manager.fetch_app_variant_by_id(app_variant_id=variant_id) + if variant: + variants_dict[variant_id] = variant + + variants_revisions_dict = {} + for variant_id, variant in variants_dict.items(): + variant_revision = await db_manager.fetch_app_variant_revision_by_variant( + app_variant_id=str(variant.id), project_id=str(variant.project_id), revision=variant.revision # type: ignore + ) + if variant_revision: + variants_revisions_dict[variant_id] = variant_revision + + if set(variants_dict.keys()) != set(variants_revisions_dict.keys()): + raise ValueError("Mismatch between variants and their revisions") + + async with engine.core_session() as session: + for variant_id in variants_ids: + variant = variants_dict[variant_id] + variant_revision = variants_revisions_dict[variant_id] + human_evaluation_variant = HumanEvaluationVariantDB( + human_evaluation_id=uuid.UUID(human_evaluation_id), + variant_id=variant.id, # type: ignore + variant_revision_id=variant_revision.id, # type: ignore + ) + session.add(human_evaluation_variant) + + await session.commit() + + +async def fetch_human_evaluation_by_id( + evaluation_id: str, +) -> Optional[HumanEvaluationDB]: + """ + Fetches a human evaluation by its ID. + + Args: + evaluation_id (str): The ID of the evaluation to fetch. + + Returns: + HumanEvaluationDB: The fetched human evaluation, or None if no evaluation was found.
+ """ + + assert evaluation_id is not None, "evaluation_id cannot be None" + async with engine.core_session() as session: + base_query = select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id)) + query = base_query.options( + joinedload(HumanEvaluationDB.testset.of_type(TestsetDB)).load_only(TestsetDB.id, TestsetDB.name), # type: ignore + ) + result = await session.execute(query) + evaluation = result.scalars().first() + return evaluation + + +async def update_human_evaluation(evaluation_id: str, values_to_update: dict): + """Updates human evaluation with the specified values. + + Args: + evaluation_id (str): The evaluation ID + values_to_update (dict): The values to update + + Exceptions: + NoResultFound: if human evaluation is not found + """ + + async with engine.core_session() as session: + result = await session.execute( + select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id)) + ) + human_evaluation = result.scalars().first() + if not human_evaluation: + raise NoResultFound(f"Human evaluation with id {evaluation_id} not found") + + for key, value in values_to_update.items(): + if hasattr(human_evaluation, key): + setattr(human_evaluation, key, value) + + await session.commit() + await session.refresh(human_evaluation) + + +async def delete_human_evaluation(evaluation_id: str): + """Delete the evaluation by its ID. + + Args: + evaluation_id (str): The ID of the evaluation to delete. + """ + + assert evaluation_id is not None, "evaluation_id cannot be None" + async with engine.core_session() as session: + result = await session.execute( + select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id)) + ) + evaluation = result.scalars().first() + if not evaluation: + raise NoResultFound(f"Human evaluation with id {evaluation_id} not found") + + await session.delete(evaluation) + await session.commit() + + +async def create_human_evaluation_scenario( + inputs: List[HumanEvaluationScenarioInput], + project_id: str, + evaluation_id: str, + evaluation_extend: Dict[str, Any], +): + """ + Creates a human evaluation scenario. + + Args: + inputs (List[HumanEvaluationScenarioInput]): The inputs. + evaluation_id (str): The evaluation identifier. + evaluation_extend (Dict[str, any]): An extended required payload for the evaluation scenario. Contains score, vote, and correct_answer. + """ + + async with engine.core_session() as session: + evaluation_scenario = HumanEvaluationScenarioDB( + **evaluation_extend, + project_id=uuid.UUID(project_id), + evaluation_id=uuid.UUID(evaluation_id), + inputs=[input.model_dump() for input in inputs], + outputs=[], + ) + + session.add(evaluation_scenario) + await session.commit() + + +async def update_human_evaluation_scenario( + evaluation_scenario_id: str, values_to_update: dict +): + """Updates human evaluation scenario with the specified values. 
+ + Args: + evaluation_scenario_id (str): The evaluation scenario ID + values_to_update (dict): The values to update + + Exceptions: + NoResultFound: if human evaluation scenario is not found + """ + + async with engine.core_session() as session: + result = await session.execute( + select(HumanEvaluationScenarioDB).filter_by( + id=uuid.UUID(evaluation_scenario_id) + ) + ) + human_evaluation_scenario = result.scalars().first() + if not human_evaluation_scenario: + raise NoResultFound( + f"Human evaluation scenario with id {evaluation_scenario_id} not found" + ) + + for key, value in values_to_update.items(): + if hasattr(human_evaluation_scenario, key): + setattr(human_evaluation_scenario, key, value) + + await session.commit() + await session.refresh(human_evaluation_scenario) + + +async def fetch_human_evaluation_scenarios(evaluation_id: str): + """ + Fetches human evaluation scenarios. + + Args: + evaluation_id (str): The evaluation identifier + + Returns: + The evaluation scenarios. + """ + + async with engine.core_session() as session: + result = await session.execute( + select(HumanEvaluationScenarioDB) + .filter_by(evaluation_id=uuid.UUID(evaluation_id)) + .order_by(asc(HumanEvaluationScenarioDB.created_at)) + ) + evaluation_scenarios = result.scalars().all() + return evaluation_scenarios + + +async def fetch_evaluation_scenarios(evaluation_id: str, project_id: str): + """ + Fetches evaluation scenarios. + + Args: + evaluation_id (str): The evaluation identifier + project_id (str): The ID of the project + + Returns: + The evaluation scenarios. + """ + + async with engine.core_session() as session: + result = await session.execute( + select(EvaluationScenarioDB) + .filter_by( + evaluation_id=uuid.UUID(evaluation_id), project_id=uuid.UUID(project_id) + ) + .options(joinedload(EvaluationScenarioDB.results)) + ) + evaluation_scenarios = result.unique().scalars().all() + return evaluation_scenarios + + +async def fetch_evaluation_scenario_by_id( + evaluation_scenario_id: str, +) -> Optional[EvaluationScenarioDB]: + """Fetches an evaluation scenario by its ID. + + Args: + evaluation_scenario_id (str): The ID of the evaluation scenario to fetch. + + Returns: + EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found. + """ + + assert evaluation_scenario_id is not None, "evaluation_scenario_id cannot be None" + async with engine.core_session() as session: + result = await session.execute( + select(EvaluationScenarioDB).filter_by(id=uuid.UUID(evaluation_scenario_id)) + ) + evaluation_scenario = result.scalars().first() + return evaluation_scenario + + +async def fetch_human_evaluation_scenario_by_id( + evaluation_scenario_id: str, +) -> Optional[HumanEvaluationScenarioDB]: + """Fetches a human evaluation scenario by its ID. + + Args: + evaluation_scenario_id (str): The ID of the evaluation scenario to fetch. + + Returns: + HumanEvaluationScenarioDB: The fetched human evaluation scenario, or None if no evaluation scenario was found. + """ + + assert evaluation_scenario_id is not None, "evaluation_scenario_id cannot be None" + async with engine.core_session() as session: + result = await session.execute( + select(HumanEvaluationScenarioDB).filter_by( + id=uuid.UUID(evaluation_scenario_id) + ) + ) + evaluation_scenario = result.scalars().first() + return evaluation_scenario + + +async def fetch_human_evaluation_scenario_by_evaluation_id( + evaluation_id: str, +) -> Optional[HumanEvaluationScenarioDB]: + """Fetches a human evaluation scenario by its evaluation ID.
+ Args: + evaluation_id (str): The ID of the evaluation object to use in fetching the human evaluation. + Returns: + HumanEvaluationScenarioDB: The fetched human evaluation scenario, or None if no evaluation scenario was found. + """ + + evaluation = await fetch_human_evaluation_by_id(evaluation_id) + async with engine.core_session() as session: + result = await session.execute( + select(HumanEvaluationScenarioDB).filter_by( + evaluation_id=evaluation.id # type: ignore + ) + ) + human_eval_scenario = result.scalars().first() + return human_eval_scenario + + +async def create_new_evaluation( + app: AppDB, + project_id: str, + testset: TestsetDB, + status: Result, + variant: str, + variant_revision: str, +) -> EvaluationDB: + """Create a new evaluation. + Returns: + EvaluationDB: The created evaluation. + """ + + async with engine.core_session() as session: + evaluation = EvaluationDB( + app_id=app.id, + project_id=uuid.UUID(project_id), + testset_id=testset.id, + status=status.model_dump(), + variant_id=uuid.UUID(variant), + variant_revision_id=uuid.UUID(variant_revision), + ) + + session.add(evaluation) + await session.commit() + await session.refresh( + evaluation, + attribute_names=[ + "testset", + "variant", + "variant_revision", + "aggregated_results", + ], + ) + + return evaluation + + +async def list_evaluations(app_id: str, project_id: str): + """Retrieves evaluations of the specified app from the db. + + Args: + app_id (str): The ID of the app + project_id (str): The ID of the project + """ + + async with engine.core_session() as session: + base_query = select(EvaluationDB).filter_by( + app_id=uuid.UUID(app_id), project_id=uuid.UUID(project_id) + ) + query = base_query.options( + joinedload(EvaluationDB.testset.of_type(TestsetDB)).load_only(TestsetDB.id, TestsetDB.name), # type: ignore + ) + + result = await session.execute( + query.options( + joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore + joinedload(EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.revision), # type: ignore + joinedload( + EvaluationDB.aggregated_results.of_type( + EvaluationAggregatedResultDB + ) + ).joinedload(EvaluationAggregatedResultDB.evaluator_config), + ) + ) + evaluations = result.unique().scalars().all() + return evaluations + + +async def fetch_evaluations_by_resource( + resource_type: str, project_id: str, resource_ids: List[str] +): + """ + Fetches evaluations by resource. + + Args: + resource_type (str): The resource type + project_id (str): The ID of the project + resource_ids (List[str]): The resource identifiers + + Returns: + The evaluations by resource.
+
+    Raises:
+        HTTPException: 400 if the resource_type is not supported
+    """
+
+    ids = list(map(uuid.UUID, resource_ids))
+
+    async with engine.core_session() as session:
+        if resource_type == "variant":
+            result_evaluations = await session.execute(
+                select(EvaluationDB)
+                .filter(
+                    EvaluationDB.variant_id.in_(ids),
+                    EvaluationDB.project_id == uuid.UUID(project_id),
+                )
+                .options(load_only(EvaluationDB.id))  # type: ignore
+            )
+            result_human_evaluations = await session.execute(
+                select(HumanEvaluationDB)
+                .join(HumanEvaluationVariantDB)
+                .filter(
+                    HumanEvaluationVariantDB.variant_id.in_(ids),
+                    HumanEvaluationDB.project_id == uuid.UUID(project_id),
+                )
+                .options(load_only(HumanEvaluationDB.id))  # type: ignore
+            )
+            res_evaluations = result_evaluations.scalars().all()
+            res_human_evaluations = result_human_evaluations.scalars().all()
+            return res_evaluations + res_human_evaluations
+
+        elif resource_type == "testset":
+            result_evaluations = await session.execute(
+                select(EvaluationDB)
+                .filter(
+                    EvaluationDB.testset_id.in_(ids),
+                    EvaluationDB.project_id == uuid.UUID(project_id),
+                )
+                .options(load_only(EvaluationDB.id))  # type: ignore
+            )
+            result_human_evaluations = await session.execute(
+                select(HumanEvaluationDB)
+                .filter(
+                    HumanEvaluationDB.testset_id.in_(ids),
+                    HumanEvaluationDB.project_id
+                    == uuid.UUID(project_id),  # Fixed to match HumanEvaluationDB
+                )
+                .options(load_only(HumanEvaluationDB.id))  # type: ignore
+            )
+            res_evaluations = result_evaluations.scalars().all()
+            res_human_evaluations = result_human_evaluations.scalars().all()
+            return res_evaluations + res_human_evaluations
+
+        elif resource_type == "evaluator_config":
+            query = (
+                select(EvaluationDB)
+                .join(EvaluationDB.evaluator_configs)
+                .filter(
+                    EvaluationEvaluatorConfigDB.evaluator_config_id.in_(ids),
+                    EvaluationDB.project_id == uuid.UUID(project_id),
+                )
+            )
+            result = await session.execute(query)
+            res = result.scalars().all()
+            return res
+
+        raise HTTPException(
+            status_code=400,
+            detail=f"resource_type {resource_type} is not supported",
+        )
+
+
+async def delete_evaluations(evaluation_ids: List[str]) -> None:
+    """Delete evaluations based on the ids provided from the db.
+
+    Args:
+        evaluation_ids (List[str]): The IDs of the evaluations to delete
+    """
+
+    async with engine.core_session() as session:
+        query = select(EvaluationDB).where(EvaluationDB.id.in_(evaluation_ids))
+        result = await session.execute(query)
+        evaluations = result.scalars().all()
+        for evaluation in evaluations:
+            await session.delete(evaluation)
+        await session.commit()
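
Editor's note (aside, not part of the diff): a hedged usage sketch for fetch_evaluations_by_resource above. The import path is assumed from this PR's layout; the project and variant IDs are placeholders.

# Hypothetical caller: collect all evaluations tied to a set of variants, e.g.
# before archiving those variants.
from ee.src.services import db_manager_ee

async def evaluations_for_variants(project_id: str, variant_ids: list[str]):
    # resource_type must be "variant", "testset", or "evaluator_config";
    # anything else raises an HTTPException with status 400.
    return await db_manager_ee.fetch_evaluations_by_resource(
        resource_type="variant",
        project_id=project_id,
        resource_ids=variant_ids,
    )

+
+
+async def create_new_evaluation_scenario(
+    project_id: str,
+    evaluation_id: str,
+    variant_id: str,
+    inputs: List[EvaluationScenarioInput],
+    outputs: List[EvaluationScenarioOutput],
+    correct_answers: Optional[List[CorrectAnswer]],
+    is_pinned: Optional[bool],
+    note: Optional[str],
+    results: List[EvaluationScenarioResult],
+) -> EvaluationScenarioDB:
+    """Create a new evaluation scenario.
+
+    Returns:
+        EvaluationScenarioDB: The created evaluation scenario.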
+ """ + + async with engine.core_session() as session: + evaluation_scenario = EvaluationScenarioDB( + project_id=uuid.UUID(project_id), + evaluation_id=uuid.UUID(evaluation_id), + variant_id=uuid.UUID(variant_id), + inputs=[input.model_dump() for input in inputs], + outputs=[output.model_dump() for output in outputs], + correct_answers=( + [correct_answer.model_dump() for correct_answer in correct_answers] + if correct_answers is not None + else [] + ), + is_pinned=is_pinned, + note=note, + ) + + session.add(evaluation_scenario) + await session.commit() + await session.refresh(evaluation_scenario) + + # create evaluation scenario result + for result in results: + evaluation_scenario_result = EvaluationScenarioResultDB( + evaluation_scenario_id=evaluation_scenario.id, + evaluator_config_id=uuid.UUID(result.evaluator_config), + result=result.result.model_dump(), + ) + + session.add(evaluation_scenario_result) + + await session.commit() # ensures that scenario results insertion is committed + await session.refresh(evaluation_scenario) + + return evaluation_scenario + + +async def update_evaluation_with_aggregated_results( + evaluation_id: str, aggregated_results: List[AggregatedResult] +): + async with engine.core_session() as session: + for result in aggregated_results: + aggregated_result = EvaluationAggregatedResultDB( + evaluation_id=uuid.UUID(evaluation_id), + evaluator_config_id=uuid.UUID(result.evaluator_config), + result=result.result.model_dump(), + ) + session.add(aggregated_result) + + await session.commit() + + +async def fetch_eval_aggregated_results(evaluation_id: str): + """ + Fetches an evaluation aggregated results by evaluation identifier. + + Args: + evaluation_id (str): The evaluation identifier + + Returns: + The evaluation aggregated results by evaluation identifier. + """ + + async with engine.core_session() as session: + base_query = select(EvaluationAggregatedResultDB).filter_by( + evaluation_id=uuid.UUID(evaluation_id) + ) + query = base_query.options( + joinedload( + EvaluationAggregatedResultDB.evaluator_config.of_type(EvaluatorConfigDB) + ).load_only( + EvaluatorConfigDB.id, # type: ignore + EvaluatorConfigDB.name, # type: ignore + EvaluatorConfigDB.evaluator_key, # type: ignore + EvaluatorConfigDB.settings_values, # type: ignore + EvaluatorConfigDB.created_at, # type: ignore + EvaluatorConfigDB.updated_at, # type: ignore + ) + ) + + result = await session.execute(query) + aggregated_results = result.scalars().all() + return aggregated_results + + +async def update_evaluation( + evaluation_id: str, project_id: str, updates: Dict[str, Any] +) -> EvaluationDB: + """ + Update an evaluator configuration in the database with the provided id. + + Arguments: + evaluation_id (str): The ID of the evaluator configuration to be updated. + project_id (str): The ID of the project. + updates (Dict[str, Any]): The updates to apply to the evaluator configuration. + + Returns: + EvaluatorConfigDB: The updated evaluator configuration object. 
+ """ + + async with engine.core_session() as session: + result = await session.execute( + select(EvaluationDB).filter_by( + id=uuid.UUID(evaluation_id), project_id=uuid.UUID(project_id) + ) + ) + evaluation = result.scalars().first() + for key, value in updates.items(): + if hasattr(evaluation, key): + setattr(evaluation, key, value) + + await session.commit() + await session.refresh(evaluation) + + return evaluation + + +async def check_if_evaluation_contains_failed_evaluation_scenarios( + evaluation_id: str, +) -> bool: + async with engine.core_session() as session: + EvaluationResultAlias = aliased(EvaluationScenarioResultDB) + query = ( + select(func.count(EvaluationScenarioDB.id)) + .join(EvaluationResultAlias, EvaluationScenarioDB.results) + .where( + EvaluationScenarioDB.evaluation_id == uuid.UUID(evaluation_id), + EvaluationResultAlias.result["type"].astext == "error", + ) + ) + + result = await session.execute(query) + count = result.scalar() + if not count: + return False + return count > 0 diff --git a/api/oss/src/services/evaluation_service.py b/api/ee/src/services/evaluation_service.py similarity index 92% rename from api/oss/src/services/evaluation_service.py rename to api/ee/src/services/evaluation_service.py index ba0e146678..9d7b61cb3d 100644 --- a/api/oss/src/services/evaluation_service.py +++ b/api/ee/src/services/evaluation_service.py @@ -3,8 +3,9 @@ from fastapi import HTTPException from oss.src.utils.logging import get_module_logger -from oss.src.services import converters +from ee.src.services import converters from oss.src.services import db_manager +from ee.src.services import db_manager_ee from oss.src.models.api.evaluation_model import ( Evaluation, @@ -17,7 +18,7 @@ NewHumanEvaluation, ) from oss.src.models.db_models import AppDB -from oss.src.models.db_models import ( +from ee.src.models.db_models import ( EvaluationDB, HumanEvaluationDB, HumanEvaluationScenarioDB, @@ -65,13 +66,13 @@ async def prepare_csvdata_and_create_evaluation_scenario( for name in payload_inputs ] except KeyError: - await db_manager.delete_human_evaluation( + await db_manager_ee.delete_human_evaluation( evaluation_id=str(new_evaluation.id) ) msg = f""" Columns in the testset should match the names of the inputs in the variant. Inputs names in variant are: {[variant_input for variant_input in payload_inputs]} while - columns in testset are: {[col for col in datum.keys() if col != "correct_answer"]} + columns in testset are: {[col for col in datum.keys() if col != 'correct_answer']} """ raise HTTPException( status_code=400, @@ -91,7 +92,7 @@ async def prepare_csvdata_and_create_evaluation_scenario( **_extend_with_evaluation(evaluation_type), **_extend_with_correct_answer(evaluation_type, datum), } - await db_manager.create_human_evaluation_scenario( + await db_manager_ee.create_human_evaluation_scenario( inputs=list_of_scenario_input, project_id=project_id, evaluation_id=str(new_evaluation.id), @@ -111,7 +112,7 @@ async def update_human_evaluation_service( """ # Update the evaluation - await db_manager.update_human_evaluation( + await db_manager_ee.update_human_evaluation( evaluation_id=str(evaluation.id), values_to_update=update_payload.model_dump() ) @@ -130,7 +131,7 @@ async def fetch_evaluation_scenarios_for_evaluation( List[EvaluationScenario]: A list of evaluation scenarios. 
""" - evaluation_scenarios = await db_manager.fetch_evaluation_scenarios( + evaluation_scenarios = await db_manager_ee.fetch_evaluation_scenarios( evaluation_id=evaluation_id, project_id=project_id ) return [ @@ -156,7 +157,7 @@ async def fetch_human_evaluation_scenarios_for_evaluation( Returns: List[EvaluationScenario]: A list of evaluation scenarios. """ - human_evaluation_scenarios = await db_manager.fetch_human_evaluation_scenarios( + human_evaluation_scenarios = await db_manager_ee.fetch_human_evaluation_scenarios( evaluation_id=str(human_evaluation.id) ) eval_scenarios = [ @@ -224,7 +225,7 @@ async def update_human_evaluation_scenario( if "correct_answer" in payload: values_to_update["correct_answer"] = payload["correct_answer"] - await db_manager.update_human_evaluation_scenario( + await db_manager_ee.update_human_evaluation_scenario( evaluation_scenario_id=str(evaluation_scenario_db.id), values_to_update=values_to_update, ) @@ -259,7 +260,7 @@ async def fetch_list_evaluations(app: AppDB, project_id: str) -> List[Evaluation List[Evaluation]: A list of evaluations. """ - evaluations_db = await db_manager.list_evaluations( + evaluations_db = await db_manager_ee.list_evaluations( app_id=str(app.id), project_id=project_id ) return [ @@ -282,7 +283,7 @@ async def fetch_list_human_evaluations( List[Evaluation]: A list of evaluations. """ - evaluations_db = await db_manager.list_human_evaluations( + evaluations_db = await db_manager_ee.list_human_evaluations( app_id=app_id, project_id=project_id ) return [ @@ -318,7 +319,7 @@ async def delete_human_evaluations(evaluation_ids: List[str]) -> None: """ for evaluation_id in evaluation_ids: - await db_manager.delete_human_evaluation(evaluation_id=evaluation_id) + await db_manager_ee.delete_human_evaluation(evaluation_id=evaluation_id) async def delete_evaluations(evaluation_ids: List[str]) -> None: @@ -332,7 +333,7 @@ async def delete_evaluations(evaluation_ids: List[str]) -> None: HTTPException: If evaluation not found or access denied. 
""" - await db_manager.delete_evaluations(evaluation_ids=evaluation_ids) + await db_manager_ee.delete_evaluations(evaluation_ids=evaluation_ids) async def create_new_human_evaluation(payload: NewHumanEvaluation) -> HumanEvaluationDB: @@ -353,7 +354,7 @@ async def create_new_human_evaluation(payload: NewHumanEvaluation) -> HumanEvalu detail=f"App with id {payload.app_id} does not exist", ) - human_evaluation = await db_manager.create_human_evaluation( + human_evaluation = await db_manager_ee.create_human_evaluation( app=app, status=payload.status, evaluation_type=payload.evaluation_type, @@ -404,13 +405,13 @@ async def create_new_evaluation( variant_revision_id=revision_id ) - assert variant_revision and variant_revision.revision is not None, ( - f"Variant revision with {revision_id} cannot be None" - ) + assert ( + variant_revision and variant_revision.revision is not None + ), f"Variant revision with {revision_id} cannot be None" assert testset is not None, f"Testset with id {testset_id} does not exist" - evaluation_db = await db_manager.create_new_evaluation( + evaluation_db = await db_manager_ee.create_new_evaluation( app=app, project_id=project_id, testset=testset, @@ -424,7 +425,7 @@ async def create_new_evaluation( async def compare_evaluations_scenarios(evaluations_ids: List[str], project_id: str): - evaluation = await db_manager.fetch_evaluation_by_id( + evaluation = await db_manager_ee.fetch_evaluation_by_id( project_id=project_id, evaluation_id=evaluations_ids[0], ) diff --git a/api/oss/src/services/llm_apps_service.py b/api/ee/src/services/llm_apps_service.py similarity index 99% rename from api/oss/src/services/llm_apps_service.py rename to api/ee/src/services/llm_apps_service.py index d5ba69f965..b1d8ab5995 100644 --- a/api/oss/src/services/llm_apps_service.py +++ b/api/ee/src/services/llm_apps_service.py @@ -146,9 +146,9 @@ async def make_payload( payload["ag_config"] = parameters elif param["type"] == "input": item = datapoint.get(param["name"], parameters.get(param["name"], "")) - assert param["name"] != "ag_config", ( - "ag_config should be handled separately" - ) + assert ( + param["name"] != "ag_config" + ), "ag_config should be handled separately" payload[param["name"]] = item # in case of dynamic inputs (as in our templates) diff --git a/api/oss/src/services/results_service.py b/api/ee/src/services/results_service.py similarity index 91% rename from api/oss/src/services/results_service.py rename to api/ee/src/services/results_service.py index cccb32164d..ca52151315 100644 --- a/api/oss/src/services/results_service.py +++ b/api/ee/src/services/results_service.py @@ -1,16 +1,16 @@ import uuid from typing import Sequence, Dict, Any -from oss.src.services import db_manager +from ee.src.services import db_manager_ee from oss.src.models.api.evaluation_model import EvaluationType -from oss.src.models.db_models import ( +from ee.src.models.db_models import ( HumanEvaluationDB, EvaluationScenarioDB, ) async def fetch_results_for_evaluation(evaluation: HumanEvaluationDB): - evaluation_scenarios = await db_manager.fetch_human_evaluation_scenarios( + evaluation_scenarios = await db_manager_ee.fetch_human_evaluation_scenarios( evaluation_id=str(evaluation.id) ) @@ -18,7 +18,7 @@ async def fetch_results_for_evaluation(evaluation: HumanEvaluationDB): if len(evaluation_scenarios) == 0: return results - evaluation_variants = await db_manager.fetch_human_evaluation_variants( + evaluation_variants = await db_manager_ee.fetch_human_evaluation_variants( 
         human_evaluation_id=str(evaluation.id)
     )
     results["variants"] = [
@@ -99,7 +99,7 @@ async def _compute_stats_for_human_a_b_testing_evaluation(

 async def fetch_results_for_single_model_test(evaluation_id: str):
-    evaluation_scenarios = await db_manager.fetch_human_evaluation_scenarios(
+    evaluation_scenarios = await db_manager_ee.fetch_human_evaluation_scenarios(
         evaluation_id=str(evaluation_id)
     )
     scores_and_counts: Dict[str, Any] = {}
diff --git a/api/ee/src/services/utils.py b/api/ee/src/services/utils.py
new file mode 100644
index 0000000000..0eaedde4ff
--- /dev/null
+++ b/api/ee/src/services/utils.py
@@ -0,0 +1,21 @@
+# Stdlib Imports
+import asyncio
+from functools import partial
+from typing import Any, Callable
+
+
+async def run_in_separate_thread(func: Callable, *args, **kwargs) -> Any:
+    """
+    Run a synchronous function in a separate thread.
+
+    Args:
+        func (callable): The synchronous function to be executed.
+        args (tuple): Positional arguments to be passed to `func`.
+        kwargs (dict): Keyword arguments to be passed to `func`.
+
+    Returns:
+        The result of the synchronous function.
+    """
+
+    loop = asyncio.get_event_loop()
+    return await loop.run_in_executor(None, partial(func, *args, **kwargs))
diff --git a/api/ee/src/services/workspace_manager.py b/api/ee/src/services/workspace_manager.py
index 49fe01b426..5bbebc78e6 100644
--- a/api/ee/src/services/workspace_manager.py
+++ b/api/ee/src/services/workspace_manager.py
@@ -317,9 +317,9 @@ async def accept_workspace_invitation(
     invitation = await check_valid_invitation(project_id, user.email, token)
     if invitation is not None:
-        assert invitation.role is not None, (
-            "Invitation does not have any workspace role"
-        )
+        assert (
+            invitation.role is not None
+        ), "Invitation does not have any workspace role"
         await db_manager_ee.add_user_to_workspace_and_org(
             organization, workspace, user, project_id, invitation.role
         )
diff --git a/api/oss/src/tasks/evaluations/__init__.py b/api/ee/src/tasks/__init__.py
similarity index 100%
rename from api/oss/src/tasks/evaluations/__init__.py
rename to api/ee/src/tasks/__init__.py
diff --git a/web/oss/src/components/Evaluations/HumanEvaluationResult.tsx b/api/ee/src/tasks/evaluations/__init__.py
similarity index 100%
rename from web/oss/src/components/Evaluations/HumanEvaluationResult.tsx
rename to api/ee/src/tasks/evaluations/__init__.py
diff --git a/api/oss/src/tasks/evaluations/batch.py b/api/ee/src/tasks/evaluations/batch.py
similarity index 97%
rename from api/oss/src/tasks/evaluations/batch.py
rename to api/ee/src/tasks/evaluations/batch.py
index 324ed74e49..5fdef15b3c 100644
--- a/api/oss/src/tasks/evaluations/batch.py
+++ b/api/ee/src/tasks/evaluations/batch.py
@@ -10,9 +10,8 @@
 from oss.src.utils.helpers import parse_url, get_slug_from_name_and_id
 from oss.src.utils.logging import get_module_logger
-from oss.src.utils.common import is_ee
 from oss.src.services.auth_helper import sign_secret_token
-from oss.src.services import llm_apps_service
+from ee.src.services import llm_apps_service
 from oss.src.models.shared_models import InvokationResult
 from oss.src.services.db_manager import (
     fetch_app_by_id,
@@ -22,9 +21,7 @@
     get_project_by_id,
 )
 from oss.src.core.secrets.utils import get_llm_providers_secrets
-
-if is_ee():
-    from ee.src.utils.entitlements import check_entitlements, Counter
+from ee.src.utils.entitlements import check_entitlements, Counter

 from oss.src.dbs.postgres.queries.dbes import (
     QueryArtifactDBE,
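Editor's note (aside, not part of the diff): a minimal usage sketch for run_in_separate_thread, added above in api/ee/src/services/utils.py. The import path mirrors that file's location; the caller and the blocking function are hypothetical. functools.partial is used inside the helper because run_in_executor does not forward keyword arguments itself.

import asyncio

from ee.src.services.utils import run_in_separate_thread

def blocking_checksum(path: str) -> int:
    # Stand-in for any CPU- or IO-bound synchronous call.
    with open(path, "rb") as f:
        return sum(f.read())

async def main() -> None:
    # The event loop stays responsive while the sync call runs in the
    # default executor's worker thread.
    checksum = await run_in_separate_thread(blocking_checksum, "/etc/hostname")
    print(checksum)

asyncio.run(main())

diff --git a/api/oss/src/tasks/evaluations/legacy.py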
b/api/ee/src/tasks/evaluations/legacy.py similarity index 94% rename from api/oss/src/tasks/evaluations/legacy.py rename to api/ee/src/tasks/evaluations/legacy.py index e034439691..579c6853b9 100644 --- a/api/oss/src/tasks/evaluations/legacy.py +++ b/api/ee/src/tasks/evaluations/legacy.py @@ -9,9 +9,8 @@ from oss.src.utils.helpers import parse_url, get_slug_from_name_and_id from oss.src.utils.logging import get_module_logger -from oss.src.utils.common import is_ee from oss.src.services.auth_helper import sign_secret_token -from oss.src.services import llm_apps_service +from ee.src.services import llm_apps_service from oss.src.models.shared_models import InvokationResult from oss.src.services.db_manager import ( fetch_app_by_id, @@ -22,9 +21,7 @@ get_project_by_id, ) from oss.src.core.secrets.utils import get_llm_providers_secrets - -if is_ee(): - from ee.src.utils.entitlements import check_entitlements, Counter +from ee.src.utils.entitlements import check_entitlements, Counter from oss.src.dbs.postgres.queries.dbes import ( QueryArtifactDBE, @@ -292,9 +289,9 @@ async def setup_evaluation( testset_id=UUID(testset_id), ) - assert testset_response.count != 0, ( - f"Testset with id {testset_id} not found!" - ) + assert ( + testset_response.count != 0 + ), f"Testset with id {testset_id} not found!" testset = testset_response.testset testcases = testset.data.testcases @@ -334,9 +331,9 @@ async def setup_evaluation( query_ref=query_ref, ) - assert query_revision is not None, ( - f"Query revision with id {query_id} not found!" - ) + assert ( + query_revision is not None + ), f"Query revision with id {query_id} not found!" query_revision_ref = Reference( id=query_revision.id, @@ -352,9 +349,9 @@ async def setup_evaluation( ), ) - assert query_variant is not None, ( - f"Query variant with id {query_revision.variant_id} not found!" - ) + assert ( + query_variant is not None + ), f"Query variant with id {query_revision.variant_id} not found!" query_variant_ref = Reference( id=query_variant.id, @@ -374,9 +371,9 @@ async def setup_evaluation( if revision_id: revision = await fetch_app_variant_revision_by_id(revision_id) - assert revision is not None, ( - f"App revision with id {revision_id} not found!" - ) + assert ( + revision is not None + ), f"App revision with id {revision_id} not found!" application_references["revision"] = Reference( id=UUID(str(revision.id)), @@ -384,9 +381,9 @@ async def setup_evaluation( variant = await fetch_app_variant_by_id(str(revision.variant_id)) - assert variant is not None, ( - f"App variant with id {revision.variant_id} not found!" - ) + assert ( + variant is not None + ), f"App variant with id {revision.variant_id} not found!" application_references["variant"] = Reference( id=UUID(str(variant.id)), @@ -402,9 +399,9 @@ async def setup_evaluation( deployment = await get_deployment_by_id(str(revision.base.deployment_id)) - assert deployment is not None, ( - f"Deployment with id {revision.base.deployment_id} not found!" - ) + assert ( + deployment is not None + ), f"Deployment with id {revision.base.deployment_id} not found!" uri = parse_url(url=deployment.uri) @@ -412,9 +409,9 @@ async def setup_evaluation( revision_parameters = revision.config_parameters - assert revision_parameters is not None, ( - f"Revision parameters for variant {variant.id} not found!" - ) + assert ( + revision_parameters is not None + ), f"Revision parameters for variant {variant.id} not found!" 
invocation_steps_keys.append( get_slug_from_name_and_id(app.app_name, revision.id) @@ -498,18 +495,18 @@ async def setup_evaluation( workflow_ref=workflow_ref, ) - assert workflow_revision is not None, ( - f"Workflow revision with id {workflow_ref.id} not found!" - ) + assert ( + workflow_revision is not None + ), f"Workflow revision with id {workflow_ref.id} not found!" workflow_revision_ref = Reference( id=workflow_revision.id, slug=workflow_revision.slug, ) - evaluator_references[annotation_step_key]["revision"] = ( - workflow_revision_ref - ) + evaluator_references[annotation_step_key][ + "revision" + ] = workflow_revision_ref evaluators[annotation_step_key]["revision"] = workflow_revision @@ -520,9 +517,9 @@ async def setup_evaluation( ), ) - assert workflow_variant is not None, ( - f"Workflow variant with id {workflow_revision.variant_id} not found!" - ) + assert ( + workflow_variant is not None + ), f"Workflow variant with id {workflow_revision.variant_id} not found!" workflow_variant_ref = Reference( id=workflow_variant.id, @@ -893,9 +890,9 @@ def annotate( fetch_app_variant_by_id(str(revision.variant_id)), ) - assert variant is not None, ( - f"App variant with id {revision.variant_id} not found!" - ) + assert ( + variant is not None + ), f"App variant with id {revision.variant_id} not found!" app = loop.run_until_complete( fetch_app_by_id(str(variant.app_id)), @@ -907,9 +904,9 @@ def annotate( get_deployment_by_id(str(revision.base.deployment_id)), ) - assert deployment is not None, ( - f"Deployment with id {revision.base.deployment_id} not found!" - ) + assert ( + deployment is not None + ), f"Deployment with id {revision.base.deployment_id} not found!" uri = parse_url(url=deployment.uri) @@ -917,9 +914,9 @@ def annotate( revision_parameters = revision.config_parameters - assert revision_parameters is not None, ( - f"Revision parameters for variant {variant.id} not found!" - ) + assert ( + revision_parameters is not None + ), f"Revision parameters for variant {variant.id} not found!" # ---------------------------------------------------------------------- # fetch evaluators ----------------------------------------------------- @@ -997,9 +994,9 @@ def annotate( ) ) - assert len(scenarios) == nof_testcases, ( - f"Failed to create evaluation scenarios for run {run_id}!" - ) + assert ( + len(scenarios) == nof_testcases + ), f"Failed to create evaluation scenarios for run {run_id}!" # ---------------------------------------------------------------------- # create input steps --------------------------------------------------- @@ -1025,9 +1022,9 @@ def annotate( ) ) - assert len(steps) == nof_testcases, ( - f"Failed to create evaluation steps for run {run_id}!" - ) + assert ( + len(steps) == nof_testcases + ), f"Failed to create evaluation steps for run {run_id}!" # ---------------------------------------------------------------------- # flatten testcases ---------------------------------------------------- @@ -1101,9 +1098,9 @@ def annotate( ) ) - assert len(steps) == nof_testcases, ( - f"Failed to create evaluation steps for run {run_id}!" - ) + assert ( + len(steps) == nof_testcases + ), f"Failed to create evaluation steps for run {run_id}!" # ---------------------------------------------------------------------- run_has_errors = 0 @@ -1431,9 +1428,9 @@ def annotate( ) ) - assert len(steps) == 1, ( - f"Failed to create evaluation step for scenario with id {scenario.id}!" - ) + assert ( + len(steps) == 1 + ), f"Failed to create evaluation step for scenario with id {scenario.id}!" 
# ------------------------------------------------------------------ scenario_edit = EvaluationScenarioEdit( @@ -1452,9 +1449,9 @@ def annotate( ) ) - assert scenario, ( - f"Failed to edit evaluation scenario with id {scenario.id}!" - ) + assert ( + scenario + ), f"Failed to edit evaluation scenario with id {scenario.id}!" if scenario_status != EvaluationStatus.FAILURE: try: @@ -1544,14 +1541,13 @@ def annotate( # edit meters to avoid counting failed evaluations -------------------------- if run_status == EvaluationStatus.FAILURE: - if is_ee(): - loop.run_until_complete( - check_entitlements( - organization_id=project.organization_id, - key=Counter.EVALUATIONS, - delta=-1, - ) + loop.run_until_complete( + check_entitlements( + organization_id=project.organization_id, + key=Counter.EVALUATIONS, + delta=-1, ) + ) log.info("[DONE] ", run_id=run_id, project_id=project_id, user_id=user_id) diff --git a/api/oss/src/tasks/evaluations/live.py b/api/ee/src/tasks/evaluations/live.py similarity index 98% rename from api/oss/src/tasks/evaluations/live.py rename to api/ee/src/tasks/evaluations/live.py index 932ae57ebd..43208bd42d 100644 --- a/api/oss/src/tasks/evaluations/live.py +++ b/api/ee/src/tasks/evaluations/live.py @@ -253,9 +253,7 @@ def evaluate( steps = run.data.steps input_steps = { - step.key: step - for step in steps - if step.type == "input" # -------- + step.key: step for step in steps if step.type == "input" # -------- } invocation_steps = { step.key: step for step in steps if step.type == "invocation" @@ -492,9 +490,9 @@ def evaluate( ) ) - assert len(results) == nof_traces, ( - f"Failed to create evaluation results for run {run_id}!" - ) + assert ( + len(results) == nof_traces + ), f"Failed to create evaluation results for run {run_id}!" # ------------------------------------------------------------------ scenario_has_errors: Dict[int, int] = dict() @@ -774,9 +772,9 @@ def evaluate( ) ) - assert len(results) == 1, ( - f"Failed to create evaluation result for scenario with id {scenario.id}!" - ) + assert ( + len(results) == 1 + ), f"Failed to create evaluation result for scenario with id {scenario.id}!" # -------------------------------------------------------------- scenario_edit = EvaluationScenarioEdit( diff --git a/api/ee/src/utils/entitlements.py b/api/ee/src/utils/entitlements.py index 99614caec4..13360aad77 100644 --- a/api/ee/src/utils/entitlements.py +++ b/api/ee/src/utils/entitlements.py @@ -36,25 +36,25 @@ class EntitlementsException(Exception): pass -NOT_ENTITLED_RESPONSE: Callable[[Tracker], JSONResponse] = ( - lambda tracker=None: JSONResponse( - status_code=403, - content={ - "detail": ( - "You have reached your monthly quota limit. Please upgrade your plan to continue." - if tracker == Tracker.COUNTERS +NOT_ENTITLED_RESPONSE: Callable[ + [Tracker], JSONResponse ] = lambda tracker=None: JSONResponse( + status_code=403, + content={ + "detail": ( + "You have reached your monthly quota limit. Please upgrade your plan to continue." + if tracker == Tracker.COUNTERS + else ( + "You have reached your quota limit. Please upgrade your plan to continue." + if tracker == Tracker.GAUGES else ( - "You have reached your quota limit. Please upgrade your plan to continue." - if tracker == Tracker.GAUGES - else ( - "You do not have access to this feature. Please upgrade your plan to continue." - if tracker == Tracker.FLAGS - else "You do not have access to this feature." - ) + "You do not have access to this feature. Please upgrade your plan to continue."
+ if tracker == Tracker.FLAGS + else "You do not have access to this feature." ) - ), - }, - ) + ) + ), + }, ) @@ -163,7 +163,7 @@ async def check_entitlements( # TODO: remove this line log.info( - f"adjusting: {organization_id} | {(('0' if (meter.month != 0 and meter.month < 10) else '') + str(meter.month)) if meter.month != 0 else ' '}.{meter.year if meter.year else ' '} | {'allow' if check else 'deny '} | {meter.key}: {meter.value - meter.synced} [{meter.value}]" + f"adjusting: {organization_id} | {(('0' if (meter.month != 0 and meter.month < 10) else '') + str(meter.month)) if meter.month != 0 else ' '}.{meter.year if meter.year else ' '} | {'allow' if check else 'deny '} | {meter.key}: {meter.value-meter.synced} [{meter.value}]" ) return check is True, meter, _ diff --git a/api/ee/src/utils/permissions.py b/api/ee/src/utils/permissions.py index 4454a0ac6b..312bcb05b6 100644 --- a/api/ee/src/utils/permissions.py +++ b/api/ee/src/utils/permissions.py @@ -218,17 +218,17 @@ async def check_rbac_permission( bool: True if the user belongs to the workspace and has the specified permission, False otherwise. """ - assert project_id is not None, ( - "Project_ID is required to check object-level permissions" - ) + assert ( + project_id is not None + ), "Project_ID is required to check object-level permissions" # Assert that either permission or role is provided, but not both - assert (permission is not None) or (role is not None), ( - "Either 'permission' or 'role' must be provided, but neither is provided" - ) - assert not ((permission is not None) and (role is not None)), ( - "'permission' and 'role' cannot both be provided at the same time" - ) + assert (permission is not None) or ( + role is not None + ), "Either 'permission' or 'role' must be provided, but neither is provided" + assert not ( + (permission is not None) and (role is not None) + ), "'permission' and 'role' cannot both be provided at the same time" if project_id is not None: project = await db_manager.get_project_by_id(project_id) @@ -281,9 +281,9 @@ async def check_project_has_role_or_permission( if not check: return True - assert role is not None or permission is not None, ( - "Either role or permission must be provided" - ) + assert ( + role is not None or permission is not None + ), "Either role or permission must be provided" project_members = await db_manager_ee.get_project_members( project_id=str(project.id) diff --git a/api/oss/tests/manual/evaluations/live.http b/api/ee/tests/manual/evaluations/live.http similarity index 100% rename from api/oss/tests/manual/evaluations/live.http rename to api/ee/tests/manual/evaluations/live.http diff --git a/api/ee/tests/manual/evaluations/sdk/quickstart.ipynb b/api/ee/tests/manual/evaluations/sdk/quickstart.ipynb new file mode 100644 index 0000000000..24b8099925 --- /dev/null +++ b/api/ee/tests/manual/evaluations/sdk/quickstart.ipynb @@ -0,0 +1,192 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "id": "97d72a52", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "http://144.76.237.122/api\n", + "2025-10-23T13:22:49.147Z \u001b[38;5;70m[INFO.]\u001b[0m Agenta - SDK version: 0.51.2 \u001b[38;5;245m[agenta.sdk.agenta_init]\u001b[0m \n", + "2025-10-23T13:22:49.147Z \u001b[38;5;70m[INFO.]\u001b[0m Agenta - Host: http://144.76.237.122 \u001b[38;5;245m[agenta.sdk.agenta_init]\u001b[0m \n", + "2025-10-23T13:22:49.148Z \u001b[38;5;70m[INFO.]\u001b[0m Agenta - OLTP URL: http://144.76.237.122/api/otlp/v1/traces 
\u001b[38;5;245m[agenta.sdk.tracing.tracing]\u001b[0m \n" + ] + } + ], + "source": [ + "import asyncio\n", + "import random\n", + "import os\n", + "\n", + "from evaluate import evaluate\n", + "from entities import sync_testset\n", + "\n", + "from utils import display_evaluation_results\n", + "\n", + "import agenta as ag\n", + "\n", + "from agenta.sdk.decorators.running import workflow\n", + "from agenta.sdk.workflows import builtin\n", + "from dotenv import load_dotenv\n", + "\n", + "load_dotenv()\n", + "print(os.getenv(\"AGENTA_API_URL\"))\n", + "ag.init(api_url=os.getenv(\"AGENTA_API_URL\"), api_key=os.getenv(\"AGENTA_API_KEY\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cb27d3a5", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import entities" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "5d941062", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'_id': '0199bec6-b13c-7ea2-999e-8bc9432f5ee0',\n", + " 'name': 'completion_testset',\n", + " 'created_at': '2025-10-07 13:05:16.604762+00:00',\n", + " 'updated_at': '2025-10-07 13:05:16.604764+00:00'},\n", + " {'_id': '0199ca28-8f74-7d52-8b9f-11a58ea131c7',\n", + " 'name': 'Agenta Questions',\n", + " 'created_at': '2025-10-09 18:07:59.860875+00:00',\n", + " 'updated_at': '2025-10-09 18:07:59.860877+00:00'},\n", + " {'_id': '0199ec05-dcea-7c02-bb46-cde0731b3da5',\n", + " 'name': 'Agenta Questions',\n", + " 'created_at': '2025-10-16 07:57:11.274985+00:00',\n", + " 'updated_at': '2025-10-16 07:57:11.274987+00:00'},\n", + " {'_id': '0199ec08-48f8-7cc1-9850-c47d631c7f05',\n", + " 'name': 'Capitals',\n", + " 'created_at': '2025-10-16 07:59:50.008286+00:00',\n", + " 'updated_at': '2025-10-16 07:59:50.008289+00:00'},\n", + " {'_id': '0199ec0a-2c1c-7be1-bcdb-8599afb38b8e',\n", + " 'name': 'Capitals',\n", + " 'created_at': '2025-10-16 08:01:53.692302+00:00',\n", + " 'updated_at': '2025-10-16 08:01:53.692305+00:00'},\n", + " {'_id': '0199ec27-a638-7762-987d-37fa94b0bf83',\n", + " 'name': 'Agenta Questions',\n", + " 'created_at': '2025-10-16 08:34:05.496950+00:00',\n", + " 'updated_at': '2025-10-16 08:34:05.496952+00:00'},\n", + " {'_id': '019a0cfc-0452-76c2-b0c8-98ab72c444c0',\n", + " 'name': 'chat-testing',\n", + " 'created_at': '2025-10-22 17:33:54.130969+00:00',\n", + " 'updated_at': '2025-10-22 17:56:01.893040+00:00'},\n", + " {'_id': '019a113e-5412-7822-8dce-7f329ba484a4',\n", + " 'name': 'Capitals',\n", + " 'created_at': '2025-10-23 13:24:48.786131+00:00',\n", + " 'updated_at': '2025-10-23 13:24:48.786135+00:00'}]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await entities._get_legacy_testsets()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f5962bdb", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "my_testcases_data = [\n", + " {\"country\": \"Germany\", \"capital\": \"Berlin\"},\n", + " {\"country\": \"France\", \"capital\": \"Paris\"},\n", + " {\"country\": \"Spain\", \"capital\": \"Madrid\"},\n", + " {\"country\": \"Italy\", \"capital\": \"Rome\"},\n", + "]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "a2395572", + "metadata": {}, + "outputs": [], + "source": [ + "my_testset = await sync_testset(\n", + " # testset_slug=\"my_testset\",\n", + " #\n", + " name=\"Capitals\",\n", + " # description=\"A testset of countries and their capitals\",\n", + " #\n", + " 
data=my_testcases_data,\n", + ")\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c8521beb", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "UUID('019a113e-5412-7822-8dce-7f329ba484a4')" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "my_testset\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d130a4bc", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/api/oss/tests/manual/evaluators/human-evaluator.http b/api/ee/tests/manual/evaluators/human-evaluator.http similarity index 100% rename from api/oss/tests/manual/evaluators/human-evaluator.http rename to api/ee/tests/manual/evaluators/human-evaluator.http diff --git a/api/entrypoint.py b/api/entrypoint.py index 36b37b699c..aa50cf2de1 100644 --- a/api/entrypoint.py +++ b/api/entrypoint.py @@ -24,8 +24,6 @@ from oss.src.services.auth_helper import authentication_middleware from oss.src.services.analytics_service import analytics_middleware -from oss.src.routers import evaluation_router, human_evaluation_router - # DBEs from oss.src.dbs.postgres.queries.dbes import ( QueryArtifactDBE, @@ -474,18 +472,6 @@ async def lifespan(*args, **kwargs): tags=["Evaluations"], ) -app.include_router( - evaluation_router.router, - prefix="/evaluations", - tags=["Evaluations"], -) - -app.include_router( - human_evaluation_router.router, - prefix="/human-evaluations", - tags=["Human-Evaluations"], -) - app.include_router( admin_router.router, prefix="/admin", @@ -582,11 +568,7 @@ async def lifespan(*args, **kwargs): # ------------------------------------------------------------------------------ - -import oss.src.tasks.evaluations.live -import oss.src.tasks.evaluations.legacy -import oss.src.tasks.evaluations.batch - - if ee and is_ee(): app = ee.extend_app_schema(app) + + ee.load_tasks() diff --git a/api/oss/databases/postgres/migrations/core/data_migrations/projects.py b/api/oss/databases/postgres/migrations/core/data_migrations/projects.py index e5db291b3b..5ed91ee5f9 100644 --- a/api/oss/databases/postgres/migrations/core/data_migrations/projects.py +++ b/api/oss/databases/postgres/migrations/core/data_migrations/projects.py @@ -131,13 +131,13 @@ def add_default_evaluators_to_project(session: Session, project_id: str): } for setting_name, default_value in settings_values.items(): - assert default_value != "", ( - f"Default value for ground truth key '{setting_name}' in Evaluator is empty" - ) + assert ( + default_value != "" + ), f"Default value for ground truth key '{setting_name}' in Evaluator is empty" - assert hasattr(evaluator, "name") and hasattr(evaluator, "key"), ( - f"'name' and 'key' does not exist in the evaluator: {evaluator}" - ) + assert hasattr(evaluator, "name") and hasattr( + evaluator, "key" + ), f"'name' and 'key' does not exist in the evaluator: {evaluator}" evaluator_config = EvaluatorConfigDB( project_id=uuid.UUID(project_id), diff --git a/api/oss/databases/postgres/migrations/core/utils.py b/api/oss/databases/postgres/migrations/core/utils.py index 
adeb34bd50..9994ce2e54 100644 --- a/api/oss/databases/postgres/migrations/core/utils.py +++ b/api/oss/databases/postgres/migrations/core/utils.py @@ -73,9 +73,7 @@ async def get_current_migration_head_from_db(engine: AsyncEngine): async with engine.connect() as connection: try: - result = await connection.execute( - text("SELECT version_num FROM alembic_version") - ) # type: ignore + result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore except (asyncpg.exceptions.UndefinedTableError, ProgrammingError): # Note: If the alembic_version table does not exist, it will result in raising an UndefinedTableError exception. # We need to suppress the error and return a list with the alembic_version table name to inform the user that there is a pending migration \ @@ -85,9 +83,9 @@ async def get_current_migration_head_from_db(engine: AsyncEngine): return "alembic_version" migration_heads = [row[0] for row in result.fetchall()] - assert len(migration_heads) == 1, ( - "There can only be one migration head stored in the database." - ) + assert ( + len(migration_heads) == 1 + ), "There can only be one migration head stored in the database." return migration_heads[0] diff --git a/api/oss/databases/postgres/migrations/tracing/utils.py b/api/oss/databases/postgres/migrations/tracing/utils.py index 6966d0e1c8..db61035477 100644 --- a/api/oss/databases/postgres/migrations/tracing/utils.py +++ b/api/oss/databases/postgres/migrations/tracing/utils.py @@ -65,9 +65,7 @@ async def get_current_migration_head_from_db(engine: AsyncEngine): async with engine.connect() as connection: try: - result = await connection.execute( - text("SELECT version_num FROM alembic_version") - ) # type: ignore + result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore except (asyncpg.exceptions.UndefinedTableError, ProgrammingError): # Note: If the alembic_version table does not exist, it will result in raising an UndefinedTableError exception. # We need to suppress the error and return a list with the alembic_version table name to inform the user that there is a pending migration \ @@ -77,9 +75,9 @@ async def get_current_migration_head_from_db(engine: AsyncEngine): return "alembic_version" migration_heads = [row[0] for row in result.fetchall()] - assert len(migration_heads) == 1, ( - "There can only be one migration head stored in the database." - ) + assert ( + len(migration_heads) == 1 + ), "There can only be one migration head stored in the database." 
return migration_heads[0] diff --git a/api/oss/docker/Dockerfile.dev b/api/oss/docker/Dockerfile.dev index 8b500fd96e..647b46c960 100644 --- a/api/oss/docker/Dockerfile.dev +++ b/api/oss/docker/Dockerfile.dev @@ -34,12 +34,12 @@ ENV PYTHONPATH=/sdk:$PYTHONPATH # # -COPY ./oss/src/crons/queries.sh /queries.sh -COPY ./oss/src/crons/queries.txt /etc/cron.d/queries-cron -RUN sed -i -e '$a\' /etc/cron.d/queries-cron -RUN cat -A /etc/cron.d/queries-cron +# +# +# +# -RUN chmod +x /queries.sh \ - && chmod 0644 /etc/cron.d/queries-cron +# +# EXPOSE 8000 diff --git a/api/oss/docker/Dockerfile.gh b/api/oss/docker/Dockerfile.gh index e26bfb7c9a..cf9817f0b0 100644 --- a/api/oss/docker/Dockerfile.gh +++ b/api/oss/docker/Dockerfile.gh @@ -18,11 +18,11 @@ RUN pip install --upgrade pip \ # COPY ./oss /app/oss/ COPY ./entrypoint.py ./pyproject.toml /app/ -# COPY ./sdk /sdk/ +# RUN poetry config virtualenvs.create false \ - && poetry install --no-interaction --no-ansi - # && pip install --force-reinstall --upgrade /sdk/ + && poetry install --no-interaction --no-ansi +# # @@ -34,12 +34,12 @@ RUN poetry config virtualenvs.create false \ # # -COPY ./oss/src/crons/queries.sh /queries.sh -COPY ./oss/src/crons/queries.txt /etc/cron.d/queries-cron -RUN sed -i -e '$a\' /etc/cron.d/queries-cron -RUN cat -A /etc/cron.d/queries-cron +# +# +# +# -RUN chmod +x /queries.sh \ - && chmod 0644 /etc/cron.d/queries-cron +# +# EXPOSE 8000 diff --git a/api/oss/src/apis/fastapi/applications/router.py b/api/oss/src/apis/fastapi/applications/router.py index e179e0f04c..2f03ea8c8b 100644 --- a/api/oss/src/apis/fastapi/applications/router.py +++ b/api/oss/src/apis/fastapi/applications/router.py @@ -104,14 +104,13 @@ async def retrieve_application_revision( *, application_revision_retrieve_request: ApplicationRevisionRetrieveRequest, ): - if is_ee(): - if not await check_action_access( # type: ignore - project_id=request.state.project_id, - user_uid=request.state.user_id, - # - permission=Permission.VIEW_APPLICATIONS, # type: ignore - ): - raise FORBIDDEN_EXCEPTION # type: ignore + if not await check_action_access( # type: ignore + project_id=request.state.project_id, + user_uid=request.state.user_id, + # + permission=Permission.VIEW_APPLICATIONS, # type: ignore + ): + raise FORBIDDEN_EXCEPTION # type: ignore cache_key = { "artifact_ref": application_revision_retrieve_request.application_ref, # type: ignore diff --git a/api/oss/src/apis/fastapi/evaluators/router.py b/api/oss/src/apis/fastapi/evaluators/router.py index 4ee7f5cbd8..4461df9072 100644 --- a/api/oss/src/apis/fastapi/evaluators/router.py +++ b/api/oss/src/apis/fastapi/evaluators/router.py @@ -748,14 +748,13 @@ async def retrieve_evaluator_revision( *, evaluator_revision_retrieve_request: EvaluatorRevisionRetrieveRequest, ) -> EvaluatorRevisionResponse: - if is_ee(): - if not await check_action_access( # type: ignore - project_id=request.state.project_id, - user_uid=request.state.user_id, - # - permission=Permission.VIEW_EVALUATORS, # type: ignore - ): - raise FORBIDDEN_EXCEPTION # type: ignore + if not await check_action_access( # type: ignore + project_id=request.state.project_id, + user_uid=request.state.user_id, + # + permission=Permission.VIEW_EVALUATORS, # type: ignore + ): + raise FORBIDDEN_EXCEPTION # type: ignore cache_key = { "artifact_ref": evaluator_revision_retrieve_request.evaluator_ref, # type: ignore diff --git a/api/oss/src/apis/fastapi/observability/extractors/adapters/default_agenta_adapter.py 
b/api/oss/src/apis/fastapi/observability/extractors/adapters/default_agenta_adapter.py index 7203208ede..f9664d80ba 100644 --- a/api/oss/src/apis/fastapi/observability/extractors/adapters/default_agenta_adapter.py +++ b/api/oss/src/apis/fastapi/observability/extractors/adapters/default_agenta_adapter.py @@ -40,7 +40,9 @@ def process(self, attributes: CanonicalAttributes, features: SpanFeatures) -> No # Exceptions - Rebuilt from attributes.events to match previous output structure exception_events = attributes.get_events_by_name("exception") - if exception_events: # Process the first one if multiple exist, or adapt if all should be processed + if ( + exception_events + ): # Process the first one if multiple exist, or adapt if all should be processed event_data = exception_events[0] # Ensure timestamp is decoded and formatted as previously (likely to string by decode_value if it's datetime) decoded_ts = decode_value(event_data.timestamp) diff --git a/api/oss/src/apis/fastapi/observability/extractors/adapters/logfire_adapter.py b/api/oss/src/apis/fastapi/observability/extractors/adapters/logfire_adapter.py index f71b18fa72..c1106e118d 100644 --- a/api/oss/src/apis/fastapi/observability/extractors/adapters/logfire_adapter.py +++ b/api/oss/src/apis/fastapi/observability/extractors/adapters/logfire_adapter.py @@ -95,9 +95,12 @@ def process(self, bag: CanonicalAttributes, features: SpanFeatures) -> None: and transformed_attributes.get("ag.metrics.unit.tokens.completion") and not transformed_attributes.get("ag.metrics.unit.tokens.total") ): - transformed_attributes["ag.metrics.unit.tokens.total"] = ( - transformed_attributes.get("ag.metrics.unit.tokens.prompt") - + transformed_attributes.get("ag.metrics.unit.tokens.completion") + transformed_attributes[ + "ag.metrics.unit.tokens.total" + ] = transformed_attributes.get( + "ag.metrics.unit.tokens.prompt" + ) + transformed_attributes.get( + "ag.metrics.unit.tokens.completion" ) if not has_logfire_data: return diff --git a/api/oss/src/apis/fastapi/observability/opentelemetry/traces_proto.py b/api/oss/src/apis/fastapi/observability/opentelemetry/traces_proto.py index bbbc95d9a9..899bf0765c 100644 --- a/api/oss/src/apis/fastapi/observability/opentelemetry/traces_proto.py +++ b/api/oss/src/apis/fastapi/observability/opentelemetry/traces_proto.py @@ -3,7 +3,6 @@ # NO CHECKED-IN PROTOBUF GENCODE # source: traces.proto """Generated protocol buffer code.""" - from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database diff --git a/api/oss/src/apis/fastapi/shared/utils.py b/api/oss/src/apis/fastapi/shared/utils.py index 6982ddb8f8..b9accb73b9 100644 --- a/api/oss/src/apis/fastapi/shared/utils.py +++ b/api/oss/src/apis/fastapi/shared/utils.py @@ -12,11 +12,7 @@ def parse_metadata( flags: Optional[str] = None, tags: Optional[str] = None, meta: Optional[str] = None, -) -> Tuple[ - Optional[Flags], - Optional[Tags], - Optional[Meta], -]: +) -> Tuple[Optional[Flags], Optional[Tags], Optional[Meta],]: _flags = None try: _flags = loads(flags) if flags else None diff --git a/api/oss/src/apis/fastapi/testsets/router.py b/api/oss/src/apis/fastapi/testsets/router.py index 3c8ba795c4..9fcd8bcab6 100644 --- a/api/oss/src/apis/fastapi/testsets/router.py +++ b/api/oss/src/apis/fastapi/testsets/router.py @@ -1162,23 +1162,19 @@ async def fetch_simple_testset( if testset is None: return SimpleTestsetResponse() - testset_variant = ( - await 
self.simple_testsets_service.testsets_service.fetch_testset_variant( - project_id=UUID(request.state.project_id), - # - testset_ref=Reference(id=testset.id), - ) + testset_variant = await self.simple_testsets_service.testsets_service.fetch_testset_variant( + project_id=UUID(request.state.project_id), + # + testset_ref=Reference(id=testset.id), ) if testset_variant is None: return SimpleTestsetResponse() - testset_revision = ( - await self.simple_testsets_service.testsets_service.fetch_testset_revision( - project_id=UUID(request.state.project_id), - # - testset_variant_ref=Reference(id=testset_variant.id), - ) + testset_revision = await self.simple_testsets_service.testsets_service.fetch_testset_revision( + project_id=UUID(request.state.project_id), + # + testset_variant_ref=Reference(id=testset_variant.id), ) if testset_revision is None: diff --git a/api/oss/src/core/annotations/service.py b/api/oss/src/core/annotations/service.py index 3d9d734955..c7229ffbdc 100644 --- a/api/oss/src/core/annotations/service.py +++ b/api/oss/src/core/annotations/service.py @@ -146,12 +146,10 @@ async def create( ) if simple_evaluator: - evaluator_revision = ( - await self.evaluators_service.fetch_evaluator_revision( - project_id=project_id, - # - evaluator_ref=Reference(id=simple_evaluator.id), - ) + evaluator_revision = await self.evaluators_service.fetch_evaluator_revision( + project_id=project_id, + # + evaluator_ref=Reference(id=simple_evaluator.id), ) elif evaluator_revision.evaluator_id: simple_evaluator = await self.simple_evaluators_service.fetch( @@ -335,12 +333,10 @@ async def edit( ) if simple_evaluator: - evaluator_revision = ( - await self.evaluators_service.fetch_evaluator_revision( - project_id=project_id, - # - evaluator_ref=Reference(id=simple_evaluator.id), - ) + evaluator_revision = await self.evaluators_service.fetch_evaluator_revision( + project_id=project_id, + # + evaluator_ref=Reference(id=simple_evaluator.id), ) if not evaluator_revision or not evaluator_revision.data: diff --git a/api/oss/src/core/evaluations/service.py b/api/oss/src/core/evaluations/service.py index 5aa820cec6..a6fff35361 100644 --- a/api/oss/src/core/evaluations/service.py +++ b/api/oss/src/core/evaluations/service.py @@ -6,6 +6,7 @@ from celery import current_app as celery_dispatch +from oss.src.utils.common import is_ee from oss.src.utils.logging import get_module_logger from oss.src.core.shared.dtos import Reference, Windowing, Tags, Meta, Data @@ -178,7 +179,7 @@ async def refresh_runs( try: log.info( - "[LIVE] Dispatching...", + "[LIVE]", project_id=project_id, run_id=run.id, # @@ -186,24 +187,19 @@ async def refresh_runs( oldest=oldest, ) - celery_dispatch.send_task( # type: ignore - "src.tasks.evaluations.live.evaluate", - kwargs=dict( - project_id=project_id, - user_id=user_id, - # - run_id=run.id, - # - newest=newest, - oldest=oldest, - ), - ) - - log.info( - "[LIVE] Dispatched. 
", - project_id=project_id, - run_id=run.id, - ) + if is_ee(): + celery_dispatch.send_task( # type: ignore + "src.tasks.evaluations.live.evaluate", + kwargs=dict( + project_id=project_id, + user_id=user_id, + # + run_id=run.id, + # + newest=newest, + oldest=oldest, + ), + ) except Exception as e: # pylint: disable=broad-exception-caught log.error(f"[LIVE] Error refreshing run {run.id}: {e}", exc_info=True) @@ -1561,26 +1557,29 @@ async def start( return None if _evaluation.data.query_steps: - celery_dispatch.send_task( # type: ignore - "src.tasks.evaluations.batch.evaluate_queries", - kwargs=dict( - project_id=project_id, - user_id=user_id, - # - run_id=run.id, - ), - ) + if is_ee(): + celery_dispatch.send_task( # type: ignore + "src.tasks.evaluations.batch.evaluate_queries", + kwargs=dict( + project_id=project_id, + user_id=user_id, + # + run_id=run.id, + ), + ) elif _evaluation.data.testset_steps: - celery_dispatch.send_task( # type: ignore - "src.tasks.evaluations.batch.evaluate_testsets", - kwargs=dict( - project_id=project_id, - user_id=user_id, - # - run_id=run.id, - ), - ) + if is_ee(): + # TODO: Fix typing ? + celery_dispatch.send_task( # type: ignore + "src.tasks.evaluations.batch.evaluate_testsets", + kwargs=dict( + project_id=project_id, + user_id=user_id, + # + run_id=run.id, + ), + ) return _evaluation @@ -1804,12 +1803,10 @@ async def _make_evaluation_run_data( ) return None - testset_revision = ( - await self.testsets_service.fetch_testset_revision( - project_id=project_id, - # - testset_ref=testset_ref, - ) + testset_revision = await self.testsets_service.fetch_testset_revision( + project_id=project_id, + # + testset_ref=testset_ref, ) if ( @@ -2037,12 +2034,10 @@ async def _make_evaluation_run_data( ) return None - evaluator_revision = ( - await self.evaluators_service.fetch_evaluator_revision( - project_id=project_id, - # - evaluator_ref=evaluator_ref, - ) + evaluator_revision = await self.evaluators_service.fetch_evaluator_revision( + project_id=project_id, + # + evaluator_ref=evaluator_ref, ) if ( @@ -2062,12 +2057,10 @@ async def _make_evaluation_run_data( for evaluator_revision_id, origin in (evaluator_steps or {}).items(): evaluator_revision_ref = Reference(id=evaluator_revision_id) - evaluator_revision = ( - await self.evaluators_service.fetch_evaluator_revision( - project_id=project_id, - # - evaluator_revision_ref=evaluator_revision_ref, - ) + evaluator_revision = await self.evaluators_service.fetch_evaluator_revision( + project_id=project_id, + # + evaluator_revision_ref=evaluator_revision_ref, ) if not evaluator_revision or not evaluator_revision.slug: @@ -2086,12 +2079,10 @@ async def _make_evaluation_run_data( evaluator_variant_ref = Reference(id=evaluator_revision.variant_id) - evaluator_variant = ( - await self.evaluators_service.fetch_evaluator_variant( - project_id=project_id, - # - evaluator_variant_ref=evaluator_variant_ref, - ) + evaluator_variant = await self.evaluators_service.fetch_evaluator_variant( + project_id=project_id, + # + evaluator_variant_ref=evaluator_variant_ref, ) if not evaluator_variant: diff --git a/api/oss/src/core/testsets/service.py b/api/oss/src/core/testsets/service.py index 6bc781ef85..0d34def73d 100644 --- a/api/oss/src/core/testsets/service.py +++ b/api/oss/src/core/testsets/service.py @@ -490,12 +490,10 @@ async def create_testset_revision( ) if testset_revision.data and testset_revision.data.testcase_ids: - testset_revision.data.testcases = ( - await self.testcases_service.fetch_testcases( - project_id=project_id, - # - 
testcase_ids=testset_revision.data.testcase_ids, - ) + testset_revision.data.testcases = await self.testcases_service.fetch_testcases( + project_id=project_id, + # + testcase_ids=testset_revision.data.testcase_ids, ) return testset_revision @@ -558,12 +556,10 @@ async def fetch_testset_revision( ) if testset_revision.data and testset_revision.data.testcase_ids: - testset_revision.data.testcases = ( - await self.testcases_service.fetch_testcases( - project_id=project_id, - # - testcase_ids=testset_revision.data.testcase_ids, - ) + testset_revision.data.testcases = await self.testcases_service.fetch_testcases( + project_id=project_id, + # + testcase_ids=testset_revision.data.testcase_ids, ) return testset_revision @@ -599,12 +595,10 @@ async def edit_testset_revision( ) if testset_revision.data and testset_revision.data.testcase_ids: - testset_revision.data.testcases = ( - await self.testcases_service.fetch_testcases( - project_id=project_id, - # - testcase_ids=testset_revision.data.testcase_ids, - ) + testset_revision.data.testcases = await self.testcases_service.fetch_testcases( + project_id=project_id, + # + testcase_ids=testset_revision.data.testcase_ids, ) return testset_revision @@ -697,12 +691,10 @@ async def query_testset_revisions( ) if testset_revision.data and testset_revision.data.testcase_ids: - testset_revision.data.testcases = ( - await self.testcases_service.fetch_testcases( - project_id=project_id, - # - testcase_ids=testset_revision.data.testcase_ids, - ) + testset_revision.data.testcases = await self.testcases_service.fetch_testcases( + project_id=project_id, + # + testcase_ids=testset_revision.data.testcase_ids, ) testset_revisions.append(testset_revision) @@ -758,12 +750,10 @@ async def commit_testset_revision( ) if testset_revision.data and testset_revision.data.testcase_ids: - testset_revision.data.testcases = ( - await self.testcases_service.fetch_testcases( - project_id=project_id, - # - testcase_ids=testset_revision.data.testcase_ids, - ) + testset_revision.data.testcases = await self.testcases_service.fetch_testcases( + project_id=project_id, + # + testcase_ids=testset_revision.data.testcase_ids, ) return testset_revision @@ -795,12 +785,10 @@ async def log_testset_revisions( ) if testset_revision.data and testset_revision.data.testcase_ids: - testset_revision.data.testcases = ( - await self.testcases_service.fetch_testcases( - project_id=project_id, - # - testcase_ids=testset_revision.data.testcase_ids, - ) + testset_revision.data.testcases = await self.testcases_service.fetch_testcases( + project_id=project_id, + # + testcase_ids=testset_revision.data.testcase_ids, ) testset_revisions.append(testset_revision) diff --git a/api/oss/src/core/workflows/dtos.py b/api/oss/src/core/workflows/dtos.py index 880da2a44a..c6b71caccf 100644 --- a/api/oss/src/core/workflows/dtos.py +++ b/api/oss/src/core/workflows/dtos.py @@ -172,9 +172,9 @@ class WorkflowServiceVersion(BaseModel): class WorkflowServiceInterface(WorkflowServiceVersion): uri: Optional[str] = None # str (Enum) w/ validation url: Optional[str] = None # str w/ validation - headers: Optional[Dict[str, Union[Reference, str]]] = ( - None # either hardcoded or a secret - ) + headers: Optional[ + Dict[str, Union[Reference, str]] + ] = None # either hardcoded or a secret # handler: Optional[Callable] = None schemas: Optional[Dict[str, Schema]] = None # json-schema instead of pydantic diff --git a/api/oss/src/core/workflows/service.py b/api/oss/src/core/workflows/service.py index 866fddf38e..18e4835b97 100644 --- 
a/api/oss/src/core/workflows/service.py +++ b/api/oss/src/core/workflows/service.py @@ -732,10 +732,7 @@ async def invoke_workflow( request: WorkflowServiceRequest, # **kwargs, - ) -> Union[ - WorkflowServiceBatchResponse, - WorkflowServiceStreamResponse, - ]: + ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse,]: project = await get_project_by_id( project_id=str(project_id), ) diff --git a/api/oss/src/dbs/postgres/git/dao.py b/api/oss/src/dbs/postgres/git/dao.py index 8d275d6dae..f1bb94f45f 100644 --- a/api/oss/src/dbs/postgres/git/dao.py +++ b/api/oss/src/dbs/postgres/git/dao.py @@ -342,9 +342,7 @@ async def query_artifacts( if artifact_query.description: stmt = stmt.filter( - self.ArtifactDBE.description.ilike( - f"%{artifact_query.description}%" - ), # type: ignore + self.ArtifactDBE.description.ilike(f"%{artifact_query.description}%"), # type: ignore ) if include_archived is not True: @@ -1175,9 +1173,7 @@ async def query_revisions( if revision_query.description: stmt = stmt.filter( - self.RevisionDBE.description.ilike( - f"%{revision_query.description}%" - ) # type: ignore + self.RevisionDBE.description.ilike(f"%{revision_query.description}%") # type: ignore ) if include_archived is not True: diff --git a/api/oss/src/models/converters.py b/api/oss/src/models/converters.py index b7e9080dbf..768fded064 100644 --- a/api/oss/src/models/converters.py +++ b/api/oss/src/models/converters.py @@ -143,8 +143,7 @@ async def environment_db_to_output( ) -> EnvironmentOutput: deployed_app_variant_id = ( str(environment_db.deployed_app_variant_id) - if environment_db.deployed_app_variant_id - and isinstance(environment_db.deployed_app_variant_id, uuid.UUID) # type: ignore + if environment_db.deployed_app_variant_id and isinstance(environment_db.deployed_app_variant_id, uuid.UUID) # type: ignore else None ) if deployed_app_variant_id: diff --git a/api/oss/src/models/db/models.py b/api/oss/src/models/db/models.py index da7a99c099..4e78e945ac 100644 --- a/api/oss/src/models/db/models.py +++ b/api/oss/src/models/db/models.py @@ -58,14 +58,4 @@ ] if is_ee(): - models.extend( - [ - OrganizationDB, - WorkspaceDB, - APIKeyDB, - InvitationDB, - OrganizationMemberDB, - ProjectMemberDB, - WorkspaceMemberDB, - ] - ) # type: ignore + models.extend([OrganizationDB, WorkspaceDB, APIKeyDB, InvitationDB, OrganizationMemberDB, ProjectMemberDB, WorkspaceMemberDB]) # type: ignore diff --git a/api/oss/src/models/db_models.py b/api/oss/src/models/db_models.py index 867cb30156..3afa51acdb 100644 --- a/api/oss/src/models/db_models.py +++ b/api/oss/src/models/db_models.py @@ -89,6 +89,7 @@ class WorkspaceDB(Base): ) +# KEEP in oss/ class UserDB(Base): __tablename__ = "users" @@ -110,6 +111,7 @@ class UserDB(Base): ) +# KEEP in oss/ class ProjectDB(Base): __tablename__ = "projects" @@ -153,6 +155,7 @@ class ProjectDB(Base): testset = relationship("TestsetDB", cascade=CASCADE_ALL_DELETE, backref="project") +# KEEP in oss/ class AppDB(Base): __tablename__ = "app_db" @@ -187,6 +190,7 @@ class AppDB(Base): ) +# KEEP in oss/ class DeploymentDB(Base): __tablename__ = "deployments" @@ -213,6 +217,7 @@ class DeploymentDB(Base): app = relationship("AppDB", back_populates="deployment") +# KEEP in oss/ class VariantBaseDB(Base): __tablename__ = "bases" @@ -243,6 +248,7 @@ class VariantBaseDB(Base): project = relationship("oss.src.models.db_models.ProjectDB") +# KEEP in oss/ class AppVariantDB(Base): __tablename__ = "app_variants" @@ -287,6 +293,7 @@ class AppVariantDB(Base): ) +# KEEP in oss/ class 
AppVariantRevisionsDB(Base): __tablename__ = "app_variant_revisions" @@ -329,6 +336,7 @@ def get_config(self) -> dict: return {"config_name": self.config_name, "parameters": self.config_parameters} +# KEEP in oss/ class AppEnvironmentDB(Base): __tablename__ = "environments" @@ -366,6 +374,7 @@ class AppEnvironmentDB(Base): deployed_app_variant_revision = relationship("AppVariantRevisionsDB") +# KEEP in oss/ class AppEnvironmentRevisionDB(Base): __tablename__ = "environments_revisions" @@ -399,6 +408,7 @@ class AppEnvironmentRevisionDB(Base): modified_by = relationship("UserDB") +# KEEP in oss/ class TestsetDB(Base): __tablename__ = "testsets" @@ -422,6 +432,7 @@ class TestsetDB(Base): ) +# KEEP in oss/ class EvaluatorConfigDB(Base): __tablename__ = "auto_evaluator_configs" @@ -447,6 +458,7 @@ class EvaluatorConfigDB(Base): ) +# KEEP in oss/ or KILL class IDsMappingDB(Base): __tablename__ = "ids_mapping" @@ -519,267 +531,3 @@ class APIKeyDB(Base): project = relationship( "oss.src.models.db_models.ProjectDB", backref="api_key_project" ) - - -class HumanEvaluationVariantDB(Base): - __tablename__ = "human_evaluation_variants" - - id = Column( - UUID(as_uuid=True), - primary_key=True, - default=uuid.uuid7, - unique=True, - nullable=False, - ) - human_evaluation_id = Column( - UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE") - ) - variant_id = Column( - UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL") - ) - variant_revision_id = Column( - UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL") - ) - - variant = relationship("AppVariantDB", backref="evaluation_variant") - variant_revision = relationship( - "AppVariantRevisionsDB", backref="evaluation_variant_revision" - ) - - -class HumanEvaluationDB(Base): - __tablename__ = "human_evaluations" - - id = Column( - UUID(as_uuid=True), - primary_key=True, - default=uuid.uuid7, - unique=True, - nullable=False, - ) - app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE")) - project_id = Column( - UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE") - ) - status = Column(String) - evaluation_type = Column(String) - testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id")) - created_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) - ) - updated_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) - ) - - testset = relationship("TestsetDB") - evaluation_variant = relationship( - "HumanEvaluationVariantDB", - cascade=CASCADE_ALL_DELETE, - backref="human_evaluation", - ) - evaluation_scenario = relationship( - "HumanEvaluationScenarioDB", - cascade=CASCADE_ALL_DELETE, - backref="evaluation_scenario", - ) - - -class HumanEvaluationScenarioDB(Base): - __tablename__ = "human_evaluations_scenarios" - - id = Column( - UUID(as_uuid=True), - primary_key=True, - default=uuid.uuid7, - unique=True, - nullable=False, - ) - project_id = Column( - UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE") - ) - evaluation_id = Column( - UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE") - ) - inputs = Column( - mutable_json_type(dbtype=JSONB, nested=True) - ) # List of HumanEvaluationScenarioInput - outputs = Column( - mutable_json_type(dbtype=JSONB, nested=True) - ) # List of HumanEvaluationScenarioOutput - vote = Column(String) - score = Column(String) - correct_answer = Column(String) - created_at = Column( - DateTime(timezone=True), default=lambda: 
datetime.now(timezone.utc) - ) - updated_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) - ) - is_pinned = Column(Boolean) - note = Column(String) - - -class EvaluationAggregatedResultDB(Base): - __tablename__ = "auto_evaluation_aggregated_results" - - id = Column( - UUID(as_uuid=True), - primary_key=True, - default=uuid.uuid7, - unique=True, - nullable=False, - ) - evaluation_id = Column( - UUID(as_uuid=True), ForeignKey("auto_evaluations.id", ondelete="CASCADE") - ) - evaluator_config_id = Column( - UUID(as_uuid=True), - ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"), - ) - result = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result - - evaluator_config = relationship("EvaluatorConfigDB", backref="evaluator_config") - - -class EvaluationScenarioResultDB(Base): - __tablename__ = "auto_evaluation_scenario_results" - - id = Column( - UUID(as_uuid=True), - primary_key=True, - default=uuid.uuid7, - unique=True, - nullable=False, - ) - evaluation_scenario_id = Column( - UUID(as_uuid=True), - ForeignKey("auto_evaluation_scenarios.id", ondelete="CASCADE"), - ) - evaluator_config_id = Column( - UUID(as_uuid=True), - ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"), - ) - result = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result - - -class EvaluationDB(Base): - __tablename__ = "auto_evaluations" - - id = Column( - UUID(as_uuid=True), - primary_key=True, - default=uuid.uuid7, - unique=True, - nullable=False, - ) - app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE")) - project_id = Column( - UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE") - ) - status = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result - testset_id = Column( - UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL") - ) - variant_id = Column( - UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL") - ) - variant_revision_id = Column( - UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL") - ) - average_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result - total_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result - average_latency = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result - created_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) - ) - updated_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) - ) - - project = relationship("oss.src.models.db_models.ProjectDB") - testset = relationship("TestsetDB") - variant = relationship("AppVariantDB") - variant_revision = relationship("AppVariantRevisionsDB") - aggregated_results = relationship( - "EvaluationAggregatedResultDB", - cascade=CASCADE_ALL_DELETE, - backref="evaluation", - ) - evaluation_scenarios = relationship( - "EvaluationScenarioDB", cascade=CASCADE_ALL_DELETE, backref="evaluation" - ) - evaluator_configs = relationship( - "EvaluationEvaluatorConfigDB", - cascade=CASCADE_ALL_DELETE, - backref="evaluation", - ) - - -class EvaluationEvaluatorConfigDB(Base): - __tablename__ = "auto_evaluation_evaluator_configs" - - id = Column( - UUID(as_uuid=True), - primary_key=True, - default=uuid.uuid7, - unique=True, - nullable=False, - ) - evaluation_id = Column( - UUID(as_uuid=True), - ForeignKey("auto_evaluations.id", ondelete="CASCADE"), - primary_key=True, - ) - evaluator_config_id = Column( - UUID(as_uuid=True), - ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"), - 
primary_key=True, - ) - - -class EvaluationScenarioDB(Base): - __tablename__ = "auto_evaluation_scenarios" - - id = Column( - UUID(as_uuid=True), - primary_key=True, - default=uuid.uuid7, - unique=True, - nullable=False, - ) - project_id = Column( - UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE") - ) - evaluation_id = Column( - UUID(as_uuid=True), ForeignKey("auto_evaluations.id", ondelete="CASCADE") - ) - variant_id = Column( - UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL") - ) - inputs = Column( - mutable_json_type(dbtype=JSONB, nested=True) - ) # List of EvaluationScenarioInput - outputs = Column( - mutable_json_type(dbtype=JSONB, nested=True) - ) # List of EvaluationScenarioOutput - correct_answers = Column( - mutable_json_type(dbtype=JSONB, nested=True) - ) # List of CorrectAnswer - is_pinned = Column(Boolean) - note = Column(String) - latency = Column(Integer) - cost = Column(Integer) - created_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) - ) - updated_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) - ) - - project = relationship("oss.src.models.db_models.ProjectDB") - variant = relationship("AppVariantDB") - results = relationship( - "EvaluationScenarioResultDB", - cascade=CASCADE_ALL_DELETE, - backref="evaluation_scenario", - ) diff --git a/api/oss/src/models/deprecated_models.py b/api/oss/src/models/deprecated_models.py index 85ac4be0c4..070536fa13 100644 --- a/api/oss/src/models/deprecated_models.py +++ b/api/oss/src/models/deprecated_models.py @@ -442,7 +442,9 @@ class DeprecatedEvaluationDB(DeprecatedBase): ) average_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # type: ignore # Result total_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # type: ignore # Result - average_latency = Column(mutable_json_type(dbtype=JSONB, nested=True)) # type: ignore # Result + average_latency = Column( + mutable_json_type(dbtype=JSONB, nested=True) + ) # type: ignore # Result created_at = Column( DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) ) diff --git a/api/oss/src/resources/evaluators/evaluators.py b/api/oss/src/resources/evaluators/evaluators.py index cbca48d4fc..53a2d48542 100644 --- a/api/oss/src/resources/evaluators/evaluators.py +++ b/api/oss/src/resources/evaluators/evaluators.py @@ -229,12 +229,12 @@ "description": "Extract information from the user's response.", "type": "object", "properties": { - "score": { + "correctness": { "type": "boolean", "description": "The grade results", } }, - "required": ["score"], + "required": ["correctness"], "strict": True, }, }, @@ -264,12 +264,12 @@ "description": "Extract information from the user's response.", "type": "object", "properties": { - "score": { + "correctness": { "type": "boolean", "description": "The hallucination detection result", } }, - "required": ["score"], + "required": ["correctness"], "strict": True, }, }, @@ -339,12 +339,12 @@ "description": "Extract information from the user's response.", "type": "object", "properties": { - "score": { + "correctness": { "type": "boolean", "description": "The grade results", } }, - "required": ["score"], + "required": ["correctness"], "strict": True, }, }, diff --git a/api/oss/src/routers/app_router.py b/api/oss/src/routers/app_router.py index d0a60affca..52e724ee83 100644 --- a/api/oss/src/routers/app_router.py +++ b/api/oss/src/routers/app_router.py @@ -389,9 +389,7 @@ async def list_apps( """ if is_ee(): - user_org_workspace_data 
= await get_user_org_and_workspace_id( - request.state.user_id - ) # type: ignore + user_org_workspace_data = await get_user_org_and_workspace_id(request.state.user_id) # type: ignore has_permission = await check_rbac_permission( # type: ignore user_org_workspace_data=user_org_workspace_data, project_id=request.state.project_id, diff --git a/api/oss/src/routers/configs_router.py b/api/oss/src/routers/configs_router.py index 2271fc41bc..8c432b31d6 100644 --- a/api/oss/src/routers/configs_router.py +++ b/api/oss/src/routers/configs_router.py @@ -94,9 +94,9 @@ async def get_config( "parameters": found_variant.config_parameters, } - assert "name" and "parameters" in config, ( - "'name' and 'parameters' not found in configuration" - ) + assert ( + "name" in config and "parameters" in config + ), "'name' and 'parameters' not found in configuration" return GetConfigResponse( config_name=config["name"], # type: ignore current_version=variant_revision, # type: ignore diff --git a/api/oss/src/routers/user_profile.py b/api/oss/src/routers/user_profile.py index 0b082467b6..67a66f21ae 100644 --- a/api/oss/src/routers/user_profile.py +++ b/api/oss/src/routers/user_profile.py @@ -39,9 +39,9 @@ async def user_profile(request: Request): user = await db_manager.get_user_with_id(user_id=request.state.user_id) - assert user is not None, ( - "User not found. Please ensure that the user_id is specified correctly." - ) + assert ( + user is not None + ), "User not found. Please ensure that the user_id is specified correctly." user = User( id=str(user.id), diff --git a/api/oss/src/routers/variants_router.py b/api/oss/src/routers/variants_router.py index 405910d540..8edeef5d3c 100644 --- a/api/oss/src/routers/variants_router.py +++ b/api/oss/src/routers/variants_router.py @@ -393,9 +393,9 @@ async def get_variant_revision( revision_number: int, request: Request, ): - assert variant_id != "undefined", ( - "Variant id is required to retrieve variant revision" - ) + assert ( + variant_id != "undefined" + ), "Variant id is required to retrieve variant revision" app_variant = await db_manager.fetch_app_variant_by_id(app_variant_id=variant_id) if is_ee(): diff --git a/api/oss/src/services/app_manager.py b/api/oss/src/services/app_manager.py index 7128b2f54e..7e11084485 100644 --- a/api/oss/src/services/app_manager.py +++ b/api/oss/src/services/app_manager.py @@ -42,7 +42,7 @@ async def get_appdb_str_by_id(object_id: str, object_type: str) -> str: raise db_manager.NoResultFound(f"Variant with id {object_id} not found") return str(app_variant_db.app_id) elif object_type == "evaluation": - evaluation_db = await db_manager.fetch_evaluation_by_id( + evaluation_db = await db_manager_ee.fetch_evaluation_by_id( project_id=project_id, evaluation_id=object_id, ) @@ -135,12 +135,12 @@ async def terminate_and_remove_app_variant( Exception: Any other exception raised during the operation.
""" - assert app_variant_id or app_variant_db, ( - "Either app_variant_id or app_variant_db must be provided" - ) - assert not (app_variant_id and app_variant_db), ( - "Only one of app_variant_id or app_variant_db must be provided" - ) + assert ( + app_variant_id or app_variant_db + ), "Either app_variant_id or app_variant_db must be provided" + assert not ( + app_variant_id and app_variant_db + ), "Only one of app_variant_id or app_variant_db must be provided" if app_variant_id: app_variant_db = await db_manager.fetch_app_variant_by_id(app_variant_id) diff --git a/api/oss/src/services/auth_helper.py b/api/oss/src/services/auth_helper.py index fb1bd08bd2..9f49188cb3 100644 --- a/api/oss/src/services/auth_helper.py +++ b/api/oss/src/services/auth_helper.py @@ -483,9 +483,9 @@ async def verify_bearer_token( else: workspaces = await db_manager.get_workspaces() - assert len(workspaces) == 1, ( - "You can only have a single workspace in OSS." - ) + assert ( + len(workspaces) == 1 + ), "You can only have a single workspace in OSS." workspace_id = str(workspaces[0].id) project_id = await db_manager.get_default_project_id_from_workspace( diff --git a/api/oss/src/services/converters.py b/api/oss/src/services/converters.py deleted file mode 100644 index ad9cb64169..0000000000 --- a/api/oss/src/services/converters.py +++ /dev/null @@ -1,191 +0,0 @@ -import uuid -from typing import List, Dict, Any -from datetime import datetime, timezone - -from oss.src.services import db_manager -from oss.src.models.api.evaluation_model import ( - CorrectAnswer, - Evaluation, - HumanEvaluation, - EvaluationScenario, - SimpleEvaluationOutput, - EvaluationScenarioInput, - HumanEvaluationScenario, - EvaluationScenarioOutput, -) -from oss.src.models.db_models import ( - EvaluationDB, - HumanEvaluationDB, - EvaluationScenarioDB, - HumanEvaluationScenarioDB, -) - - -async def human_evaluation_db_to_simple_evaluation_output( - human_evaluation_db: HumanEvaluationDB, -) -> SimpleEvaluationOutput: - evaluation_variants = await db_manager.fetch_human_evaluation_variants( - human_evaluation_id=str(human_evaluation_db.id) - ) - return SimpleEvaluationOutput( - id=str(human_evaluation_db.id), - app_id=str(human_evaluation_db.app_id), - project_id=str(human_evaluation_db.project_id), - status=human_evaluation_db.status, # type: ignore - evaluation_type=human_evaluation_db.evaluation_type, # type: ignore - variant_ids=[ - str(evaluation_variant.variant_id) - for evaluation_variant in evaluation_variants - ], - ) - - -async def evaluation_db_to_pydantic( - evaluation_db: EvaluationDB, -) -> Evaluation: - variant_name = ( - evaluation_db.variant.variant_name - if evaluation_db.variant.variant_name - else str(evaluation_db.variant_id) - ) - aggregated_results = aggregated_result_of_evaluation_to_pydantic( - evaluation_db.aggregated_results - ) - - return Evaluation( - id=str(evaluation_db.id), - app_id=str(evaluation_db.app_id), - project_id=str(evaluation_db.project_id), - status=evaluation_db.status, - variant_ids=[str(evaluation_db.variant_id)], - variant_revision_ids=[str(evaluation_db.variant_revision_id)], - revisions=[str(evaluation_db.variant_revision.revision)], - variant_names=[variant_name], - testset_id=str(evaluation_db.testset_id), - testset_name=evaluation_db.testset.name, - aggregated_results=aggregated_results, - created_at=str(evaluation_db.created_at), - updated_at=str(evaluation_db.updated_at), - average_cost=evaluation_db.average_cost, - total_cost=evaluation_db.total_cost, - 
average_latency=evaluation_db.average_latency, - ) - - -async def human_evaluation_db_to_pydantic( - evaluation_db: HumanEvaluationDB, -) -> HumanEvaluation: - evaluation_variants = await db_manager.fetch_human_evaluation_variants( - human_evaluation_id=str(evaluation_db.id) # type: ignore - ) - - revisions = [] - variants_ids = [] - variants_names = [] - variants_revision_ids = [] - for evaluation_variant in evaluation_variants: - variant_name = ( - evaluation_variant.variant.variant_name - if isinstance(evaluation_variant.variant_id, uuid.UUID) - else str(evaluation_variant.variant_id) - ) - variants_names.append(str(variant_name)) - variants_ids.append(str(evaluation_variant.variant_id)) - variant_revision = ( - str(evaluation_variant.variant_revision.revision) - if isinstance(evaluation_variant.variant_revision_id, uuid.UUID) - else " None" - ) - revisions.append(variant_revision) - variants_revision_ids.append(str(evaluation_variant.variant_revision_id)) - - return HumanEvaluation( - id=str(evaluation_db.id), - app_id=str(evaluation_db.app_id), - project_id=str(evaluation_db.project_id), - status=evaluation_db.status, # type: ignore - evaluation_type=evaluation_db.evaluation_type, # type: ignore - variant_ids=variants_ids, - variant_names=variants_names, - testset_id=str(evaluation_db.testset_id), - testset_name=evaluation_db.testset.name, - variants_revision_ids=variants_revision_ids, - revisions=revisions, - created_at=str(evaluation_db.created_at), # type: ignore - updated_at=str(evaluation_db.updated_at), # type: ignore - ) - - -def human_evaluation_scenario_db_to_pydantic( - evaluation_scenario_db: HumanEvaluationScenarioDB, evaluation_id: str -) -> HumanEvaluationScenario: - return HumanEvaluationScenario( - id=str(evaluation_scenario_db.id), - evaluation_id=evaluation_id, - inputs=evaluation_scenario_db.inputs, # type: ignore - outputs=evaluation_scenario_db.outputs, # type: ignore - vote=evaluation_scenario_db.vote, # type: ignore - score=evaluation_scenario_db.score, # type: ignore - correct_answer=evaluation_scenario_db.correct_answer, # type: ignore - is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore - note=evaluation_scenario_db.note or "", # type: ignore - ) - - -def aggregated_result_of_evaluation_to_pydantic( - evaluation_aggregated_results: List, -) -> List[dict]: - transformed_results = [] - for aggregated_result in evaluation_aggregated_results: - evaluator_config_dict = ( - { - "id": str(aggregated_result.evaluator_config.id), - "name": aggregated_result.evaluator_config.name, - "evaluator_key": aggregated_result.evaluator_config.evaluator_key, - "settings_values": aggregated_result.evaluator_config.settings_values, - "created_at": str(aggregated_result.evaluator_config.created_at), - "updated_at": str(aggregated_result.evaluator_config.updated_at), - } - if isinstance(aggregated_result.evaluator_config_id, uuid.UUID) - else None - ) - transformed_results.append( - { - "evaluator_config": ( - {} if evaluator_config_dict is None else evaluator_config_dict - ), - "result": aggregated_result.result, - } - ) - return transformed_results - - -async def evaluation_scenario_db_to_pydantic( - evaluation_scenario_db: EvaluationScenarioDB, evaluation_id: str -) -> EvaluationScenario: - scenario_results = [ - { - "evaluator_config": str(scenario_result.evaluator_config_id), - "result": scenario_result.result, - } - for scenario_result in evaluation_scenario_db.results - ] - return EvaluationScenario( - id=str(evaluation_scenario_db.id), - 
evaluation_id=evaluation_id, - inputs=[ - EvaluationScenarioInput(**scenario_input) # type: ignore - for scenario_input in evaluation_scenario_db.inputs - ], - outputs=[ - EvaluationScenarioOutput(**scenario_output) # type: ignore - for scenario_output in evaluation_scenario_db.outputs - ], - correct_answers=[ - CorrectAnswer(**correct_answer) # type: ignore - for correct_answer in evaluation_scenario_db.correct_answers - ], - is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore - note=evaluation_scenario_db.note or "", # type: ignore - results=scenario_results, # type: ignore - ) diff --git a/api/oss/src/services/db_manager.py b/api/oss/src/services/db_manager.py index 09e4ef2a0a..54fef759d3 100644 --- a/api/oss/src/services/db_manager.py +++ b/api/oss/src/services/db_manager.py @@ -10,7 +10,7 @@ from sqlalchemy import func, or_, asc from sqlalchemy.ext.asyncio import AsyncSession from supertokens_python.types import AccountInfo -from sqlalchemy.orm import joinedload, load_only, aliased +from sqlalchemy.orm import joinedload, load_only, selectinload from sqlalchemy.exc import NoResultFound, MultipleResultsFound, SQLAlchemyError from supertokens_python.asyncio import list_users_by_account_info from supertokens_python.asyncio import delete_user as delete_user_from_supertokens @@ -22,6 +22,7 @@ from oss.src.dbs.postgres.shared.engine import engine from oss.src.services.json_importer_helper import get_json + if is_ee(): from ee.src.models.db_models import ProjectDB, WorkspaceDB else: @@ -47,25 +48,6 @@ AppType, ConfigDB, ) -from oss.src.models.shared_models import ( - Result, - CorrectAnswer, - AggregatedResult, - EvaluationScenarioResult, - EvaluationScenarioInput, - EvaluationScenarioOutput, - HumanEvaluationScenarioInput, -) -from oss.src.models.db_models import ( - EvaluationDB, - HumanEvaluationDB, - EvaluationScenarioDB, - HumanEvaluationScenarioDB, - HumanEvaluationVariantDB, - EvaluationScenarioResultDB, - EvaluationEvaluatorConfigDB, - EvaluationAggregatedResultDB, -) log = get_module_logger(__name__) @@ -223,12 +205,8 @@ async def fetch_app_variant_by_id(app_variant_id: str) -> Optional[AppVariantDB] assert app_variant_id is not None, "app_variant_id cannot be None" async with engine.core_session() as session: query = select(AppVariantDB).options( - joinedload(AppVariantDB.app.of_type(AppDB)).load_only( - AppDB.id, AppDB.app_name - ), # type: ignore - joinedload(AppVariantDB.base.of_type(VariantBaseDB)) - .joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)) - .load_only(DeploymentDB.id, DeploymentDB.uri), # type: ignore + joinedload(AppVariantDB.app.of_type(AppDB)).load_only(AppDB.id, AppDB.app_name), # type: ignore + joinedload(AppVariantDB.base.of_type(VariantBaseDB)).joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)).load_only(DeploymentDB.id, DeploymentDB.uri), # type: ignore ) result = await session.execute( @@ -568,9 +546,9 @@ async def create_new_app_variant( AppVariantDB: The created variant. 
""" - assert config.parameters == {}, ( - "Parameters should be empty when calling create_new_app_variant (otherwise revision should not be set to 0)" - ) + assert ( + config.parameters == {} + ), "Parameters should be empty when calling create_new_app_variant (otherwise revision should not be set to 0)" async with engine.core_session() as session: variant = AppVariantDB( @@ -1873,9 +1851,7 @@ async def add_variant_from_base_and_config( app_variant_for_base = await list_variants_for_base(base_db) already_exists = any( - av - for av in app_variant_for_base - if av.config_name == new_config_name # type: ignore + av for av in app_variant_for_base if av.config_name == new_config_name # type: ignore ) if already_exists: raise ValueError("App variant with the same name already exists") @@ -1960,12 +1936,8 @@ async def list_app_variants(app_id: str): result = await session.execute( select(AppVariantDB) .options( - joinedload(AppVariantDB.app.of_type(AppDB)).load_only( - AppDB.id, AppDB.app_name - ), # type: ignore - joinedload(AppVariantDB.base.of_type(VariantBaseDB)) - .joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)) - .load_only(DeploymentDB.uri), # type: ignore + joinedload(AppVariantDB.app.of_type(AppDB)).load_only(AppDB.id, AppDB.app_name), # type: ignore + joinedload(AppVariantDB.base.of_type(VariantBaseDB)).joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)).load_only(DeploymentDB.uri), # type: ignore ) .where(AppVariantDB.hidden.is_not(True)) .filter_by(app_id=uuid.UUID(app_uuid)) @@ -2263,9 +2235,7 @@ async def fetch_app_environment_revision_by_app_variant_revision_id( ) if is_ee(): query = query.options( - joinedload( - AppEnvironmentRevisionDB.deployed_app_variant.of_type(AppVariantDB) - ), # type: ignore + joinedload(AppEnvironmentRevisionDB.deployed_app_variant.of_type(AppVariantDB)), # type: ignore ) result = await session.execute(query) app_environment = result.scalars().one_or_none() @@ -2288,9 +2258,7 @@ async def fetch_app_variant_revision_by_id( result = await session.execute( select(AppVariantRevisionsDB) .options( - joinedload(AppVariantRevisionsDB.base.of_type(VariantBaseDB)) - .joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)) - .load_only(DeploymentDB.id, DeploymentDB.uri), # type: ignore + joinedload(AppVariantRevisionsDB.base.of_type(VariantBaseDB)).joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)).load_only(DeploymentDB.id, DeploymentDB.uri), # type: ignore ) .filter_by(id=uuid.UUID(variant_revision_id)) ) @@ -2317,7 +2285,9 @@ async def fetch_environment_revisions_for_environment(environment: AppEnvironmen query = query.options( joinedload( AppEnvironmentRevisionDB.modified_by.of_type(UserDB) - ).load_only(UserDB.username) # type: ignore + ).load_only( + UserDB.username + ) # type: ignore ) else: query = query.options( @@ -2507,9 +2477,9 @@ async def create_environment_revision( ) if kwargs: - assert "deployed_app_variant_revision" in kwargs, ( - "Deployed app variant revision is required" - ) + assert ( + "deployed_app_variant_revision" in kwargs + ), "Deployed app variant revision is required" assert ( isinstance( kwargs.get("deployed_app_variant_revision"), AppVariantRevisionsDB @@ -2524,9 +2494,9 @@ async def create_environment_revision( ) deployment = kwargs.get("deployment") - assert isinstance(deployment, DeploymentDB) == True, ( - "Type of deployment in kwargs is not correct" - ) + assert ( + isinstance(deployment, DeploymentDB) == True + ), "Type of deployment in kwargs is not correct" if deployment is not None: 
environment_revision.deployment_id = deployment.id # type: ignore @@ -2594,7 +2564,9 @@ async def fetch_app_variant_revision( ) else: query = base_query.options( - joinedload(AppVariantRevisionsDB.modified_by).load_only(UserDB.username) # type: ignore + joinedload(AppVariantRevisionsDB.modified_by).load_only( + UserDB.username + ) # type: ignore ) result = await session.execute(query) app_variant_revision = result.scalars().first() @@ -2787,12 +2759,8 @@ async def get_app_variant_instance_by_id( result = await session.execute( select(AppVariantDB) .options( - joinedload(AppVariantDB.app.of_type(AppDB)).load_only( - AppDB.id, AppDB.app_name - ), # type: ignore - joinedload(AppVariantDB.base.of_type(VariantBaseDB)) - .joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)) - .load_only(DeploymentDB.uri), # type: ignore + joinedload(AppVariantDB.app.of_type(AppDB)).load_only(AppDB.id, AppDB.app_name), # type: ignore + joinedload(AppVariantDB.base.of_type(VariantBaseDB)).joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)).load_only(DeploymentDB.uri), # type: ignore ) .filter_by(id=uuid.UUID(variant_id), project_id=uuid.UUID(project_id)), ) @@ -2990,7 +2958,7 @@ async def find_previous_variant_from_base_id( async def update_base( base_id: str, **kwargs: dict, -) -> Optional[VariantBaseDB]: +) -> VariantBaseDB: """Update the base object in the database with the provided id. Arguments: @@ -3098,9 +3066,7 @@ async def fetch_evaluators_configs(project_id: str): return evaluators_configs -async def fetch_evaluator_config( - evaluator_config_id: str, -) -> Optional[EvaluatorConfigDB]: +async def fetch_evaluator_config(evaluator_config_id: str) -> EvaluatorConfigDB: """Fetch evaluator configurations from the database. Args: @@ -3153,7 +3119,7 @@ async def check_if_evaluators_exist_in_list_of_evaluators_configs( async def fetch_evaluator_config_by_appId( app_id: str, evaluator_name: str -) -> Optional[EvaluatorConfigDB]: +) -> EvaluatorConfigDB: """Fetch the evaluator config from the database using the app Id and evaluator name. Args: @@ -3280,9 +3246,9 @@ async def get_object_uuid(object_id: str, table_name: str) -> str: # Use the object_id directly if it is not a valid MongoDB ObjectId object_uuid_as_str = object_id - assert object_uuid_as_str is not None, ( - f"{table_name} Object UUID cannot be none. Is the object_id {object_id} a valid MongoDB ObjectId?" - ) + assert ( + object_uuid_as_str is not None + ), f"{table_name} Object UUID cannot be none. Is the object_id {object_id} a valid MongoDB ObjectId?" return object_uuid_as_str @@ -3304,7 +3270,7 @@ async def fetch_corresponding_object_uuid(table_name: str, object_id: str) -> st return str(object_mapping.uuid) -async def fetch_default_project() -> Optional[ProjectDB]: +async def fetch_default_project() -> ProjectDB: """ Fetch the default project from the database. Returns: @@ -3317,9 +3283,7 @@ async def fetch_default_project() -> Optional[ProjectDB]: return default_project -async def get_user_api_key_by_prefix( - api_key_prefix: str, user_id: str -) -> Optional[APIKeyDB]: +async def get_user_api_key_by_prefix(api_key_prefix: str, user_id: str) -> APIKeyDB: """ Gets the user api key by prefix. 
@@ -3365,777 +3329,3 @@ async def update_api_key_timestamp(api_key_id: str) -> None: await session.commit() await session.refresh(api_key) - - -async def fetch_evaluation_status_by_id( - project_id: str, - evaluation_id: str, -) -> Optional[str]: - """Fetch only the status of an evaluation by its ID.""" - assert evaluation_id is not None, "evaluation_id cannot be None" - - async with engine.core_session() as session: - query = ( - select(EvaluationDB) - .filter_by(project_id=project_id, id=uuid.UUID(evaluation_id)) - .options(load_only(EvaluationDB.status)) - ) - - result = await session.execute(query) - evaluation = result.scalars().first() - return evaluation.status if evaluation else None - - -async def fetch_evaluation_by_id( - project_id: str, - evaluation_id: str, -) -> Optional[EvaluationDB]: - """Fetches an evaluation by its ID. - - Args: - evaluation_id (str): The ID of the evaluation to fetch. - - Returns: - EvaluationDB: The fetched evaluation, or None if no evaluation was found. - """ - - assert evaluation_id is not None, "evaluation_id cannot be None" - async with engine.core_session() as session: - base_query = select(EvaluationDB).filter_by( - project_id=project_id, - id=uuid.UUID(evaluation_id), - ) - query = base_query.options( - joinedload(EvaluationDB.testset.of_type(TestsetDB)).load_only( - TestsetDB.id, TestsetDB.name - ), # type: ignore - ) - - result = await session.execute( - query.options( - joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only( - AppVariantDB.id, AppVariantDB.variant_name - ), # type: ignore - joinedload( - EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB) - ).load_only(AppVariantRevisionsDB.revision), # type: ignore - joinedload( - EvaluationDB.aggregated_results.of_type( - EvaluationAggregatedResultDB - ) - ).joinedload(EvaluationAggregatedResultDB.evaluator_config), - ) - ) - evaluation = result.unique().scalars().first() - return evaluation - - -async def list_human_evaluations(app_id: str, project_id: str): - """ - Fetches human evaluations belonging to an App. - - Args: - app_id (str): The application identifier - """ - - async with engine.core_session() as session: - base_query = ( - select(HumanEvaluationDB) - .filter_by(app_id=uuid.UUID(app_id), project_id=uuid.UUID(project_id)) - .filter(HumanEvaluationDB.testset_id.isnot(None)) - ) - query = base_query.options( - joinedload(HumanEvaluationDB.testset.of_type(TestsetDB)).load_only( - TestsetDB.id, TestsetDB.name - ), # type: ignore - ) - - result = await session.execute(query) - human_evaluations = result.scalars().all() - return human_evaluations - - -async def create_human_evaluation( - app: AppDB, - status: str, - evaluation_type: str, - testset_id: str, - variants_ids: List[str], -): - """ - Creates a human evaluation.
- - Args: - app (AppDB): The app object - status (str): The status of the evaluation - evaluation_type (str): The evaluation type - testset_id (str): The ID of the evaluation testset - variants_ids (List[str]): The IDs of the variants for the evaluation - """ - - async with engine.core_session() as session: - human_evaluation = HumanEvaluationDB( - app_id=app.id, - project_id=app.project_id, - status=status, - evaluation_type=evaluation_type, - testset_id=testset_id, - ) - - session.add(human_evaluation) - await session.commit() - await session.refresh(human_evaluation, attribute_names=["testset"]) - - # create variants for human evaluation - await create_human_evaluation_variants( - human_evaluation_id=str(human_evaluation.id), - variants_ids=variants_ids, - ) - return human_evaluation - - -async def fetch_human_evaluation_variants(human_evaluation_id: str): - """ - Fetches human evaluation variants. - - Args: - human_evaluation_id (str): The human evaluation ID - - Returns: - The human evaluation variants. - """ - - async with engine.core_session() as session: - base_query = select(HumanEvaluationVariantDB).filter_by( - human_evaluation_id=uuid.UUID(human_evaluation_id) - ) - query = base_query.options( - joinedload( - HumanEvaluationVariantDB.variant.of_type(AppVariantDB) - ).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore - joinedload( - HumanEvaluationVariantDB.variant_revision.of_type(AppVariantRevisionsDB) - ).load_only(AppVariantRevisionsDB.id, AppVariantRevisionsDB.revision), # type: ignore - ) - - result = await session.execute(query) - evaluation_variants = result.scalars().all() - return evaluation_variants - - -async def create_human_evaluation_variants( - human_evaluation_id: str, variants_ids: List[str] -): - """ - Creates human evaluation variants. - - Args: - human_evaluation_id (str): The human evaluation identifier - variants_ids (List[str]): The variants identifiers - project_id (str): The project ID - """ - - variants_dict = {} - for variant_id in variants_ids: - variant = await fetch_app_variant_by_id(app_variant_id=variant_id) - if variant: - variants_dict[variant_id] = variant - - variants_revisions_dict = {} - for variant_id, variant in variants_dict.items(): - variant_revision = await fetch_app_variant_revision_by_variant( - app_variant_id=str(variant.id), - project_id=str(variant.project_id), - revision=variant.revision, # type: ignore - ) - if variant_revision: - variants_revisions_dict[variant_id] = variant_revision - - if set(variants_dict.keys()) != set(variants_revisions_dict.keys()): - raise ValueError("Mismatch between variants and their revisions") - - async with engine.core_session() as session: - for variant_id in variants_ids: - variant = variants_dict[variant_id] - variant_revision = variants_revisions_dict[variant_id] - human_evaluation_variant = HumanEvaluationVariantDB( - human_evaluation_id=uuid.UUID(human_evaluation_id), - variant_id=variant.id, # type: ignore - variant_revision_id=variant_revision.id, # type: ignore - ) - session.add(human_evaluation_variant) - - await session.commit() - - -async def fetch_human_evaluation_by_id( - evaluation_id: str, -) -> Optional[HumanEvaluationDB]: - """ - Fetches an evaluation by its ID. - - Args: - evaluation_id (str): The ID of the evaluation to fetch. - - Returns: - EvaluationDB: The fetched evaluation, or None if no evaluation was found.
- """ - - assert evaluation_id is not None, "evaluation_id cannot be None" - async with engine.core_session() as session: - base_query = select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id)) - query = base_query.options( - joinedload(HumanEvaluationDB.testset.of_type(TestsetDB)).load_only( - TestsetDB.id, TestsetDB.name - ), # type: ignore - ) - result = await session.execute(query) - evaluation = result.scalars().first() - return evaluation - - -async def update_human_evaluation(evaluation_id: str, values_to_update: dict): - """Updates human evaluation with the specified values. - - Args: - evaluation_id (str): The evaluation ID - values_to_update (dict): The values to update - - Exceptions: - NoResultFound: if human evaluation is not found - """ - - async with engine.core_session() as session: - result = await session.execute( - select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id)) - ) - human_evaluation = result.scalars().first() - if not human_evaluation: - raise NoResultFound(f"Human evaluation with id {evaluation_id} not found") - - for key, value in values_to_update.items(): - if hasattr(human_evaluation, key): - setattr(human_evaluation, key, value) - - await session.commit() - await session.refresh(human_evaluation) - - -async def delete_human_evaluation(evaluation_id: str): - """Delete the evaluation by its ID. - - Args: - evaluation_id (str): The ID of the evaluation to delete. - """ - - assert evaluation_id is not None, "evaluation_id cannot be None" - async with engine.core_session() as session: - result = await session.execute( - select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id)) - ) - evaluation = result.scalars().first() - if not evaluation: - raise NoResultFound(f"Human evaluation with id {evaluation_id} not found") - - await session.delete(evaluation) - await session.commit() - - -async def create_human_evaluation_scenario( - inputs: List[HumanEvaluationScenarioInput], - project_id: str, - evaluation_id: str, - evaluation_extend: Dict[str, Any], -): - """ - Creates a human evaluation scenario. - - Args: - inputs (List[HumanEvaluationScenarioInput]): The inputs. - evaluation_id (str): The evaluation identifier. - evaluation_extend (Dict[str, any]): An extended required payload for the evaluation scenario. Contains score, vote, and correct_answer. - """ - - async with engine.core_session() as session: - evaluation_scenario = HumanEvaluationScenarioDB( - **evaluation_extend, - project_id=uuid.UUID(project_id), - evaluation_id=uuid.UUID(evaluation_id), - inputs=[input.model_dump() for input in inputs], - outputs=[], - ) - - session.add(evaluation_scenario) - await session.commit() - - -async def update_human_evaluation_scenario( - evaluation_scenario_id: str, values_to_update: dict -): - """Updates human evaluation scenario with the specified values. 
- - Args: - evaluation_scenario_id (str): The evaluation scenario ID - values_to_update (dict): The values to update - - Exceptions: - NoResultFound: if human evaluation scenario is not found - """ - - async with engine.core_session() as session: - result = await session.execute( - select(HumanEvaluationScenarioDB).filter_by( - id=uuid.UUID(evaluation_scenario_id) - ) - ) - human_evaluation_scenario = result.scalars().first() - if not human_evaluation_scenario: - raise NoResultFound( - f"Human evaluation scenario with id {evaluation_scenario_id} not found" - ) - - for key, value in values_to_update.items(): - if hasattr(human_evaluation_scenario, key): - setattr(human_evaluation_scenario, key, value) - - await session.commit() - await session.refresh(human_evaluation_scenario) - - -async def fetch_human_evaluation_scenarios(evaluation_id: str): - """ - Fetches human evaluation scenarios. - - Args: - evaluation_id (str): The evaluation identifier - - Returns: - The evaluation scenarios. - """ - - async with engine.core_session() as session: - result = await session.execute( - select(HumanEvaluationScenarioDB) - .filter_by(evaluation_id=uuid.UUID(evaluation_id)) - .order_by(asc(HumanEvaluationScenarioDB.created_at)) - ) - evaluation_scenarios = result.scalars().all() - return evaluation_scenarios - - -async def fetch_evaluation_scenarios(evaluation_id: str, project_id: str): - """ - Fetches evaluation scenarios. - - Args: - evaluation_id (str): The evaluation identifier - project_id (str): The ID of the project - - Returns: - The evaluation scenarios. - """ - - async with engine.core_session() as session: - result = await session.execute( - select(EvaluationScenarioDB) - .filter_by( - evaluation_id=uuid.UUID(evaluation_id), project_id=uuid.UUID(project_id) - ) - .options(joinedload(EvaluationScenarioDB.results)) - ) - evaluation_scenarios = result.unique().scalars().all() - return evaluation_scenarios - - -async def fetch_evaluation_scenario_by_id( - evaluation_scenario_id: str, -) -> Optional[EvaluationScenarioDB]: - """Fetches an evaluation scenario by its ID. - - Args: - evaluation_scenario_id (str): The ID of the evaluation scenario to fetch. - - Returns: - EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found. - """ - - assert evaluation_scenario_id is not None, "evaluation_scenario_id cannot be None" - async with engine.core_session() as session: - result = await session.execute( - select(EvaluationScenarioDB).filter_by(id=uuid.UUID(evaluation_scenario_id)) - ) - evaluation_scenario = result.scalars().first() - return evaluation_scenario - - -async def fetch_human_evaluation_scenario_by_id( - evaluation_scenario_id: str, -) -> Optional[HumanEvaluationScenarioDB]: - """Fetches an evaluation scenario by its ID. - - Args: - evaluation_scenario_id (str): The ID of the evaluation scenario to fetch. - - Returns: - EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found. - """ - - assert evaluation_scenario_id is not None, "evaluation_scenario_id cannot be None" - async with engine.core_session() as session: - result = await session.execute( - select(HumanEvaluationScenarioDB).filter_by( - id=uuid.UUID(evaluation_scenario_id) - ) - ) - evaluation_scenario = result.scalars().first() - return evaluation_scenario - - -async def fetch_human_evaluation_scenario_by_evaluation_id( - evaluation_id: str, -) -> Optional[HumanEvaluationScenarioDB]: - """Fetches an evaluation scenario by its ID.
- Args: - evaluation_id (str): The ID of the evaluation object to use in fetching the human evaluation. - Returns: - EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found. - """ - - evaluation = await fetch_human_evaluation_by_id(evaluation_id) - async with engine.core_session() as session: - result = await session.execute( - select(HumanEvaluationScenarioDB).filter_by( - evaluation_id=evaluation.id # type: ignore - ) - ) - human_eval_scenario = result.scalars().first() - return human_eval_scenario - - -async def create_new_evaluation( - app: AppDB, - project_id: str, - testset: TestsetDB, - status: Result, - variant: str, - variant_revision: str, -) -> EvaluationDB: - """Create a new evaluation. - Returns: - EvaluationDB: The created evaluation. - """ - - async with engine.core_session() as session: - evaluation = EvaluationDB( - app_id=app.id, - project_id=uuid.UUID(project_id), - testset_id=testset.id, - status=status.model_dump(), - variant_id=uuid.UUID(variant), - variant_revision_id=uuid.UUID(variant_revision), - ) - - session.add(evaluation) - await session.commit() - await session.refresh( - evaluation, - attribute_names=[ - "testset", - "variant", - "variant_revision", - "aggregated_results", - ], - ) - - return evaluation - - -async def list_evaluations(app_id: str, project_id: str): - """Retrieves evaluations of the specified app from the db. - - Args: - app_id (str): The ID of the app - project_id (str): The ID of the project - """ - - async with engine.core_session() as session: - base_query = select(EvaluationDB).filter_by( - app_id=uuid.UUID(app_id), project_id=uuid.UUID(project_id) - ) - query = base_query.options( - joinedload(EvaluationDB.testset.of_type(TestsetDB)).load_only( - TestsetDB.id, TestsetDB.name - ), # type: ignore - ) - - result = await session.execute( - query.options( - joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only( - AppVariantDB.id, AppVariantDB.variant_name - ), # type: ignore - joinedload( - EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB) - ).load_only(AppVariantRevisionsDB.revision), # type: ignore - joinedload( - EvaluationDB.aggregated_results.of_type( - EvaluationAggregatedResultDB - ) - ).joinedload(EvaluationAggregatedResultDB.evaluator_config), - ) - ) - evaluations = result.unique().scalars().all() - return evaluations - - -async def fetch_evaluations_by_resource( - resource_type: str, project_id: str, resource_ids: List[str] -): - """ - Fetches evaluations by resource. - - Args: - resource_type (str): The resource type - project_id (str): The ID of the project - resource_ids (List[str]): The resource identifiers - - Returns: - The evaluations by resource.
- - Raises: - HTTPException: 400 resource_type {type} is not supported - """ - - ids = list(map(uuid.UUID, resource_ids)) - - async with engine.core_session() as session: - if resource_type == "variant": - result_evaluations = await session.execute( - select(EvaluationDB) - .filter( - EvaluationDB.variant_id.in_(ids), - EvaluationDB.project_id == uuid.UUID(project_id), - ) - .options(load_only(EvaluationDB.id)) # type: ignore - ) - result_human_evaluations = await session.execute( - select(HumanEvaluationDB) - .join(HumanEvaluationVariantDB) - .filter( - HumanEvaluationVariantDB.variant_id.in_(ids), - HumanEvaluationDB.project_id == uuid.UUID(project_id), - ) - .options(load_only(HumanEvaluationDB.id)) # type: ignore - ) - res_evaluations = list(result_evaluations.scalars().all()) - res_human_evaluations = list(result_human_evaluations.scalars().all()) - return res_evaluations + res_human_evaluations - - elif resource_type == "testset": - result_evaluations = await session.execute( - select(EvaluationDB) - .filter( - EvaluationDB.testset_id.in_(ids), - EvaluationDB.project_id == uuid.UUID(project_id), - ) - .options(load_only(EvaluationDB.id)) # type: ignore - ) - result_human_evaluations = await session.execute( - select(HumanEvaluationDB) - .filter( - HumanEvaluationDB.testset_id.in_(ids), - HumanEvaluationDB.project_id - == uuid.UUID(project_id), # Fixed to match HumanEvaluationDB - ) - .options(load_only(HumanEvaluationDB.id)) # type: ignore - ) - res_evaluations = list(result_evaluations.scalars().all()) - res_human_evaluations = list(result_human_evaluations.scalars().all()) - return res_evaluations + res_human_evaluations - - elif resource_type == "evaluator_config": - query = ( - select(EvaluationDB) - .join(EvaluationDB.evaluator_configs) - .filter( - EvaluationEvaluatorConfigDB.evaluator_config_id.in_(ids), - EvaluationDB.project_id == uuid.UUID(project_id), - ) - ) - result = await session.execute(query) - res = result.scalars().all() - return res - - raise HTTPException( - status_code=400, - detail=f"resource_type {resource_type} is not supported", - ) - - -async def delete_evaluations(evaluation_ids: List[str]) -> None: - """Delete evaluations based on the ids provided from the db. - - Args: - evaluation_ids (list[str]): The IDs of the evaluations - """ - - async with engine.core_session() as session: - query = select(EvaluationDB).where(EvaluationDB.id.in_(evaluation_ids)) - result = await session.execute(query) - evaluations = result.scalars().all() - for evaluation in evaluations: - await session.delete(evaluation) - await session.commit() - - -async def create_new_evaluation_scenario( - project_id: str, - evaluation_id: str, - variant_id: str, - inputs: List[EvaluationScenarioInput], - outputs: List[EvaluationScenarioOutput], - correct_answers: Optional[List[CorrectAnswer]], - is_pinned: Optional[bool], - note: Optional[str], - results: List[EvaluationScenarioResult], -) -> EvaluationScenarioDB: - """Create a new evaluation scenario. - - Returns: - EvaluationScenarioDB: The created evaluation scenario.
- """ - - async with engine.core_session() as session: - evaluation_scenario = EvaluationScenarioDB( - project_id=uuid.UUID(project_id), - evaluation_id=uuid.UUID(evaluation_id), - variant_id=uuid.UUID(variant_id), - inputs=[input.model_dump() for input in inputs], - outputs=[output.model_dump() for output in outputs], - correct_answers=( - [correct_answer.model_dump() for correct_answer in correct_answers] - if correct_answers is not None - else [] - ), - is_pinned=is_pinned, - note=note, - ) - - session.add(evaluation_scenario) - await session.commit() - await session.refresh(evaluation_scenario) - - # create evaluation scenario result - for result in results: - evaluation_scenario_result = EvaluationScenarioResultDB( - evaluation_scenario_id=evaluation_scenario.id, - evaluator_config_id=uuid.UUID(result.evaluator_config), - result=result.result.model_dump(), - ) - - session.add(evaluation_scenario_result) - - await session.commit() # ensures that scenario results insertion is committed - await session.refresh(evaluation_scenario) - - return evaluation_scenario - - -async def update_evaluation_with_aggregated_results( - evaluation_id: str, aggregated_results: List[AggregatedResult] -): - async with engine.core_session() as session: - for result in aggregated_results: - aggregated_result = EvaluationAggregatedResultDB( - evaluation_id=uuid.UUID(evaluation_id), - evaluator_config_id=uuid.UUID(result.evaluator_config), - result=result.result.model_dump(), - ) - session.add(aggregated_result) - - await session.commit() - - -async def fetch_eval_aggregated_results(evaluation_id: str): - """ - Fetches an evaluation aggregated results by evaluation identifier. - - Args: - evaluation_id (str): The evaluation identifier - - Returns: - The evaluation aggregated results by evaluation identifier. - """ - - async with engine.core_session() as session: - base_query = select(EvaluationAggregatedResultDB).filter_by( - evaluation_id=uuid.UUID(evaluation_id) - ) - query = base_query.options( - joinedload( - EvaluationAggregatedResultDB.evaluator_config.of_type(EvaluatorConfigDB) - ).load_only( - EvaluatorConfigDB.id, # type: ignore - EvaluatorConfigDB.name, # type: ignore - EvaluatorConfigDB.evaluator_key, # type: ignore - EvaluatorConfigDB.settings_values, # type: ignore - EvaluatorConfigDB.created_at, # type: ignore - EvaluatorConfigDB.updated_at, # type: ignore - ) - ) - - result = await session.execute(query) - aggregated_results = result.scalars().all() - return aggregated_results - - -async def update_evaluation( - evaluation_id: str, project_id: str, updates: Dict[str, Any] -) -> Optional[EvaluationDB]: - """ - Update an evaluator configuration in the database with the provided id. - - Arguments: - evaluation_id (str): The ID of the evaluator configuration to be updated. - project_id (str): The ID of the project. - updates (Dict[str, Any]): The updates to apply to the evaluator configuration. - - Returns: - EvaluatorConfigDB: The updated evaluator configuration object. 
- """ - - async with engine.core_session() as session: - result = await session.execute( - select(EvaluationDB).filter_by( - id=uuid.UUID(evaluation_id), project_id=uuid.UUID(project_id) - ) - ) - evaluation = result.scalars().first() - for key, value in updates.items(): - if hasattr(evaluation, key): - setattr(evaluation, key, value) - - await session.commit() - await session.refresh(evaluation) - - return evaluation - - -async def check_if_evaluation_contains_failed_evaluation_scenarios( - evaluation_id: str, -) -> bool: - async with engine.core_session() as session: - EvaluationResultAlias = aliased(EvaluationScenarioResultDB) - query = ( - select(func.count(EvaluationScenarioDB.id)) - .join(EvaluationResultAlias, EvaluationScenarioDB.results) - .where( - EvaluationScenarioDB.evaluation_id == uuid.UUID(evaluation_id), - EvaluationResultAlias.result["type"].astext == "error", - ) - ) - - result = await session.execute(query) - count = result.scalar() - if not count: - return False - return count > 0 diff --git a/api/oss/src/services/evaluator_manager.py b/api/oss/src/services/evaluator_manager.py index 375eec953a..68f44ee3d4 100644 --- a/api/oss/src/services/evaluator_manager.py +++ b/api/oss/src/services/evaluator_manager.py @@ -138,13 +138,13 @@ async def create_ready_to_use_evaluators(project_id: str): } for setting_name, default_value in settings_values.items(): - assert default_value != "", ( - f"Default value for ground truth key '{setting_name}' in Evaluator is empty" - ) + assert ( + default_value != "" + ), f"Default value for ground truth key '{setting_name}' in Evaluator is empty" - assert hasattr(evaluator, "name") and hasattr(evaluator, "key"), ( - f"'name' and 'key' does not exist in the evaluator: {evaluator}" - ) + assert hasattr(evaluator, "name") and hasattr( + evaluator, "key" + ), f"'name' and 'key' does not exist in the evaluator: {evaluator}" await db_manager.create_evaluator_config( project_id=project_id, name=evaluator.name, diff --git a/api/oss/src/services/evaluators_service.py b/api/oss/src/services/evaluators_service.py index cbb4643c13..5ff93cabb0 100644 --- a/api/oss/src/services/evaluators_service.py +++ b/api/oss/src/services/evaluators_service.py @@ -734,10 +734,9 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac raise e if ( - (input.settings.get("version") == "4") - and ( # this check is used when running in the background (celery) - type(input.settings.get("prompt_template", "")) is not str - ) + input.settings.get("version") == "4" + ) and ( # this check is used when running in the background (celery) + type(input.settings.get("prompt_template", "")) is not str ): # this check is used when running in the frontend (since in that case we'll alway have version 2) try: parameters = input.settings or dict() @@ -769,9 +768,7 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac template_format = parameters.get("template_format") or default_format - response_type = input.settings.get("response_type") or ( - "json_schema" if template_version == "4" else "text" - ) + response_type = input.settings.get("response_type") or "text" json_schema = input.settings.get("json_schema") or None @@ -931,10 +928,9 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac except Exception as e: raise RuntimeError(f"Evaluation failed: {str(e)}") elif ( - (input.settings.get("version") == "3") - and ( # this check is used when running in the background (celery) - 
type(input.settings.get("prompt_template", "")) is not str - ) + input.settings.get("version") == "3" + ) and ( # this check is used when running in the background (celery) + type(input.settings.get("prompt_template", "")) is not str ): # this check is used when running in the frontend (since in that case we'll always have version 2) try: parameters = input.settings or dict() @@ -1091,10 +1087,9 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac except Exception as e: raise RuntimeError(f"Evaluation failed: {str(e)}") elif ( - (input.settings.get("version") == "2") - and ( # this check is used when running in the background (celery) - type(input.settings.get("prompt_template", "")) is not str - ) + input.settings.get("version") == "2" + ) and ( # this check is used when running in the background (celery) + type(input.settings.get("prompt_template", "")) is not str ): # this check is used when running in the frontend (since in that case we'll always have version 2) try: prompt_template = input.settings.get("prompt_template", "") @@ -1550,9 +1545,9 @@ async def json_diff(input: EvaluatorInputInterface) -> EvaluatorOutputInterface: # 1. extract llm app output if app output format is v2+ app_output = input.inputs["prediction"] - assert isinstance(app_output, (str, dict)), ( - "App output is expected to be a string or a JSON object" - ) + assert isinstance( + app_output, (str, dict) + ), "App output is expected to be a string or a JSON object" app_output = ( app_output.get("data", "") if isinstance(app_output, dict) else app_output ) @@ -1560,7 +1555,9 @@ async def json_diff(input: EvaluatorInputInterface) -> EvaluatorOutputInterface: try: app_output = json.loads(app_output) except json.JSONDecodeError: - app_output = {} # we will return 0 score for json diff in case we cannot parse the output as json + app_output = ( + {} + ) # we will return 0 score for json diff in case we cannot parse the output as json score = compare_jsons( ground_truth=ground_truth, diff --git a/api/oss/tests/legacy/admin/tests.py b/api/oss/tests/legacy/admin/tests.py index 27d4aeb883..356aa29581 100644 --- a/api/oss/tests/legacy/admin/tests.py +++ b/api/oss/tests/legacy/admin/tests.py @@ -16,9 +16,9 @@ async def test_api_authentication_missing_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -35,9 +35,9 @@ async def test_api_authentication_unsupported_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -54,6 +54,6 @@ async def test_api_authentication_invalid_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/api/oss/tests/legacy/apps/tests.py b/api/oss/tests/legacy/apps/tests.py index 59afc1b7d7..d1be36c456 100644 ---
a/api/oss/tests/legacy/apps/tests.py +++ b/api/oss/tests/legacy/apps/tests.py @@ -39,12 +39,12 @@ async def test_create_without_default_params(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) - assert response_data["app_name"] == app_data["app_name"], ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" + assert ( + response_data["app_name"] == app_data["app_name"] + ), f"Failed for case: {description}" # Cleanup: Remove application await delete_application(http_client, response_data["app_id"], headers) @@ -64,9 +64,9 @@ async def test_create_invalid_params(self, http_client): response = await http_client.post("/apps", json=app_data, headers=headers) # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -86,9 +86,9 @@ async def test_create_conflicts(self, http_client): response = await http_client.post("/apps", json=app_data, headers=headers) # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup: Remove application app_cleanup_response = await http_client.get("/apps", headers=headers) @@ -120,9 +120,9 @@ async def test_permissions_principal_not_in_scope_post(self, http_client): ) # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -213,9 +213,9 @@ async def test_list_query_filter_no_element(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == len(elements), f"Failed for case: {description}" @pytest.mark.asyncio @@ -236,9 +236,9 @@ async def test_list_query_filter_one_element(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 1, f"Failed for case: {description}" # Cleanup: Remove application @@ -263,9 +263,9 @@ async def test_list_query_filter_many_elements_small_data(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 3, f"Failed for case: {description}" # Cleanup: Remove applications @@ -291,9 +291,9 @@ async def test_list_query_filter_many_elements_big_data(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for 
case: {description}" assert len(response_data) == 6, f"Failed for case: {description}" # Cleanup: Remove applications @@ -326,9 +326,9 @@ async def test_permissions_principal_not_in_scope(self, http_client): ) # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup: Delete the application with valid principal await delete_application(http_client, app["app_id"], owner_headers) @@ -361,9 +361,9 @@ async def test_permissions_allowed(self, http_client): list_of_status_codes.append(response.status_code) # Assert: Verify the response - assert list_of_status_codes.count(expected_status) == 3, ( - f"Failed for case: {description}" - ) + assert ( + list_of_status_codes.count(expected_status) == 3 + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical diff --git a/api/oss/tests/legacy/auth/tests.py b/api/oss/tests/legacy/auth/tests.py index 1a61f0a034..b09b4b4c61 100644 --- a/api/oss/tests/legacy/auth/tests.py +++ b/api/oss/tests/legacy/auth/tests.py @@ -16,9 +16,9 @@ async def test_api_authentication_missing_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -35,9 +35,9 @@ async def test_api_authentication_unsupported_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -54,6 +54,6 @@ async def test_api_authentication_invalid_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/api/oss/tests/legacy/old_tests/models.py b/api/oss/tests/legacy/old_tests/models.py index 8e24e032d9..5c1d0ad49b 100644 --- a/api/oss/tests/legacy/old_tests/models.py +++ b/api/oss/tests/legacy/old_tests/models.py @@ -62,14 +62,4 @@ ] if is_ee(): - models.extend( - [ - OrganizationDB, - WorkspaceDB, - APIKeyDB, - InvitationDB, - OrganizationMemberDB, - ProjectMemberDB, - WorkspaceMemberDB, - ] - ) # type: ignore + models.extend([OrganizationDB, WorkspaceDB, APIKeyDB, InvitationDB, OrganizationMemberDB, ProjectMemberDB, WorkspaceMemberDB]) # type: ignore diff --git a/api/oss/tests/legacy/old_tests/unit/test_llm_apps_service.py b/api/oss/tests/legacy/old_tests/unit/test_llm_apps_service.py index 8ad085a396..af2757cf09 100644 --- a/api/oss/tests/legacy/old_tests/unit/test_llm_apps_service.py +++ b/api/oss/tests/legacy/old_tests/unit/test_llm_apps_service.py @@ -20,16 +20,14 @@ async def test_batch_invoke_success(): to simulate successful invocations. It verifies that the batch_invoke function correctly returns the expected results for the given test data. 
""" - with ( - patch( - "src.services.llm_apps_service.get_parameters_from_openapi", - new_callable=AsyncMock, - ) as mock_get_parameters_from_openapi, - patch( - "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock - ) as mock_invoke_app, - patch("asyncio.sleep", new_callable=AsyncMock) as mock_sleep, - ): + with patch( + "src.services.llm_apps_service.get_parameters_from_openapi", + new_callable=AsyncMock, + ) as mock_get_parameters_from_openapi, patch( + "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock + ) as mock_invoke_app, patch( + "asyncio.sleep", new_callable=AsyncMock + ) as mock_sleep: mock_get_parameters_from_openapi.return_value = [ {"name": "param1", "type": "input"}, {"name": "param2", "type": "input"}, @@ -92,16 +90,14 @@ async def test_batch_invoke_retries_and_failure(): function correctly retries the specified number of times and returns an error result after reaching the maximum retries. """ - with ( - patch( - "src.services.llm_apps_service.get_parameters_from_openapi", - new_callable=AsyncMock, - ) as mock_get_parameters_from_openapi, - patch( - "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock - ) as mock_invoke_app, - patch("asyncio.sleep", new_callable=AsyncMock) as mock_sleep, - ): + with patch( + "src.services.llm_apps_service.get_parameters_from_openapi", + new_callable=AsyncMock, + ) as mock_get_parameters_from_openapi, patch( + "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock + ) as mock_invoke_app, patch( + "asyncio.sleep", new_callable=AsyncMock + ) as mock_sleep: mock_get_parameters_from_openapi.return_value = [ {"name": "param1", "type": "input"}, {"name": "param2", "type": "input"}, @@ -159,16 +155,14 @@ async def test_batch_invoke_generic_exception(): batch_invoke function correctly handles the exception and returns an error result with the appropriate error message. 
""" - with ( - patch( - "src.m_apps_service.get_parameters_from_openapi", - new_callable=AsyncMock, - ) as mock_get_parameters_from_openapi, - patch( - "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock - ) as mock_invoke_app, - patch("asyncio.sleep", new_callable=AsyncMock) as mock_sleep, - ): + with patch( + "src.m_apps_service.get_parameters_from_openapi", + new_callable=AsyncMock, + ) as mock_get_parameters_from_openapi, patch( + "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock + ) as mock_invoke_app, patch( + "asyncio.sleep", new_callable=AsyncMock + ) as mock_sleep: mock_get_parameters_from_openapi.return_value = [ {"name": "param1", "type": "input"}, {"name": "param2", "type": "input"}, diff --git a/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_evaluators_router.py b/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_evaluators_router.py index dc670b8589..90e18ea27f 100644 --- a/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_evaluators_router.py +++ b/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_evaluators_router.py @@ -183,9 +183,9 @@ async def wait_for_evaluation_to_finish(evaluation_id): return await asyncio.sleep(intervals) - assert False, ( - f"Evaluation status did not become '{EvaluationStatusEnum.EVALUATION_FINISHED}' within the specified polling time" - ) + assert ( + False + ), f"Evaluation status did not become '{EvaluationStatusEnum.EVALUATION_FINISHED}' within the specified polling time" async def create_evaluation_with_evaluator(evaluator_config_name): diff --git a/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_versioning_deployment.py b/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_versioning_deployment.py index 004b54e48f..9f9a87672b 100644 --- a/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_versioning_deployment.py +++ b/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_versioning_deployment.py @@ -80,6 +80,6 @@ async def test_deploy_to_environment(deploy_to_environment_payload): ) list_of_response_status_codes.append(response.status_code) - assert list_of_response_status_codes.count(200) == 3, ( - "The list does not contain 3 occurrences of 200 status code" - ) + assert ( + list_of_response_status_codes.count(200) == 3 + ), "The list does not contain 3 occurrences of 200 status code" diff --git a/api/oss/tests/legacy/sdk/apps/tests.py b/api/oss/tests/legacy/sdk/apps/tests.py index d53a001e99..3d379d5d5f 100644 --- a/api/oss/tests/legacy/sdk/apps/tests.py +++ b/api/oss/tests/legacy/sdk/apps/tests.py @@ -44,9 +44,9 @@ async def test_create_app_successfully(self, http_client, setup_class_fixture): # ASSERT assert response.app_name == app_name - assert isinstance(response.model_dump(), dict), ( - "Response data is not a dictionary." - ) + assert isinstance( + response.model_dump(), dict + ), "Response data is not a dictionary." 
# CLEANUP await delete_application( diff --git a/api/oss/tests/legacy/testsets/tests.py b/api/oss/tests/legacy/testsets/tests.py index 68931fd204..edf1474f15 100644 --- a/api/oss/tests/legacy/testsets/tests.py +++ b/api/oss/tests/legacy/testsets/tests.py @@ -77,9 +77,9 @@ async def test_upload_file_validation_failure(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # @pytest.mark.asyncio # @pytest.mark.typical @@ -148,9 +148,9 @@ async def test_get_testset_owner_access(self, http_client): response = await http_client.get(f"/testsets/{testset['id']}", headers=headers) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert "id" in response.json(), f"Failed for case: {description}" # Cleanup @@ -191,9 +191,9 @@ async def test_create_testset_success(self, http_client): await delete_testset(http_client, response_data["id"], headers) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert "id" in response_data, f"Failed for case: {description}" @pytest.mark.asyncio @@ -213,9 +213,9 @@ async def test_create_testset_validation_failure(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -241,9 +241,9 @@ async def test_create_testset_non_member_access(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -260,9 +260,9 @@ async def test_no_element(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 0, f"Failed for case: {description}" @pytest.mark.asyncio @@ -282,9 +282,9 @@ async def test_one_element(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 1, f"Failed for case: {description}" # Cleanup @@ -308,9 +308,9 @@ async def test_many_elements_small_data(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 3, f"Failed for case: {description}" # Cleanup @@ -335,9 +335,9 @@ async def test_many_elements_big_data(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 6, f"Failed for 
case: {description}" # Cleanup @@ -368,9 +368,9 @@ async def test_permissions_principal_not_in_scope(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup await delete_testset(http_client, testset["id"], owner_headers) @@ -391,9 +391,9 @@ async def test_permissions_allowed(self, http_client): response = await http_client.get("/testsets", headers=owner_headers) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -410,9 +410,9 @@ async def test_no_element(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 0, f"Failed for case: {description}" @pytest.mark.asyncio @@ -432,9 +432,9 @@ async def test_one_element(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 1, f"Failed for case: {description}" # Cleanup @@ -458,9 +458,9 @@ async def test_many_elements_small_data(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 3, f"Failed for case: {description}" # Cleanup @@ -485,9 +485,9 @@ async def test_many_elements_big_data(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 6, f"Failed for case: {description}" # Cleanup @@ -518,9 +518,9 @@ async def test_permissions_principal_not_in_scope(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup await delete_testset(http_client, testset["id"], owner_headers) @@ -541,9 +541,9 @@ async def test_permissions_allowed(self, http_client): response = await http_client.get("/testsets", headers=owner_headers) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -565,9 +565,9 @@ async def test_update_success(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert response_data["_id"] == testset["id"], f"Failed for case: {description}" # Cleanup @@ -592,9 +592,9 @@ async def test_update_validation_failure(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed 
for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup await delete_testset(http_client, testset["id"], headers) @@ -622,9 +622,9 @@ async def test_update_non_member_access(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup await delete_testset(http_client, testset["id"], member_headers) @@ -650,9 +650,9 @@ async def test_delete_success(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -674,9 +674,9 @@ async def test_delete_validation_failure(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -698,6 +698,6 @@ async def test_delete_non_existent(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/api/oss/tests/legacy/vault_router/test_vault_secrets_apis.py b/api/oss/tests/legacy/vault_router/test_vault_secrets_apis.py index 0de50397f6..245b5affc1 100644 --- a/api/oss/tests/legacy/vault_router/test_vault_secrets_apis.py +++ b/api/oss/tests/legacy/vault_router/test_vault_secrets_apis.py @@ -41,9 +41,9 @@ async def test_create_secret_with_viewer_role( json=valid_secret_payload, ) - assert create_response.status_code == 403, ( - "Secret creation cannot be successful. Given that apikey belongs to a user with 'viewer' role." - ) + assert ( + create_response.status_code == 403 + ), "Secret creation cannot be successful. Given that apikey belongs to a user with 'viewer' role." created_secret_message = create_response.json()["detail"] assert ( @@ -84,9 +84,9 @@ async def test_create_secret_with_invalid_secret_kind(self, async_client): "secrets", json=invalid_payload, ) - assert response.status_code == 422, ( - "Should reject payload with invalid secret kind" - ) + assert ( + response.status_code == 422 + ), "Should reject payload with invalid secret kind" @pytest.mark.asyncio @pytest.mark.secret_creation @@ -104,9 +104,9 @@ async def test_create_secret_with_invalid_provider_kind(self, async_client): "secrets", json=invalid_payload, ) - assert response.status_code == 422, ( - "Should reject payload with invalid secret provider kind" - ) + assert ( + response.status_code == 422 + ), "Should reject payload with invalid secret provider kind" @pytest.mark.asyncio @pytest.mark.secret_retrieval @@ -204,9 +204,9 @@ async def test_update_secret_with_viewer_role( json=update_payload, ) - assert update_response.status_code == 403, ( - "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role." - ) + assert ( + update_response.status_code == 403 + ), "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role." 
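# Note on the assert rewrites that make up most of this diff: both layouts are the
# same statement and behave identically; only the line-wrapping differs (this looks
# like churn between formatter versions, e.g. Black before and after 24.x, though
# that attribution is an assumption). A minimal sketch with illustrative names:

status_code, expected_status, description = 403, 403, "viewer role"

# Style being removed: the failure message wrapped in parentheses.
assert status_code == expected_status, (
    f"Failed for case: {description}"
)

# Style being restored: the condition wrapped in parentheses.
assert (
    status_code == expected_status
), f"Failed for case: {description}"

# Either layout is safe; what must be avoided is parenthesizing the whole pair,
# e.g. `assert (condition, "message")`, which builds a non-empty tuple and
# therefore always passes.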
update_response_message = update_response.json()["detail"] assert ( @@ -233,9 +233,9 @@ async def test_delete_secret(self, async_client, valid_secret_payload): get_response = await async_client.get( f"secrets/{secret_id}", ) - assert get_response.status_code == 404, ( - "Deleted secret should not be retrievable" - ) + assert ( + get_response.status_code == 404 + ), "Deleted secret should not be retrievable" @pytest.mark.asyncio @pytest.mark.secret_deletion @@ -254,9 +254,9 @@ async def test_delete_secret_with_viewer_role( f"secrets/{secret_id}", headers={"Authorization": f"ApiKey {os.environ.get('VIEWER_API_KEY', '')}"}, ) - assert delete_response.status_code == 403, ( - "Secret deletion cannot be successful. Given that apikey belongs to a user with 'viewer' role." - ) + assert ( + delete_response.status_code == 403 + ), "Secret deletion cannot be successful. Given that apikey belongs to a user with 'viewer' role." delete_response_message = delete_response.json()["detail"] assert ( @@ -272,6 +272,6 @@ async def test_delete_nonexistent_secret(self, async_client): response = await async_client.delete( f"secrets/{non_existent_id}", ) - assert response.status_code == 204, ( - "Should always return 204 since the endpoint is idempotent" - ) + assert ( + response.status_code == 204 + ), "Should always return 204 since the endpoint is idempotent" diff --git a/api/oss/tests/legacy/workflows/admin/tests.py b/api/oss/tests/legacy/workflows/admin/tests.py index 3b695fe6d5..efa0a31025 100644 --- a/api/oss/tests/legacy/workflows/admin/tests.py +++ b/api/oss/tests/legacy/workflows/admin/tests.py @@ -17,9 +17,9 @@ async def test_api_authentication_missing_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -37,9 +37,9 @@ async def test_api_authentication_unsupported_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -57,6 +57,6 @@ async def test_api_authentication_invalid_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/api/oss/tests/legacy/workflows/auth/tests.py b/api/oss/tests/legacy/workflows/auth/tests.py index 5dbfeb795e..32dee3c697 100644 --- a/api/oss/tests/legacy/workflows/auth/tests.py +++ b/api/oss/tests/legacy/workflows/auth/tests.py @@ -17,9 +17,9 @@ async def test_api_authentication_missing_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -37,9 +37,9 @@ async def test_api_authentication_unsupported_token(self, http_client): response = await http_client.get("apps",
headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -57,6 +57,6 @@ async def test_api_authentication_invalid_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/api/oss/tests/legacy/workflows/observability/tests.py b/api/oss/tests/legacy/workflows/observability/tests.py index d0bc109555..8b6bf6b47a 100644 --- a/api/oss/tests/legacy/workflows/observability/tests.py +++ b/api/oss/tests/legacy/workflows/observability/tests.py @@ -70,6 +70,6 @@ async def test_completion_generate_observability_tree( ) is_match = exact_match(workflow_nodes, observability_nodes) - assert is_match is True, ( - "Workflow nodes do not match nodes from observability" - ) + assert ( + is_match is True + ), "Workflow nodes do not match nodes from observability" diff --git a/api/oss/tests/legacy/workflows/permissions/tests.py b/api/oss/tests/legacy/workflows/permissions/tests.py index 541b8e67a5..50a98eef6e 100644 --- a/api/oss/tests/legacy/workflows/permissions/tests.py +++ b/api/oss/tests/legacy/workflows/permissions/tests.py @@ -67,9 +67,9 @@ async def test_permissions_principal_not_in_scope( response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) - assert response.json().get("detail") == "Service execution not allowed.", ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" + assert ( + response.json().get("detail") == "Service execution not allowed." + ), f"Failed for case: {description}" diff --git a/api/oss/tests/manual/tracing/ingestion/agenta_streaming_response.py b/api/oss/tests/manual/tracing/ingestion/agenta_streaming_response.py index 751011e23c..3106a372b8 100644 --- a/api/oss/tests/manual/tracing/ingestion/agenta_streaming_response.py +++ b/api/oss/tests/manual/tracing/ingestion/agenta_streaming_response.py @@ -9,7 +9,6 @@ 2.
Batch size configuration issues with OpenTelemetry environment variables """ - from dotenv import load_dotenv import asyncio diff --git a/api/oss/tests/manual/tracing/ingestion/openinference_dspy.py b/api/oss/tests/manual/tracing/ingestion/openinference_dspy.py index 53d193eda2..d5a828df80 100644 --- a/api/oss/tests/manual/tracing/ingestion/openinference_dspy.py +++ b/api/oss/tests/manual/tracing/ingestion/openinference_dspy.py @@ -72,10 +72,9 @@ def forward(self, topic: str): outline = self.build_outline(topic=topic) sections = [] for heading, subheadings in outline.section_subheadings.items(): - section, subheadings = ( - f"## {heading}", - [f"### {subheading}" for subheading in subheadings], - ) + section, subheadings = f"## {heading}", [ + f"### {subheading}" for subheading in subheadings + ] section = self.draft_section( topic=outline.title, section_heading=section, diff --git a/api/oss/tests/pytest/workflows/test_workflow_revisions_queries.py b/api/oss/tests/pytest/workflows/test_workflow_revisions_queries.py index 2f4121c8e3..abf22c9dc7 100644 --- a/api/oss/tests/pytest/workflows/test_workflow_revisions_queries.py +++ b/api/oss/tests/pytest/workflows/test_workflow_revisions_queries.py @@ -221,7 +221,7 @@ def test_query_paginated_workflow_revisions( # ACT ------------------------------------------------------------------ response = authed_api( "GET", - "/preview/workflows/revisions/?include_archived=true&limit=1", + "/preview/workflows/revisions/?include_archived=true" "&limit=1", ) # ---------------------------------------------------------------------- diff --git a/api/oss/tests/pytest/workflows/test_workflow_variants_queries.py b/api/oss/tests/pytest/workflows/test_workflow_variants_queries.py index b294b45116..19eae82dbd 100644 --- a/api/oss/tests/pytest/workflows/test_workflow_variants_queries.py +++ b/api/oss/tests/pytest/workflows/test_workflow_variants_queries.py @@ -185,7 +185,7 @@ def test_query_paginated_workflow_variants( # ACT ------------------------------------------------------------------ response = authed_api( "GET", - "/preview/workflows/variants/?include_archived=true&limit=1", + "/preview/workflows/variants/?include_archived=true" "&limit=1", ) # ---------------------------------------------------------------------- diff --git a/api/oss/tests/pytest/workflows/test_workflows_queries.py b/api/oss/tests/pytest/workflows/test_workflows_queries.py index 91de434e0b..57163be02b 100644 --- a/api/oss/tests/pytest/workflows/test_workflows_queries.py +++ b/api/oss/tests/pytest/workflows/test_workflows_queries.py @@ -144,7 +144,7 @@ def test_query_paginated_workflows( # ACT ------------------------------------------------------------------ response = authed_api( "GET", - "/preview/workflows/?include_archived=true&limit=1", + "/preview/workflows/?include_archived=true" "&limit=1", ) # ---------------------------------------------------------------------- diff --git a/api/oss/tests/pytest/workflows/test_workflows_retrieve.py b/api/oss/tests/pytest/workflows/test_workflows_retrieve.py index fa6df8ea4b..8d824dc30f 100644 --- a/api/oss/tests/pytest/workflows/test_workflows_retrieve.py +++ b/api/oss/tests/pytest/workflows/test_workflows_retrieve.py @@ -201,7 +201,8 @@ def test_retrieve_by_revision_id(self, authed_api, mock_data): response = authed_api( "GET", - f"/preview/workflows/revisions/retrieve?workflow_revision_id={revision_id}", + f"/preview/workflows/revisions/retrieve" + f"?workflow_revision_id={revision_id}", ) assert response.status_code == 200 @@ -288,7 +289,8 @@ def 
test_retrieve_by_variant_id(self, authed_api, mock_data): response = authed_api( "GET", - f"/preview/workflows/revisions/retrieve?workflow_variant_id={variant_id}", + f"/preview/workflows/revisions/retrieve" + f"?workflow_variant_id={variant_id}", ) assert response.status_code == 200 diff --git a/api/poetry.lock b/api/poetry.lock index db46b33049..eea4a296e7 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -2,15 +2,15 @@ [[package]] name = "agenta" -version = "0.61.0" +version = "0.60.1" description = "The SDK for agenta is an open-source LLMOps platform." optional = false python-versions = "<4.0,>=3.11" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "agenta-0.61.0-py3-none-any.whl", hash = "sha256:b530a174b01ea76b1a9773a6982c31a243f7927801c753c3a0ad78455b250522"}, - {file = "agenta-0.61.0.tar.gz", hash = "sha256:525699f4d34326cdff7baefbdd1439ff989e7ee15ba8faf5acad94e144b59d8a"}, + {file = "agenta-0.60.1-py3-none-any.whl", hash = "sha256:9895b04f4e700c575147428f0535bdd7c0d378d9a10d2fa70eb17bf8efe05650"}, + {file = "agenta-0.60.1.tar.gz", hash = "sha256:09f9ee2f1dd38a86d66d2b92eb9377adb5a83a876c8e5d120f1397fe45e70ccb"}, ] [package.dependencies] @@ -23,7 +23,7 @@ huggingface-hub = "<0.31.0" importlib-metadata = ">=8.0.0,<9.0" jinja2 = ">=3.1.6,<4.0.0" litellm = "1.78.7" -openai = ">=1.106.0" +openai = ">=1.106.0,<2.0.0" opentelemetry-api = ">=1.27.0,<2.0.0" opentelemetry-exporter-otlp-proto-http = ">=1.27.0,<2.0.0" opentelemetry-instrumentation = ">=0.56b0" @@ -32,7 +32,7 @@ pydantic = ">=2,<3" python-dotenv = ">=1.0.0,<2.0.0" python-jsonpath = ">=2.0.0,<3.0.0" pyyaml = ">=6.0.2,<7.0.0" -restrictedpython = {version = ">=8.0,<9.0", markers = "python_version >= \"3.11\" and python_version < \"3.14\""} +restrictedpython = {version = ">=8.0,<9.0", markers = "python_version == \"3.11\""} starlette = ">=0.47.0,<0.48.0" structlog = ">=25.2.0,<26.0.0" tiktoken = "0.11.0" @@ -1714,115 +1714,115 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jiter" -version = "0.12.0" +version = "0.11.1" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.9" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "jiter-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e7acbaba9703d5de82a2c98ae6a0f59ab9770ab5af5fa35e43a303aee962cf65"}, - {file = "jiter-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:364f1a7294c91281260364222f535bc427f56d4de1d8ffd718162d21fbbd602e"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ee4d25805d4fb23f0a5167a962ef8e002dbfb29c0989378488e32cf2744b62"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:796f466b7942107eb889c08433b6e31b9a7ed31daceaecf8af1be26fb26c0ca8"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35506cb71f47dba416694e67af996bbdefb8e3608f1f78799c2e1f9058b01ceb"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:726c764a90c9218ec9e4f99a33d6bf5ec169163f2ca0fc21b654e88c2abc0abc"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa47810c5565274810b726b0dc86d18dce5fd17b190ebdc3890851d7b2a0e74"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ec0259d3f26c62aed4d73b198c53e316ae11f0f69c8fbe6682c6dcfa0fcce2"}, - {file = "jiter-0.12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:79307d74ea83465b0152fa23e5e297149506435535282f979f18b9033c0bb025"}, - {file = "jiter-0.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cf6e6dd18927121fec86739f1a8906944703941d000f0639f3eb6281cc601dca"}, - {file = "jiter-0.12.0-cp310-cp310-win32.whl", hash = "sha256:b6ae2aec8217327d872cbfb2c1694489057b9433afce447955763e6ab015b4c4"}, - {file = "jiter-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c7f49ce90a71e44f7e1aa9e7ec415b9686bbc6a5961e57eab511015e6759bc11"}, - {file = "jiter-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8f8a7e317190b2c2d60eb2e8aa835270b008139562d70fe732e1c0020ec53c9"}, - {file = "jiter-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2218228a077e784c6c8f1a8e5d6b8cb1dea62ce25811c356364848554b2056cd"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9354ccaa2982bf2188fd5f57f79f800ef622ec67beb8329903abf6b10da7d423"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f2607185ea89b4af9a604d4c7ec40e45d3ad03ee66998b031134bc510232bb7"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a585a5e42d25f2e71db5f10b171f5e5ea641d3aa44f7df745aa965606111cc2"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd9e21d34edff5a663c631f850edcb786719c960ce887a5661e9c828a53a95d9"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a612534770470686cd5431478dc5a1b660eceb410abade6b1b74e320ca98de6"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3985aea37d40a908f887b34d05111e0aae822943796ebf8338877fee2ab67725"}, - {file = "jiter-0.12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b1207af186495f48f72529f8d86671903c8c10127cac6381b11dddc4aaa52df6"}, - {file = "jiter-0.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:ef2fb241de583934c9915a33120ecc06d94aa3381a134570f59eed784e87001e"}, - {file = "jiter-0.12.0-cp311-cp311-win32.whl", hash = "sha256:453b6035672fecce8007465896a25b28a6b59cfe8fbc974b2563a92f5a92a67c"}, - {file = "jiter-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:ca264b9603973c2ad9435c71a8ec8b49f8f715ab5ba421c85a51cde9887e421f"}, - {file = "jiter-0.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:cb00ef392e7d684f2754598c02c409f376ddcef857aae796d559e6cacc2d78a5"}, - {file = "jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37"}, - {file = "jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126"}, - {file = "jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9"}, - {file = "jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86"}, - {file = "jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44"}, - {file = "jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb"}, - {file = "jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789"}, - {file = "jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e"}, - {file = "jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9"}, - {file = "jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626"}, - {file = "jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c"}, - {file = "jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de"}, - {file = "jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a"}, - {file = "jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60"}, - {file = "jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6"}, - {file = "jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4"}, - {file = "jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb"}, - {file = "jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7"}, - {file = "jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3"}, - {file = "jiter-0.12.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c46d927acd09c67a9fb1416df45c5a04c27e83aae969267e98fba35b74e99525"}, - {file = "jiter-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:774ff60b27a84a85b27b88cd5583899c59940bcc126caca97eb2a9df6aa00c49"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5433fab222fb072237df3f637d01b81f040a07dcac1cb4a5c75c7aa9ed0bef1"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8c593c6e71c07866ec6bfb790e202a833eeec885022296aff6b9e0b92d6a70e"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90d32894d4c6877a87ae00c6b915b609406819dce8bc0d4e962e4de2784e567e"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:798e46eed9eb10c3adbbacbd3bdb5ecd4cf7064e453d00dbef08802dae6937ff"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3f1368f0a6719ea80013a4eb90ba72e75d7ea67cfc7846db2ca504f3df0169a"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65f04a9d0b4406f7e51279710b27484af411896246200e461d80d3ba0caa901a"}, - {file = "jiter-0.12.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:fd990541982a24281d12b67a335e44f117e4c6cbad3c3b75c7dea68bf4ce3a67"}, - {file = "jiter-0.12.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:b111b0e9152fa7df870ecaebb0bd30240d9f7fff1f2003bcb4ed0f519941820b"}, - {file = "jiter-0.12.0-cp314-cp314-win32.whl", hash = "sha256:a78befb9cc0a45b5a5a0d537b06f8544c2ebb60d19d02c41ff15da28a9e22d42"}, - {file = "jiter-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:e1fe01c082f6aafbe5c8faf0ff074f38dfb911d53f07ec333ca03f8f6226debf"}, - {file = 
"jiter-0.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:d72f3b5a432a4c546ea4bedc84cce0c3404874f1d1676260b9c7f048a9855451"}, - {file = "jiter-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e6ded41aeba3603f9728ed2b6196e4df875348ab97b28fc8afff115ed42ba7a7"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a947920902420a6ada6ad51892082521978e9dd44a802663b001436e4b771684"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:add5e227e0554d3a52cf390a7635edaffdf4f8fce4fdbcef3cc2055bb396a30c"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9b1cda8fcb736250d7e8711d4580ebf004a46771432be0ae4796944b5dfa5d"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb12a2223fe0135c7ff1356a143d57f95bbf1f4a66584f1fc74df21d86b993"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c596cc0f4cb574877550ce4ecd51f8037469146addd676d7c1a30ebe6391923f"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ab4c823b216a4aeab3fdbf579c5843165756bd9ad87cc6b1c65919c4715f783"}, - {file = "jiter-0.12.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e427eee51149edf962203ff8db75a7514ab89be5cb623fb9cea1f20b54f1107b"}, - {file = "jiter-0.12.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:edb868841f84c111255ba5e80339d386d937ec1fdce419518ce1bd9370fac5b6"}, - {file = "jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183"}, - {file = "jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873"}, - {file = "jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473"}, - {file = "jiter-0.12.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c9d28b218d5f9e5f69a0787a196322a5056540cb378cac8ff542b4fa7219966c"}, - {file = "jiter-0.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0ee12028daf8cfcf880dd492349a122a64f42c059b6c62a2b0c96a83a8da820"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b135ebe757a82d67ed2821526e72d0acf87dd61f6013e20d3c45b8048af927b"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15d7fafb81af8a9e3039fc305529a61cd933eecee33b4251878a1c89859552a3"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92d1f41211d8a8fe412faad962d424d334764c01dac6691c44691c2e4d3eedaf"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a64a48d7c917b8f32f25c176df8749ecf08cec17c466114727efe7441e17f6d"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:122046f3b3710b85de99d9aa2f3f0492a8233a2f54a64902b096efc27ea747b5"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:27ec39225e03c32c6b863ba879deb427882f243ae46f0d82d68b695fa5b48b40"}, - {file = "jiter-0.12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26b9e155ddc132225a39b1995b3b9f0fe0f79a6d5cbbeacf103271e7d309b404"}, - {file = "jiter-0.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ab05b7c58e29bb9e60b70c2e0094c98df79a1e42e397b9bb6eaa989b7a66dd0"}, - {file = 
"jiter-0.12.0-cp39-cp39-win32.whl", hash = "sha256:59f9f9df87ed499136db1c2b6c9efb902f964bed42a582ab7af413b6a293e7b0"}, - {file = "jiter-0.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:d3719596a1ebe7a48a498e8d5d0c4bf7553321d4c3eee1d620628d51351a3928"}, - {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:4739a4657179ebf08f85914ce50332495811004cc1747852e8b2041ed2aab9b8"}, - {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:41da8def934bf7bec16cb24bd33c0ca62126d2d45d81d17b864bd5ad721393c3"}, - {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c44ee814f499c082e69872d426b624987dbc5943ab06e9bbaa4f81989fdb79e"}, - {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2097de91cf03eaa27b3cbdb969addf83f0179c6afc41bbc4513705e013c65d"}, - {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb"}, - {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b"}, - {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f"}, - {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c"}, - {file = "jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b"}, + {file = "jiter-0.11.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ed58841a491bbbf3f7c55a6b68fff568439ab73b2cce27ace0e169057b5851df"}, + {file = "jiter-0.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:499beb9b2d7e51d61095a8de39ebcab1d1778f2a74085f8305a969f6cee9f3e4"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b87b2821795e28cc990939b68ce7a038edea680a24910bd68a79d54ff3f03c02"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:83f6fa494d8bba14ab100417c80e70d32d737e805cb85be2052d771c76fcd1f8"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fbc6aea1daa2ec6f5ed465f0c5e7b0607175062ceebbea5ca70dd5ddab58083"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:302288e2edc43174bb2db838e94688d724f9aad26c5fb9a74f7a5fb427452a6a"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85db563fe3b367bb568af5d29dea4d4066d923b8e01f3417d25ebecd958de815"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f1c1ba2b6b22f775444ef53bc2d5778396d3520abc7b2e1da8eb0c27cb3ffb10"}, + {file = "jiter-0.11.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:523be464b14f8fd0cc78da6964b87b5515a056427a2579f9085ce30197a1b54a"}, + {file = "jiter-0.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:25b99b3f04cd2a38fefb22e822e35eb203a2cd37d680dbbc0c0ba966918af336"}, + {file = "jiter-0.11.1-cp310-cp310-win32.whl", hash = "sha256:47a79e90545a596bb9104109777894033347b11180d4751a216afef14072dbe7"}, + {file = "jiter-0.11.1-cp310-cp310-win_amd64.whl", 
hash = "sha256:cace75621ae9bd66878bf69fbd4dfc1a28ef8661e0c2d0eb72d3d6f1268eddf5"}, + {file = "jiter-0.11.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9b0088ff3c374ce8ce0168523ec8e97122ebb788f950cf7bb8e39c7dc6a876a2"}, + {file = "jiter-0.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:74433962dd3c3090655e02e461267095d6c84f0741c7827de11022ef8d7ff661"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d98030e345e6546df2cc2c08309c502466c66c4747b043f1a0d415fada862b8"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d6db0b2e788db46bec2cf729a88b6dd36959af2abd9fa2312dfba5acdd96dcb"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55678fbbda261eafe7289165dd2ddd0e922df5f9a1ae46d7c79a5a15242bd7d1"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a6b74fae8e40497653b52ce6ca0f1b13457af769af6fb9c1113efc8b5b4d9be"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a55a453f8b035eb4f7852a79a065d616b7971a17f5e37a9296b4b38d3b619e4"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2638148099022e6bdb3f42904289cd2e403609356fb06eb36ddec2d50958bc29"}, + {file = "jiter-0.11.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:252490567a5d990986f83b95a5f1ca1bf205ebd27b3e9e93bb7c2592380e29b9"}, + {file = "jiter-0.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d431d52b0ca2436eea6195f0f48528202100c7deda354cb7aac0a302167594d5"}, + {file = "jiter-0.11.1-cp311-cp311-win32.whl", hash = "sha256:db6f41e40f8bae20c86cb574b48c4fd9f28ee1c71cb044e9ec12e78ab757ba3a"}, + {file = "jiter-0.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:0cc407b8e6cdff01b06bb80f61225c8b090c3df108ebade5e0c3c10993735b19"}, + {file = "jiter-0.11.1-cp311-cp311-win_arm64.whl", hash = "sha256:fe04ea475392a91896d1936367854d346724a1045a247e5d1c196410473b8869"}, + {file = "jiter-0.11.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c92148eec91052538ce6823dfca9525f5cfc8b622d7f07e9891a280f61b8c96c"}, + {file = "jiter-0.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ecd4da91b5415f183a6be8f7158d127bdd9e6a3174138293c0d48d6ea2f2009d"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7e3ac25c00b9275684d47aa42febaa90a9958e19fd1726c4ecf755fbe5e553b"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57d7305c0a841858f866cd459cd9303f73883fb5e097257f3d4a3920722c69d4"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e86fa10e117dce22c547f31dd6d2a9a222707d54853d8de4e9a2279d2c97f239"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae5ef1d48aec7e01ee8420155d901bb1d192998fa811a65ebb82c043ee186711"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb68e7bf65c990531ad8715e57d50195daf7c8e6f1509e617b4e692af1108939"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43b30c8154ded5845fa454ef954ee67bfccce629b2dea7d01f795b42bc2bda54"}, + {file = "jiter-0.11.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:586cafbd9dd1f3ce6a22b4a085eaa6be578e47ba9b18e198d4333e598a91db2d"}, + {file = 
"jiter-0.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:677cc2517d437a83bb30019fd4cf7cad74b465914c56ecac3440d597ac135250"}, + {file = "jiter-0.11.1-cp312-cp312-win32.whl", hash = "sha256:fa992af648fcee2b850a3286a35f62bbbaeddbb6dbda19a00d8fbc846a947b6e"}, + {file = "jiter-0.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:88b5cae9fa51efeb3d4bd4e52bfd4c85ccc9cac44282e2a9640893a042ba4d87"}, + {file = "jiter-0.11.1-cp312-cp312-win_arm64.whl", hash = "sha256:9a6cae1ab335551917f882f2c3c1efe7617b71b4c02381e4382a8fc80a02588c"}, + {file = "jiter-0.11.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:71b6a920a5550f057d49d0e8bcc60945a8da998019e83f01adf110e226267663"}, + {file = "jiter-0.11.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b3de72e925388453a5171be83379549300db01284f04d2a6f244d1d8de36f94"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc19dd65a2bd3d9c044c5b4ebf657ca1e6003a97c0fc10f555aa4f7fb9821c00"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d58faaa936743cd1464540562f60b7ce4fd927e695e8bc31b3da5b914baa9abd"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:902640c3103625317291cb73773413b4d71847cdf9383ba65528745ff89f1d14"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:30405f726e4c2ed487b176c09f8b877a957f535d60c1bf194abb8dadedb5836f"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3217f61728b0baadd2551844870f65219ac4a1285d5e1a4abddff3d51fdabe96"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1364cc90c03a8196f35f396f84029f12abe925415049204446db86598c8b72c"}, + {file = "jiter-0.11.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:53a54bf8e873820ab186b2dca9f6c3303f00d65ae5e7b7d6bda1b95aa472d646"}, + {file = "jiter-0.11.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7e29aca023627b0e0c2392d4248f6414d566ff3974fa08ff2ac8dbb96dfee92a"}, + {file = "jiter-0.11.1-cp313-cp313-win32.whl", hash = "sha256:f153e31d8bca11363751e875c0a70b3d25160ecbaee7b51e457f14498fb39d8b"}, + {file = "jiter-0.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:f773f84080b667c69c4ea0403fc67bb08b07e2b7ce1ef335dea5868451e60fed"}, + {file = "jiter-0.11.1-cp313-cp313-win_arm64.whl", hash = "sha256:635ecd45c04e4c340d2187bcb1cea204c7cc9d32c1364d251564bf42e0e39c2d"}, + {file = "jiter-0.11.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d892b184da4d94d94ddb4031296931c74ec8b325513a541ebfd6dfb9ae89904b"}, + {file = "jiter-0.11.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa22c223a3041dacb2fcd37c70dfd648b44662b4a48e242592f95bda5ab09d58"}, + {file = "jiter-0.11.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:330e8e6a11ad4980cd66a0f4a3e0e2e0f646c911ce047014f984841924729789"}, + {file = "jiter-0.11.1-cp313-cp313t-win_amd64.whl", hash = "sha256:09e2e386ebf298547ca3a3704b729471f7ec666c2906c5c26c1a915ea24741ec"}, + {file = "jiter-0.11.1-cp313-cp313t-win_arm64.whl", hash = "sha256:fe4a431c291157e11cee7c34627990ea75e8d153894365a3bc84b7a959d23ca8"}, + {file = "jiter-0.11.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:0fa1f70da7a8a9713ff8e5f75ec3f90c0c870be6d526aa95e7c906f6a1c8c676"}, + {file = "jiter-0.11.1-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:569ee559e5046a42feb6828c55307cf20fe43308e3ae0d8e9e4f8d8634d99944"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f69955fa1d92e81987f092b233f0be49d4c937da107b7f7dcf56306f1d3fcce9"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:090f4c9d4a825e0fcbd0a2647c9a88a0f366b75654d982d95a9590745ff0c48d"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbf3d8cedf9e9d825233e0dcac28ff15c47b7c5512fdfe2e25fd5bbb6e6b0cee"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2aa9b1958f9c30d3d1a558b75f0626733c60eb9b7774a86b34d88060be1e67fe"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42d1ca16590b768c5e7d723055acd2633908baacb3628dd430842e2e035aa90"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5db4c2486a023820b701a17aec9c5a6173c5ba4393f26662f032f2de9c848b0f"}, + {file = "jiter-0.11.1-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:4573b78777ccfac954859a6eff45cbd9d281d80c8af049d0f1a3d9fc323d5c3a"}, + {file = "jiter-0.11.1-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:7593ac6f40831d7961cb67633c39b9fef6689a211d7919e958f45710504f52d3"}, + {file = "jiter-0.11.1-cp314-cp314-win32.whl", hash = "sha256:87202ec6ff9626ff5f9351507def98fcf0df60e9a146308e8ab221432228f4ea"}, + {file = "jiter-0.11.1-cp314-cp314-win_amd64.whl", hash = "sha256:a5dd268f6531a182c89d0dd9a3f8848e86e92dfff4201b77a18e6b98aa59798c"}, + {file = "jiter-0.11.1-cp314-cp314-win_arm64.whl", hash = "sha256:5d761f863f912a44748a21b5c4979c04252588ded8d1d2760976d2e42cd8d991"}, + {file = "jiter-0.11.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2cc5a3965285ddc33e0cab933e96b640bc9ba5940cea27ebbbf6695e72d6511c"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b572b3636a784c2768b2342f36a23078c8d3aa6d8a30745398b1bab58a6f1a8"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad93e3d67a981f96596d65d2298fe8d1aa649deb5374a2fb6a434410ee11915e"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a83097ce379e202dcc3fe3fc71a16d523d1ee9192c8e4e854158f96b3efe3f2f"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7042c51e7fbeca65631eb0c332f90c0c082eab04334e7ccc28a8588e8e2804d9"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a68d679c0e47649a61df591660507608adc2652442de7ec8276538ac46abe08"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a1b0da75dbf4b6ec0b3c9e604d1ee8beaf15bc046fff7180f7d89e3cdbd3bb51"}, + {file = "jiter-0.11.1-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:69dd514bf0fa31c62147d6002e5ca2b3e7ef5894f5ac6f0a19752385f4e89437"}, + {file = "jiter-0.11.1-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:bb31ac0b339efa24c0ca606febd8b77ef11c58d09af1b5f2be4c99e907b11111"}, + {file = "jiter-0.11.1-cp314-cp314t-win32.whl", hash = "sha256:b2ce0d6156a1d3ad41da3eec63b17e03e296b78b0e0da660876fccfada86d2f7"}, + {file = "jiter-0.11.1-cp314-cp314t-win_amd64.whl", hash = "sha256:f4db07d127b54c4a2d43b4cf05ff0193e4f73e0dd90c74037e16df0b29f666e1"}, + {file = "jiter-0.11.1-cp314-cp314t-win_arm64.whl", hash = 
"sha256:28e4fdf2d7ebfc935523e50d1efa3970043cfaa161674fe66f9642409d001dfe"}, + {file = "jiter-0.11.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:baa99c8db49467527658bb479857344daf0a14dff909b7f6714579ac439d1253"}, + {file = "jiter-0.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:860fe55fa3b01ad0edf2adde1098247ff5c303d0121f9ce028c03d4f88c69502"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:173dd349d99b6feaf5a25a6fbcaf3489a6f947708d808240587a23df711c67db"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:14ac1dca837514cc946a6ac2c4995d9695303ecc754af70a3163d057d1a444ab"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69af47de5f93a231d5b85f7372d3284a5be8edb4cc758f006ec5a1406965ac5e"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:685f8b3abd3bbd3e06e4dfe2429ff87fd5d7a782701151af99b1fcbd80e31b2b"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d04afa2d4e5526e54ae8a58feea953b1844bf6e3526bc589f9de68e86d0ea01"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1e92b927259035b50d8e11a8fdfe0ebd014d883e4552d37881643fa289a4bcf1"}, + {file = "jiter-0.11.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e7bd8be4fad8d4c5558b7801770cd2da6c072919c6f247cc5336edb143f25304"}, + {file = "jiter-0.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:121381a77a3c85987f3eba0d30ceaca9116f7463bedeec2fa79b2e7286b89b60"}, + {file = "jiter-0.11.1-cp39-cp39-win32.whl", hash = "sha256:160225407f6dfabdf9be1b44e22f06bc293a78a28ffa4347054698bd712dad06"}, + {file = "jiter-0.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:028e0d59bcdfa1079f8df886cdaefc6f515c27a5288dec956999260c7e4a7cfd"}, + {file = "jiter-0.11.1-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:e642b5270e61dd02265866398707f90e365b5db2eb65a4f30c789d826682e1f6"}, + {file = "jiter-0.11.1-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:464ba6d000585e4e2fd1e891f31f1231f497273414f5019e27c00a4b8f7a24ad"}, + {file = "jiter-0.11.1-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:055568693ab35e0bf3a171b03bb40b2dcb10352359e0ab9b5ed0da2bf1eb6f6f"}, + {file = "jiter-0.11.1-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0c69ea798d08a915ba4478113efa9e694971e410056392f4526d796f136d3fa"}, + {file = "jiter-0.11.1-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:0d4d6993edc83cf75e8c6828a8d6ce40a09ee87e38c7bfba6924f39e1337e21d"}, + {file = "jiter-0.11.1-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:f78d151c83a87a6cf5461d5ee55bc730dd9ae227377ac6f115b922989b95f838"}, + {file = "jiter-0.11.1-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9022974781155cd5521d5cb10997a03ee5e31e8454c9d999dcdccd253f2353f"}, + {file = "jiter-0.11.1-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18c77aaa9117510d5bdc6a946baf21b1f0cfa58ef04d31c8d016f206f2118960"}, + {file = "jiter-0.11.1.tar.gz", hash = "sha256:849dcfc76481c0ea0099391235b7ca97d7279e0fa4c86005457ac7c88e8b76dc"}, ] [[package]] @@ -2454,29 +2454,29 @@ files = [ [[package]] name = "openai" -version = "2.7.1" +version = "1.109.1" 
description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "openai-2.7.1-py3-none-any.whl", hash = "sha256:2f2530354d94c59c614645a4662b9dab0a5b881c5cd767a8587398feac0c9021"}, - {file = "openai-2.7.1.tar.gz", hash = "sha256:df4d4a3622b2df3475ead8eb0fbb3c27fd1c070fa2e55d778ca4f40e0186c726"}, + {file = "openai-1.109.1-py3-none-any.whl", hash = "sha256:6bcaf57086cf59159b8e27447e4e7dd019db5d29a438072fbd49c290c7e65315"}, + {file = "openai-1.109.1.tar.gz", hash = "sha256:d173ed8dbca665892a6db099b4a2dfac624f94d20a93f46eb0b56aae940ed869"}, ] [package.dependencies] anyio = ">=3.5.0,<5" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" -jiter = ">=0.10.0,<1" +jiter = ">=0.4.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tqdm = ">4" typing-extensions = ">=4.11,<5" [package.extras] -aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.9)"] +aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] realtime = ["websockets (>=13,<16)"] voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"] @@ -4970,4 +4970,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "02b458d29498290917a02277daf00235fe7c5eeb349c80697aa740c0806f665d" +content-hash = "9c94913f5a5a17981426d96d1ad0d6120ca2a0e09bc44e83c30e56b514910130" diff --git a/api/pyproject.toml b/api/pyproject.toml index e8c38e1e19..29129d2b7a 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "api" -version = "0.62.1" +version = "0.61.0" description = "Agenta API" authors = [ { name = "Mahmoud Mabrouk", email = "mahmoud@agenta.ai" }, @@ -57,7 +57,7 @@ python-jsonpath = "^2.0.0" h11 = "^0.16.0" ecdsa = "^0.19.1" bson = "^0.5.10" -agenta = ">=0.61.0" +agenta = "^0.60.1" tiktoken = "0.11.0" google-auth = ">=2.23,<3" diff --git a/docs/blog/entries/annotate-your-llm-response-preview.mdx b/docs/blog/entries/annotate-your-llm-response-preview.mdx index ac2443ce74..0af9760569 100644 --- a/docs/blog/entries/annotate-your-llm-response-preview.mdx +++ b/docs/blog/entries/annotate-your-llm-response-preview.mdx @@ -20,7 +20,7 @@ This is useful to: - Run custom evaluation workflows - Measure application performance in real-time -Check out the how to [annotate traces from API](/observability/trace-with-python-sdk/annotate-traces) for more details. Or try our new tutorial (available as [jupyter notebook](https://github.com/Agenta-AI/agenta/blob/main/examples/jupyter/capture_user_feedback.ipynb)) [here](/tutorials/cookbooks/capture-user-feedback). +Check out the how to [annotate traces from API](/observability/trace-with-python-sdk/annotate-traces) for more details. Or try our new tutorial (available as [jupyter notebook](https://github.com/Agenta-AI/agenta/blob/main/examples/jupyter/observability/capture_user_feedback.ipynb)) [here](/tutorials/cookbooks/capture-user-feedback). 
- Custom output schemas in LLM-as-a-Judge - Example 1 - Custom output schemas in LLM-as-a-Judge - Example 2 - - -## What's New - -### **Flexible Output Types** -Configure the evaluator to return different types of outputs: -- **Binary**: Return a simple yes/no or pass/fail score -- **Multiclass**: Choose from multiple predefined categories -- **Custom JSON**: Define any structure that fits your use case - -### **Include Reasoning for Better Quality** -Enable the reasoning option to have the LLM explain its evaluation. This improves prediction quality because the model thinks through its assessment before providing a score. - -When you include reasoning, the evaluator returns both the score and a detailed explanation of how it arrived at that judgment. - -### **Advanced: Raw JSON Schema** -For complete control, provide a raw JSON schema. The evaluator will return responses that match your exact structure. - -This lets you capture multiple scores, categorical labels, confidence levels, and custom fields in a single evaluation pass. You can structure the output however your workflow requires. - -### **Use Custom Schemas in Evaluation** -Once configured, your custom schemas work seamlessly in the evaluation workflow. The results display in the evaluation dashboard with all your custom fields visible. - -This makes it easy to analyze multiple dimensions of quality in a single evaluation run. - -## Example Use Cases - -**Binary Score with Reasoning:** -Return a simple correct/incorrect judgment along with an explanation of why the output succeeded or failed. - -**Multi-dimensional Feedback:** -Capture separate scores for accuracy, relevance, completeness, and tone in one evaluation. Include reasoning for each dimension. - -**Structured Classification:** -Return categorical labels (excellent/good/fair/poor) along with specific issues found and suggestions for improvement. - -## Getting Started - -To use custom output schemas with LLM-as-a-Judge: - -1. Open the evaluator configuration -2. Select your desired output type (binary, multiclass, or custom) -3. Enable reasoning if you want explanations -4. For advanced use, provide your JSON schema -5. Run your evaluation - -Learn more in the [LLM-as-a-Judge documentation](/evaluation/configure-evaluators/llm-as-a-judge). diff --git a/docs/blog/main.mdx b/docs/blog/main.mdx index 66a0256cb0..a0a6914f4d 100644 --- a/docs/blog/main.mdx +++ b/docs/blog/main.mdx @@ -10,33 +10,6 @@ import Image from "@theme/IdealImage";
-### [Customize LLM-as-a-Judge Output Schemas](/changelog/customize-llm-as-a-judge-output-schemas) - -_10 November 2025_ - -**v0.62.0** - -
- Custom output schemas in LLM-as-a-Judge - Example 1 - Custom output schemas in LLM-as-a-Judge - Example 2 -
-
-The LLM-as-a-Judge evaluator now supports custom output schemas. Create multiple feedback outputs per evaluator with any structure you need.
-
-You can configure output types (binary, multiclass), include reasoning to improve prediction quality, or provide a raw JSON schema with any structure you define. Use these custom schemas in your evaluations to capture exactly the feedback you need.
-
-Learn more in the [LLM-as-a-Judge documentation](/evaluation/configure-evaluators/llm-as-a-judge).
-
----
-
 ### [Documentation Overhaul](/changelog/documentation-architecture-overhaul)

 _3 November 2025_

@@ -297,7 +270,7 @@ This is useful to:
 - Run custom evaluation workflows
 - Measure application performance in real-time

-Check out the how to [annotate traces from API](/observability/trace-with-python-sdk/annotate-traces) for more details. Or try our new tutorial (available as [jupyter notebook](https://github.com/Agenta-AI/agenta/blob/main/examples/jupyter/capture_user_feedback.ipynb)) [here](/tutorials/cookbooks/capture-user-feedback).
+Check out how to [annotate traces from the API](/observability/trace-with-python-sdk/annotate-traces) for more details, or try our new tutorial (available as a [Jupyter notebook](https://github.com/Agenta-AI/agenta/blob/main/examples/jupyter/observability/capture_user_feedback.ipynb)) [here](/tutorials/cookbooks/capture-user-feedback).

 Basic output schema configuration
-
-
-#### Advanced Configuration
-
-For complete control, you can provide a custom JSON schema. This lets you define any output structure you need. For example, you could return multiple scores, confidence levels, detailed feedback categories, or any combination of fields.
-
-
-Advanced output schema configuration
+The model can be configured to select one of the supported options (`gpt-3.5-turbo`, `gpt-4o`, `gpt-5`, `gpt-5-mini`, `gpt-5-nano`, `claude-3-5-sonnet`, `claude-3-5-haiku`, `claude-3-5-opus`). To use LLM-as-a-Judge, you'll need to set your OpenAI or Anthropic API key in the settings. The key is saved locally and only sent to our servers for evaluation—it's not stored there.

diff --git a/docs/docs/evaluation/evaluation-from-sdk/01-quick-start.mdx b/docs/docs/evaluation/evaluation-from-sdk/01-quick-start.mdx
new file mode 100644
index 0000000000..61189a4e88
--- /dev/null
+++ b/docs/docs/evaluation/evaluation-from-sdk/01-quick-start.mdx
@@ -0,0 +1,299 @@
+---
+title: "Quick Start"
+sidebar_label: "Quick Start"
+description: "Learn how to run evaluations programmatically with the Agenta SDK in under 5 minutes"
+sidebar_position: 1
+---
+
+import GoogleColabButton from "@site/src/components/GoogleColabButton";
+
+
+  Open in Google Colaboratory
+
+
+# Quick Start Guide
+
+This guide shows you how to create your first evaluation using the Agenta SDK. You'll build a simple application that answers geography questions, then create evaluators to check if the answers are correct.
+
+## What You'll Build
+
+By the end of this guide, you'll have:
+- An application that returns country capitals
+- Two evaluators that check if answers are correct
+- A complete evaluation run with results
+
+The entire example takes less than 100 lines of code.
+ +## Prerequisites + +Install the Agenta SDK: + +```bash +pip install agenta +``` + +Set your environment variables: + +```bash +export AGENTA_API_KEY="your-api-key" +export AGENTA_HOST="https://cloud.agenta.ai" +``` + +## Step 1: Initialize Agenta + +Create a new Python file and initialize the SDK: + +```python +import agenta as ag + +ag.init() +``` + +## Step 2: Create Your Application + +An application is any function that processes inputs and returns outputs. Use the `@ag.application` decorator to mark your function: + +```python +@ag.application( + slug="capital_finder", + name="Capital Finder", + description="Returns the capital of a given country" +) +async def capital_finder(country: str): + """ + Your application logic goes here. + For this example, we'll use a simple dictionary lookup. + """ + capitals = { + "Germany": "Berlin", + "France": "Paris", + "Spain": "Madrid", + "Italy": "Rome", + } + return capitals.get(country, "Unknown") +``` + +The function receives parameters from your test data. In this case, it gets `country` from the testcase and returns the capital city. + +## Step 3: Create an Evaluator + +An evaluator checks if your application's output is correct. Use the `@ag.evaluator` decorator: + +```python +@ag.evaluator( + slug="exact_match", + name="Exact Match Evaluator", + description="Checks if the output exactly matches the expected answer" +) +async def exact_match(capital: str, outputs: str): + """ + Compare the application's output to the expected answer. + + Args: + capital: The expected answer from the testcase + outputs: What your application returned + + Returns: + A dictionary with score and success flag + """ + is_correct = outputs == capital + return { + "score": 1.0 if is_correct else 0.0, + "success": is_correct, + } +``` + +The evaluator receives two types of inputs: +- Fields from your testcase (like `capital`) +- The application's output (always called `outputs`) + +## Step 4: Create Test Data + +Define your test cases as a list of dictionaries: + +```python +test_data = [ + {"country": "Germany", "capital": "Berlin"}, + {"country": "France", "capital": "Paris"}, + {"country": "Spain", "capital": "Madrid"}, + {"country": "Italy", "capital": "Rome"}, +] +``` + +Each dictionary represents one test case. The keys become parameters that your application and evaluators can access. 
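+
+To make the mapping concrete, here is how the first testcase fans out to the functions defined above once the evaluation runs (an illustrative sketch of the call pattern, not extra code you need to write):
+
+```python
+# Testcase: {"country": "Germany", "capital": "Berlin"}
+#
+# The application declares only `country`, so it is called as:
+#   await capital_finder(country="Germany")   # -> "Berlin"
+#
+# The evaluator declares `capital` and `outputs`, so it is called as:
+#   await exact_match(capital="Berlin", outputs="Berlin")
+#   # -> {"score": 1.0, "success": True}
+```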
+ +## Step 5: Run the Evaluation + +Import the evaluation functions and run your test: + +```python +import asyncio +from agenta.sdk.evaluations import aevaluate + +async def run_evaluation(): + # Create a testset from your data + testset = await ag.testsets.acreate( + name="Country Capitals", + description="Test cases for capital city questions", + data=test_data, + ) + + # Run evaluation + result = await aevaluate( + testsets=[testset.id], + applications=[capital_finder], + evaluators=[exact_match], + ) + + return result + +# Run the evaluation +if __name__ == "__main__": + eval_result = asyncio.run(run_evaluation()) + print(f"Evaluation complete!") +``` + +## Complete Example + +Here's the full code in one place: + +```python +import asyncio +import agenta as ag +from agenta.sdk.evaluations import aevaluate + +# Initialize SDK +ag.init() + +# Define test data +test_data = [ + {"country": "Germany", "capital": "Berlin"}, + {"country": "France", "capital": "Paris"}, + {"country": "Spain", "capital": "Madrid"}, + {"country": "Italy", "capital": "Rome"}, +] + +# Create application +@ag.application( + slug="capital_finder", + name="Capital Finder", +) +async def capital_finder(country: str): + capitals = { + "Germany": "Berlin", + "France": "Paris", + "Spain": "Madrid", + "Italy": "Rome", + } + return capitals.get(country, "Unknown") + +# Create evaluator +@ag.evaluator( + slug="exact_match", + name="Exact Match", +) +async def exact_match(capital: str, outputs: str): + is_correct = outputs == capital + return { + "score": 1.0 if is_correct else 0.0, + "success": is_correct, + } + +# Run evaluation +async def main(): + testset = await ag.testsets.acreate( + name="Country Capitals", + data=test_data, + ) + + result = await aevaluate( + testsets=[testset.id], + applications=[capital_finder], + evaluators=[exact_match], + ) + + print(f"Evaluation complete!") + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Understanding the Data Flow + +When you run an evaluation, here's what happens: + +1. **Testcase data** flows to the application + - Input: `{"country": "Germany", "capital": "Berlin"}` + - Application receives: `country="Germany"` + - Application returns: `"Berlin"` + +2. **Both testcase data and application output** flow to the evaluator + - Evaluator receives: `capital="Berlin"` (expected answer from testcase) + - Evaluator receives: `outputs="Berlin"` (what the application returned) + - Evaluator compares them and returns: `{"score": 1.0, "success": True}` + +3. 
**Results are collected** and stored in Agenta + - You can view them in the web interface + - Or access them programmatically from the result object + +## Next Steps + +Now that you've created your first evaluation, you can: + +- Learn how to [configure custom evaluators](/evaluation/evaluation-from-sdk/configuring-evaluators) with different scoring logic +- Explore [built-in evaluators](/evaluation/evaluation-from-sdk/configuring-evaluators#built-in-evaluators) like LLM-as-a-judge +- Understand how to [configure your application](/evaluation/evaluation-from-sdk/configuring-applications) for different use cases +- Run [multiple evaluators](/evaluation/evaluation-from-sdk/running-evaluations) in a single evaluation + +## Common Patterns + +### Using Multiple Evaluators + +You can run several evaluators on the same application: + +```python +result = await aevaluate( + testsets=[testset.id], + applications=[capital_finder], + evaluators=[ + exact_match, + case_insensitive_match, + similarity_check, + ], +) +``` + +Each evaluator runs independently and produces its own scores. + +### Accessing Additional Test Data + +Your evaluators can access any field from the testcase: + +```python +@ag.evaluator(slug="region_aware") +async def region_aware(country: str, region: str, outputs: str): + # You can access multiple fields from the testcase + # and use them in your evaluation logic + pass +``` + +### Returning Multiple Metrics + +Evaluators can return multiple scores: + +```python +@ag.evaluator(slug="detailed_eval") +async def detailed_eval(expected: str, outputs: str): + return { + "exact_match": 1.0 if outputs == expected else 0.0, + "length_diff": abs(len(outputs) - len(expected)), + "success": outputs == expected, + } +``` + +## Getting Help + +If you run into issues: +- Check the [troubleshooting guide](/evaluation/troubleshooting) +- Join our [Discord community](https://discord.gg/agenta) +- Open an issue on [GitHub](https://github.com/agenta-ai/agenta) diff --git a/docs/docs/evaluation/evaluation-from-sdk/02-configuring-evaluators.mdx b/docs/docs/evaluation/evaluation-from-sdk/02-configuring-evaluators.mdx new file mode 100644 index 0000000000..91b19f7b63 --- /dev/null +++ b/docs/docs/evaluation/evaluation-from-sdk/02-configuring-evaluators.mdx @@ -0,0 +1,463 @@ +--- +title: "Configuring Evaluators" +sidebar_label: "Configuring Evaluators" +description: "Learn how to create custom evaluators and use built-in evaluators to check your application's output" +sidebar_position: 2 +--- + +# Configuring Evaluators + +Evaluators are functions that check if your application's output is correct. You can write your own custom evaluators or use Agenta's built-in evaluators. + +## Custom Evaluators + +Custom evaluators are Python functions decorated with `@ag.evaluator`. They receive inputs from your test data and the application's output, then return a dictionary with scores. 
+ +### Basic Structure + +```python +import agenta as ag + +@ag.evaluator( + slug="my_evaluator", + name="My Evaluator", + description="Checks if the output meets my criteria" +) +async def my_evaluator(expected: str, outputs: str): + is_correct = outputs == expected + return { + "score": 1.0 if is_correct else 0.0, + "success": is_correct, + } +``` + +The evaluator decorator takes these parameters: + +- **slug** (required): A unique identifier for your evaluator +- **name** (optional): A human-readable name shown in the UI +- **description** (optional): Explains what the evaluator checks + +### Understanding Evaluator Inputs + +Evaluators receive two types of inputs: + +1. **Test case fields**: Any field from your test data +2. **Application output**: Always called `outputs` + +When you run an evaluation, Agenta passes both the test case data and what your application returned to the evaluator. + +**Example:** + +```python +# Your test case +test_case = { + "question": "What is 2+2?", + "correct_answer": "4", + "difficulty": "easy" +} + +# Your evaluator can access any of these fields +@ag.evaluator(slug="math_checker") +async def math_checker( + correct_answer: str, # From test case + difficulty: str, # From test case + outputs: str # What the application returned +): + # Check if the application's output matches the correct answer + is_correct = outputs == correct_answer + + # You can use other fields in your logic + if difficulty == "easy": + return {"score": 1.0 if is_correct else 0.0} + else: + # More lenient scoring for hard questions + return {"score": 0.5 if is_correct else 0.0} +``` + +### Return Values + +Evaluators must return a dictionary. You can include any metrics you want, but these fields have special meaning: + +- **score**: A numeric value (typically 0.0 to 1.0) indicating quality +- **success**: A boolean flag indicating pass/fail + +```python +@ag.evaluator(slug="detailed_checker") +async def detailed_checker(expected: str, outputs: str): + return { + "score": 0.85, # Overall score + "success": True, # Did it pass? + "length_match": len(outputs) == len(expected), + "exact_match": outputs == expected, + "custom_metric": 42, + } +``` + +All values in the result dictionary are stored and displayed in the evaluation results. 
+ +### Practical Examples + +#### Case-Insensitive Match + +```python +@ag.evaluator( + slug="case_insensitive_match", + name="Case Insensitive Match" +) +async def case_insensitive_match(expected: str, outputs: str): + match = outputs.lower() == expected.lower() + return { + "score": 1.0 if match else 0.0, + "success": match, + } +``` + +#### Length Check + +```python +@ag.evaluator( + slug="length_validator", + name="Length Validator" +) +async def length_validator(outputs: str): + """Check if output is within acceptable length.""" + length = len(outputs) + is_valid = 10 <= length <= 500 + + return { + "success": is_valid, + "length": length, + "score": 1.0 if is_valid else 0.0, + } +``` + +#### Contains Keywords + +```python +@ag.evaluator( + slug="keyword_checker", + name="Keyword Checker" +) +async def keyword_checker(keywords: list[str], outputs: str): + """Check if output contains required keywords.""" + found = [kw for kw in keywords if kw.lower() in outputs.lower()] + score = len(found) / len(keywords) if keywords else 0.0 + + return { + "score": score, + "success": score >= 0.8, + "found_keywords": found, + "missing_keywords": [kw for kw in keywords if kw not in found], + } +``` + +## Built-in Evaluators + +Agenta provides pre-built evaluators for common evaluation tasks. You import them from `agenta.sdk.workflows.builtin` and pass them directly to the `aevaluate()` function. + +### LLM-as-a-Judge + +The LLM-as-a-judge evaluator uses a language model to evaluate your application's output. This is useful when you need nuanced judgments that simple string matching cannot provide. + +```python +from agenta.sdk.workflows import builtin +from agenta.sdk.evaluations import aevaluate + +llm_evaluator = builtin.auto_ai_critique( + slug="quality_evaluator", + name="Quality Evaluator", + description="Uses an LLM to judge response quality", + correct_answer_key="expected_answer", + model="gpt-3.5-turbo", # or "gpt-4", "claude-3-sonnet", etc. + prompt_template=[ + { + "role": "system", + "content": "You are an expert evaluator of AI responses.", + }, + { + "role": "user", + "content": ( + "Expected answer: {{expected_answer}}\n" + "Actual answer: {{outputs}}\n\n" + "Rate the quality of the actual answer from 0.0 to 1.0.\n" + "Respond with ONLY a number, nothing else." + ), + }, + ], +) + +# Use it in evaluation +result = await aevaluate( + testsets=[testset.id], + applications=[my_app], + evaluators=[llm_evaluator], +) +``` + +**Parameters:** + +- **slug** (required): Unique identifier for the evaluator +- **prompt_template** (required): List of message dictionaries with `role` and `content` + - Use `{{field_name}}` placeholders that will be replaced with test case values + - `{{outputs}}` is always available for the application's output +- **correct_answer_key** (optional): Field name in test case containing the expected answer +- **model** (optional): Which LLM to use (default: "gpt-3.5-turbo") +- **name** (optional): Display name +- **description** (optional): Description of what this evaluator checks + +The prompt template uses curly brace syntax `{{variable}}` for placeholders. All fields from your test case are available, plus `{{outputs}}`. + +### String Matching Evaluators + +#### Exact Match + +Checks if the output exactly matches the expected answer. 
+ +```python +from agenta.sdk.workflows import builtin +from agenta.sdk.evaluations import aevaluate + +exact_match = builtin.auto_exact_match( + correct_answer_key="expected" +) + +# Use in evaluation +result = await aevaluate( + testsets=[testset.id], + applications=[my_app], + evaluators=[exact_match], +) +``` + +**Parameters:** +- **correct_answer_key** (optional): Field name in test case with expected value (default: "correct_answer") + +**Returns:** +- `success`: True if output exactly matches expected value + +#### Starts With + +Checks if the output starts with a specific prefix. + +```python +prefix_check = builtin.auto_starts_with( + prefix="Answer:", + case_sensitive=True +) +``` + +**Parameters:** +- **prefix** (required): The string the output should start with +- **case_sensitive** (optional): Whether to match case (default: True) + +**Returns:** +- `success`: True if output starts with the prefix + +#### Ends With + +Checks if the output ends with a specific suffix. + +```python +suffix_check = builtin.auto_ends_with( + suffix="Thank you!", + case_sensitive=False +) +``` + +**Parameters:** +- **suffix** (required): The string the output should end with +- **case_sensitive** (optional): Whether to match case (default: True) + +**Returns:** +- `success`: True if output ends with the suffix + +#### Contains + +Checks if the output contains a substring. + +```python +contains_check = builtin.auto_contains( + substring="important keyword", + case_sensitive=False +) +``` + +**Parameters:** +- **substring** (required): The string to search for +- **case_sensitive** (optional): Whether to match case (default: True) + +**Returns:** +- `success`: True if output contains the substring + +#### Contains Any + +Checks if the output contains at least one of several substrings. + +```python +any_check = builtin.auto_contains_any( + substrings=["hello", "hi", "greetings"], + case_sensitive=False +) +``` + +**Parameters:** +- **substrings** (required): List of strings to search for +- **case_sensitive** (optional): Whether to match case (default: True) + +**Returns:** +- `success`: True if output contains at least one substring + +#### Contains All + +Checks if the output contains all of several substrings. + +```python +all_check = builtin.auto_contains_all( + substrings=["name", "age", "email"], + case_sensitive=False +) +``` + +**Parameters:** +- **substrings** (required): List of strings that must all be present +- **case_sensitive** (optional): Whether to match case (default: True) + +**Returns:** +- `success`: True if output contains all substrings + +### Regex Evaluator + +Checks if the output matches a regular expression pattern. + +```python +regex_check = builtin.auto_regex_test( + regex_pattern=r"\d{3}-\d{3}-\d{4}", # Phone number pattern + regex_should_match=True, + case_sensitive=False +) +``` + +**Parameters:** +- **regex_pattern** (required): Regular expression pattern to test +- **regex_should_match** (optional): Whether pattern should match (default: True) +- **case_sensitive** (optional): Whether to match case (default: True) + +**Returns:** +- `success`: True if pattern match result equals `regex_should_match` + +### JSON Evaluators + +#### Contains JSON + +Checks if the output contains valid JSON. + +```python +json_check = builtin.auto_contains_json() +``` + +**Returns:** +- `success`: True if output contains parseable JSON + +#### JSON Field Match + +Checks if a specific field in JSON output matches the expected value. 
+ +```python +field_check = builtin.field_match_test( + json_field="status", + correct_answer_key="expected_status" +) +``` + +**Parameters:** +- **json_field** (required): Name of field to extract from JSON output +- **correct_answer_key** (optional): Test case field with expected value (default: "correct_answer") + +**Returns:** +- `success`: True if extracted field matches expected value + +#### JSON Diff + +Compares JSON structures and calculates similarity score. + +```python +json_diff = builtin.auto_json_diff( + correct_answer_key="expected_json", + threshold=0.8, + compare_schema_only=False +) +``` + +**Parameters:** +- **correct_answer_key** (optional): Test case field with expected JSON (default: "correct_answer") +- **threshold** (optional): Minimum similarity score to pass (default: 0.5) +- **predict_keys** (optional): Whether to predict which keys to compare (default: False) +- **case_insensitive_keys** (optional): Whether to ignore case in key names (default: False) +- **compare_schema_only** (optional): Only compare structure, not values (default: False) + +**Returns:** +- `score`: Similarity score from 0.0 to 1.0 +- `success`: True if score meets threshold + +### Similarity Evaluators + +#### Levenshtein Distance + +Calculates edit distance between output and expected value. + +```python +levenshtein = builtin.auto_levenshtein_distance( + correct_answer_key="expected", + threshold=0.8, + case_sensitive=False +) +``` + +**Parameters:** +- **correct_answer_key** (optional): Test case field with expected value (default: "correct_answer") +- **case_sensitive** (optional): Whether to match case (default: True) +- **threshold** (optional): Minimum similarity to pass (default: 0.5) + +**Returns:** +- `score`: Normalized similarity score from 0.0 to 1.0 +- `success`: True if score meets threshold + +#### Similarity Match + +Uses Python's SequenceMatcher to calculate similarity. + +```python +similarity = builtin.auto_similarity_match( + correct_answer_key="expected", + threshold=0.75 +) +``` + +**Parameters:** +- **correct_answer_key** (optional): Test case field with expected value (default: "correct_answer") +- **threshold** (optional): Minimum similarity to pass (default: 0.5) + +**Returns:** +- `score`: Similarity ratio from 0.0 to 1.0 +- `success`: True if score meets threshold + +#### Semantic Similarity + +Uses embeddings to measure semantic similarity. 
+ +```python +semantic = builtin.auto_semantic_similarity( + correct_answer_key="expected", + embedding_model="text-embedding-3-small", + threshold=0.8 +) +``` + +**Parameters:** +- **correct_answer_key** (optional): Test case field with expected value (default: "correct_answer") +- **embedding_model** (optional): OpenAI embedding model (default: "text-embedding-3-small") +- **threshold** (optional): Minimum similarity to pass (default: 0.5) + +**Returns:** +- `score`: Cosine similarity from 0.0 to 1.0 +- `success`: True if score meets threshold diff --git a/docs/docs/evaluation/evaluation-from-sdk/03-configuring-applications.mdx b/docs/docs/evaluation/evaluation-from-sdk/03-configuring-applications.mdx new file mode 100644 index 0000000000..92cb6cd32b --- /dev/null +++ b/docs/docs/evaluation/evaluation-from-sdk/03-configuring-applications.mdx @@ -0,0 +1,166 @@ +--- +title: "Configuring Applications" +sidebar_label: "Configuring Applications" +description: "Learn how to define and configure applications for evaluation with the Agenta SDK" +sidebar_position: 3 +--- + +# Configuring Applications + +Applications are the functions you want to evaluate. They receive inputs from your test data and return outputs that evaluators will check. + +## Basic Application Structure + +An application is any Python function decorated with `@ag.application`. The decorator tells Agenta this function should be evaluated. + +```python +import agenta as ag + +ag.init() + +@ag.application( + slug="my_app", + name="My Application", + description="Describes what this application does" +) +async def my_app(input_text: str): + # Your application logic here + result = process(input_text) + return result +``` + +The application decorator takes these parameters: + +- **slug** (required): A unique identifier for your application +- **name** (optional): A human-readable name shown in the UI +- **description** (optional): Explains what the application does + +## Understanding Application Inputs + +Applications receive inputs from your test cases. The function parameters must match field names in your test data. + +**Example:** + +```python +# Your test case +test_case = { + "country": "France", + "language": "French", + "capital": "Paris" +} + +# Your application receives these as parameters +@ag.application(slug="country_info") +async def country_info(country: str, language: str): + # country = "France" + # language = "French" + # Note: capital is not used by this application + return f"The capital of {country} is well known!" +``` + +You only need to declare parameters for the fields your application uses. Extra fields in the test case are ignored. +## Application Return Values + +Applications should return the output you want evaluators to check. 
The return value can be:
+
+- **String**: Text responses
+- **Dictionary**: Structured data
+- **List**: Multiple items
+- **Number**: Numeric results
+- **Any JSON-serializable value**
+
+### String Returns
+
+Most common for text-based applications:
+
+```python
+@ag.application(slug="question_answerer")
+async def question_answerer(question: str) -> str:
+    answer = generate_answer(question)
+    return answer  # Simple string
+```
+
+### Dictionary Returns
+
+Useful for structured outputs:
+
+```python
+@ag.application(slug="entity_extractor")
+async def entity_extractor(text: str) -> dict:
+    return {
+        "entities": ["Paris", "France"],
+        "count": 2,
+        "confidence": 0.95
+    }
+```
+
+### List Returns
+
+For multiple items:
+
+```python
+@ag.application(slug="keyword_extractor")
+async def keyword_extractor(text: str) -> list:
+    keywords = extract_keywords(text)
+    return keywords  # ["keyword1", "keyword2", ...]
+```
+
+## Application Examples
+
+### Simple Lookup Application
+
+```python
+@ag.application(
+    slug="capital_lookup",
+    name="Capital City Lookup",
+    description="Returns the capital city for a given country"
+)
+async def capital_lookup(country: str) -> str:
+    """Look up a country's capital city."""
+    capitals = {
+        "France": "Paris",
+        "Germany": "Berlin",
+        "Spain": "Madrid",
+        "Italy": "Rome",
+    }
+    return capitals.get(country, "Unknown")
+```
+
+### LLM-Based Application
+
+```python
+import openai
+
+@ag.application(
+    slug="question_answerer",
+    name="Question Answering System",
+    description="Answers questions using GPT-4"
+)
+async def question_answerer(question: str, context: str) -> str:
+    """Answer questions based on provided context."""
+    client = openai.AsyncOpenAI()
+
+    response = await client.chat.completions.create(
+        model="gpt-4",
+        messages=[
+            {"role": "system", "content": "Answer based on the context provided."},
+            {"role": "user", "content": f"Context: {context}\n\nQuestion: {question}"}
+        ]
+    )
+
+    return response.choices[0].message.content
+```
+
+## Synchronous vs Asynchronous
+
+Applications can be either synchronous or asynchronous:
+
+### Asynchronous (Recommended)
+
+Define the function with `async def` and `await` any I/O, such as LLM API calls. This is the style used in every example on this page and is the recommended default.
+
+### Synchronous
+
+Plain `def` functions are also supported. They are the simplest choice for pure-Python logic that performs no I/O, but they block while running, so prefer `async def` for anything that calls external services.
+
+## Working with Application Parameters
+
diff --git a/docs/docs/evaluation/evaluation-from-sdk/03-managing-testsets.mdx b/docs/docs/evaluation/evaluation-from-sdk/03-managing-testsets.mdx
new file mode 100644
index 0000000000..fa51db0054
--- /dev/null
+++ b/docs/docs/evaluation/evaluation-from-sdk/03-managing-testsets.mdx
@@ -0,0 +1,282 @@
+---
+title: "Managing Testsets"
+sidebar_label: "Managing Testsets"
+description: "Learn how to create, list, and retrieve testsets using the Agenta SDK"
+sidebar_position: 3
+---
+
+import GoogleColabButton from "@site/src/components/GoogleColabButton";
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+This guide covers how to create, list, and retrieve testsets using the Agenta SDK for evaluation purposes.
+ + + Open in Google Colaboratory + + +## Creating a Testset + +Use `ag.testsets.acreate()` to create a new testset with data: + + + + +```python +import agenta as ag + +# Create a testset with simple data +testset = await ag.testsets.acreate( + data=[ + {"country": "Germany", "capital": "Berlin"}, + {"country": "France", "capital": "Paris"}, + {"country": "Spain", "capital": "Madrid"} + ], + name="Country Capitals", + description="A testset of countries and their capitals", +) + +print(f"Created testset with ID: {testset.id}") +print(f"Name: {testset.name}") +print(f"Slug: {testset.slug}") +``` + + + + +**Parameters:** +- `data`: A list of dictionaries containing your test data. Each dictionary represents one testcase. +- `name`: The name of your testset. +- `description` (optional): A description of what this testset contains. + +**Returns:** A `TestsetRevision` object containing: +- `id`: The UUID of the created testset +- `name`: The testset name +- `slug`: The testset slug +- `data`: The test data (with `testcases` structure) + +**Sample Output:** + +```python +{ + "id": "01963413-3d39-7650-80ce-3ad5d688da6c", + "name": "Country Capitals", + "slug": "3ad5d688da6c", + "description": "A testset of countries and their capitals", + "data": { + "testcases": [ + {"data": {"country": "Germany", "capital": "Berlin"}}, + {"data": {"country": "France", "capital": "Paris"}}, + {"data": {"country": "Spain", "capital": "Madrid"}} + ] + } +} +``` + +:::tip +The `data` parameter accepts a simple list of dictionaries. The SDK automatically converts this to the structured `TestsetRevisionData` format internally. +::: + +## Upserting a Testset + +Use `ag.testsets.aupsert()` to create a new testset or update an existing one if it already exists: + + + + +```python +import agenta as ag + +# Create or update a testset +testset = await ag.testsets.aupsert( + name="Country Capitals", + data=[ + {"country": "Germany", "capital": "Berlin"}, + {"country": "France", "capital": "Paris"}, + {"country": "Spain", "capital": "Madrid"}, + {"country": "Italy", "capital": "Rome"}, + ], +) + +print(f"Upserted testset with ID: {testset.id}") +``` + + + + +**Parameters:** +- `name` (required): The name of your testset. Used to find existing testset. +- `data` (required): A list of dictionaries containing your test data. +- `testset_id` (optional): If provided, updates the testset with this specific ID. + +**Returns:** A `TestsetRevision` object with the created or updated testset. + +:::tip When to use upsert vs create +Use `aupsert()` when you want to update an existing testset with the same name, or create it if it doesn't exist. This is useful in CI/CD pipelines where you want to keep testsets synchronized. Use `acreate()` when you explicitly want to create a new testset every time. +::: + +## Listing Testsets + +To list all testsets in your project, use `ag.testsets.alist()`: + + + + +```python +import agenta as ag + +# List all testsets +testsets = await ag.testsets.alist() + +print(f"Found {len(testsets)} testsets:") +for testset in testsets: + print(f" - {testset.name} (ID: {testset.id})") +``` + + + + +**Parameters:** None required. 
+ +**Returns:** A list of `TestsetRevision` objects, each containing: +- `id`: The testset UUID +- `name`: The testset name +- `slug`: The testset slug +- Additional metadata fields + +**Sample Output:** + +```python +[ + { + "id": "01963413-3d39-7650-80ce-3ad5d688da6c", + "name": "Country Capitals", + "slug": "country-capitals" + }, + { + "id": "01963520-4e4a-8761-91df-4be6e799eb7d", + "name": "Math Problems", + "slug": "math-problems" + } +] +``` + +## Retrieving a Testset by ID + +To retrieve a specific testset by its ID, use `ag.testsets.aretrieve()`: + + + + +```python +import agenta as ag + +# Retrieve a specific testset (using the testset_id from creation) +testset = await ag.testsets.aretrieve(testset_id=testset_id) + +if testset: + print(f"Retrieved testset: {testset.id}") + print(f"Testcases count: {len(testset.data.testcases) if testset.data and testset.data.testcases else 0}") +else: + print("Testset not found") +``` + + + + +**Parameters:** +- `testset_id`: The UUID of the testset to retrieve + +**Returns:** A `TestsetRevision` object (or `None` if not found) containing: +- `id`: The testset revision UUID +- `testset_id`: The parent testset UUID +- `slug`: The revision slug +- `version`: The revision version number +- `data`: The `TestsetRevisionData` with all testcases + +**Sample Output:** + +```python +{ + "id": "01963413-3d39-7650-80ce-3ad5d688da6c", + "testset_id": "01963413-3d39-7650-80ce-3ad5d688da6c", + "slug": "3ad5d688da6c", + "version": "1", + "data": { + "testcases": [ + {"data": {"country": "Germany", "capital": "Berlin"}}, + {"data": {"country": "France", "capital": "Paris"}}, + {"data": {"country": "Spain", "capital": "Madrid"}} + ] + } +} +``` + +:::info +Currently using the legacy testset API. When retrieving a testset, the function returns a `TestsetRevision` object with version "1". In the future, this will support the new versioning system where each update creates a new revision. +::: + +## Retrieving a Testset by Name + +While there's no dedicated function for this, you can easily find a testset by name by filtering the results from `ag.testsets.alist()`: + + + + +```python +import agenta as ag + +async def get_testset_by_name(name: str): + """Helper function to find a testset by name.""" + testsets = await ag.testsets.alist() + + if not testsets: + return None + + for testset in testsets: + if testset.name == name: + return testset + + return None + +# Usage +testset = await get_testset_by_name("Country Capitals") + +if testset: + print(f"Found testset: {testset.name} with ID: {testset.id}") +else: + print("Testset not found") +``` + + + + +:::tip Helper Pattern +This pattern shows how you can implement your own helper functions to filter and find testsets based on custom criteria. You can extend this to search by description, tags, or other metadata fields. +::: + +## Working with Test Data + +Once you have a testset, you can access the testcases within it: + + + + +```python +import agenta as ag + +# Retrieve a testset +testset = await ag.testsets.aretrieve(testset_id=testset_id) + +# Access testcases +if testset and testset.data and testset.data.testcases: + for testcase in testset.data.testcases: + print(f"Testcase: {testcase.data}") + # Use testcase.data in your evaluation +``` + + + + +Each testcase contains a `data` field with the dictionary you provided during creation. You can use these testcases directly in your evaluations. 
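+## Using a Testset in an Evaluation
+
+A retrieved testset plugs into an evaluation by its ID. Here is a minimal sketch, assuming an application and an evaluator decorated with `@ag.application` and `@ag.evaluator` as in the Quick Start guide:
+
+```python
+from agenta.sdk.evaluations import aevaluate
+
+# `get_testset_by_name` is the helper defined above
+testset = await get_testset_by_name("Country Capitals")
+
+if testset:
+    result = await aevaluate(
+        testsets=[testset.id],          # testsets are passed by ID
+        applications=[capital_finder],  # an @ag.application function
+        evaluators=[exact_match],       # an @ag.evaluator function
+    )
+```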
+ diff --git a/docs/docs/evaluation/evaluation-from-sdk/04-running-evaluations.mdx b/docs/docs/evaluation/evaluation-from-sdk/04-running-evaluations.mdx new file mode 100644 index 0000000000..4f2d669994 --- /dev/null +++ b/docs/docs/evaluation/evaluation-from-sdk/04-running-evaluations.mdx @@ -0,0 +1,6 @@ +--- +title: "Running Evaluations" +sidebar_label: "Running Evaluations" +description: "Learn how to run evaluations programmatically from the SDK" +sidebar_position: 2 +--- diff --git a/docs/docs/evaluation/evaluation-from-sdk/_category_.json b/docs/docs/evaluation/evaluation-from-sdk/_category_.json new file mode 100644 index 0000000000..cc211724c0 --- /dev/null +++ b/docs/docs/evaluation/evaluation-from-sdk/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Evaluation from SDK", + "position": 7, + "collapsible": true, + "collapsed": true +} \ No newline at end of file diff --git a/docs/docs/tutorials/cookbooks/01-capture-user-feedback.mdx b/docs/docs/tutorials/cookbooks/01-capture-user-feedback.mdx index 1633d97b31..789dda4e44 100644 --- a/docs/docs/tutorials/cookbooks/01-capture-user-feedback.mdx +++ b/docs/docs/tutorials/cookbooks/01-capture-user-feedback.mdx @@ -18,7 +18,7 @@ In this tutorial, we'll build a simple LLM application and learn how to capture - View this feedback in the Agenta UI - + Open in Google Colaboratory diff --git a/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx b/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx index 5a0bc75291..06d943840e 100644 --- a/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx +++ b/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx @@ -18,7 +18,7 @@ This guide shows you how to set up tracing for a RAG application in Langchain us Tracing allows us to debug effectively complex LLM applications. It allows us to view exact prompts sent and contexts retrieved. - + Open in Google Colaboratory diff --git a/docs/docs/tutorials/sdk/_evaluate-with-SDK.mdx b/docs/docs/tutorials/sdk/_evaluate-with-SDK.mdx index d390b7962e..4fba4d9bc2 100644 --- a/docs/docs/tutorials/sdk/_evaluate-with-SDK.mdx +++ b/docs/docs/tutorials/sdk/_evaluate-with-SDK.mdx @@ -18,10 +18,6 @@ We will do the following: - Retrieve the results of evaluations We assume that you have already created an LLM application and variants in agenta. - - - Open in Google Colaboratory - ### Architectural Overview: In this scenario, evaluations are executed on the Agenta backend. Specifically, Agenta invokes the LLM application for each row in the test set and subsequently processes the output using the designated evaluator. 
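+
+In pseudocode, the backend's evaluation loop looks roughly like this (an illustrative outline; the function names are hypothetical, not Agenta internals):
+
+```python
+# Hypothetical sketch of what the backend does for one evaluation
+for row in test_set:
+    output = invoke_llm_application(app_variant, inputs=row)  # call the app with the row's inputs
+    for evaluator in evaluator_configs:
+        result = run_evaluator(evaluator, inputs=row, output=output)  # score the output
+        store_result(evaluation_id, row, evaluator, result)  # persist for later retrieval
+```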
diff --git a/docs/static/images/changelog/changelog-llm-as-a-judge-response-1.png b/docs/static/images/changelog/changelog-llm-as-a-judge-response-1.png deleted file mode 100644 index 452c1b718a..0000000000 Binary files a/docs/static/images/changelog/changelog-llm-as-a-judge-response-1.png and /dev/null differ diff --git a/docs/static/images/changelog/changelog-llm-as-a-judge-response-2.png b/docs/static/images/changelog/changelog-llm-as-a-judge-response-2.png deleted file mode 100644 index 2a7fa18e42..0000000000 Binary files a/docs/static/images/changelog/changelog-llm-as-a-judge-response-2.png and /dev/null differ diff --git a/examples/jupyter/evaluation/quick-start.ipynb b/examples/jupyter/evaluation/quick-start.ipynb new file mode 100644 index 0000000000..138c4d5f8c --- /dev/null +++ b/examples/jupyter/evaluation/quick-start.ipynb @@ -0,0 +1,406 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agenta SDK Quick Start - Evaluations\n", + "\n", + "This notebook demonstrates how to:\n", + "1. Create a simple application that returns country capitals\n", + "2. Create evaluators to check if the application's output is correct\n", + "3. Run an evaluation to test your application\n", + "\n", + "The entire example takes less than 100 lines of code!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "First, install the Agenta SDK and set up your environment variables:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Install Agenta SDK\n", + "%pip install agenta -q" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from getpass import getpass\n", + "\n", + "# Set your API credentials\n", + "if not os.getenv(\"AGENTA_API_KEY\"):\n", + " os.environ[\"AGENTA_API_KEY\"] = getpass(\"Enter your Agenta API key: \")\n", + "\n", + "if not os.getenv(\"AGENTA_HOST\"):\n", + " os.environ[\"AGENTA_HOST\"] = \"https://cloud.agenta.ai\" # Change for self-hosted\n", + "\n", + "print(\"✅ Environment configured!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Initialize Agenta SDK\n", + "\n", + "Initialize the SDK to connect to the Agenta platform:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import agenta as ag\n", + "\n", + "ag.init()\n", + "\n", + "print(\"✅ Agenta SDK initialized!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1: Define Your Application\n", + "\n", + "An application is any function decorated with `@ag.application`. 
It receives inputs from test data and returns outputs.\n", + "\n", + "Let's create a simple application that returns country capitals:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@ag.application(\n", + " slug=\"capital_finder\",\n", + " name=\"Capital Finder\",\n", + " description=\"Returns the capital of a given country\"\n", + ")\n", + "async def capital_finder(country: str):\n", + " \"\"\"\n", + " A simple application that returns country capitals.\n", + " \n", + " Args:\n", + " country: The country name (from testcase)\n", + " \n", + " Returns:\n", + " The capital city name\n", + " \"\"\"\n", + " capitals = {\n", + " \"Germany\": \"Berlin\",\n", + " \"France\": \"Paris\",\n", + " \"Spain\": \"Madrid\",\n", + " \"Italy\": \"Rome\",\n", + " }\n", + " return capitals.get(country, \"Unknown\")\n", + "\n", + "print(\"✅ Application defined!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2: Create Custom Evaluators\n", + "\n", + "Evaluators check if your application's output is correct. They receive:\n", + "- Fields from your testcase (e.g., `capital`)\n", + "- The application's output (always called `outputs`)\n", + "\n", + "Let's create two evaluators:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@ag.evaluator(\n", + " slug=\"exact_match\",\n", + " name=\"Exact Match Evaluator\",\n", + " description=\"Checks if the output exactly matches the expected answer\"\n", + ")\n", + "async def exact_match(capital: str, outputs: str):\n", + " \"\"\"\n", + " Evaluates if the application's output matches the expected answer.\n", + " \n", + " Args:\n", + " capital: The expected capital (from testcase)\n", + " outputs: What the application returned\n", + " \n", + " Returns:\n", + " Dictionary with score and success flag\n", + " \"\"\"\n", + " is_correct = outputs == capital\n", + " return {\n", + " \"score\": 1.0 if is_correct else 0.0,\n", + " \"success\": is_correct,\n", + " }\n", + "\n", + "\n", + "@ag.evaluator(\n", + " slug=\"case_insensitive_match\",\n", + " name=\"Case Insensitive Match\",\n", + " description=\"Checks if output matches ignoring case\"\n", + ")\n", + "async def case_insensitive_match(capital: str, outputs: str):\n", + " \"\"\"\n", + " Evaluates with case-insensitive comparison.\n", + " \"\"\"\n", + " is_correct = outputs.lower() == capital.lower()\n", + " return {\n", + " \"score\": 1.0 if is_correct else 0.0,\n", + " \"success\": is_correct,\n", + " }\n", + "\n", + "print(\"✅ Evaluators defined!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 3: Use Built-in Evaluators\n", + "\n", + "Agenta provides built-in evaluators like LLM-as-a-judge. 
Let's create one:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from agenta.sdk.workflows import builtin\n", + "\n", + "llm_judge = builtin.auto_ai_critique(\n", + " slug=\"llm_judge\",\n", + " name=\"LLM Judge Evaluator\",\n", + " description=\"Uses an LLM to judge if the answer is correct\",\n", + " correct_answer_key=\"capital\",\n", + " model=\"gpt-4o-mini\",\n", + " prompt_template=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are a geography expert evaluating answers about world capitals.\",\n", + " },\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": (\n", + " \"Expected capital: {{capital}}\\n\"\n", + " \"Student's answer: {{outputs}}\\n\\n\"\n", + " \"Is the student's answer correct?\\n\"\n", + " \"Respond with ONLY a number from 0.0 (wrong) to 1.0 (correct).\\n\"\n", + " \"Nothing else - just the number.\"\n", + " ),\n", + " },\n", + " ],\n", + ")\n", + "\n", + "print(\"✅ LLM judge evaluator created!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 4: Create Test Data\n", + "\n", + "Define test cases as a list of dictionaries:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test_data = [\n", + " {\"country\": \"Germany\", \"capital\": \"Berlin\"},\n", + " {\"country\": \"France\", \"capital\": \"Paris\"},\n", + " {\"country\": \"Spain\", \"capital\": \"Madrid\"},\n", + " {\"country\": \"Italy\", \"capital\": \"Rome\"},\n", + "]\n", + "\n", + "print(f\"✅ Created {len(test_data)} test cases\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 5: Run the Evaluation\n", + "\n", + "Now let's create a testset and run the evaluation!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from agenta.sdk.evaluations import aevaluate\n", + "\n", + "# Create a testset\n", + "print(\"📝 Creating testset...\")\n", + "testset = await ag.testsets.acreate(\n", + " name=\"Country Capitals Quick Start\",\n", + " description=\"Test cases for capital city questions\",\n", + " data=test_data,\n", + ")\n", + "\n", + "if not testset or not testset.id:\n", + " print(\"❌ Failed to create testset\")\n", + "else:\n", + " print(f\"✅ Testset created with ID: {testset.id}\")\n", + " print(f\" Contains {len(test_data)} test cases\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run evaluation with all three evaluators\n", + "print(\"🚀 Running evaluation...\\n\")\n", + "\n", + "result = await aevaluate(\n", + " testsets=[testset.id],\n", + " applications=[capital_finder],\n", + " evaluators=[\n", + " exact_match,\n", + " case_insensitive_match,\n", + " llm_judge,\n", + " ],\n", + ")\n", + "\n", + "print(\"\\n\" + \"=\" * 70)\n", + "print(\"✅ Evaluation Complete!\")\n", + "print(\"=\" * 70)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## View Results\n", + "\n", + "The evaluation results are now available in the Agenta UI! You can:\n", + "\n", + "1. **View detailed results** - See how each test case performed\n", + "2. **Compare evaluators** - See which evaluators flagged which test cases\n", + "3. 
**Analyze metrics** - View aggregated scores and success rates\n", + "\n", + "You can also access results programmatically:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if result and \"run\" in result:\n", + " print(f\"\\n📊 Evaluation Details:\")\n", + " print(f\" Run ID: {result['run'].id}\")\n", + " print(f\" Status: {result['run'].status}\")\n", + " print(f\"\\n🔗 View results in the Agenta UI\")\n", + "else:\n", + " print(\"No result data available\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Understanding the Data Flow\n", + "\n", + "When you run an evaluation, here's what happens:\n", + "\n", + "1. **Testcase data flows to the application**\n", + " - Input: `{\"country\": \"Germany\", \"capital\": \"Berlin\"}`\n", + " - Application receives: `country=\"Germany\"`\n", + " - Application returns: `\"Berlin\"`\n", + "\n", + "2. **Both testcase data and application output flow to evaluators**\n", + " - Evaluator receives: `capital=\"Berlin\"` (from testcase)\n", + " - Evaluator receives: `outputs=\"Berlin\"` (from application)\n", + " - Evaluator compares and returns: `{\"score\": 1.0, \"success\": True}`\n", + "\n", + "3. **Results are stored in Agenta**\n", + " - View in web interface\n", + " - Access programmatically" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps\n", + "\n", + "Now that you've created your first evaluation, explore:\n", + "\n", + "- **[Configuring Evaluators](/evaluation/evaluation-from-sdk/configuring-evaluators)** - Create custom scoring logic\n", + "- **[Managing Testsets](/evaluation/evaluation-from-sdk/managing-testsets)** - Work with test data\n", + "- **[Running Evaluations](/evaluation/evaluation-from-sdk/running-evaluations)** - Advanced evaluation patterns\n", + "\n", + "## Summary\n", + "\n", + "In this notebook, you learned how to:\n", + "\n", + "✅ Define an application with `@ag.application` \n", + "✅ Create custom evaluators with `@ag.evaluator` \n", + "✅ Use built-in evaluators like LLM-as-a-judge \n", + "✅ Create testsets with `ag.testsets.acreate()` \n", + "✅ Run evaluations with `aevaluate()` \n", + "✅ View results in the Agenta UI \n", + "\n", + "Happy evaluating! 🎉" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.0" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}
\n", - "\n", - "We will do the following:\n", - "\n", - "- Create a testset\n", - "- Create and configure an evaluator\n", - "- Run an evaluation\n", - "- Retrieve the results of evaluations\n", - "\n", - "We assume that you have already created an LLM application and variants in agenta. \n", - "\n", - "\n", - "### Architectural Overview:\n", - "In this scenario, evaluations are executed on the Agenta backend. Specifically, Agenta invokes the LLM application for each row in the testset and subsequently processes the output using the designated evaluator. \n", - "This operation is managed through Celery tasks. The interactions with the LLM application are asynchronous, batched, and include retry mechanisms. Additionally, the batching configuration can be adjusted to avoid exceeding the rate limits imposed by the LLM provider.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setup " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "! pip install -U agenta" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Configuration Setup\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Assuming an application has already been created through the user interface, you will need to obtain the application ID.\n", - "# In this example we will use the default template single_prompt which has the prompt \"Determine the capital of {country}\"\n", - "\n", - "# You can find the application ID in the URL. For example, in the URL https://cloud.agenta.ai/apps/666dde95962bbaffdb0072b5/playground?variant=app.default, the application ID is `666dde95962bbaffdb0072b5`.\n", - "from agenta.client.client import AgentaApi\n", - "\n", - "# Let's list the applications\n", - "client.apps.list_apps()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "app_id = \"667d8cfad1812781f7e375d9\"\n", - "\n", - "# You can create the API key under the settings page. 
If you are using the OSS version, you should keep this as an empty string\n", - "api_key = \"EUqJGOUu.xxxx\"\n", - "\n", - "# Host.\n", - "host = \"https://cloud.agenta.ai\"\n", - "\n", - "# Initialize the client\n", - "\n", - "client = AgentaApi(base_url=host + \"/api\", api_key=api_key)\n", - "\n", - "# Let's list the applications\n", - "client.apps.list_apps()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create a testset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from agenta.client.types.new_testset import NewTestset\n", - "\n", - "csvdata = [\n", - " {\"country\": \"france\", \"capital\": \"Paris\"},\n", - " {\"country\": \"Germany\", \"capital\": \"paris\"},\n", - "]\n", - "\n", - "response = client.testsets.create_testset(\n", - " request=NewTestset(name=\"testset\", csvdata=csvdata)\n", - ")\n", - "testset_id = response.id\n", - "\n", - "# let's now update it\n", - "\n", - "csvdata = [\n", - " {\"country\": \"france\", \"capital\": \"Paris\"},\n", - " {\"country\": \"Germany\", \"capital\": \"Berlin\"},\n", - "]\n", - "\n", - "client.testsets.update_testset(\n", - " testset_id=testset_id, request=NewTestset(name=\"testset\", csvdata=csvdata)\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Create evaluators" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create an evaluator that performs an exact match comparison on the 'capital' column\n", - "# You can find the list of evaluator keys and their configurations in https://github.com/Agenta-AI/agenta/blob/main/agenta-backend/agenta_backend/resources/evaluators/evaluators.py\n", - "response = client.evaluators.create_new_evaluator_config(\n", - " app_id=app_id,\n", - " name=\"capital_evaluator\",\n", - " evaluator_key=\"auto_exact_match\",\n", - " settings_values={\"correct_answer_key\": \"capital\"},\n", - ")\n", - "exact_match_eval_id = response.id\n", - "\n", - "code_snippet = \"\"\"\n", - "from typing import Dict\n", - "\n", - "def evaluate(\n", - " app_params: Dict[str, str],\n", - " inputs: Dict[str, str],\n", - " output: str, # output of the llm app\n", - " datapoint: Dict[str, str] # contains the testset row \n", - ") -> float:\n", - " if output and output[0].isupper():\n", - " return 1.0\n", - " else:\n", - " return 0.0\n", - "\"\"\"\n", - "\n", - "response = client.evaluators.create_new_evaluator_config(\n", - " app_id=app_id,\n", - " name=\"capital_letter_evaluator\",\n", - " evaluator_key=\"auto_custom_code_run\",\n", - " settings_values={\"code\": code_snippet},\n", - ")\n", - "letter_match_eval_id = response.id" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# get list of all evaluators\n", - "client.evaluators.get_evaluator_configs(app_id=app_id)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Run an evaluation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "response = client.apps.list_app_variants(app_id=app_id)\n", - "print(response)\n", - "myvariant_id = response[0].variant_id" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Run an evaluation\n", - "from agenta.client.types.llm_run_rate_limit import LlmRunRateLimit\n", - "\n", - "response = client.evaluations.create_evaluation(\n", - " app_id=app_id,\n", - 
variant_ids=[myvariant_id],\n", - " testset_id=testset_id,\n", - " evaluators_configs=[exact_match_eval_id, letter_match_eval_id],\n", - " rate_limit=LlmRunRateLimit(\n", - " batch_size=10, # number of rows to call in parallel\n", - " max_retries=3, # max number of times to retry a failed llm call\n", - " retry_delay=2, # delay before retrying a failed llm call\n", - " delay_between_batches=5, # delay between batches\n", - " ),\n", - ")\n", - "print(response)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# check the status\n", - "client.evaluations.fetch_evaluation_status(\"667d98fbd1812781f7e3761a\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# fetch the overall results\n", - "response = client.evaluations.fetch_evaluation_results(\"667d98fbd1812781f7e3761a\")\n", - "\n", - "results = [\n", - " (evaluator[\"evaluator_config\"][\"name\"], evaluator[\"result\"])\n", - " for evaluator in response[\"results\"]\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# fetch the detailed results\n", - "client.evaluations.fetch_evaluation_scenarios(\n", - " evaluations_ids=\"667d98fbd1812781f7e3761a\"\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/jupyter/capture_user_feedback.ipynb b/examples/jupyter/observability/capture_user_feedback.ipynb similarity index 100% rename from examples/jupyter/capture_user_feedback.ipynb rename to examples/jupyter/observability/capture_user_feedback.ipynb diff --git a/examples/jupyter/observability_langchain.ipynb b/examples/jupyter/observability/observability_langchain.ipynb similarity index 100% rename from examples/jupyter/observability_langchain.ipynb rename to examples/jupyter/observability/observability_langchain.ipynb diff --git a/hosting/docker-compose/ee/docker-compose.dev.yml b/hosting/docker-compose/ee/docker-compose.dev.yml index 09861d7b20..8cccfdc2b9 100644 --- a/hosting/docker-compose/ee/docker-compose.dev.yml +++ b/hosting/docker-compose/ee/docker-compose.dev.yml @@ -122,7 +122,6 @@ services: volumes: - ../../../api/ee/src/crons/meters.sh:/meters.sh - - ../../../api/oss/src/crons/queries.sh:/queries.sh env_file: - ${ENV_FILE:-./.env.ee.dev} diff --git a/hosting/docker-compose/oss/docker-compose.dev.yml b/hosting/docker-compose/oss/docker-compose.dev.yml index 53c0eaef52..faf444c24a 100644 --- a/hosting/docker-compose/oss/docker-compose.dev.yml +++ b/hosting/docker-compose/oss/docker-compose.dev.yml @@ -103,30 +103,6 @@ services: command: > watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive -- celery -A entrypoint.celery_app worker --concurrency=1 --max-tasks-per-child=1 --prefetch-multiplier=1 - cron: - image: agenta-oss-dev-api:latest - - volumes: - # - - ../../../api/oss/src/crons/queries.sh:/queries.sh - - env_file: - - ${ENV_FILE:-./.env.oss.dev} - - depends_on: - - postgres - - api - - extra_hosts: - - "host.docker.internal:host-gateway" - - restart: always - - networks: - - agenta-network - - 
command: cron -f - alembic: build: context: ../../../api diff --git a/hosting/docker-compose/oss/docker-compose.gh.ssl.yml b/hosting/docker-compose/oss/docker-compose.gh.ssl.yml index c7dbd5c730..ab0590b990 100644 --- a/hosting/docker-compose/oss/docker-compose.gh.ssl.yml +++ b/hosting/docker-compose/oss/docker-compose.gh.ssl.yml @@ -104,26 +104,6 @@ services: condition: service_healthy restart: always - cron: - image: agenta-oss-dev-api:latest - - env_file: - - ${ENV_FILE:-./.env.oss.dev} - - depends_on: - - postgres - - api - - extra_hosts: - - "host.docker.internal:host-gateway" - - restart: always - - networks: - - agenta-network - - command: cron -f - alembic: build: context: ../../../api diff --git a/hosting/docker-compose/oss/docker-compose.gh.yml b/hosting/docker-compose/oss/docker-compose.gh.yml index e079346a68..cc2e6612a3 100644 --- a/hosting/docker-compose/oss/docker-compose.gh.yml +++ b/hosting/docker-compose/oss/docker-compose.gh.yml @@ -80,26 +80,6 @@ services: - redis restart: always - cron: - image: agenta-oss-dev-api:latest - - env_file: - - ${ENV_FILE:-./.env.oss.dev} - - depends_on: - - postgres - - api - - extra_hosts: - - "host.docker.internal:host-gateway" - - restart: always - - networks: - - agenta-network - - command: cron -f - alembic: build: context: ../../../api diff --git a/sdk/agenta/client/backend/api_keys/client.py b/sdk/agenta/client/backend/api_keys/client.py index b6cfa2e3c9..167b48913e 100644 --- a/sdk/agenta/client/backend/api_keys/client.py +++ b/sdk/agenta/client/backend/api_keys/client.py @@ -95,7 +95,7 @@ def delete_api_key( self, key_prefix: str, *, - request_options: typing.Optional[RequestOptions] = None, + request_options: typing.Optional[RequestOptions] = None ) -> typing.Dict[str, typing.Optional[typing.Any]]: """ Delete an API key with the given key prefix for the authenticated user. @@ -246,7 +246,7 @@ async def delete_api_key( self, key_prefix: str, *, - request_options: typing.Optional[RequestOptions] = None, + request_options: typing.Optional[RequestOptions] = None ) -> typing.Dict[str, typing.Optional[typing.Any]]: """ Delete an API key with the given key prefix for the authenticated user. 
diff --git a/sdk/agenta/client/backend/billing/client.py b/sdk/agenta/client/backend/billing/client.py index b7881f3122..c4040b5cdc 100644 --- a/sdk/agenta/client/backend/billing/client.py +++ b/sdk/agenta/client/backend/billing/client.py @@ -80,7 +80,7 @@ def create_checkout( *, plan: Plan, success_url: str, - request_options: typing.Optional[RequestOptions] = None, + request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: """ Parameters @@ -343,7 +343,7 @@ async def create_checkout( *, plan: Plan, success_url: str, - request_options: typing.Optional[RequestOptions] = None, + request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: """ Parameters diff --git a/sdk/agenta/client/backend/billing/raw_client.py b/sdk/agenta/client/backend/billing/raw_client.py index f937046f78..055805a444 100644 --- a/sdk/agenta/client/backend/billing/raw_client.py +++ b/sdk/agenta/client/backend/billing/raw_client.py @@ -110,7 +110,7 @@ def create_checkout( *, plan: Plan, success_url: str, - request_options: typing.Optional[RequestOptions] = None, + request_options: typing.Optional[RequestOptions] = None ) -> HttpResponse[typing.Optional[typing.Any]]: """ Parameters @@ -506,7 +506,7 @@ async def create_checkout( *, plan: Plan, success_url: str, - request_options: typing.Optional[RequestOptions] = None, + request_options: typing.Optional[RequestOptions] = None ) -> AsyncHttpResponse[typing.Optional[typing.Any]]: """ Parameters diff --git a/sdk/agenta/client/backend/core/client_wrapper.py b/sdk/agenta/client/backend/core/client_wrapper.py index bd3c9a874b..84887b5aaa 100644 --- a/sdk/agenta/client/backend/core/client_wrapper.py +++ b/sdk/agenta/client/backend/core/client_wrapper.py @@ -35,7 +35,7 @@ def __init__( api_key: str, base_url: str, timeout: typing.Optional[float] = None, - httpx_client: httpx.Client, + httpx_client: httpx.Client ): super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) self.httpx_client = HttpClient( @@ -53,7 +53,7 @@ def __init__( api_key: str, base_url: str, timeout: typing.Optional[float] = None, - httpx_client: httpx.AsyncClient, + httpx_client: httpx.AsyncClient ): super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) self.httpx_client = AsyncHttpClient( diff --git a/sdk/agenta/client/backend/core/file.py b/sdk/agenta/client/backend/core/file.py index 3467175cb7..c6d11fc708 100644 --- a/sdk/agenta/client/backend/core/file.py +++ b/sdk/agenta/client/backend/core/file.py @@ -53,9 +53,7 @@ def with_content_type(*, file: File, default_content_type: str) -> File: filename, content = cast(Tuple[Optional[str], FileContent], file) # type: ignore return (filename, content, default_content_type) elif len(file) == 3: - filename, content, file_content_type = cast( - Tuple[Optional[str], FileContent, Optional[str]], file - ) # type: ignore + filename, content, file_content_type = cast(Tuple[Optional[str], FileContent, Optional[str]], file) # type: ignore out_content_type = file_content_type or default_content_type return (filename, content, out_content_type) elif len(file) == 4: diff --git a/sdk/agenta/client/backend/core/pydantic_utilities.py b/sdk/agenta/client/backend/core/pydantic_utilities.py index e498155179..9035a193ec 100644 --- a/sdk/agenta/client/backend/core/pydantic_utilities.py +++ b/sdk/agenta/client/backend/core/pydantic_utilities.py @@ -78,9 +78,7 @@ class UniversalBaseModel(pydantic.BaseModel): ) @pydantic.model_serializer(mode="wrap", when_used="json") # type: 
ignore[attr-defined] - def serialize_model( - self, handler: pydantic.SerializerFunctionWrapHandler - ) -> Any: # type: ignore[name-defined] + def serialize_model(self, handler: pydantic.SerializerFunctionWrapHandler) -> Any: # type: ignore[name-defined] serialized = handler(self) data = { k: serialize_datetime(v) if isinstance(v, dt.datetime) else v @@ -262,10 +260,7 @@ def universal_root_validator( ) -> Callable[[AnyCallable], AnyCallable]: def decorator(func: AnyCallable) -> AnyCallable: if IS_PYDANTIC_V2: - return cast( - AnyCallable, - pydantic.model_validator(mode="before" if pre else "after")(func), - ) # type: ignore[attr-defined] + return cast(AnyCallable, pydantic.model_validator(mode="before" if pre else "after")(func)) # type: ignore[attr-defined] return cast(AnyCallable, pydantic.root_validator(pre=pre)(func)) # type: ignore[call-overload] return decorator @@ -276,12 +271,7 @@ def universal_field_validator( ) -> Callable[[AnyCallable], AnyCallable]: def decorator(func: AnyCallable) -> AnyCallable: if IS_PYDANTIC_V2: - return cast( - AnyCallable, - pydantic.field_validator(field_name, mode="before" if pre else "after")( - func - ), - ) # type: ignore[attr-defined] + return cast(AnyCallable, pydantic.field_validator(field_name, mode="before" if pre else "after")(func)) # type: ignore[attr-defined] return cast(AnyCallable, pydantic.validator(field_name, pre=pre)(func)) return decorator diff --git a/sdk/agenta/client/backend/types/account_request.py b/sdk/agenta/client/backend/types/account_request.py index d1bac62bd1..1c133345f3 100644 --- a/sdk/agenta/client/backend/types/account_request.py +++ b/sdk/agenta/client/backend/types/account_request.py @@ -15,9 +15,7 @@ class AccountRequest(UniversalBaseModel): subscription: typing.Optional[LegacySubscriptionRequest] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/account_response.py b/sdk/agenta/client/backend/types/account_response.py index 2894d7acf4..6cb5ecbcd6 100644 --- a/sdk/agenta/client/backend/types/account_response.py +++ b/sdk/agenta/client/backend/types/account_response.py @@ -13,9 +13,7 @@ class AccountResponse(UniversalBaseModel): scopes: typing.Optional[typing.List[LegacyScopesResponse]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/agenta_node_dto.py b/sdk/agenta/client/backend/types/agenta_node_dto.py index 7992f8fe0a..507b46f080 100644 --- a/sdk/agenta/client/backend/types/agenta_node_dto.py +++ b/sdk/agenta/client/backend/types/agenta_node_dto.py @@ -39,9 +39,7 @@ class AgentaNodeDto(UniversalBaseModel): ] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/agenta_nodes_response.py 
b/sdk/agenta/client/backend/types/agenta_nodes_response.py index a07dfb596f..77494ee114 100644 --- a/sdk/agenta/client/backend/types/agenta_nodes_response.py +++ b/sdk/agenta/client/backend/types/agenta_nodes_response.py @@ -17,9 +17,7 @@ class AgentaNodesResponse(UniversalBaseModel): count: typing.Optional[int] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/agenta_root_dto.py b/sdk/agenta/client/backend/types/agenta_root_dto.py index 327d4de5db..5490cf802d 100644 --- a/sdk/agenta/client/backend/types/agenta_root_dto.py +++ b/sdk/agenta/client/backend/types/agenta_root_dto.py @@ -19,9 +19,7 @@ class AgentaRootDto(UniversalBaseModel): trees: typing.List[AgentaTreeDto] if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/agenta_roots_response.py b/sdk/agenta/client/backend/types/agenta_roots_response.py index 3f24b51c8f..9d6b877368 100644 --- a/sdk/agenta/client/backend/types/agenta_roots_response.py +++ b/sdk/agenta/client/backend/types/agenta_roots_response.py @@ -19,9 +19,7 @@ class AgentaRootsResponse(UniversalBaseModel): count: typing.Optional[int] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/agenta_tree_dto.py b/sdk/agenta/client/backend/types/agenta_tree_dto.py index 1d741e6b6d..6ee850955b 100644 --- a/sdk/agenta/client/backend/types/agenta_tree_dto.py +++ b/sdk/agenta/client/backend/types/agenta_tree_dto.py @@ -19,9 +19,7 @@ class AgentaTreeDto(UniversalBaseModel): nodes: typing.List[AgentaNodeDto] if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/agenta_trees_response.py b/sdk/agenta/client/backend/types/agenta_trees_response.py index 974159fee1..d3acaf17d3 100644 --- a/sdk/agenta/client/backend/types/agenta_trees_response.py +++ b/sdk/agenta/client/backend/types/agenta_trees_response.py @@ -19,9 +19,7 @@ class AgentaTreesResponse(UniversalBaseModel): count: typing.Optional[int] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/aggregated_result.py b/sdk/agenta/client/backend/types/aggregated_result.py index 3d6ed569aa..5318a7c568 100644 --- 
a/sdk/agenta/client/backend/types/aggregated_result.py +++ b/sdk/agenta/client/backend/types/aggregated_result.py @@ -13,9 +13,7 @@ class AggregatedResult(UniversalBaseModel): result: Result if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/analytics_response.py b/sdk/agenta/client/backend/types/analytics_response.py index ae779bc85f..f3002caf54 100644 --- a/sdk/agenta/client/backend/types/analytics_response.py +++ b/sdk/agenta/client/backend/types/analytics_response.py @@ -13,9 +13,7 @@ class AnalyticsResponse(UniversalBaseModel): buckets: typing.List[BucketDto] if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation.py b/sdk/agenta/client/backend/types/annotation.py index 0cc814d1f6..a3247c4b4e 100644 --- a/sdk/agenta/client/backend/types/annotation.py +++ b/sdk/agenta/client/backend/types/annotation.py @@ -29,16 +29,14 @@ class Annotation(UniversalBaseModel): kind: typing.Optional[AnnotationKind] = None source: typing.Optional[AnnotationSource] = None data: typing.Dict[str, typing.Optional["FullJsonOutput"]] - metadata: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = ( - None - ) + metadata: typing.Optional[ + typing.Dict[str, typing.Optional["FullJsonOutput"]] + ] = None references: AnnotationReferences links: typing.Dict[str, AnnotationLink] if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_create.py b/sdk/agenta/client/backend/types/annotation_create.py index 3065c3a914..deb2cb42e7 100644 --- a/sdk/agenta/client/backend/types/annotation_create.py +++ b/sdk/agenta/client/backend/types/annotation_create.py @@ -25,9 +25,7 @@ class AnnotationCreate(UniversalBaseModel): links: typing.Dict[str, AnnotationLink] if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_edit.py b/sdk/agenta/client/backend/types/annotation_edit.py index a2783bedcc..2e5a99b974 100644 --- a/sdk/agenta/client/backend/types/annotation_edit.py +++ b/sdk/agenta/client/backend/types/annotation_edit.py @@ -17,9 +17,7 @@ class AnnotationEdit(UniversalBaseModel): meta: typing.Optional[typing.Dict[str, typing.Optional["FullJsonInput"]]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", 
frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_link.py b/sdk/agenta/client/backend/types/annotation_link.py index bbc8ad0acb..8643681c52 100644 --- a/sdk/agenta/client/backend/types/annotation_link.py +++ b/sdk/agenta/client/backend/types/annotation_link.py @@ -11,9 +11,7 @@ class AnnotationLink(UniversalBaseModel): trace_id: str if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_link_response.py b/sdk/agenta/client/backend/types/annotation_link_response.py index 9821a81825..b6bc1cade3 100644 --- a/sdk/agenta/client/backend/types/annotation_link_response.py +++ b/sdk/agenta/client/backend/types/annotation_link_response.py @@ -11,9 +11,7 @@ class AnnotationLinkResponse(UniversalBaseModel): annotation: AnnotationLink if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_query.py b/sdk/agenta/client/backend/types/annotation_query.py index 44c339afd5..ac5033a31e 100644 --- a/sdk/agenta/client/backend/types/annotation_query.py +++ b/sdk/agenta/client/backend/types/annotation_query.py @@ -26,9 +26,7 @@ class AnnotationQuery(UniversalBaseModel): links: typing.Optional[typing.Dict[str, typing.Optional[AnnotationLink]]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_query_request.py b/sdk/agenta/client/backend/types/annotation_query_request.py index d2afd6aeac..85fff1453e 100644 --- a/sdk/agenta/client/backend/types/annotation_query_request.py +++ b/sdk/agenta/client/backend/types/annotation_query_request.py @@ -11,9 +11,7 @@ class AnnotationQueryRequest(UniversalBaseModel): annotation: typing.Optional[AnnotationQuery] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_reference.py b/sdk/agenta/client/backend/types/annotation_reference.py index af194702ef..e3a8899ef7 100644 --- a/sdk/agenta/client/backend/types/annotation_reference.py +++ b/sdk/agenta/client/backend/types/annotation_reference.py @@ -12,9 +12,7 @@ class AnnotationReference(UniversalBaseModel): version: typing.Optional[str] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 
else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_references.py b/sdk/agenta/client/backend/types/annotation_references.py index 79255ade37..f580cc93eb 100644 --- a/sdk/agenta/client/backend/types/annotation_references.py +++ b/sdk/agenta/client/backend/types/annotation_references.py @@ -13,9 +13,7 @@ class AnnotationReferences(UniversalBaseModel): testcase: typing.Optional[AnnotationReference] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotation_response.py b/sdk/agenta/client/backend/types/annotation_response.py index 84393ac781..e42904a404 100644 --- a/sdk/agenta/client/backend/types/annotation_response.py +++ b/sdk/agenta/client/backend/types/annotation_response.py @@ -11,9 +11,7 @@ class AnnotationResponse(UniversalBaseModel): annotation: typing.Optional[Annotation] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/annotations_response.py b/sdk/agenta/client/backend/types/annotations_response.py index 6285bb00e6..4ca6ac4504 100644 --- a/sdk/agenta/client/backend/types/annotations_response.py +++ b/sdk/agenta/client/backend/types/annotations_response.py @@ -15,9 +15,7 @@ class AnnotationsResponse(UniversalBaseModel): annotations: typing.Optional[typing.List[Annotation]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/app.py b/sdk/agenta/client/backend/types/app.py index acf3089c5c..1fdbeee2d9 100644 --- a/sdk/agenta/client/backend/types/app.py +++ b/sdk/agenta/client/backend/types/app.py @@ -13,9 +13,7 @@ class App(UniversalBaseModel): updated_at: str if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/app_variant_response.py b/sdk/agenta/client/backend/types/app_variant_response.py index 63f116ada8..d6fe10e48b 100644 --- a/sdk/agenta/client/backend/types/app_variant_response.py +++ b/sdk/agenta/client/backend/types/app_variant_response.py @@ -24,9 +24,7 @@ class AppVariantResponse(UniversalBaseModel): workspace_id: typing.Optional[str] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/app_variant_revision.py 
b/sdk/agenta/client/backend/types/app_variant_revision.py index 1c3312185d..709d9f8f2c 100644 --- a/sdk/agenta/client/backend/types/app_variant_revision.py +++ b/sdk/agenta/client/backend/types/app_variant_revision.py @@ -16,9 +16,7 @@ class AppVariantRevision(UniversalBaseModel): commit_message: typing.Optional[str] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/artifact.py b/sdk/agenta/client/backend/types/artifact.py index 35c235fd41..d6fa55c584 100644 --- a/sdk/agenta/client/backend/types/artifact.py +++ b/sdk/agenta/client/backend/types/artifact.py @@ -25,14 +25,12 @@ class Artifact(UniversalBaseModel): slug: typing.Optional[str] = None id: typing.Optional[str] = None flags: typing.Optional[typing.Dict[str, typing.Optional[bool]]] = None - metadata: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = ( - None - ) + metadata: typing.Optional[ + typing.Dict[str, typing.Optional["FullJsonOutput"]] + ] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/base_output.py b/sdk/agenta/client/backend/types/base_output.py index 7c8e8dd2c2..cc04f56e96 100644 --- a/sdk/agenta/client/backend/types/base_output.py +++ b/sdk/agenta/client/backend/types/base_output.py @@ -11,9 +11,7 @@ class BaseOutput(UniversalBaseModel): base_name: str if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/body_fetch_workflow_revision.py b/sdk/agenta/client/backend/types/body_fetch_workflow_revision.py index 5957cdb08e..ef14df0220 100644 --- a/sdk/agenta/client/backend/types/body_fetch_workflow_revision.py +++ b/sdk/agenta/client/backend/types/body_fetch_workflow_revision.py @@ -12,9 +12,7 @@ class BodyFetchWorkflowRevision(UniversalBaseModel): revision_ref: typing.Optional[Reference] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/body_import_testset.py b/sdk/agenta/client/backend/types/body_import_testset.py index f507f40fc0..11f4de5d00 100644 --- a/sdk/agenta/client/backend/types/body_import_testset.py +++ b/sdk/agenta/client/backend/types/body_import_testset.py @@ -11,9 +11,7 @@ class BodyImportTestset(UniversalBaseModel): testset_name: typing.Optional[str] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = 
pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/bucket_dto.py b/sdk/agenta/client/backend/types/bucket_dto.py index c274787aaf..663d00d6a4 100644 --- a/sdk/agenta/client/backend/types/bucket_dto.py +++ b/sdk/agenta/client/backend/types/bucket_dto.py @@ -15,9 +15,7 @@ class BucketDto(UniversalBaseModel): error: MetricsDto if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/collect_status_response.py b/sdk/agenta/client/backend/types/collect_status_response.py index 377c566ef2..10494451ee 100644 --- a/sdk/agenta/client/backend/types/collect_status_response.py +++ b/sdk/agenta/client/backend/types/collect_status_response.py @@ -11,9 +11,7 @@ class CollectStatusResponse(UniversalBaseModel): status: str if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/config_db.py b/sdk/agenta/client/backend/types/config_db.py index 0c0a348818..ddf00f6ac2 100644 --- a/sdk/agenta/client/backend/types/config_db.py +++ b/sdk/agenta/client/backend/types/config_db.py @@ -11,9 +11,7 @@ class ConfigDb(UniversalBaseModel): parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/config_dto.py b/sdk/agenta/client/backend/types/config_dto.py index be8bb9e0b6..5a9a9e0233 100644 --- a/sdk/agenta/client/backend/types/config_dto.py +++ b/sdk/agenta/client/backend/types/config_dto.py @@ -21,9 +21,7 @@ class ConfigDto(UniversalBaseModel): environment_lifecycle: typing.Optional[LifecycleDto] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/config_response_model.py b/sdk/agenta/client/backend/types/config_response_model.py index 166cac71b4..e8946ddfe6 100644 --- a/sdk/agenta/client/backend/types/config_response_model.py +++ b/sdk/agenta/client/backend/types/config_response_model.py @@ -21,9 +21,7 @@ class ConfigResponseModel(UniversalBaseModel): environment_lifecycle: typing.Optional[LifecycleDto] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git 
a/sdk/agenta/client/backend/types/correct_answer.py b/sdk/agenta/client/backend/types/correct_answer.py index cd33710c88..774b58a351 100644 --- a/sdk/agenta/client/backend/types/correct_answer.py +++ b/sdk/agenta/client/backend/types/correct_answer.py @@ -11,9 +11,7 @@ class CorrectAnswer(UniversalBaseModel): value: str if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/create_app_output.py b/sdk/agenta/client/backend/types/create_app_output.py index 22d105444b..dc7ccf44cf 100644 --- a/sdk/agenta/client/backend/types/create_app_output.py +++ b/sdk/agenta/client/backend/types/create_app_output.py @@ -11,9 +11,7 @@ class CreateAppOutput(UniversalBaseModel): app_name: str if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/custom_model_settings_dto.py b/sdk/agenta/client/backend/types/custom_model_settings_dto.py index 5f47561fc7..11542da489 100644 --- a/sdk/agenta/client/backend/types/custom_model_settings_dto.py +++ b/sdk/agenta/client/backend/types/custom_model_settings_dto.py @@ -11,9 +11,7 @@ class CustomModelSettingsDto(UniversalBaseModel): extras: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/custom_provider_dto.py b/sdk/agenta/client/backend/types/custom_provider_dto.py index 51b2d8cf71..9fe91614d3 100644 --- a/sdk/agenta/client/backend/types/custom_provider_dto.py +++ b/sdk/agenta/client/backend/types/custom_provider_dto.py @@ -17,9 +17,7 @@ class CustomProviderDto(UniversalBaseModel): model_keys: typing.Optional[typing.List[str]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/custom_provider_settings_dto.py b/sdk/agenta/client/backend/types/custom_provider_settings_dto.py index 16f5cb38c3..6742ce5647 100644 --- a/sdk/agenta/client/backend/types/custom_provider_settings_dto.py +++ b/sdk/agenta/client/backend/types/custom_provider_settings_dto.py @@ -13,9 +13,7 @@ class CustomProviderSettingsDto(UniversalBaseModel): extras: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git 
a/sdk/agenta/client/backend/types/delete_evaluation.py b/sdk/agenta/client/backend/types/delete_evaluation.py
index f1791f8119..ee5a46656d 100644
--- a/sdk/agenta/client/backend/types/delete_evaluation.py
+++ b/sdk/agenta/client/backend/types/delete_evaluation.py
@@ -10,9 +10,7 @@ class DeleteEvaluation(UniversalBaseModel):
     evaluations_ids: typing.List[str]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/environment_output.py b/sdk/agenta/client/backend/types/environment_output.py
index c0fcf595e5..cb846cdd7d 100644
--- a/sdk/agenta/client/backend/types/environment_output.py
+++ b/sdk/agenta/client/backend/types/environment_output.py
@@ -18,9 +18,7 @@ class EnvironmentOutput(UniversalBaseModel):
     workspace_id: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/environment_output_extended.py b/sdk/agenta/client/backend/types/environment_output_extended.py
index bb7aae3db7..5058b3e323 100644
--- a/sdk/agenta/client/backend/types/environment_output_extended.py
+++ b/sdk/agenta/client/backend/types/environment_output_extended.py
@@ -20,9 +20,7 @@ class EnvironmentOutputExtended(UniversalBaseModel):
     workspace_id: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/environment_revision.py b/sdk/agenta/client/backend/types/environment_revision.py
index 7dca7e6baf..885f714679 100644
--- a/sdk/agenta/client/backend/types/environment_revision.py
+++ b/sdk/agenta/client/backend/types/environment_revision.py
@@ -17,9 +17,7 @@ class EnvironmentRevision(UniversalBaseModel):
     deployed_variant_name: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/error.py b/sdk/agenta/client/backend/types/error.py
index ebf7449865..faebb12bed 100644
--- a/sdk/agenta/client/backend/types/error.py
+++ b/sdk/agenta/client/backend/types/error.py
@@ -11,9 +11,7 @@ class Error(UniversalBaseModel):
     stacktrace: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluation.py b/sdk/agenta/client/backend/types/evaluation.py
index 4b6aafdf2e..b811b63a7d 100644
--- a/sdk/agenta/client/backend/types/evaluation.py
+++ b/sdk/agenta/client/backend/types/evaluation.py
@@ -28,9 +28,7 @@ class Evaluation(UniversalBaseModel):
     updated_at: dt.datetime
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluation_scenario.py b/sdk/agenta/client/backend/types/evaluation_scenario.py
index bd6cbc5b0e..a4386cba17 100644
--- a/sdk/agenta/client/backend/types/evaluation_scenario.py
+++ b/sdk/agenta/client/backend/types/evaluation_scenario.py
@@ -21,9 +21,7 @@ class EvaluationScenario(UniversalBaseModel):
     results: typing.List[EvaluationScenarioResult]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluation_scenario_input.py b/sdk/agenta/client/backend/types/evaluation_scenario_input.py
index fc9cba6320..6839f0009e 100644
--- a/sdk/agenta/client/backend/types/evaluation_scenario_input.py
+++ b/sdk/agenta/client/backend/types/evaluation_scenario_input.py
@@ -12,9 +12,7 @@ class EvaluationScenarioInput(UniversalBaseModel):
     value: typing.Optional[typing.Any] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluation_scenario_output.py b/sdk/agenta/client/backend/types/evaluation_scenario_output.py
index 28a786c8c3..c8dc74923a 100644
--- a/sdk/agenta/client/backend/types/evaluation_scenario_output.py
+++ b/sdk/agenta/client/backend/types/evaluation_scenario_output.py
@@ -13,9 +13,7 @@ class EvaluationScenarioOutput(UniversalBaseModel):
     latency: typing.Optional[float] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluation_scenario_result.py b/sdk/agenta/client/backend/types/evaluation_scenario_result.py
index b313435a92..ec1a1b1016 100644
--- a/sdk/agenta/client/backend/types/evaluation_scenario_result.py
+++ b/sdk/agenta/client/backend/types/evaluation_scenario_result.py
@@ -12,9 +12,7 @@ class EvaluationScenarioResult(UniversalBaseModel):
     result: Result
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluator.py b/sdk/agenta/client/backend/types/evaluator.py
index 2a40891d87..107548ff21 100644
--- a/sdk/agenta/client/backend/types/evaluator.py
+++ b/sdk/agenta/client/backend/types/evaluator.py
@@ -27,15 +27,13 @@ class Evaluator(UniversalBaseModel):
     slug: typing.Optional[str] = None
     id: typing.Optional[str] = None
     flags: typing.Optional[WorkflowFlags] = None
-    metadata: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = (
-        None
-    )
+    metadata: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonOutput"]]
+    ] = None
     data: typing.Optional[WorkflowData] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluator_config.py b/sdk/agenta/client/backend/types/evaluator_config.py
index f21f4c63cb..7134db5e79 100644
--- a/sdk/agenta/client/backend/types/evaluator_config.py
+++ b/sdk/agenta/client/backend/types/evaluator_config.py
@@ -11,16 +11,14 @@ class EvaluatorConfig(UniversalBaseModel):
     name: str
     project_id: str
     evaluator_key: str
-    settings_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = (
-        None
-    )
+    settings_values: typing.Optional[
+        typing.Dict[str, typing.Optional[typing.Any]]
+    ] = None
     created_at: str
     updated_at: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluator_flags.py b/sdk/agenta/client/backend/types/evaluator_flags.py
index cc8197a297..14aadde2b5 100644
--- a/sdk/agenta/client/backend/types/evaluator_flags.py
+++ b/sdk/agenta/client/backend/types/evaluator_flags.py
@@ -12,9 +12,7 @@ class EvaluatorFlags(UniversalBaseModel):
     is_human: typing.Optional[bool] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluator_mapping_output_interface.py b/sdk/agenta/client/backend/types/evaluator_mapping_output_interface.py
index 3856bb811d..a3aeeb0757 100644
--- a/sdk/agenta/client/backend/types/evaluator_mapping_output_interface.py
+++ b/sdk/agenta/client/backend/types/evaluator_mapping_output_interface.py
@@ -10,9 +10,7 @@ class EvaluatorMappingOutputInterface(UniversalBaseModel):
     outputs: typing.Dict[str, typing.Optional[typing.Any]]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
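Note on the annotation hunks above (`Evaluator.metadata`, `EvaluatorConfig.settings_values`): the diff swaps a parenthesized right-hand side for wrapping inside the type subscript, most likely the result of regenerating with a different formatter version or settings. The sketch below is illustrative only (plain `str` stands in for the SDK's `FullJsonOutput` forward reference); both spellings parse to the same annotation and bind the same default, so the change is purely cosmetic.

```python
# Illustrative sketch only: `str` stands in for the SDK's "FullJsonOutput"
# forward reference. Both wrappings are parsed identically by Python.
import typing

# Style being removed: the right-hand side is parenthesized to fit the line.
metadata_old: typing.Optional[typing.Dict[str, typing.Optional[str]]] = (
    None
)

# Style being introduced: the subscript itself is wrapped instead.
metadata_new: typing.Optional[
    typing.Dict[str, typing.Optional[str]]
] = None

assert metadata_old is None and metadata_new is None  # same runtime value
```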
diff --git a/sdk/agenta/client/backend/types/evaluator_output_interface.py b/sdk/agenta/client/backend/types/evaluator_output_interface.py
index e146e4b2b1..1db8220dfb 100644
--- a/sdk/agenta/client/backend/types/evaluator_output_interface.py
+++ b/sdk/agenta/client/backend/types/evaluator_output_interface.py
@@ -10,9 +10,7 @@ class EvaluatorOutputInterface(UniversalBaseModel):
     outputs: typing.Dict[str, typing.Optional[typing.Any]]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluator_query.py b/sdk/agenta/client/backend/types/evaluator_query.py
index fbc1d6f275..7ed73abd28 100644
--- a/sdk/agenta/client/backend/types/evaluator_query.py
+++ b/sdk/agenta/client/backend/types/evaluator_query.py
@@ -18,9 +18,7 @@ class EvaluatorQuery(UniversalBaseModel):
     meta: typing.Optional[typing.Dict[str, typing.Optional["FullJsonInput"]]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluator_query_request.py b/sdk/agenta/client/backend/types/evaluator_query_request.py
index 63a428d3ac..0d2d0bbe16 100644
--- a/sdk/agenta/client/backend/types/evaluator_query_request.py
+++ b/sdk/agenta/client/backend/types/evaluator_query_request.py
@@ -16,9 +16,7 @@ class EvaluatorQueryRequest(UniversalBaseModel):
     metadata: typing.Dict[str, typing.Optional["FullJsonInput"]]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluator_request.py b/sdk/agenta/client/backend/types/evaluator_request.py
index 2954faf33d..b98fee8c76 100644
--- a/sdk/agenta/client/backend/types/evaluator_request.py
+++ b/sdk/agenta/client/backend/types/evaluator_request.py
@@ -11,9 +11,7 @@ class EvaluatorRequest(UniversalBaseModel):
     evaluator: Evaluator
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluator_response.py b/sdk/agenta/client/backend/types/evaluator_response.py
index 4ecdc97d8d..6c51063e19 100644
--- a/sdk/agenta/client/backend/types/evaluator_response.py
+++ b/sdk/agenta/client/backend/types/evaluator_response.py
@@ -12,9 +12,7 @@ class EvaluatorResponse(UniversalBaseModel):
     evaluator: typing.Optional[Evaluator] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/evaluators_response.py b/sdk/agenta/client/backend/types/evaluators_response.py
index e1f51d917e..6767051b1f 100644
--- a/sdk/agenta/client/backend/types/evaluators_response.py
+++ b/sdk/agenta/client/backend/types/evaluators_response.py
@@ -12,9 +12,7 @@ class EvaluatorsResponse(UniversalBaseModel):
     evaluator: typing.Optional[typing.List[Evaluator]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/exception_dto.py b/sdk/agenta/client/backend/types/exception_dto.py
index 42202e195e..da5e15f981 100644
--- a/sdk/agenta/client/backend/types/exception_dto.py
+++ b/sdk/agenta/client/backend/types/exception_dto.py
@@ -15,9 +15,7 @@ class ExceptionDto(UniversalBaseModel):
     attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/extended_o_tel_tracing_response.py b/sdk/agenta/client/backend/types/extended_o_tel_tracing_response.py
index ddf616323c..b71c3072be 100644
--- a/sdk/agenta/client/backend/types/extended_o_tel_tracing_response.py
+++ b/sdk/agenta/client/backend/types/extended_o_tel_tracing_response.py
@@ -13,9 +13,7 @@ class ExtendedOTelTracingResponse(UniversalBaseModel):
     spans: typing.List[OTelSpanDto]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/get_config_response.py b/sdk/agenta/client/backend/types/get_config_response.py
index 6eb698283f..332e10b131 100644
--- a/sdk/agenta/client/backend/types/get_config_response.py
+++ b/sdk/agenta/client/backend/types/get_config_response.py
@@ -12,9 +12,7 @@ class GetConfigResponse(UniversalBaseModel):
     parameters: typing.Dict[str, typing.Optional[typing.Any]]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/header.py b/sdk/agenta/client/backend/types/header.py
index 6980ec887a..f274f59b7b 100644
--- a/sdk/agenta/client/backend/types/header.py
+++ b/sdk/agenta/client/backend/types/header.py
@@ -11,9 +11,7 @@ class Header(UniversalBaseModel):
     description: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/http_validation_error.py b/sdk/agenta/client/backend/types/http_validation_error.py
index b066ebba5f..70a0f5f172 100644
--- a/sdk/agenta/client/backend/types/http_validation_error.py
+++ b/sdk/agenta/client/backend/types/http_validation_error.py
@@ -11,9 +11,7 @@ class HttpValidationError(UniversalBaseModel):
     detail: typing.Optional[typing.List[ValidationError]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/human_evaluation.py b/sdk/agenta/client/backend/types/human_evaluation.py
index 2b2098e2ed..562b47ac09 100644
--- a/sdk/agenta/client/backend/types/human_evaluation.py
+++ b/sdk/agenta/client/backend/types/human_evaluation.py
@@ -22,9 +22,7 @@ class HumanEvaluation(UniversalBaseModel):
     updated_at: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/human_evaluation_scenario.py b/sdk/agenta/client/backend/types/human_evaluation_scenario.py
index cf75465f7e..dcdc3d4fdf 100644
--- a/sdk/agenta/client/backend/types/human_evaluation_scenario.py
+++ b/sdk/agenta/client/backend/types/human_evaluation_scenario.py
@@ -21,9 +21,7 @@ class HumanEvaluationScenario(UniversalBaseModel):
     note: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/human_evaluation_scenario_input.py b/sdk/agenta/client/backend/types/human_evaluation_scenario_input.py
index 6e64ddf64b..f6245cbbc8 100644
--- a/sdk/agenta/client/backend/types/human_evaluation_scenario_input.py
+++ b/sdk/agenta/client/backend/types/human_evaluation_scenario_input.py
@@ -11,9 +11,7 @@ class HumanEvaluationScenarioInput(UniversalBaseModel):
     input_value: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/human_evaluation_scenario_output.py b/sdk/agenta/client/backend/types/human_evaluation_scenario_output.py
index 70652ea3f0..df597596e9 100644
--- a/sdk/agenta/client/backend/types/human_evaluation_scenario_output.py
+++ b/sdk/agenta/client/backend/types/human_evaluation_scenario_output.py
@@ -11,9 +11,7 @@ class HumanEvaluationScenarioOutput(UniversalBaseModel):
     variant_output: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/invite_request.py b/sdk/agenta/client/backend/types/invite_request.py
index db34e84579..e5386bd40f 100644
--- a/sdk/agenta/client/backend/types/invite_request.py
+++ b/sdk/agenta/client/backend/types/invite_request.py
@@ -11,9 +11,7 @@ class InviteRequest(UniversalBaseModel):
     roles: typing.Optional[typing.List[str]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/legacy_analytics_response.py b/sdk/agenta/client/backend/types/legacy_analytics_response.py
index 3dff39deb3..c3125eb2b3 100644
--- a/sdk/agenta/client/backend/types/legacy_analytics_response.py
+++ b/sdk/agenta/client/backend/types/legacy_analytics_response.py
@@ -18,9 +18,7 @@ class LegacyAnalyticsResponse(UniversalBaseModel):
     data: typing.List[LegacyDataPoint]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/legacy_data_point.py b/sdk/agenta/client/backend/types/legacy_data_point.py
index fbbecae573..99fa9a1ba5 100644
--- a/sdk/agenta/client/backend/types/legacy_data_point.py
+++ b/sdk/agenta/client/backend/types/legacy_data_point.py
@@ -16,9 +16,7 @@ class LegacyDataPoint(UniversalBaseModel):
     total_tokens: int
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/legacy_evaluator.py b/sdk/agenta/client/backend/types/legacy_evaluator.py
index ef2325c4ab..2cfef45886 100644
--- a/sdk/agenta/client/backend/types/legacy_evaluator.py
+++ b/sdk/agenta/client/backend/types/legacy_evaluator.py
@@ -17,9 +17,7 @@ class LegacyEvaluator(UniversalBaseModel):
     tags: typing.List[str]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/legacy_scope_request.py b/sdk/agenta/client/backend/types/legacy_scope_request.py
index b209fea283..b4eb0af563 100644
--- a/sdk/agenta/client/backend/types/legacy_scope_request.py
+++ b/sdk/agenta/client/backend/types/legacy_scope_request.py
@@ -10,9 +10,7 @@ class LegacyScopeRequest(UniversalBaseModel):
     name: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/legacy_scopes_response.py b/sdk/agenta/client/backend/types/legacy_scopes_response.py
index 0c73ad5124..8e26d5830d 100644
--- a/sdk/agenta/client/backend/types/legacy_scopes_response.py
+++ b/sdk/agenta/client/backend/types/legacy_scopes_response.py
@@ -18,9 +18,7 @@ class LegacyScopesResponse(UniversalBaseModel):
     credentials: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/legacy_subscription_request.py b/sdk/agenta/client/backend/types/legacy_subscription_request.py
index 1139139284..84f3d4db8b 100644
--- a/sdk/agenta/client/backend/types/legacy_subscription_request.py
+++ b/sdk/agenta/client/backend/types/legacy_subscription_request.py
@@ -10,9 +10,7 @@ class LegacySubscriptionRequest(UniversalBaseModel):
     plan: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/legacy_user_request.py b/sdk/agenta/client/backend/types/legacy_user_request.py
index 748f1f6912..f3c1631bc0 100644
--- a/sdk/agenta/client/backend/types/legacy_user_request.py
+++ b/sdk/agenta/client/backend/types/legacy_user_request.py
@@ -11,9 +11,7 @@ class LegacyUserRequest(UniversalBaseModel):
     email: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/legacy_user_response.py b/sdk/agenta/client/backend/types/legacy_user_response.py
index fa833dc82b..76d5b11542 100644
--- a/sdk/agenta/client/backend/types/legacy_user_response.py
+++ b/sdk/agenta/client/backend/types/legacy_user_response.py
@@ -10,9 +10,7 @@ class LegacyUserResponse(UniversalBaseModel):
     id: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/lifecycle_dto.py b/sdk/agenta/client/backend/types/lifecycle_dto.py
index a948b7433a..d9a958d835 100644
--- a/sdk/agenta/client/backend/types/lifecycle_dto.py
+++ b/sdk/agenta/client/backend/types/lifecycle_dto.py
@@ -13,9 +13,7 @@ class LifecycleDto(UniversalBaseModel):
     updated_by: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/link_dto.py b/sdk/agenta/client/backend/types/link_dto.py
index 53792bbbb5..7ec2c9ef42 100644
--- a/sdk/agenta/client/backend/types/link_dto.py
+++ b/sdk/agenta/client/backend/types/link_dto.py
@@ -13,9 +13,7 @@ class LinkDto(UniversalBaseModel):
     tree_id: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/list_api_keys_response.py b/sdk/agenta/client/backend/types/list_api_keys_response.py
index 5645b7b97a..f369b622c2 100644
--- a/sdk/agenta/client/backend/types/list_api_keys_response.py
+++ b/sdk/agenta/client/backend/types/list_api_keys_response.py
@@ -13,9 +13,7 @@ class ListApiKeysResponse(UniversalBaseModel):
     expiration_date: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/llm_run_rate_limit.py b/sdk/agenta/client/backend/types/llm_run_rate_limit.py
index e49c088b8a..0980d9214d 100644
--- a/sdk/agenta/client/backend/types/llm_run_rate_limit.py
+++ b/sdk/agenta/client/backend/types/llm_run_rate_limit.py
@@ -13,9 +13,7 @@ class LlmRunRateLimit(UniversalBaseModel):
     delay_between_batches: int
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/meta_request.py b/sdk/agenta/client/backend/types/meta_request.py
index 8d327c8e1d..c6c5c2f637 100644
--- a/sdk/agenta/client/backend/types/meta_request.py
+++ b/sdk/agenta/client/backend/types/meta_request.py
@@ -16,9 +16,7 @@ class MetaRequest(UniversalBaseModel):
     meta: typing.Dict[str, typing.Optional["FullJsonInput"]]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/metrics_dto.py b/sdk/agenta/client/backend/types/metrics_dto.py
index 0069e27753..a0eefd86f5 100644
--- a/sdk/agenta/client/backend/types/metrics_dto.py
+++ b/sdk/agenta/client/backend/types/metrics_dto.py
@@ -13,9 +13,7 @@ class MetricsDto(UniversalBaseModel):
     tokens: typing.Optional[int] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/new_testset.py b/sdk/agenta/client/backend/types/new_testset.py
index 365f7ca7a6..decf6d3828 100644
--- a/sdk/agenta/client/backend/types/new_testset.py
+++ b/sdk/agenta/client/backend/types/new_testset.py
@@ -11,9 +11,7 @@ class NewTestset(UniversalBaseModel):
     csvdata: typing.List[typing.Dict[str, typing.Optional[typing.Any]]]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
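Every hunk in this stretch applies the same mechanical rewrite: the three-line `pydantic.ConfigDict(...)` call is collapsed onto a single line, with behavior unchanged. For readers unfamiliar with these generated files, below is a minimal, self-contained sketch of the dual-version pattern they all share; the real `UniversalBaseModel` and `IS_PYDANTIC_V2` live in the SDK's `core.pydantic_utilities`, so the stand-ins here are assumptions.

```python
# Minimal sketch of the generated models' compatibility pattern. The version
# probe below is a stand-in for the SDK's IS_PYDANTIC_V2 helper.
import typing

import pydantic

IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")


class ExampleModel(pydantic.BaseModel):
    name: str

    if IS_PYDANTIC_V2:
        # Pydantic v2: config is a ConfigDict class attribute; this branch
        # (and the ConfigDict reference) only executes on v2 installs.
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
    else:
        # Pydantic v1: the same settings expressed as an inner Config class.
        class Config:
            extra = pydantic.Extra.allow
            allow_mutation = False
```

Because the `if`/`else` is evaluated at class-creation time, each install sees only the configuration syntax its Pydantic major version understands.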
diff --git a/sdk/agenta/client/backend/types/node_dto.py b/sdk/agenta/client/backend/types/node_dto.py
index a80a41b4fe..259f18b33c 100644
--- a/sdk/agenta/client/backend/types/node_dto.py
+++ b/sdk/agenta/client/backend/types/node_dto.py
@@ -13,9 +13,7 @@ class NodeDto(UniversalBaseModel):
     type: typing.Optional[NodeType] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_context_dto.py b/sdk/agenta/client/backend/types/o_tel_context_dto.py
index 518183ae00..2faa593ba4 100644
--- a/sdk/agenta/client/backend/types/o_tel_context_dto.py
+++ b/sdk/agenta/client/backend/types/o_tel_context_dto.py
@@ -11,9 +11,7 @@ class OTelContextDto(UniversalBaseModel):
     span_id: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_event.py b/sdk/agenta/client/backend/types/o_tel_event.py
index 2dd62903bb..f5a2bc7cc5 100644
--- a/sdk/agenta/client/backend/types/o_tel_event.py
+++ b/sdk/agenta/client/backend/types/o_tel_event.py
@@ -16,14 +16,12 @@ class OTelEvent(UniversalBaseModel):
     name: str
     timestamp: Timestamp
 
-    attributes: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = (
-        None
-    )
+    attributes: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonOutput"]]
+    ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_event_dto.py b/sdk/agenta/client/backend/types/o_tel_event_dto.py
index 152ccc1cd0..2031177463 100644
--- a/sdk/agenta/client/backend/types/o_tel_event_dto.py
+++ b/sdk/agenta/client/backend/types/o_tel_event_dto.py
@@ -12,9 +12,7 @@ class OTelEventDto(UniversalBaseModel):
     attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_extra_dto.py b/sdk/agenta/client/backend/types/o_tel_extra_dto.py
index f497b57a56..68b5fca1f2 100644
--- a/sdk/agenta/client/backend/types/o_tel_extra_dto.py
+++ b/sdk/agenta/client/backend/types/o_tel_extra_dto.py
@@ -15,9 +15,7 @@ class OTelExtraDto(UniversalBaseModel):
     links: typing.Optional[typing.List[OTelLinkDto]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_flat_span.py b/sdk/agenta/client/backend/types/o_tel_flat_span.py
index 97bc818ec5..5873d6d85c 100644
--- a/sdk/agenta/client/backend/types/o_tel_flat_span.py
+++ b/sdk/agenta/client/backend/types/o_tel_flat_span.py
@@ -35,16 +35,14 @@ class OTelFlatSpan(UniversalBaseModel):
     end_time: typing.Optional[OTelFlatSpanOutputEndTime] = None
     status_code: typing.Optional[OTelStatusCode] = None
     status_message: typing.Optional[str] = None
-    attributes: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = (
-        None
-    )
+    attributes: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonOutput"]]
+    ] = None
     events: typing.Optional[typing.List[OTelEvent]] = None
     links: typing.Optional[typing.List[OTelLink]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_link.py b/sdk/agenta/client/backend/types/o_tel_link.py
index 186ff2c4a3..0d449c52b9 100644
--- a/sdk/agenta/client/backend/types/o_tel_link.py
+++ b/sdk/agenta/client/backend/types/o_tel_link.py
@@ -15,14 +15,12 @@ class OTelLink(UniversalBaseModel):
     span_id: str
     trace_id: str
 
-    attributes: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = (
-        None
-    )
+    attributes: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonOutput"]]
+    ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_link_dto.py b/sdk/agenta/client/backend/types/o_tel_link_dto.py
index b439486296..7261632637 100644
--- a/sdk/agenta/client/backend/types/o_tel_link_dto.py
+++ b/sdk/agenta/client/backend/types/o_tel_link_dto.py
@@ -12,9 +12,7 @@ class OTelLinkDto(UniversalBaseModel):
     attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_links_response.py b/sdk/agenta/client/backend/types/o_tel_links_response.py
index 83844ba741..9f7be47a34 100644
--- a/sdk/agenta/client/backend/types/o_tel_links_response.py
+++ b/sdk/agenta/client/backend/types/o_tel_links_response.py
@@ -13,9 +13,7 @@ class OTelLinksResponse(UniversalBaseModel):
     links: typing.Optional[typing.List[OTelLink]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_span.py b/sdk/agenta/client/backend/types/o_tel_span.py
index eec70d2f1a..a39377066f 100644
--- a/sdk/agenta/client/backend/types/o_tel_span.py
+++ b/sdk/agenta/client/backend/types/o_tel_span.py
@@ -45,7 +45,7 @@ class OTelSpan(UniversalBaseModel):
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
             extra="allow",
             frozen=True,
-            json_schema_extra=lambda schema, model: None,  # fmt: off
+            json_schema_extra=lambda schema, model: None,  # Disable schema generation to prevent recursion
         )  # type: ignore # Pydantic v2
     else:
 
diff --git a/sdk/agenta/client/backend/types/o_tel_span_dto.py b/sdk/agenta/client/backend/types/o_tel_span_dto.py
index b60460f142..24999a7127 100644
--- a/sdk/agenta/client/backend/types/o_tel_span_dto.py
+++ b/sdk/agenta/client/backend/types/o_tel_span_dto.py
@@ -26,9 +26,7 @@ class OTelSpanDto(UniversalBaseModel):
     links: typing.Optional[typing.List[OTelLinkDto]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_spans_tree.py b/sdk/agenta/client/backend/types/o_tel_spans_tree.py
index 92f4809a66..c58bc757ad 100644
--- a/sdk/agenta/client/backend/types/o_tel_spans_tree.py
+++ b/sdk/agenta/client/backend/types/o_tel_spans_tree.py
@@ -13,9 +13,7 @@ class OTelSpansTree(UniversalBaseModel):
     ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_tracing_data_response.py b/sdk/agenta/client/backend/types/o_tel_tracing_data_response.py
index 2c44e2eebc..078d7899c8 100644
--- a/sdk/agenta/client/backend/types/o_tel_tracing_data_response.py
+++ b/sdk/agenta/client/backend/types/o_tel_tracing_data_response.py
@@ -13,9 +13,7 @@ class OTelTracingDataResponse(UniversalBaseModel):
     spans: typing.List[OTelSpanDto]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/o_tel_tracing_request.py b/sdk/agenta/client/backend/types/o_tel_tracing_request.py
index f098ae74f3..73a7395f4a 100644
--- a/sdk/agenta/client/backend/types/o_tel_tracing_request.py
+++ b/sdk/agenta/client/backend/types/o_tel_tracing_request.py
@@ -13,9 +13,7 @@ class OTelTracingRequest(UniversalBaseModel):
     traces: typing.Optional[typing.Dict[str, typing.Optional[OTelSpansTree]]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
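The one hunk in this stretch that is not pure reformatting is OTelSpan's: a stale `# fmt: off` marker on the `json_schema_extra` argument is replaced by a comment stating why the no-op lambda is there (suppressing schema generation for this self-referential span type). For context, here is a hedged sketch of how a `json_schema_extra` callable can rewrite, or blank, an emitted schema in Pydantic v2; the model is invented for illustration and is not the SDK's module.

```python
# Hedged sketch of Pydantic v2's json_schema_extra callable hook; the model
# here is invented for illustration only.
import pydantic


class Example(pydantic.BaseModel):
    name: str

    model_config = pydantic.ConfigDict(
        # The callable receives the generated schema dict (and the model
        # class) and mutates the dict in place; clearing it blanks the schema.
        json_schema_extra=lambda schema, model: schema.clear(),
    )


print(Example.model_json_schema())  # -> {}
```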
diff --git a/sdk/agenta/client/backend/types/o_tel_tracing_response.py b/sdk/agenta/client/backend/types/o_tel_tracing_response.py
index 8f4987c81e..63739203ab 100644
--- a/sdk/agenta/client/backend/types/o_tel_tracing_response.py
+++ b/sdk/agenta/client/backend/types/o_tel_tracing_response.py
@@ -18,9 +18,7 @@ class OTelTracingResponse(UniversalBaseModel):
     traces: typing.Optional[typing.Dict[str, typing.Optional[OTelSpansTree]]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/organization.py b/sdk/agenta/client/backend/types/organization.py
index fef8794c5e..182b59e638 100644
--- a/sdk/agenta/client/backend/types/organization.py
+++ b/sdk/agenta/client/backend/types/organization.py
@@ -15,9 +15,7 @@ class Organization(UniversalBaseModel):
     workspaces: typing.Optional[typing.List[str]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/organization_details.py b/sdk/agenta/client/backend/types/organization_details.py
index 9720147c63..fe92068238 100644
--- a/sdk/agenta/client/backend/types/organization_details.py
+++ b/sdk/agenta/client/backend/types/organization_details.py
@@ -18,9 +18,7 @@ class OrganizationDetails(UniversalBaseModel):
     ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/organization_membership_request.py b/sdk/agenta/client/backend/types/organization_membership_request.py
index 15e639eaa6..a68fbb3359 100644
--- a/sdk/agenta/client/backend/types/organization_membership_request.py
+++ b/sdk/agenta/client/backend/types/organization_membership_request.py
@@ -15,9 +15,7 @@ class OrganizationMembershipRequest(UniversalBaseModel):
     organization_ref: Reference
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/organization_output.py b/sdk/agenta/client/backend/types/organization_output.py
index 45dc45a53e..55d4a1aea3 100644
--- a/sdk/agenta/client/backend/types/organization_output.py
+++ b/sdk/agenta/client/backend/types/organization_output.py
@@ -11,9 +11,7 @@ class OrganizationOutput(UniversalBaseModel):
     name: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/organization_request.py b/sdk/agenta/client/backend/types/organization_request.py
index a8bda700a3..688eeaff1b 100644
--- a/sdk/agenta/client/backend/types/organization_request.py
+++ b/sdk/agenta/client/backend/types/organization_request.py
@@ -12,9 +12,7 @@ class OrganizationRequest(UniversalBaseModel):
     is_paying: bool
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/parent_dto.py b/sdk/agenta/client/backend/types/parent_dto.py
index be3179f83f..285505beae 100644
--- a/sdk/agenta/client/backend/types/parent_dto.py
+++ b/sdk/agenta/client/backend/types/parent_dto.py
@@ -10,9 +10,7 @@ class ParentDto(UniversalBaseModel):
     id: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/project_membership_request.py b/sdk/agenta/client/backend/types/project_membership_request.py
index 8438f57ae9..efbb405623 100644
--- a/sdk/agenta/client/backend/types/project_membership_request.py
+++ b/sdk/agenta/client/backend/types/project_membership_request.py
@@ -15,9 +15,7 @@ class ProjectMembershipRequest(UniversalBaseModel):
     project_ref: Reference
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/project_request.py b/sdk/agenta/client/backend/types/project_request.py
index ff6098e47b..7fc1a9429a 100644
--- a/sdk/agenta/client/backend/types/project_request.py
+++ b/sdk/agenta/client/backend/types/project_request.py
@@ -15,9 +15,7 @@ class ProjectRequest(UniversalBaseModel):
     organization_ref: Reference
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/project_scope.py b/sdk/agenta/client/backend/types/project_scope.py
index 80244f856d..17d17438bd 100644
--- a/sdk/agenta/client/backend/types/project_scope.py
+++ b/sdk/agenta/client/backend/types/project_scope.py
@@ -18,9 +18,7 @@ class ProjectScope(UniversalBaseModel):
     organization: Reference
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/projects_response.py b/sdk/agenta/client/backend/types/projects_response.py
index 6bd68600a5..2688ea5348 100644
--- a/sdk/agenta/client/backend/types/projects_response.py
+++ b/sdk/agenta/client/backend/types/projects_response.py
@@ -17,9 +17,7 @@ class ProjectsResponse(UniversalBaseModel):
     is_demo: typing.Optional[bool] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/reference.py b/sdk/agenta/client/backend/types/reference.py
index 09f33766ae..ef16defd21 100644
--- a/sdk/agenta/client/backend/types/reference.py
+++ b/sdk/agenta/client/backend/types/reference.py
@@ -16,14 +16,12 @@ class Reference(UniversalBaseModel):
     id: typing.Optional[str] = None
     slug: typing.Optional[str] = None
     version: typing.Optional[str] = None
-    attributes: typing.Optional[typing.Dict[str, typing.Optional["FullJsonInput"]]] = (
-        None
-    )
+    attributes: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonInput"]]
+    ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/reference_dto.py b/sdk/agenta/client/backend/types/reference_dto.py
index 40fd02f23a..c4ce3b6094 100644
--- a/sdk/agenta/client/backend/types/reference_dto.py
+++ b/sdk/agenta/client/backend/types/reference_dto.py
@@ -13,9 +13,7 @@ class ReferenceDto(UniversalBaseModel):
     id: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/reference_request_model.py b/sdk/agenta/client/backend/types/reference_request_model.py
index 859f77daf2..37e8f65155 100644
--- a/sdk/agenta/client/backend/types/reference_request_model.py
+++ b/sdk/agenta/client/backend/types/reference_request_model.py
@@ -13,9 +13,7 @@ class ReferenceRequestModel(UniversalBaseModel):
     id: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/result.py b/sdk/agenta/client/backend/types/result.py
index f9154bae26..8eec65536a 100644
--- a/sdk/agenta/client/backend/types/result.py
+++ b/sdk/agenta/client/backend/types/result.py
@@ -13,9 +13,7 @@ class Result(UniversalBaseModel):
     error: typing.Optional[Error] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/root_dto.py b/sdk/agenta/client/backend/types/root_dto.py
index 2753ce6105..e099af2cbb 100644
--- a/sdk/agenta/client/backend/types/root_dto.py
+++ b/sdk/agenta/client/backend/types/root_dto.py
@@ -10,9 +10,7 @@ class RootDto(UniversalBaseModel):
     id: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/scopes_response_model.py b/sdk/agenta/client/backend/types/scopes_response_model.py
index a7526eac7b..4531c11be0 100644
--- a/sdk/agenta/client/backend/types/scopes_response_model.py
+++ b/sdk/agenta/client/backend/types/scopes_response_model.py
@@ -11,9 +11,7 @@ class ScopesResponseModel(UniversalBaseModel):
     projects: typing.Optional[typing.Dict[str, typing.Dict[str, ProjectScope]]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/secret_dto.py b/sdk/agenta/client/backend/types/secret_dto.py
index 37ce12b9a7..9aa3259b53 100644
--- a/sdk/agenta/client/backend/types/secret_dto.py
+++ b/sdk/agenta/client/backend/types/secret_dto.py
@@ -13,9 +13,7 @@ class SecretDto(UniversalBaseModel):
     data: Data
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/secret_response_dto.py b/sdk/agenta/client/backend/types/secret_response_dto.py
index 5d0b6d9f75..77c2a45945 100644
--- a/sdk/agenta/client/backend/types/secret_response_dto.py
+++ b/sdk/agenta/client/backend/types/secret_response_dto.py
@@ -18,9 +18,7 @@ class SecretResponseDto(UniversalBaseModel):
     lifecycle: typing.Optional[LifecycleDto] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/simple_evaluation_output.py b/sdk/agenta/client/backend/types/simple_evaluation_output.py
index 02b6f5bafe..c0b927f73c 100644
--- a/sdk/agenta/client/backend/types/simple_evaluation_output.py
+++ b/sdk/agenta/client/backend/types/simple_evaluation_output.py
@@ -15,9 +15,7 @@ class SimpleEvaluationOutput(UniversalBaseModel):
     evaluation_type: EvaluationType
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/span_dto.py b/sdk/agenta/client/backend/types/span_dto.py
index b92370d2ad..691d71a6f9 100644
--- a/sdk/agenta/client/backend/types/span_dto.py
+++ b/sdk/agenta/client/backend/types/span_dto.py
@@ -39,14 +39,12 @@ class SpanDto(UniversalBaseModel):
     refs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
     links: typing.Optional[typing.List[LinkDto]] = None
     otel: typing.Optional[OTelExtraDto] = None
-    nodes: typing.Optional[typing.Dict[str, typing.Optional["SpanDtoNodesValue"]]] = (
-        None
-    )
+    nodes: typing.Optional[
+        typing.Dict[str, typing.Optional["SpanDtoNodesValue"]]
+    ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/standard_provider_dto.py b/sdk/agenta/client/backend/types/standard_provider_dto.py
index 9ce6db03ff..df25d76d4d 100644
--- a/sdk/agenta/client/backend/types/standard_provider_dto.py
+++ b/sdk/agenta/client/backend/types/standard_provider_dto.py
@@ -13,9 +13,7 @@ class StandardProviderDto(UniversalBaseModel):
     provider: StandardProviderSettingsDto
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/standard_provider_settings_dto.py b/sdk/agenta/client/backend/types/standard_provider_settings_dto.py
index 3b26a3f201..c0bc92be60 100644
--- a/sdk/agenta/client/backend/types/standard_provider_settings_dto.py
+++ b/sdk/agenta/client/backend/types/standard_provider_settings_dto.py
@@ -10,9 +10,7 @@ class StandardProviderSettingsDto(UniversalBaseModel):
     key: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/status_dto.py b/sdk/agenta/client/backend/types/status_dto.py
index 51d96949a3..47a96e3984 100644
--- a/sdk/agenta/client/backend/types/status_dto.py
+++ b/sdk/agenta/client/backend/types/status_dto.py
@@ -12,9 +12,7 @@ class StatusDto(UniversalBaseModel):
     message: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/tags_request.py b/sdk/agenta/client/backend/types/tags_request.py
index 46056e16e7..5ee86f99d9 100644
--- a/sdk/agenta/client/backend/types/tags_request.py
+++ b/sdk/agenta/client/backend/types/tags_request.py
@@ -16,9 +16,7 @@ class TagsRequest(UniversalBaseModel):
     metadata: typing.Dict[str, typing.Optional["FullJsonInput"]]
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/testcase_response.py b/sdk/agenta/client/backend/types/testcase_response.py
index 72a99183ad..56fab720ae 100644
--- a/sdk/agenta/client/backend/types/testcase_response.py
+++ b/sdk/agenta/client/backend/types/testcase_response.py
@@ -14,14 +14,12 @@ class TestcaseResponse(UniversalBaseModel):
 
     count: int
 
-    testcase: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = (
-        None
-    )
+    testcase: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonOutput"]]
+    ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/testset.py b/sdk/agenta/client/backend/types/testset.py
index b0f93a2067..5a50875540 100644
--- a/sdk/agenta/client/backend/types/testset.py
+++ b/sdk/agenta/client/backend/types/testset.py
@@ -27,14 +27,12 @@ class Testset(UniversalBaseModel):
     testcases: typing.Optional[
         typing.List[typing.Dict[str, typing.Optional["FullJsonOutput"]]]
     ] = None
-    metadata: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = (
-        None
-    )
+    metadata: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonOutput"]]
+    ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/testset_output_response.py b/sdk/agenta/client/backend/types/testset_output_response.py
index 882a60687c..c73add9298 100644
--- a/sdk/agenta/client/backend/types/testset_output_response.py
+++ b/sdk/agenta/client/backend/types/testset_output_response.py
@@ -15,9 +15,7 @@ class TestsetOutputResponse(UniversalBaseModel):
     updated_at: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/testset_request.py b/sdk/agenta/client/backend/types/testset_request.py
index 12a336b7f0..1e0448b2d3 100644
--- a/sdk/agenta/client/backend/types/testset_request.py
+++ b/sdk/agenta/client/backend/types/testset_request.py
@@ -11,9 +11,7 @@ class TestsetRequest(UniversalBaseModel):
     testset: Testset
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/testset_response.py b/sdk/agenta/client/backend/types/testset_response.py
index 5c402dc3c3..55a02c3e83 100644
--- a/sdk/agenta/client/backend/types/testset_response.py
+++ b/sdk/agenta/client/backend/types/testset_response.py
@@ -12,9 +12,7 @@ class TestsetResponse(UniversalBaseModel):
     testset: typing.Optional[Testset] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/testset_simple_response.py b/sdk/agenta/client/backend/types/testset_simple_response.py
index 6b6c4cbdff..abd4e66869 100644
--- a/sdk/agenta/client/backend/types/testset_simple_response.py
+++ b/sdk/agenta/client/backend/types/testset_simple_response.py
@@ -12,9 +12,7 @@ class TestsetSimpleResponse(UniversalBaseModel):
     created_at: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/testsets_response.py b/sdk/agenta/client/backend/types/testsets_response.py
index f2f3eaeb46..e1778bf1eb 100644
--- a/sdk/agenta/client/backend/types/testsets_response.py
+++ b/sdk/agenta/client/backend/types/testsets_response.py
@@ -12,9 +12,7 @@ class TestsetsResponse(UniversalBaseModel):
     testsets: typing.Optional[typing.List[Testset]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/time_dto.py b/sdk/agenta/client/backend/types/time_dto.py
index 5ed2705441..7f0aa58a57 100644
--- a/sdk/agenta/client/backend/types/time_dto.py
+++ b/sdk/agenta/client/backend/types/time_dto.py
@@ -12,9 +12,7 @@ class TimeDto(UniversalBaseModel):
     end: dt.datetime
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/tree_dto.py b/sdk/agenta/client/backend/types/tree_dto.py
index da4d5edb5b..f62fd7f7e4 100644
--- a/sdk/agenta/client/backend/types/tree_dto.py
+++ b/sdk/agenta/client/backend/types/tree_dto.py
@@ -12,9 +12,7 @@ class TreeDto(UniversalBaseModel):
     type: typing.Optional[TreeType] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/update_app_output.py b/sdk/agenta/client/backend/types/update_app_output.py
index bc24403f19..b96c4af042 100644
--- a/sdk/agenta/client/backend/types/update_app_output.py
+++ b/sdk/agenta/client/backend/types/update_app_output.py
@@ -11,9 +11,7 @@ class UpdateAppOutput(UniversalBaseModel):
     app_name: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/user_request.py b/sdk/agenta/client/backend/types/user_request.py
index c12e4d4e7b..f36c14bbed 100644
--- a/sdk/agenta/client/backend/types/user_request.py
+++ b/sdk/agenta/client/backend/types/user_request.py
@@ -11,9 +11,7 @@ class UserRequest(UniversalBaseModel):
     email: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/validation_error.py b/sdk/agenta/client/backend/types/validation_error.py
index a632c6d67b..e388c3c1c1 100644
--- a/sdk/agenta/client/backend/types/validation_error.py
+++ b/sdk/agenta/client/backend/types/validation_error.py
@@ -13,9 +13,7 @@ class ValidationError(UniversalBaseModel):
     type: str
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/workflow_artifact.py b/sdk/agenta/client/backend/types/workflow_artifact.py
index b772b1ff68..4ea448652c 100644
--- a/sdk/agenta/client/backend/types/workflow_artifact.py
+++ b/sdk/agenta/client/backend/types/workflow_artifact.py
@@ -26,14 +26,12 @@ class WorkflowArtifact(UniversalBaseModel):
     slug: typing.Optional[str] = None
     id: typing.Optional[str] = None
    flags: typing.Optional[WorkflowFlags] = None
-    metadata: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = (
-        None
-    )
+    metadata: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonOutput"]]
+    ] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/workflow_data.py b/sdk/agenta/client/backend/types/workflow_data.py
index aef669af24..81b144eb04 100644
--- a/sdk/agenta/client/backend/types/workflow_data.py
+++ b/sdk/agenta/client/backend/types/workflow_data.py
@@ -11,9 +11,7 @@ class WorkflowData(UniversalBaseModel):
     configuration: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/workflow_flags.py b/sdk/agenta/client/backend/types/workflow_flags.py
index 815178930a..fe1e606e5c 100644
--- a/sdk/agenta/client/backend/types/workflow_flags.py
+++ b/sdk/agenta/client/backend/types/workflow_flags.py
@@ -12,9 +12,7 @@ class WorkflowFlags(UniversalBaseModel):
     is_human: typing.Optional[bool] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/workflow_request.py b/sdk/agenta/client/backend/types/workflow_request.py
index ba7b43f283..2a00daa602 100644
--- a/sdk/agenta/client/backend/types/workflow_request.py
+++ b/sdk/agenta/client/backend/types/workflow_request.py
@@ -11,9 +11,7 @@ class WorkflowRequest(UniversalBaseModel):
     workflow: WorkflowArtifact
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/workflow_response.py b/sdk/agenta/client/backend/types/workflow_response.py
index 38edf76823..62583827bb 100644
--- a/sdk/agenta/client/backend/types/workflow_response.py
+++ b/sdk/agenta/client/backend/types/workflow_response.py
@@ -12,9 +12,7 @@ class WorkflowResponse(UniversalBaseModel):
     workflow: typing.Optional[WorkflowArtifact] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/workflow_revision.py b/sdk/agenta/client/backend/types/workflow_revision.py
index 68dd05400e..a114ebb0a8 100644
--- a/sdk/agenta/client/backend/types/workflow_revision.py
+++ b/sdk/agenta/client/backend/types/workflow_revision.py
@@ -33,9 +33,9 @@ class WorkflowRevision(UniversalBaseModel):
     slug: typing.Optional[str] = None
     id: typing.Optional[str] = None
     flags: typing.Optional[WorkflowFlags] = None
-    metadata: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = (
-        None
-    )
+    metadata: typing.Optional[
+        typing.Dict[str, typing.Optional["FullJsonOutput"]]
+    ] = None
     data: typing.Optional[WorkflowData] = None
     artifact_id: typing.Optional[str] = None
     artifact: typing.Optional[Artifact] = None
@@ -43,9 +43,7 @@ class WorkflowRevision(UniversalBaseModel):
     variant: typing.Optional[WorkflowVariant] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/workflow_revision_request.py b/sdk/agenta/client/backend/types/workflow_revision_request.py
index 96d604a712..49f78fdfb6 100644
--- a/sdk/agenta/client/backend/types/workflow_revision_request.py
+++ b/sdk/agenta/client/backend/types/workflow_revision_request.py
@@ -11,9 +11,7 @@ class WorkflowRevisionRequest(UniversalBaseModel):
     revision: WorkflowRevision
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git a/sdk/agenta/client/backend/types/workflow_revision_response.py b/sdk/agenta/client/backend/types/workflow_revision_response.py
index bbf0cf80fa..3f6169a7d8 100644
--- a/sdk/agenta/client/backend/types/workflow_revision_response.py
+++ b/sdk/agenta/client/backend/types/workflow_revision_response.py
@@ -12,9 +12,7 @@ class WorkflowRevisionResponse(UniversalBaseModel):
     revision: typing.Optional[WorkflowRevision] = None
 
     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
 
         class Config:
diff --git
a/sdk/agenta/client/backend/types/workflow_revisions_response.py b/sdk/agenta/client/backend/types/workflow_revisions_response.py index 180cd8c261..23e72a17e2 100644 --- a/sdk/agenta/client/backend/types/workflow_revisions_response.py +++ b/sdk/agenta/client/backend/types/workflow_revisions_response.py @@ -12,9 +12,7 @@ class WorkflowRevisionsResponse(UniversalBaseModel): revisions: typing.Optional[typing.List[WorkflowRevision]] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/workflow_variant.py b/sdk/agenta/client/backend/types/workflow_variant.py index a4f5bb3c6a..2cea82ae9e 100644 --- a/sdk/agenta/client/backend/types/workflow_variant.py +++ b/sdk/agenta/client/backend/types/workflow_variant.py @@ -27,16 +27,14 @@ class WorkflowVariant(UniversalBaseModel): slug: typing.Optional[str] = None id: typing.Optional[str] = None flags: typing.Optional[WorkflowFlags] = None - metadata: typing.Optional[typing.Dict[str, typing.Optional["FullJsonOutput"]]] = ( - None - ) + metadata: typing.Optional[ + typing.Dict[str, typing.Optional["FullJsonOutput"]] + ] = None artifact_id: typing.Optional[str] = None artifact: typing.Optional[WorkflowArtifact] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/workflow_variant_request.py b/sdk/agenta/client/backend/types/workflow_variant_request.py index 8eb98f1b36..78044e7682 100644 --- a/sdk/agenta/client/backend/types/workflow_variant_request.py +++ b/sdk/agenta/client/backend/types/workflow_variant_request.py @@ -11,9 +11,7 @@ class WorkflowVariantRequest(UniversalBaseModel): variant: WorkflowVariant if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/workflow_variant_response.py b/sdk/agenta/client/backend/types/workflow_variant_response.py index ba4b7221b7..1622e4709d 100644 --- a/sdk/agenta/client/backend/types/workflow_variant_response.py +++ b/sdk/agenta/client/backend/types/workflow_variant_response.py @@ -12,9 +12,7 @@ class WorkflowVariantResponse(UniversalBaseModel): variant: typing.Optional[WorkflowVariant] = None if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( - extra="allow", frozen=True - ) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: class Config: diff --git a/sdk/agenta/client/backend/types/workflow_variants_response.py b/sdk/agenta/client/backend/types/workflow_variants_response.py index 140d48a4aa..b97a378f72 100644 --- a/sdk/agenta/client/backend/types/workflow_variants_response.py +++ b/sdk/agenta/client/backend/types/workflow_variants_response.py @@ -12,9 +12,7 @@ 
     variants: typing.Optional[typing.List[WorkflowVariant]] = None

     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

         class Config:
diff --git a/sdk/agenta/client/backend/types/workflows_response.py b/sdk/agenta/client/backend/types/workflows_response.py
index e85e8940de..8bf9724d44 100644
--- a/sdk/agenta/client/backend/types/workflows_response.py
+++ b/sdk/agenta/client/backend/types/workflows_response.py
@@ -12,9 +12,7 @@ class WorkflowsResponse(UniversalBaseModel):
     workflows: typing.Optional[typing.List[WorkflowArtifact]] = None

     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

         class Config:
diff --git a/sdk/agenta/client/backend/types/workspace.py b/sdk/agenta/client/backend/types/workspace.py
index 0b368ecc4b..0cb0f3340b 100644
--- a/sdk/agenta/client/backend/types/workspace.py
+++ b/sdk/agenta/client/backend/types/workspace.py
@@ -13,9 +13,7 @@ class Workspace(UniversalBaseModel):
     type: typing.Optional[str] = None

     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

         class Config:
diff --git a/sdk/agenta/client/backend/types/workspace_member_response.py b/sdk/agenta/client/backend/types/workspace_member_response.py
index d7971468de..487d9c405d 100644
--- a/sdk/agenta/client/backend/types/workspace_member_response.py
+++ b/sdk/agenta/client/backend/types/workspace_member_response.py
@@ -12,9 +12,7 @@ class WorkspaceMemberResponse(UniversalBaseModel):
     roles: typing.List[WorkspacePermission]

     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

         class Config:
diff --git a/sdk/agenta/client/backend/types/workspace_membership_request.py b/sdk/agenta/client/backend/types/workspace_membership_request.py
index 27bad0f18f..d6767b4237 100644
--- a/sdk/agenta/client/backend/types/workspace_membership_request.py
+++ b/sdk/agenta/client/backend/types/workspace_membership_request.py
@@ -15,9 +15,7 @@ class WorkspaceMembershipRequest(UniversalBaseModel):
     workspace_ref: Reference

     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

         class Config:
diff --git a/sdk/agenta/client/backend/types/workspace_permission.py b/sdk/agenta/client/backend/types/workspace_permission.py
index c883b966ad..b493882843 100644
--- a/sdk/agenta/client/backend/types/workspace_permission.py
+++ b/sdk/agenta/client/backend/types/workspace_permission.py
@@ -14,9 +14,7 @@ class WorkspacePermission(UniversalBaseModel):
     permissions: typing.Optional[typing.List[Permission]] = None

     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

         class Config:
diff --git a/sdk/agenta/client/backend/types/workspace_request.py b/sdk/agenta/client/backend/types/workspace_request.py
index 849cde31fd..301bce0e26 100644
--- a/sdk/agenta/client/backend/types/workspace_request.py
+++ b/sdk/agenta/client/backend/types/workspace_request.py
@@ -14,9 +14,7 @@ class WorkspaceRequest(UniversalBaseModel):
     organization_ref: Reference

     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

         class Config:
diff --git a/sdk/agenta/client/backend/types/workspace_response.py b/sdk/agenta/client/backend/types/workspace_response.py
index 43c8d543ee..c79b86eb73 100644
--- a/sdk/agenta/client/backend/types/workspace_response.py
+++ b/sdk/agenta/client/backend/types/workspace_response.py
@@ -18,9 +18,7 @@ class WorkspaceResponse(UniversalBaseModel):
     members: typing.Optional[typing.List[WorkspaceMemberResponse]] = None

     if IS_PYDANTIC_V2:
-        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=True
-        )  # type: ignore # Pydantic v2
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:

         class Config:
diff --git a/sdk/agenta/client/backend/workspace/client.py b/sdk/agenta/client/backend/workspace/client.py
index 5eba1c5c32..9997c212af 100644
--- a/sdk/agenta/client/backend/workspace/client.py
+++ b/sdk/agenta/client/backend/workspace/client.py
@@ -102,7 +102,7 @@ def remove_user_from_workspace(
         workspace_id: str,
         *,
         email: str,
-        request_options: typing.Optional[RequestOptions] = None,
+        request_options: typing.Optional[RequestOptions] = None
     ) -> typing.Optional[typing.Any]:
         """
         Remove a user from a workspace.
@@ -255,7 +255,7 @@ async def remove_user_from_workspace(
         workspace_id: str,
         *,
         email: str,
-        request_options: typing.Optional[RequestOptions] = None,
+        request_options: typing.Optional[RequestOptions] = None
     ) -> typing.Optional[typing.Any]:
         """
         Remove a user from a workspace.
diff --git a/sdk/agenta/sdk/agenta_init.py b/sdk/agenta/sdk/agenta_init.py
index 612ffbf5e2..45c85a13ff 100644
--- a/sdk/agenta/sdk/agenta_init.py
+++ b/sdk/agenta/sdk/agenta_init.py
@@ -97,9 +97,9 @@ def init(
         _api_url = _host + "/api"

         try:
-            assert _host and isinstance(_host, str), (
-                "Host is required. Please provide a valid host or set AGENTA_HOST environment variable."
-            )
+            assert _host and isinstance(
+                _host, str
+            ), "Host is required. Please provide a valid host or set AGENTA_HOST environment variable."
             self.host = parse_url(url=_host)
             self.api_url = self.host + "/api"
         except AssertionError as e:
diff --git a/sdk/agenta/sdk/decorators/running.py b/sdk/agenta/sdk/decorators/running.py
index b01ee15133..0a44bcd072 100644
--- a/sdk/agenta/sdk/decorators/running.py
+++ b/sdk/agenta/sdk/decorators/running.py
@@ -53,11 +53,13 @@ class InvokeFn(Protocol):
     async def __call__(
         self,
         request: Union[WorkflowServiceRequest, dict],
-    ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]: ...
+    ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
+        ...


 class InspectFn(Protocol):
-    async def __call__(self) -> WorkflowServiceRequest: ...
+    async def __call__(self) -> WorkflowServiceRequest:
+        ...


 class Workflow:
@@ -79,7 +81,8 @@ async def invoke(
         credentials: Optional[str] = None,
         # **kwargs,
-    ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]: ...
+    ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]:
+        ...

     async def inspect(
         self,
@@ -87,7 +90,8 @@ async def inspect(
         credentials: Optional[str] = None,
         # **kwargs,
-    ) -> WorkflowServiceRequest: ...
+    ) -> WorkflowServiceRequest:
+        ...

     def __call__(self, *args, **kwargs) -> Any:
         return self._fn(*args, **kwargs)
diff --git a/sdk/agenta/sdk/decorators/serving.py b/sdk/agenta/sdk/decorators/serving.py
index 6f2d501cf7..c8683e4f63 100644
--- a/sdk/agenta/sdk/decorators/serving.py
+++ b/sdk/agenta/sdk/decorators/serving.py
@@ -656,9 +656,9 @@ def add_config_params_to_parser(
     def add_func_params_to_parser(self, updated_params: list) -> None:
         """Add function parameters to function signature."""
         for name, param in signature(self.func).parameters.items():
-            assert len(param.default.__class__.__bases__) == 1, (
-                f"Inherited standard type of {param.default.__class__} needs to be one."
-            )
+            assert (
+                len(param.default.__class__.__bases__) == 1
+            ), f"Inherited standard type of {param.default.__class__} needs to be one."
             updated_params.append(
                 Parameter(
                     name,
@@ -718,9 +718,9 @@ def openapi(self):
                         -1
                     ]  # Extract schema name
                     if schema_name in schema_name_map:
-                        content["schema"]["$ref"] = (
-                            f"#/components/schemas/{schema_name_map[schema_name]}"
-                        )
+                        content["schema"][
+                            "$ref"
+                        ] = f"#/components/schemas/{schema_name_map[schema_name]}"

             if "responses" in method:
                 for status_code, response in method["responses"].items():
@@ -734,9 +734,9 @@ def openapi(self):
                         -1
                     ]  # Extract schema name
                     if schema_name in schema_name_map:
-                        content["schema"]["$ref"] = (
-                            f"#/components/schemas/{schema_name_map[schema_name]}"
-                        )
+                        content["schema"][
+                            "$ref"
+                        ] = f"#/components/schemas/{schema_name_map[schema_name]}"

         # ✅ Update OpenAPI schema with fixed schemas
         openapi_schema["components"]["schemas"] = updated_schemas
diff --git a/sdk/agenta/sdk/evaluations/preview/evaluate.py b/sdk/agenta/sdk/evaluations/preview/evaluate.py
index af809876a4..c5e460ffb6 100644
--- a/sdk/agenta/sdk/evaluations/preview/evaluate.py
+++ b/sdk/agenta/sdk/evaluations/preview/evaluate.py
@@ -301,11 +301,13 @@ async def aevaluate(

     print()
     print(
-        "────────────────────────────────────────────────────────────────────────────"
+        "──────────────────────────────────────"
+        "──────────────────────────────────────"
    )
     print(f"Evaluation running...")
     print(
-        "────────────────────────────────────────────────────────────────────────────"
+        "──────────────────────────────────────"
+        "──────────────────────────────────────"
     )

     suffix = _timestamp_suffix()
@@ -375,7 +377,7 @@ async def aevaluate(
         print(
             f"{UNICODE['pipe']}"
-            f"{UNICODE['next' if testcase_idx < len(testcases) - 1 else 'last']}"
+            f"{UNICODE['next'if testcase_idx < len(testcases) - 1 else 'last']}"
             f"{UNICODE['here']}"
             f"{UNICODE['skip']}"
             f"{UNICODE['skip']}"
@@ -746,15 +748,18 @@ async def aevaluate(
     run_url = await aget_url(run_id=run.id)

     print(
-        "────────────────────────────────────────────────────────────────────────────"
+        "──────────────────────────────────────"
+        "──────────────────────────────────────"
     )
     print(f"Evaluation finished.")
     print(
-        "----------------------------------------------------------------------------"
+        "--------------------------------------"
+        "--------------------------------------"
     )
     print(f"Evaluation URL: {run_url or '[unavailable]'}")
     print(
-        "────────────────────────────────────────────────────────────────────────────"
+        "──────────────────────────────────────"
+        "──────────────────────────────────────"
     )

     print()
diff --git a/sdk/agenta/sdk/evaluations/preview/utils.py b/sdk/agenta/sdk/evaluations/preview/utils.py
index c6b1e72f98..609ecfce47 100644
--- a/sdk/agenta/sdk/evaluations/preview/utils.py
+++ b/sdk/agenta/sdk/evaluations/preview/utils.py
@@ -191,7 +191,7 @@ def smart_format_content(content: Any, max_length: int = 200) -> str:
         if len(content) <= max_length:
             return content
         else:
-            return f"{content[: max_length - 3]}..."
+            return f"{content[:max_length-3]}..."

     if isinstance(content, (dict, list)):
         try:
@@ -213,7 +213,7 @@ def smart_format_content(content: Any, max_length: int = 200) -> str:
             if len(full_preview) <= max_length:
                 return full_preview
             else:
-                return f"{full_preview[: max_length - 3]}..."
+                return f"{full_preview[:max_length-3]}..."
         else:  # list
             count = len(content)
             item_preview = (
@@ -411,7 +411,7 @@ def make_row(values, widths, left="┃", mid="┃", right="┃"):
         if "\n" in val_str:
             # Take first line for table display
             val_str = val_str.split("\n")[0]
-        formatted.append(f" {val_str:<{width - 2}} ")
+        formatted.append(f" {val_str:<{width-2}} ")
     return left + mid.join(formatted) + right

     # Responsive column widths
@@ -541,7 +541,7 @@ async def display_evaluation_results(
    for i, scenario in enumerate(scenario_iterator):
        if not RICH_AVAILABLE and show_detailed_logs:
            print(
-                f"  📄 scenario {i + 1}/{len(scenarios_to_process)}: {scenario['scenario'].id}"
+                f"  📄 scenario {i+1}/{len(scenarios_to_process)}: {scenario['scenario'].id}"
            )  # type:ignore
        elif show_detailed_logs:
            print(f"  scenario_id={scenario['scenario'].id}")  # type:ignore
diff --git a/sdk/agenta/sdk/litellm/mockllm.py b/sdk/agenta/sdk/litellm/mockllm.py
index 249505be82..01b9fecff2 100644
--- a/sdk/agenta/sdk/litellm/mockllm.py
+++ b/sdk/agenta/sdk/litellm/mockllm.py
@@ -67,7 +67,8 @@ def user_aws_credentials_from(ps: dict):


 class LitellmProtocol(Protocol):
-    async def acompletion(self, *args: Any, **kwargs: Any) -> Any: ...
+    async def acompletion(self, *args: Any, **kwargs: Any) -> Any:
+        ...


 async def acompletion(*args, **kwargs):
diff --git a/sdk/agenta/sdk/managers/shared.py b/sdk/agenta/sdk/managers/shared.py
index 99fc5cba32..0c214e408a 100644
--- a/sdk/agenta/sdk/managers/shared.py
+++ b/sdk/agenta/sdk/managers/shared.py
@@ -478,9 +478,7 @@ def commit(
             config=ConfigRequest(
                 params=parameters,
                 variant_ref=variant_ref.model_dump() if variant_ref else None,  # type: ignore
-                application_ref=application_ref.model_dump()
-                if application_ref
-                else None,  # type: ignore
+                application_ref=application_ref.model_dump() if application_ref else None,  # type: ignore
             )
         )
diff --git a/sdk/agenta/sdk/middlewares/running/normalizer.py b/sdk/agenta/sdk/middlewares/running/normalizer.py
index cec3cda676..e3fdd2acdd 100644
--- a/sdk/agenta/sdk/middlewares/running/normalizer.py
+++ b/sdk/agenta/sdk/middlewares/running/normalizer.py
@@ -106,10 +106,7 @@ async def _normalize_request(
     async def _normalize_response(
         self,
         result: Any,
-    ) -> Union[
-        WorkflowServiceBatchResponse,
-        WorkflowServiceStreamResponse,
-    ]:
+    ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse,]:
         if isawaitable(result):
             result = await result
diff --git a/sdk/agenta/sdk/tracing/exporters.py b/sdk/agenta/sdk/tracing/exporters.py
index a121bd857a..c156d7d906 100644
--- a/sdk/agenta/sdk/tracing/exporters.py
+++ b/sdk/agenta/sdk/tracing/exporters.py
@@ -24,7 +24,7 @@
 log = get_module_logger(__name__)

-_ASYNC_EXPORT = environ.get("AGENTA_OTLP_ASYNC_EXPORT", "true").lower() in TRUTHY
+_ASYNC_EXPORT = environ.get("AGENTA_OTLP_ASYNC_EXPORT", "false").lower() in TRUTHY


 class InlineTraceExporter(SpanExporter):
diff --git a/sdk/agenta/sdk/workflows/handlers.py b/sdk/agenta/sdk/workflows/handlers.py
index 7216761897..2a72fe20eb 100644
--- a/sdk/agenta/sdk/workflows/handlers.py
+++ b/sdk/agenta/sdk/workflows/handlers.py
@@ -511,24 +511,20 @@ def field_match_test_v0(
     correct_answer = inputs[correct_answer_key]

     if not isinstance(outputs, str) and not isinstance(outputs, dict):
-        # raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-        return {"success": False}
+        raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)

     outputs_dict = outputs

     if isinstance(outputs, str):
         try:
             outputs_dict = loads(outputs)
         except json.JSONDecodeError as e:
-            # raise InvalidOutputsV0Error(expected="dict", got=outputs) from e
-            return {"success": False}
+            raise InvalidOutputsV0Error(expected="dict", got=outputs) from e

     if not isinstance(outputs_dict, dict):
-        # raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
-        return {"success": False}
+        raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)

     if not json_field in outputs_dict:
-        # raise MissingOutputV0Error(path=json_field)
-        return {"success": False}
+        raise MissingOutputV0Error(path=json_field)

     # --------------------------------------------------------------------------
     success = outputs_dict[json_field] == correct_answer
@@ -780,9 +776,7 @@ async def auto_ai_critique_v0(
             got=model,
         )

-    response_type = parameters.get("response_type") or (
-        "json_schema" if template_version == "4" else "text"
-    )
+    response_type = parameters.get("response_type") or "text"

     if not response_type in ["text", "json_object", "json_schema"]:
         raise InvalidConfigurationParameterV0Error(
diff --git a/sdk/poetry.lock b/sdk/poetry.lock
index dce11a6848..9108c002a1 100644
--- a/sdk/poetry.lock
+++ b/sdk/poetry.lock
@@ -228,18 +228,18 @@ files = [

 [[package]]
 name = "boto3"
-version = "1.40.69"
+version = "1.40.68"
 description = "The AWS SDK for Python"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "boto3-1.40.69-py3-none-any.whl", hash = "sha256:c3f710a1990c4be1c0db43b938743d4e404c7f1f06d5f1fa0c8e9b1cea4290b2"},
-    {file = "boto3-1.40.69.tar.gz", hash = "sha256:5273f6bac347331a87db809dff97d8736c50c3be19f2bb36ad08c5131c408976"},
+    {file = "boto3-1.40.68-py3-none-any.whl", hash = "sha256:4f08115e3a4d1e1056003e433d393e78c20da6af7753409992bb33fb69f04186"},
+    {file = "boto3-1.40.68.tar.gz", hash = "sha256:c7994989e5bbba071b7c742adfba35773cf03e87f5d3f9f2b0a18c1664417b61"},
 ]

 [package.dependencies]
-botocore = ">=1.40.69,<1.41.0"
+botocore = ">=1.40.68,<1.41.0"
 jmespath = ">=0.7.1,<2.0.0"
 s3transfer = ">=0.14.0,<0.15.0"

@@ -248,14 +248,14 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]

 [[package]]
 name = "botocore"
-version = "1.40.69"
+version = "1.40.68"
 description = "Low-level, data-driven core of boto 3."
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "botocore-1.40.69-py3-none-any.whl", hash = "sha256:5d810efeb9e18f91f32690642fa81ae60e482eefeea0d35ec72da2e3d924c1a5"},
-    {file = "botocore-1.40.69.tar.gz", hash = "sha256:df310ddc4d2de5543ba3df4e4b5f9907a2951896d63a9fbae115c26ca0976951"},
+    {file = "botocore-1.40.68-py3-none-any.whl", hash = "sha256:9d514f9c9054e1af055f2cbe9e0d6771d407a600206d45a01b54d5f09538fecb"},
+    {file = "botocore-1.40.68.tar.gz", hash = "sha256:28f41b463d9f012a711ee8b61d4e26cd14ee3b450b816d5dee849aa79155e856"},
 ]

 [package.dependencies]
@@ -993,114 +993,114 @@ i18n = ["Babel (>=2.7)"]

 [[package]]
 name = "jiter"
-version = "0.12.0"
+version = "0.11.1"
 description = "Fast iterable JSON parser."
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "jiter-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e7acbaba9703d5de82a2c98ae6a0f59ab9770ab5af5fa35e43a303aee962cf65"}, - {file = "jiter-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:364f1a7294c91281260364222f535bc427f56d4de1d8ffd718162d21fbbd602e"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ee4d25805d4fb23f0a5167a962ef8e002dbfb29c0989378488e32cf2744b62"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:796f466b7942107eb889c08433b6e31b9a7ed31daceaecf8af1be26fb26c0ca8"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35506cb71f47dba416694e67af996bbdefb8e3608f1f78799c2e1f9058b01ceb"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:726c764a90c9218ec9e4f99a33d6bf5ec169163f2ca0fc21b654e88c2abc0abc"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa47810c5565274810b726b0dc86d18dce5fd17b190ebdc3890851d7b2a0e74"}, - {file = "jiter-0.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ec0259d3f26c62aed4d73b198c53e316ae11f0f69c8fbe6682c6dcfa0fcce2"}, - {file = "jiter-0.12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:79307d74ea83465b0152fa23e5e297149506435535282f979f18b9033c0bb025"}, - {file = "jiter-0.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cf6e6dd18927121fec86739f1a8906944703941d000f0639f3eb6281cc601dca"}, - {file = "jiter-0.12.0-cp310-cp310-win32.whl", hash = "sha256:b6ae2aec8217327d872cbfb2c1694489057b9433afce447955763e6ab015b4c4"}, - {file = "jiter-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c7f49ce90a71e44f7e1aa9e7ec415b9686bbc6a5961e57eab511015e6759bc11"}, - {file = "jiter-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8f8a7e317190b2c2d60eb2e8aa835270b008139562d70fe732e1c0020ec53c9"}, - {file = "jiter-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2218228a077e784c6c8f1a8e5d6b8cb1dea62ce25811c356364848554b2056cd"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9354ccaa2982bf2188fd5f57f79f800ef622ec67beb8329903abf6b10da7d423"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f2607185ea89b4af9a604d4c7ec40e45d3ad03ee66998b031134bc510232bb7"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a585a5e42d25f2e71db5f10b171f5e5ea641d3aa44f7df745aa965606111cc2"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd9e21d34edff5a663c631f850edcb786719c960ce887a5661e9c828a53a95d9"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a612534770470686cd5431478dc5a1b660eceb410abade6b1b74e320ca98de6"}, - {file = "jiter-0.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3985aea37d40a908f887b34d05111e0aae822943796ebf8338877fee2ab67725"}, - {file = "jiter-0.12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b1207af186495f48f72529f8d86671903c8c10127cac6381b11dddc4aaa52df6"}, - {file = "jiter-0.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef2fb241de583934c9915a33120ecc06d94aa3381a134570f59eed784e87001e"}, - {file = "jiter-0.12.0-cp311-cp311-win32.whl", 
hash = "sha256:453b6035672fecce8007465896a25b28a6b59cfe8fbc974b2563a92f5a92a67c"}, - {file = "jiter-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:ca264b9603973c2ad9435c71a8ec8b49f8f715ab5ba421c85a51cde9887e421f"}, - {file = "jiter-0.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:cb00ef392e7d684f2754598c02c409f376ddcef857aae796d559e6cacc2d78a5"}, - {file = "jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37"}, - {file = "jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403"}, - {file = "jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126"}, - {file = "jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9"}, - {file = "jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86"}, - {file = "jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44"}, - {file = "jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb"}, - {file = "jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789"}, - {file = "jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e"}, - {file = "jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed"}, - {file = "jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9"}, - {file = "jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626"}, - {file = "jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c"}, - {file = "jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de"}, - {file = "jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a"}, - {file = "jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60"}, - {file = "jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6"}, - {file = "jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4"}, - {file = "jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb"}, - {file = "jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7"}, - {file = "jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3"}, - {file = "jiter-0.12.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c46d927acd09c67a9fb1416df45c5a04c27e83aae969267e98fba35b74e99525"}, - {file = "jiter-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:774ff60b27a84a85b27b88cd5583899c59940bcc126caca97eb2a9df6aa00c49"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5433fab222fb072237df3f637d01b81f040a07dcac1cb4a5c75c7aa9ed0bef1"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8c593c6e71c07866ec6bfb790e202a833eeec885022296aff6b9e0b92d6a70e"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90d32894d4c6877a87ae00c6b915b609406819dce8bc0d4e962e4de2784e567e"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:798e46eed9eb10c3adbbacbd3bdb5ecd4cf7064e453d00dbef08802dae6937ff"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3f1368f0a6719ea80013a4eb90ba72e75d7ea67cfc7846db2ca504f3df0169a"}, - {file = "jiter-0.12.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65f04a9d0b4406f7e51279710b27484af411896246200e461d80d3ba0caa901a"}, - {file = "jiter-0.12.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:fd990541982a24281d12b67a335e44f117e4c6cbad3c3b75c7dea68bf4ce3a67"}, - {file = "jiter-0.12.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:b111b0e9152fa7df870ecaebb0bd30240d9f7fff1f2003bcb4ed0f519941820b"}, - {file = "jiter-0.12.0-cp314-cp314-win32.whl", hash = "sha256:a78befb9cc0a45b5a5a0d537b06f8544c2ebb60d19d02c41ff15da28a9e22d42"}, - {file = "jiter-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:e1fe01c082f6aafbe5c8faf0ff074f38dfb911d53f07ec333ca03f8f6226debf"}, - {file = "jiter-0.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:d72f3b5a432a4c546ea4bedc84cce0c3404874f1d1676260b9c7f048a9855451"}, - {file = 
"jiter-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e6ded41aeba3603f9728ed2b6196e4df875348ab97b28fc8afff115ed42ba7a7"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a947920902420a6ada6ad51892082521978e9dd44a802663b001436e4b771684"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:add5e227e0554d3a52cf390a7635edaffdf4f8fce4fdbcef3cc2055bb396a30c"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9b1cda8fcb736250d7e8711d4580ebf004a46771432be0ae4796944b5dfa5d"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb12a2223fe0135c7ff1356a143d57f95bbf1f4a66584f1fc74df21d86b993"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c596cc0f4cb574877550ce4ecd51f8037469146addd676d7c1a30ebe6391923f"}, - {file = "jiter-0.12.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ab4c823b216a4aeab3fdbf579c5843165756bd9ad87cc6b1c65919c4715f783"}, - {file = "jiter-0.12.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e427eee51149edf962203ff8db75a7514ab89be5cb623fb9cea1f20b54f1107b"}, - {file = "jiter-0.12.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:edb868841f84c111255ba5e80339d386d937ec1fdce419518ce1bd9370fac5b6"}, - {file = "jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183"}, - {file = "jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873"}, - {file = "jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473"}, - {file = "jiter-0.12.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c9d28b218d5f9e5f69a0787a196322a5056540cb378cac8ff542b4fa7219966c"}, - {file = "jiter-0.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0ee12028daf8cfcf880dd492349a122a64f42c059b6c62a2b0c96a83a8da820"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b135ebe757a82d67ed2821526e72d0acf87dd61f6013e20d3c45b8048af927b"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15d7fafb81af8a9e3039fc305529a61cd933eecee33b4251878a1c89859552a3"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92d1f41211d8a8fe412faad962d424d334764c01dac6691c44691c2e4d3eedaf"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a64a48d7c917b8f32f25c176df8749ecf08cec17c466114727efe7441e17f6d"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:122046f3b3710b85de99d9aa2f3f0492a8233a2f54a64902b096efc27ea747b5"}, - {file = "jiter-0.12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:27ec39225e03c32c6b863ba879deb427882f243ae46f0d82d68b695fa5b48b40"}, - {file = "jiter-0.12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26b9e155ddc132225a39b1995b3b9f0fe0f79a6d5cbbeacf103271e7d309b404"}, - {file = "jiter-0.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ab05b7c58e29bb9e60b70c2e0094c98df79a1e42e397b9bb6eaa989b7a66dd0"}, - {file = "jiter-0.12.0-cp39-cp39-win32.whl", hash = "sha256:59f9f9df87ed499136db1c2b6c9efb902f964bed42a582ab7af413b6a293e7b0"}, - {file = 
"jiter-0.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:d3719596a1ebe7a48a498e8d5d0c4bf7553321d4c3eee1d620628d51351a3928"}, - {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:4739a4657179ebf08f85914ce50332495811004cc1747852e8b2041ed2aab9b8"}, - {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:41da8def934bf7bec16cb24bd33c0ca62126d2d45d81d17b864bd5ad721393c3"}, - {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c44ee814f499c082e69872d426b624987dbc5943ab06e9bbaa4f81989fdb79e"}, - {file = "jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2097de91cf03eaa27b3cbdb969addf83f0179c6afc41bbc4513705e013c65d"}, - {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb"}, - {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b"}, - {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f"}, - {file = "jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c"}, - {file = "jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b"}, + {file = "jiter-0.11.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ed58841a491bbbf3f7c55a6b68fff568439ab73b2cce27ace0e169057b5851df"}, + {file = "jiter-0.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:499beb9b2d7e51d61095a8de39ebcab1d1778f2a74085f8305a969f6cee9f3e4"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b87b2821795e28cc990939b68ce7a038edea680a24910bd68a79d54ff3f03c02"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:83f6fa494d8bba14ab100417c80e70d32d737e805cb85be2052d771c76fcd1f8"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fbc6aea1daa2ec6f5ed465f0c5e7b0607175062ceebbea5ca70dd5ddab58083"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:302288e2edc43174bb2db838e94688d724f9aad26c5fb9a74f7a5fb427452a6a"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85db563fe3b367bb568af5d29dea4d4066d923b8e01f3417d25ebecd958de815"}, + {file = "jiter-0.11.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f1c1ba2b6b22f775444ef53bc2d5778396d3520abc7b2e1da8eb0c27cb3ffb10"}, + {file = "jiter-0.11.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:523be464b14f8fd0cc78da6964b87b5515a056427a2579f9085ce30197a1b54a"}, + {file = "jiter-0.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:25b99b3f04cd2a38fefb22e822e35eb203a2cd37d680dbbc0c0ba966918af336"}, + {file = "jiter-0.11.1-cp310-cp310-win32.whl", hash = "sha256:47a79e90545a596bb9104109777894033347b11180d4751a216afef14072dbe7"}, + {file = "jiter-0.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:cace75621ae9bd66878bf69fbd4dfc1a28ef8661e0c2d0eb72d3d6f1268eddf5"}, + {file = 
"jiter-0.11.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9b0088ff3c374ce8ce0168523ec8e97122ebb788f950cf7bb8e39c7dc6a876a2"}, + {file = "jiter-0.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:74433962dd3c3090655e02e461267095d6c84f0741c7827de11022ef8d7ff661"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d98030e345e6546df2cc2c08309c502466c66c4747b043f1a0d415fada862b8"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d6db0b2e788db46bec2cf729a88b6dd36959af2abd9fa2312dfba5acdd96dcb"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55678fbbda261eafe7289165dd2ddd0e922df5f9a1ae46d7c79a5a15242bd7d1"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a6b74fae8e40497653b52ce6ca0f1b13457af769af6fb9c1113efc8b5b4d9be"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a55a453f8b035eb4f7852a79a065d616b7971a17f5e37a9296b4b38d3b619e4"}, + {file = "jiter-0.11.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2638148099022e6bdb3f42904289cd2e403609356fb06eb36ddec2d50958bc29"}, + {file = "jiter-0.11.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:252490567a5d990986f83b95a5f1ca1bf205ebd27b3e9e93bb7c2592380e29b9"}, + {file = "jiter-0.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d431d52b0ca2436eea6195f0f48528202100c7deda354cb7aac0a302167594d5"}, + {file = "jiter-0.11.1-cp311-cp311-win32.whl", hash = "sha256:db6f41e40f8bae20c86cb574b48c4fd9f28ee1c71cb044e9ec12e78ab757ba3a"}, + {file = "jiter-0.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:0cc407b8e6cdff01b06bb80f61225c8b090c3df108ebade5e0c3c10993735b19"}, + {file = "jiter-0.11.1-cp311-cp311-win_arm64.whl", hash = "sha256:fe04ea475392a91896d1936367854d346724a1045a247e5d1c196410473b8869"}, + {file = "jiter-0.11.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c92148eec91052538ce6823dfca9525f5cfc8b622d7f07e9891a280f61b8c96c"}, + {file = "jiter-0.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ecd4da91b5415f183a6be8f7158d127bdd9e6a3174138293c0d48d6ea2f2009d"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7e3ac25c00b9275684d47aa42febaa90a9958e19fd1726c4ecf755fbe5e553b"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57d7305c0a841858f866cd459cd9303f73883fb5e097257f3d4a3920722c69d4"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e86fa10e117dce22c547f31dd6d2a9a222707d54853d8de4e9a2279d2c97f239"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae5ef1d48aec7e01ee8420155d901bb1d192998fa811a65ebb82c043ee186711"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb68e7bf65c990531ad8715e57d50195daf7c8e6f1509e617b4e692af1108939"}, + {file = "jiter-0.11.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43b30c8154ded5845fa454ef954ee67bfccce629b2dea7d01f795b42bc2bda54"}, + {file = "jiter-0.11.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:586cafbd9dd1f3ce6a22b4a085eaa6be578e47ba9b18e198d4333e598a91db2d"}, + {file = "jiter-0.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:677cc2517d437a83bb30019fd4cf7cad74b465914c56ecac3440d597ac135250"}, + {file = "jiter-0.11.1-cp312-cp312-win32.whl", hash = "sha256:fa992af648fcee2b850a3286a35f62bbbaeddbb6dbda19a00d8fbc846a947b6e"}, + {file = "jiter-0.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:88b5cae9fa51efeb3d4bd4e52bfd4c85ccc9cac44282e2a9640893a042ba4d87"}, + {file = "jiter-0.11.1-cp312-cp312-win_arm64.whl", hash = "sha256:9a6cae1ab335551917f882f2c3c1efe7617b71b4c02381e4382a8fc80a02588c"}, + {file = "jiter-0.11.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:71b6a920a5550f057d49d0e8bcc60945a8da998019e83f01adf110e226267663"}, + {file = "jiter-0.11.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b3de72e925388453a5171be83379549300db01284f04d2a6f244d1d8de36f94"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc19dd65a2bd3d9c044c5b4ebf657ca1e6003a97c0fc10f555aa4f7fb9821c00"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d58faaa936743cd1464540562f60b7ce4fd927e695e8bc31b3da5b914baa9abd"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:902640c3103625317291cb73773413b4d71847cdf9383ba65528745ff89f1d14"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:30405f726e4c2ed487b176c09f8b877a957f535d60c1bf194abb8dadedb5836f"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3217f61728b0baadd2551844870f65219ac4a1285d5e1a4abddff3d51fdabe96"}, + {file = "jiter-0.11.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1364cc90c03a8196f35f396f84029f12abe925415049204446db86598c8b72c"}, + {file = "jiter-0.11.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:53a54bf8e873820ab186b2dca9f6c3303f00d65ae5e7b7d6bda1b95aa472d646"}, + {file = "jiter-0.11.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7e29aca023627b0e0c2392d4248f6414d566ff3974fa08ff2ac8dbb96dfee92a"}, + {file = "jiter-0.11.1-cp313-cp313-win32.whl", hash = "sha256:f153e31d8bca11363751e875c0a70b3d25160ecbaee7b51e457f14498fb39d8b"}, + {file = "jiter-0.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:f773f84080b667c69c4ea0403fc67bb08b07e2b7ce1ef335dea5868451e60fed"}, + {file = "jiter-0.11.1-cp313-cp313-win_arm64.whl", hash = "sha256:635ecd45c04e4c340d2187bcb1cea204c7cc9d32c1364d251564bf42e0e39c2d"}, + {file = "jiter-0.11.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d892b184da4d94d94ddb4031296931c74ec8b325513a541ebfd6dfb9ae89904b"}, + {file = "jiter-0.11.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa22c223a3041dacb2fcd37c70dfd648b44662b4a48e242592f95bda5ab09d58"}, + {file = "jiter-0.11.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:330e8e6a11ad4980cd66a0f4a3e0e2e0f646c911ce047014f984841924729789"}, + {file = "jiter-0.11.1-cp313-cp313t-win_amd64.whl", hash = "sha256:09e2e386ebf298547ca3a3704b729471f7ec666c2906c5c26c1a915ea24741ec"}, + {file = "jiter-0.11.1-cp313-cp313t-win_arm64.whl", hash = "sha256:fe4a431c291157e11cee7c34627990ea75e8d153894365a3bc84b7a959d23ca8"}, + {file = "jiter-0.11.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:0fa1f70da7a8a9713ff8e5f75ec3f90c0c870be6d526aa95e7c906f6a1c8c676"}, + {file = "jiter-0.11.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:569ee559e5046a42feb6828c55307cf20fe43308e3ae0d8e9e4f8d8634d99944"}, + {file = 
"jiter-0.11.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f69955fa1d92e81987f092b233f0be49d4c937da107b7f7dcf56306f1d3fcce9"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:090f4c9d4a825e0fcbd0a2647c9a88a0f366b75654d982d95a9590745ff0c48d"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbf3d8cedf9e9d825233e0dcac28ff15c47b7c5512fdfe2e25fd5bbb6e6b0cee"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2aa9b1958f9c30d3d1a558b75f0626733c60eb9b7774a86b34d88060be1e67fe"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42d1ca16590b768c5e7d723055acd2633908baacb3628dd430842e2e035aa90"}, + {file = "jiter-0.11.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5db4c2486a023820b701a17aec9c5a6173c5ba4393f26662f032f2de9c848b0f"}, + {file = "jiter-0.11.1-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:4573b78777ccfac954859a6eff45cbd9d281d80c8af049d0f1a3d9fc323d5c3a"}, + {file = "jiter-0.11.1-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:7593ac6f40831d7961cb67633c39b9fef6689a211d7919e958f45710504f52d3"}, + {file = "jiter-0.11.1-cp314-cp314-win32.whl", hash = "sha256:87202ec6ff9626ff5f9351507def98fcf0df60e9a146308e8ab221432228f4ea"}, + {file = "jiter-0.11.1-cp314-cp314-win_amd64.whl", hash = "sha256:a5dd268f6531a182c89d0dd9a3f8848e86e92dfff4201b77a18e6b98aa59798c"}, + {file = "jiter-0.11.1-cp314-cp314-win_arm64.whl", hash = "sha256:5d761f863f912a44748a21b5c4979c04252588ded8d1d2760976d2e42cd8d991"}, + {file = "jiter-0.11.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2cc5a3965285ddc33e0cab933e96b640bc9ba5940cea27ebbbf6695e72d6511c"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b572b3636a784c2768b2342f36a23078c8d3aa6d8a30745398b1bab58a6f1a8"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad93e3d67a981f96596d65d2298fe8d1aa649deb5374a2fb6a434410ee11915e"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a83097ce379e202dcc3fe3fc71a16d523d1ee9192c8e4e854158f96b3efe3f2f"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7042c51e7fbeca65631eb0c332f90c0c082eab04334e7ccc28a8588e8e2804d9"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a68d679c0e47649a61df591660507608adc2652442de7ec8276538ac46abe08"}, + {file = "jiter-0.11.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a1b0da75dbf4b6ec0b3c9e604d1ee8beaf15bc046fff7180f7d89e3cdbd3bb51"}, + {file = "jiter-0.11.1-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:69dd514bf0fa31c62147d6002e5ca2b3e7ef5894f5ac6f0a19752385f4e89437"}, + {file = "jiter-0.11.1-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:bb31ac0b339efa24c0ca606febd8b77ef11c58d09af1b5f2be4c99e907b11111"}, + {file = "jiter-0.11.1-cp314-cp314t-win32.whl", hash = "sha256:b2ce0d6156a1d3ad41da3eec63b17e03e296b78b0e0da660876fccfada86d2f7"}, + {file = "jiter-0.11.1-cp314-cp314t-win_amd64.whl", hash = "sha256:f4db07d127b54c4a2d43b4cf05ff0193e4f73e0dd90c74037e16df0b29f666e1"}, + {file = "jiter-0.11.1-cp314-cp314t-win_arm64.whl", hash = "sha256:28e4fdf2d7ebfc935523e50d1efa3970043cfaa161674fe66f9642409d001dfe"}, + {file = 
"jiter-0.11.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:baa99c8db49467527658bb479857344daf0a14dff909b7f6714579ac439d1253"}, + {file = "jiter-0.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:860fe55fa3b01ad0edf2adde1098247ff5c303d0121f9ce028c03d4f88c69502"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:173dd349d99b6feaf5a25a6fbcaf3489a6f947708d808240587a23df711c67db"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:14ac1dca837514cc946a6ac2c4995d9695303ecc754af70a3163d057d1a444ab"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69af47de5f93a231d5b85f7372d3284a5be8edb4cc758f006ec5a1406965ac5e"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:685f8b3abd3bbd3e06e4dfe2429ff87fd5d7a782701151af99b1fcbd80e31b2b"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d04afa2d4e5526e54ae8a58feea953b1844bf6e3526bc589f9de68e86d0ea01"}, + {file = "jiter-0.11.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1e92b927259035b50d8e11a8fdfe0ebd014d883e4552d37881643fa289a4bcf1"}, + {file = "jiter-0.11.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e7bd8be4fad8d4c5558b7801770cd2da6c072919c6f247cc5336edb143f25304"}, + {file = "jiter-0.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:121381a77a3c85987f3eba0d30ceaca9116f7463bedeec2fa79b2e7286b89b60"}, + {file = "jiter-0.11.1-cp39-cp39-win32.whl", hash = "sha256:160225407f6dfabdf9be1b44e22f06bc293a78a28ffa4347054698bd712dad06"}, + {file = "jiter-0.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:028e0d59bcdfa1079f8df886cdaefc6f515c27a5288dec956999260c7e4a7cfd"}, + {file = "jiter-0.11.1-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:e642b5270e61dd02265866398707f90e365b5db2eb65a4f30c789d826682e1f6"}, + {file = "jiter-0.11.1-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:464ba6d000585e4e2fd1e891f31f1231f497273414f5019e27c00a4b8f7a24ad"}, + {file = "jiter-0.11.1-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:055568693ab35e0bf3a171b03bb40b2dcb10352359e0ab9b5ed0da2bf1eb6f6f"}, + {file = "jiter-0.11.1-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0c69ea798d08a915ba4478113efa9e694971e410056392f4526d796f136d3fa"}, + {file = "jiter-0.11.1-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:0d4d6993edc83cf75e8c6828a8d6ce40a09ee87e38c7bfba6924f39e1337e21d"}, + {file = "jiter-0.11.1-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:f78d151c83a87a6cf5461d5ee55bc730dd9ae227377ac6f115b922989b95f838"}, + {file = "jiter-0.11.1-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9022974781155cd5521d5cb10997a03ee5e31e8454c9d999dcdccd253f2353f"}, + {file = "jiter-0.11.1-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18c77aaa9117510d5bdc6a946baf21b1f0cfa58ef04d31c8d016f206f2118960"}, + {file = "jiter-0.11.1.tar.gz", hash = "sha256:849dcfc76481c0ea0099391235b7ca97d7279e0fa4c86005457ac7c88e8b76dc"}, ] [[package]] diff --git a/sdk/pyproject.toml b/sdk/pyproject.toml index 00e85df3dc..40e602ec35 100644 --- a/sdk/pyproject.toml +++ b/sdk/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "agenta" 
-version = "0.62.1" +version = "0.61.0" description = "The SDK for agenta is an open-source LLMOps platform." readme = "README.md" authors = [ diff --git a/sdk/tests/legacy/management/variant/test_variant_manager.py b/sdk/tests/legacy/management/variant/test_variant_manager.py index 1d555a0db4..660f105cbc 100644 --- a/sdk/tests/legacy/management/variant/test_variant_manager.py +++ b/sdk/tests/legacy/management/variant/test_variant_manager.py @@ -24,9 +24,9 @@ async def test_configs_add_success(self, http_client, get_completion_app_from_li ) # ASSERT: Verify response - assert response.status_code == 200, ( - f"Failed to add config for variant {test_variant_slug}" - ) + assert ( + response.status_code == 200 + ), f"Failed to add config for variant {test_variant_slug}" response_data = response.json() assert "params" in response_data, "Response missing 'params'" assert "url" in response_data, "Response missing 'url'" @@ -55,9 +55,9 @@ async def test_configs_add_duplicate( # ASSERT: Verify error response for duplicate assert response.status_code == 400, "Expected 400 error for duplicate config" - assert response.json()["detail"] == "Config already exists.", ( - "Incorrect error message for duplicate config" - ) + assert ( + response.json()["detail"] == "Config already exists." + ), "Incorrect error message for duplicate config" @pytest.mark.asyncio @pytest.mark.variant_manager @@ -79,12 +79,12 @@ async def test_configs_nonexistent_app(self, http_client): ) # ASSERT: Verify error response for non-existent application - assert response.status_code == 404, ( - "Expected 404 error for non-existent application" - ) - assert response.json()["detail"] == "Config not found.", ( - "Incorrect error message for non-existent application" - ) + assert ( + response.status_code == 404 + ), "Expected 404 error for non-existent application" + assert ( + response.json()["detail"] == "Config not found." 
+ ), "Incorrect error message for non-existent application" @pytest.mark.asyncio @pytest.mark.variant_manager diff --git a/sdk/tests/legacy/new_tests/admin/tests.py b/sdk/tests/legacy/new_tests/admin/tests.py index 27d4aeb883..356aa29581 100644 --- a/sdk/tests/legacy/new_tests/admin/tests.py +++ b/sdk/tests/legacy/new_tests/admin/tests.py @@ -16,9 +16,9 @@ async def test_api_authentication_missing_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -35,9 +35,9 @@ async def test_api_authentication_unsupported_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -54,6 +54,6 @@ async def test_api_authentication_invalid_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/sdk/tests/legacy/new_tests/apps/tests.py b/sdk/tests/legacy/new_tests/apps/tests.py index 60b77aba13..5eb076f323 100644 --- a/sdk/tests/legacy/new_tests/apps/tests.py +++ b/sdk/tests/legacy/new_tests/apps/tests.py @@ -39,12 +39,12 @@ async def test_create_without_default_params(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) - assert response_data["app_name"] == app_data["app_name"], ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" + assert ( + response_data["app_name"] == app_data["app_name"] + ), f"Failed for case: {description}" # Cleanup: Remove application await delete_application(http_client, response_data["app_id"], headers) @@ -64,9 +64,9 @@ async def test_create_invalid_params(self, http_client): response = await http_client.post("/apps", json=app_data, headers=headers) # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -86,9 +86,9 @@ async def test_create_conflicts(self, http_client): response = await http_client.post("/apps", json=app_data, headers=headers) # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup: Remove application app_cleanup_response = await http_client.get("/apps", headers=headers) @@ -120,9 +120,9 @@ async def test_permissions_principal_not_in_scope_post(self, http_client): ) # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: 
{description}" @pytest.mark.asyncio @pytest.mark.typical @@ -213,9 +213,9 @@ async def test_list_query_filter_no_element(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == len(elements), f"Failed for case: {description}" @pytest.mark.asyncio @@ -236,9 +236,9 @@ async def test_list_query_filter_one_element(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 1, f"Failed for case: {description}" # Cleanup: Remove application @@ -263,9 +263,9 @@ async def test_list_query_filter_many_elements_small_data(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 3, f"Failed for case: {description}" # Cleanup: Remove applications @@ -291,9 +291,9 @@ async def test_list_query_filter_many_elements_big_data(self, http_client): response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 6, f"Failed for case: {description}" # Cleanup: Remove applications @@ -326,9 +326,9 @@ async def test_permissions_principal_not_in_scope(self, http_client): ) # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup: Delete the application with valid principal await delete_application(http_client, app["app_id"], owner_headers) @@ -361,9 +361,9 @@ async def test_permissions_allowed(self, http_client): list_of_status_codes.append(response.status_code) # Assert: Verify the response - assert list_of_status_codes.count(expected_status) == 3, ( - f"Failed for case: {description}" - ) + assert ( + list_of_status_codes.count(expected_status) == 3 + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical diff --git a/sdk/tests/legacy/new_tests/auth/tests.py b/sdk/tests/legacy/new_tests/auth/tests.py index 1a61f0a034..b09b4b4c61 100644 --- a/sdk/tests/legacy/new_tests/auth/tests.py +++ b/sdk/tests/legacy/new_tests/auth/tests.py @@ -16,9 +16,9 @@ async def test_api_authentication_missing_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -35,9 +35,9 @@ async def test_api_authentication_unsupported_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == 
expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -54,6 +54,6 @@ async def test_api_authentication_invalid_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/sdk/tests/legacy/new_tests/sdk/apps/tests.py b/sdk/tests/legacy/new_tests/sdk/apps/tests.py index d53a001e99..3d379d5d5f 100644 --- a/sdk/tests/legacy/new_tests/sdk/apps/tests.py +++ b/sdk/tests/legacy/new_tests/sdk/apps/tests.py @@ -44,9 +44,9 @@ async def test_create_app_successfully(self, http_client, setup_class_fixture): # ASSERT assert response.app_name == app_name - assert isinstance(response.model_dump(), dict), ( - "Response data is not a dictionary." - ) + assert isinstance( + response.model_dump(), dict + ), "Response data is not a dictionary." # CLEANUP await delete_application( diff --git a/sdk/tests/legacy/new_tests/testsets/tests.py b/sdk/tests/legacy/new_tests/testsets/tests.py index 543bdaceaf..70646d4f9c 100644 --- a/sdk/tests/legacy/new_tests/testsets/tests.py +++ b/sdk/tests/legacy/new_tests/testsets/tests.py @@ -77,9 +77,9 @@ async def test_upload_file_validation_failure(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # @pytest.mark.asyncio # @pytest.mark.typical @@ -148,9 +148,9 @@ async def test_get_testset_owner_access(self, http_client): response = await http_client.get(f"/testsets/{testset['id']}", headers=headers) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert "id" in response.json(), f"Failed for case: {description}" # Cleanup @@ -191,9 +191,9 @@ async def test_create_testset_success(self, http_client): await delete_testset(http_client, response_data["id"], headers) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert "id" in response_data, f"Failed for case: {description}" @pytest.mark.asyncio @@ -213,9 +213,9 @@ async def test_create_testset_validation_failure(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -241,9 +241,9 @@ async def test_create_testset_non_member_access(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -260,9 +260,9 @@ async def test_no_element(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 0, f"Failed for case: {description}" @pytest.mark.asyncio @@ -282,9 +282,9 @@ async def 
test_one_element(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 1, f"Failed for case: {description}" # Cleanup @@ -308,9 +308,9 @@ async def test_many_elements_small_data(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 3, f"Failed for case: {description}" # Cleanup @@ -335,9 +335,9 @@ async def test_many_elements_big_data(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 6, f"Failed for case: {description}" # Cleanup @@ -368,9 +368,9 @@ async def test_permissions_principal_not_in_scope(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup await delete_testset(http_client, testset["id"], owner_headers) @@ -391,9 +391,9 @@ async def test_permissions_allowed(self, http_client): response = await http_client.get("/testsets", headers=owner_headers) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -410,9 +410,9 @@ async def test_no_element(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 0, f"Failed for case: {description}" @pytest.mark.asyncio @@ -432,9 +432,9 @@ async def test_one_element(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 1, f"Failed for case: {description}" # Cleanup @@ -458,9 +458,9 @@ async def test_many_elements_small_data(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 3, f"Failed for case: {description}" # Cleanup @@ -485,9 +485,9 @@ async def test_many_elements_big_data(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert len(response_data) == 6, f"Failed for case: {description}" # Cleanup @@ -518,9 +518,9 @@ async def test_permissions_principal_not_in_scope(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + 
response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup await delete_testset(http_client, testset["id"], owner_headers) @@ -541,9 +541,9 @@ async def test_permissions_allowed(self, http_client): response = await http_client.get("/testsets", headers=owner_headers) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -565,9 +565,9 @@ async def test_update_success(self, http_client): response_data = response.json() # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" assert response_data["_id"] == testset["id"], f"Failed for case: {description}" # Cleanup @@ -592,9 +592,9 @@ async def test_update_validation_failure(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup await delete_testset(http_client, testset["id"], headers) @@ -622,9 +622,9 @@ async def test_update_non_member_access(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" # Cleanup await delete_testset(http_client, testset["id"], member_headers) @@ -650,9 +650,9 @@ async def test_delete_success(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -674,9 +674,9 @@ async def test_delete_validation_failure(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -698,6 +698,6 @@ async def test_delete_non_existent(self, http_client): ) # Assert - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/sdk/tests/legacy/new_tests/vault_router/test_vault_secrets_apis.py b/sdk/tests/legacy/new_tests/vault_router/test_vault_secrets_apis.py index 0de50397f6..245b5affc1 100644 --- a/sdk/tests/legacy/new_tests/vault_router/test_vault_secrets_apis.py +++ b/sdk/tests/legacy/new_tests/vault_router/test_vault_secrets_apis.py @@ -41,9 +41,9 @@ async def test_create_secret_with_viewer_role( json=valid_secret_payload, ) - assert create_response.status_code == 403, ( - "Secret creation cannot be successful. Given that apikey belongs to a user with 'viewer' role." - ) + assert ( + create_response.status_code == 403 + ), "Secret creation cannot be successful. Given that apikey belongs to a user with 'viewer' role." 
created_secret_message = create_response.json()["detail"] assert ( @@ -84,9 +84,9 @@ async def test_create_secret_with_invalid_secret_kind(self, async_client): "secrets", json=invalid_payload, ) - assert response.status_code == 422, ( - "Should reject payload with invalid secret kind" - ) + assert ( + response.status_code == 422 + ), "Should reject payload with invalid secret kind" @pytest.mark.asyncio @pytest.mark.secret_creation @@ -104,9 +104,9 @@ async def test_create_secret_with_invalid_provider_kind(self, async_client): "secrets", json=invalid_payload, ) - assert response.status_code == 422, ( - "Should reject payload with invalid secret provider kind" - ) + assert ( + response.status_code == 422 + ), "Should reject payload with invalid secret provider kind" @pytest.mark.asyncio @pytest.mark.secret_retrieval @@ -204,9 +204,9 @@ async def test_update_secret_with_viewer_role( json=update_payload, ) - assert update_response.status_code == 403, ( - "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role." - ) + assert ( + update_response.status_code == 403 + ), "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role." update_response_message = update_response.json()["detail"] assert ( @@ -233,9 +233,9 @@ async def test_delete_secret(self, async_client, valid_secret_payload): get_response = await async_client.get( f"secrets/{secret_id}", ) - assert get_response.status_code == 404, ( - "Deleted secret should not be retrievable" - ) + assert ( + get_response.status_code == 404 + ), "Deleted secret should not be retrievable" @pytest.mark.asyncio @pytest.mark.secret_deletion @@ -254,9 +254,9 @@ async def test_delete_secret_with_viewer_role( f"secrets/{secret_id}", headers={"Authorization": f"ApiKey {os.environ.get('VIEWER_API_KEY', '')}"}, ) - assert delete_response.status_code == 403, ( - "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role." - ) + assert ( + delete_response.status_code == 403 + ), "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role." 
delete_response_message = delete_response.json()["detail"] assert ( @@ -272,6 +272,6 @@ async def test_delete_nonexistent_secret(self, async_client): response = await async_client.delete( f"secrets/{non_existent_id}", ) - assert response.status_code == 204, ( - "Should always return 204 since the endpoint is idempotent" - ) + assert ( + response.status_code == 204 + ), "Should always return 204 since the endpoint is idempotent" diff --git a/sdk/tests/legacy/new_tests/workflows/admin/tests.py b/sdk/tests/legacy/new_tests/workflows/admin/tests.py index 3b695fe6d5..efa0a31025 100644 --- a/sdk/tests/legacy/new_tests/workflows/admin/tests.py +++ b/sdk/tests/legacy/new_tests/workflows/admin/tests.py @@ -17,9 +17,9 @@ async def test_api_authentication_missing_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -37,9 +37,9 @@ async def test_api_authentication_unsupported_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -57,6 +57,6 @@ async def test_api_authentication_invalid_token(self, http_client): response = await http_client.get("admin/accounts", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/sdk/tests/legacy/new_tests/workflows/auth/tests.py b/sdk/tests/legacy/new_tests/workflows/auth/tests.py index 5dbfeb795e..32dee3c697 100644 --- a/sdk/tests/legacy/new_tests/workflows/auth/tests.py +++ b/sdk/tests/legacy/new_tests/workflows/auth/tests.py @@ -17,9 +17,9 @@ async def test_api_authentication_missing_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -37,9 +37,9 @@ async def test_api_authentication_unsupported_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" @pytest.mark.asyncio @pytest.mark.typical @@ -57,6 +57,6 @@ async def test_api_authentication_invalid_token(self, http_client): response = await http_client.get("apps", headers=headers) # ASSERT: verify response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" diff --git a/sdk/tests/legacy/new_tests/workflows/observability/tests.py b/sdk/tests/legacy/new_tests/workflows/observability/tests.py index 26a539563d..3e215fab92 100644 --- a/sdk/tests/legacy/new_tests/workflows/observability/tests.py +++ 
b/sdk/tests/legacy/new_tests/workflows/observability/tests.py @@ -70,6 +70,6 @@ async def test_completion_generate_observability_tree( ) is_match = exact_match(workflow_nodes, observability_nodes) - assert is_match is True, ( - "Workflow nodes does not match nodes from observability" - ) + assert ( + is_match is True + ), "Workflow nodes does not match nodes from observability" diff --git a/sdk/tests/legacy/new_tests/workflows/permissions/tests.py b/sdk/tests/legacy/new_tests/workflows/permissions/tests.py index d33923c12c..97dcb14561 100644 --- a/sdk/tests/legacy/new_tests/workflows/permissions/tests.py +++ b/sdk/tests/legacy/new_tests/workflows/permissions/tests.py @@ -67,9 +67,9 @@ async def test_permissions_principal_not_in_scope( response_data = response.json() # Assert: Verify the response - assert response.status_code == expected_status, ( - f"Failed for case: {description}" - ) - assert response.json().get("detail") == "Service execution not allowed.", ( - f"Failed for case: {description}" - ) + assert ( + response.status_code == expected_status + ), f"Failed for case: {description}" + assert ( + response.json().get("detail") == "Service execution not allowed." + ), f"Failed for case: {description}" diff --git a/sdk/tests/legacy/sdk_routing/conftest.py b/sdk/tests/legacy/sdk_routing/conftest.py index ba5866d645..d02938591b 100644 --- a/sdk/tests/legacy/sdk_routing/conftest.py +++ b/sdk/tests/legacy/sdk_routing/conftest.py @@ -177,10 +177,10 @@ def ensure_server(fastapi_server, http_client): return print( - f"Health check attempt {i + 1}/10 failed with status {response.status_code}" + f"Health check attempt {i+1}/10 failed with status {response.status_code}" ) except (ConnectionError, TimeoutError) as e: - print(f"Health check attempt {i + 1}/10 failed: {e}") + print(f"Health check attempt {i+1}/10 failed: {e}") time.sleep(2) stdout, stderr = process.communicate(timeout=1) diff --git a/sdk/tests/legacy/sdk_tests/prompt_sdk/test_client.py b/sdk/tests/legacy/sdk_tests/prompt_sdk/test_client.py index 7ad09615d3..378fe2e7a0 100644 --- a/sdk/tests/legacy/sdk_tests/prompt_sdk/test_client.py +++ b/sdk/tests/legacy/sdk_tests/prompt_sdk/test_client.py @@ -30,18 +30,18 @@ def test_get_config_with_caching(agenta_client): ) as mock_get_config: # Retrieve configuration to store in cache response = agenta_client.get_config("base123", "production") - assert response == {"parameters": "something"}, ( - "First response should match the mock data." - ) + assert response == { + "parameters": "something" + }, "First response should match the mock data." # Modify the return value of the mock mock_get_config.return_value = {"parameters": "something else"} # Attempt to retrieve configuration again, expecting cached data response = agenta_client.get_config("base123", "production") - assert response == {"parameters": "something"}, ( - "Second response should return cached data, not new mock data." - ) + assert response == { + "parameters": "something" + }, "Second response should return cached data, not new mock data." def test_get_config_without_caching(agenta_client): @@ -59,15 +59,15 @@ def test_get_config_without_caching(agenta_client): ) as mock_get_config: # Retrieve configuration with caching disabled response = agenta_client.get_config("base123", "production", cache_timeout=0) - assert response == {"parameters": "something"}, ( - "First response should match the mock data." - ) + assert response == { + "parameters": "something" + }, "First response should match the mock data." 
# Modify the return value of the mock mock_get_config.return_value = {"parameters": "something else"} # Retrieve new configuration with caching disabled response = agenta_client.get_config("base123", "production", cache_timeout=0) - assert response == {"parameters": "something else"}, ( - "Second response should match the new mock data." - ) + assert response == { + "parameters": "something else" + }, "Second response should match the new mock data." diff --git a/web/ee/package.json b/web/ee/package.json index 555d036cf2..5bc3ba3451 100644 --- a/web/ee/package.json +++ b/web/ee/package.json @@ -1,6 +1,6 @@ { "name": "@agenta/ee", - "version": "0.62.1", + "version": "0.61.0", "private": true, "engines": { "node": ">=18" diff --git a/web/oss/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx b/web/ee/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx similarity index 100% rename from web/oss/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx rename to web/ee/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx diff --git a/web/oss/src/components/DeleteEvaluationModal/types.ts b/web/ee/src/components/DeleteEvaluationModal/types.ts similarity index 100% rename from web/oss/src/components/DeleteEvaluationModal/types.ts rename to web/ee/src/components/DeleteEvaluationModal/types.ts diff --git a/web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx b/web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx index 3b6f9cdb32..d596e2bc42 100644 --- a/web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx +++ b/web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx @@ -15,7 +15,7 @@ import { fetchAllDeploymentRevisions, } from "@/oss/services/deploymentVersioning/api" -import {DeploymentRevisionConfig, DeploymentRevisions} from "@agenta/oss/src/lib/types_ee" +import {DeploymentRevisionConfig, DeploymentRevisions} from "../../lib/types_ee" dayjs.extend(relativeTime) dayjs.extend(duration) diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/TagWithLink.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/TagWithLink.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/TagWithLink.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/TagWithLink.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/types.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/types.ts 
similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/types.ts rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/types.ts diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/utils.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/utils.ts similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/utils.ts rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/utils.ts diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/variantUtils.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/variantUtils.ts similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/assets/variantUtils.ts rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/variantUtils.ts diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunOutput.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunOutput.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunOutput.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunOutput.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunTraceHeader.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunTraceHeader.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunTraceHeader.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunTraceHeader.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/lib/helpers.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/lib/helpers.ts similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/lib/helpers.ts rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/lib/helpers.ts diff --git 
a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerHeader/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerHeader/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerHeader/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerHeader/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerContentSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerContentSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerContentSkeleton.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerContentSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerHeaderSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerHeaderSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerHeaderSkeleton.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerHeaderSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerSidePanelSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerSidePanelSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerSidePanelSkeleton.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerSidePanelSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/assets/EvalRunHeaderSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/assets/EvalRunHeaderSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/assets/EvalRunHeaderSkeleton.tsx rename to 
web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/assets/EvalRunHeaderSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/EvalRunPromptConfigViewerSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/EvalRunPromptConfigViewerSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/EvalRunPromptConfigViewerSkeleton.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/EvalRunPromptConfigViewerSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/EvalRunScoreTableSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/EvalRunScoreTableSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/EvalRunScoreTableSkeleton.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/EvalRunScoreTableSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/TraceMetrics.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/TraceMetrics.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/TraceMetrics.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/TraceMetrics.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/constants.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/constants.ts similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/constants.ts rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/constants.ts diff --git 
a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunSelectedEvaluations/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunSelectedEvaluations/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunSelectedEvaluations/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunSelectedEvaluations/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewUtilityOptions/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewUtilityOptions/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewUtilityOptions/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewUtilityOptions/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewer/assets/EvalRunTestcaseViewerSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewer/assets/EvalRunTestcaseViewerSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewer/assets/EvalRunTestcaseViewerSkeleton.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewer/assets/EvalRunTestcaseViewerSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewer/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewer/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewer/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestcaseViewer/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/index.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/index.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/types.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/types.ts similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/types.ts rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/types.ts diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/TimeSeriesChart.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/TimeSeriesChart.tsx similarity index 100% rename from 
web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/TimeSeriesChart.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/TimeSeriesChart.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/BarChart.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/BarChart.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/BarChart.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/BarChart.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/EvaluatorMetricsChartSkeleton.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/EvaluatorMetricsChartSkeleton.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/EvaluatorMetricsChartSkeleton.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/EvaluatorMetricsChartSkeleton.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/HistogramChart.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/HistogramChart.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/HistogramChart.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/HistogramChart.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/LowerBand.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/LowerBand.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/LowerBand.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/LowerBand.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/UpperBand.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/UpperBand.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/UpperBand.tsx rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/UpperBand.tsx diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/helpers.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/helpers.ts similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/helpers.ts rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/helpers.ts diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx similarity index 100% rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx rename to 
web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/shared/BarChartPlaceholder.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/shared/BarChartPlaceholder.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/shared/BarChartPlaceholder.tsx
rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/shared/BarChartPlaceholder.tsx
diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/shared/PlaceholderOverlay.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/shared/PlaceholderOverlay.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/shared/PlaceholderOverlay.tsx
rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/shared/PlaceholderOverlay.tsx
diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/components/shared/SpiderChartPlaceholder.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/shared/SpiderChartPlaceholder.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/components/shared/SpiderChartPlaceholder.tsx
rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/components/shared/SpiderChartPlaceholder.tsx
diff --git a/web/oss/src/components/EvalRunDetails/AutoEvalRun/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/AutoEvalRun/index.tsx
rename to web/ee/src/components/EvalRunDetails/AutoEvalRun/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/annotationUtils.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/annotationUtils.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/annotationUtils.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/annotationUtils.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/helpers.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/helpers.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/helpers.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/helpers.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/optimisticUtils.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/optimisticUtils.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/optimisticUtils.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/optimisticUtils.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/runnableSelectors.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/runnableSelectors.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/runnableSelectors.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/runnableSelectors.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/stepsMetricsUtils.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/stepsMetricsUtils.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/stepsMetricsUtils.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/stepsMetricsUtils.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/types.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/assets/types.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/types.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/types.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalResultsView/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalResultsView/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalResultsView/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalResultsView/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunBatchActions.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunBatchActions.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunBatchActions.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunBatchActions.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunName/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunName/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunName/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunName/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/types.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/types.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/EvalRunScenarioCardBody.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/EvalRunScenarioCardBody.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/EvalRunScenarioCardBody.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/EvalRunScenarioCardBody.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationInputs.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationInputs.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationInputs.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationInputs.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationResponse.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationResponse.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationResponse.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationResponse.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationRun.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationRun.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationRun.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationRun.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/KeyValue.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/KeyValue.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/KeyValue.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/KeyValue.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/utils.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/utils.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/utils.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/utils.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/types.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/types.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/types.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/types.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/EvalRunScenarioCards.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/EvalRunScenarioCards.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/EvalRunScenarioCards.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/EvalRunScenarioCards.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/assets/constants.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/assets/constants.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/assets/constants.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/assets/constants.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioFilters.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioFilters.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioFilters.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioFilters.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/assets/InstructionButton.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/assets/InstructionButton.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/assets/InstructionButton.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/assets/InstructionButton.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalButton.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalButton.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalButton.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalButton.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalModalContent.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalModalContent.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalModalContent.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalModalContent.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/types.d.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/types.d.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/Modals/types.d.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/types.d.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/types.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/types.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/types.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/types.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/ScenarioLoadingIndicator.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/ScenarioLoadingIndicator.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/ScenarioLoadingIndicator.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/ScenarioLoadingIndicator.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/assets/constants.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/assets/constants.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/assets/constants.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/assets/constants.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/types.ts b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/types.ts
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/HumanEvalRun/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/HumanEvalRun/index.tsx
rename to web/ee/src/components/EvalRunDetails/HumanEvalRun/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/OnlineEvalRun/OnlineUrlSync.tsx b/web/ee/src/components/EvalRunDetails/OnlineEvalRun/OnlineUrlSync.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/OnlineEvalRun/OnlineUrlSync.tsx
rename to web/ee/src/components/EvalRunDetails/OnlineEvalRun/OnlineUrlSync.tsx
diff --git a/web/oss/src/components/EvalRunDetails/OnlineEvalRun/components/ConfigurationViewer/index.tsx b/web/ee/src/components/EvalRunDetails/OnlineEvalRun/components/ConfigurationViewer/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/OnlineEvalRun/components/ConfigurationViewer/index.tsx
rename to web/ee/src/components/EvalRunDetails/OnlineEvalRun/components/ConfigurationViewer/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/OnlineEvalRun/components/TracesViewer/index.tsx b/web/ee/src/components/EvalRunDetails/OnlineEvalRun/components/TracesViewer/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/OnlineEvalRun/components/TracesViewer/index.tsx
rename to web/ee/src/components/EvalRunDetails/OnlineEvalRun/components/TracesViewer/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/OnlineEvalRun/index.tsx b/web/ee/src/components/EvalRunDetails/OnlineEvalRun/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/OnlineEvalRun/index.tsx
rename to web/ee/src/components/EvalRunDetails/OnlineEvalRun/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/UrlSync.tsx b/web/ee/src/components/EvalRunDetails/UrlSync.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/UrlSync.tsx
rename to web/ee/src/components/EvalRunDetails/UrlSync.tsx
diff --git a/web/oss/src/components/EvalRunDetails/assets/renderChatMessages.tsx b/web/ee/src/components/EvalRunDetails/assets/renderChatMessages.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/assets/renderChatMessages.tsx
rename to web/ee/src/components/EvalRunDetails/assets/renderChatMessages.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/ComparisonDataFetcher.tsx b/web/ee/src/components/EvalRunDetails/components/ComparisonDataFetcher.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/ComparisonDataFetcher.tsx
rename to web/ee/src/components/EvalRunDetails/components/ComparisonDataFetcher.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/EvalRunOverviewViewer/assets/EvalRunOverviewViewerSkeleton.tsx b/web/ee/src/components/EvalRunDetails/components/EvalRunOverviewViewer/assets/EvalRunOverviewViewerSkeleton.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/EvalRunOverviewViewer/assets/EvalRunOverviewViewerSkeleton.tsx
rename to web/ee/src/components/EvalRunDetails/components/EvalRunOverviewViewer/assets/EvalRunOverviewViewerSkeleton.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/EvalRunOverviewViewer/index.tsx b/web/ee/src/components/EvalRunDetails/components/EvalRunOverviewViewer/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/EvalRunOverviewViewer/index.tsx
rename to web/ee/src/components/EvalRunDetails/components/EvalRunOverviewViewer/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/EvalRunScenarioNavigator/index.tsx b/web/ee/src/components/EvalRunDetails/components/EvalRunScenarioNavigator/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/EvalRunScenarioNavigator/index.tsx
rename to web/ee/src/components/EvalRunDetails/components/EvalRunScenarioNavigator/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/assets/index.tsx b/web/ee/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/assets/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/assets/index.tsx
rename to web/ee/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/assets/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/index.tsx b/web/ee/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/index.tsx
rename to web/ee/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/assets/constants.ts b/web/ee/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/assets/constants.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/assets/constants.ts
rename to web/ee/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/assets/constants.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/index.tsx b/web/ee/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/index.tsx
rename to web/ee/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataButton.tsx b/web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataButton.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataButton.tsx
rename to web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataButton.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataModalContent.tsx b/web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataModalContent.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataModalContent.tsx
rename to web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataModalContent.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/SaveDataModal/assets/types.ts b/web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/SaveDataModal/assets/types.ts
rename to web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/SaveDataModal/index.tsx b/web/ee/src/components/EvalRunDetails/components/SaveDataModal/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/SaveDataModal/index.tsx
rename to web/ee/src/components/EvalRunDetails/components/SaveDataModal/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ComparisonScenarioTable.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ComparisonScenarioTable.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ComparisonScenarioTable.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ComparisonScenarioTable.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ActionCell.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ActionCell.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ActionCell.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ActionCell.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx
similarity index 99%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx
index fa0aef82a1..a5f4612b58 100644
--- a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx
+++ b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx
@@ -10,7 +10,7 @@ import TooltipButton from "@/oss/components/Playground/assets/EnhancedButton"
 import {Expandable} from "@/oss/components/Tables/ExpandableCell"
 import {useOptionalRunId, useRunId} from "@/oss/contexts/RunIdContext"
 import {useInvocationResult} from "@/oss/lib/hooks/useInvocationResult"
-import {resolvePath} from "@/oss/lib/evalRunner/pureEnrichment"
+import {resolvePath} from "@/oss/lib/workers/evalRunner/pureEnrichment"
 import {useAppNavigation, useAppState} from "@/oss/state/appState"
 
 import {
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ComparisonModeToggle.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ComparisonModeToggle.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ComparisonModeToggle.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ComparisonModeToggle.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedAnnotationValueCell.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedAnnotationValueCell.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedAnnotationValueCell.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedAnnotationValueCell.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricValueCell.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricValueCell.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricValueCell.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricValueCell.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricsCell.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricsCell.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricsCell.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricsCell.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/MetricCell.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/MetricCell.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/MetricCell.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/MetricCell.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/helpers.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/helpers.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/helpers.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/helpers.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/types.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/types.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ScenarioTraceSummary.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ScenarioTraceSummary.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ScenarioTraceSummary.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ScenarioTraceSummary.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/StatusCell.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/StatusCell.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/StatusCell.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/StatusCell.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/TimestampCell.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/TimestampCell.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/TimestampCell.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/TimestampCell.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/VirtualizedScenarioTableAnnotateDrawer.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/VirtualizedScenarioTableAnnotateDrawer.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/VirtualizedScenarioTableAnnotateDrawer.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/VirtualizedScenarioTableAnnotateDrawer.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/atoms/evaluatorFailures.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/atoms/evaluatorFailures.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/atoms/evaluatorFailures.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/atoms/evaluatorFailures.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/evaluatorNameUtils.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/evaluatorNameUtils.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/evaluatorNameUtils.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/evaluatorNameUtils.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/evaluatorSchemaUtils.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/evaluatorSchemaUtils.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/evaluatorSchemaUtils.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/evaluatorSchemaUtils.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/flatDataSourceBuilder.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/flatDataSourceBuilder.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/flatDataSourceBuilder.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/flatDataSourceBuilder.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/types.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/types.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useScrollToScenario.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useScrollToScenario.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useScrollToScenario.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useScrollToScenario.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/index.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/index.tsx
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/types.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/types.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/components/VirtualizedScenarioTable/types.ts
rename to web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/types.ts
diff --git a/web/oss/src/components/EvalRunDetails/hooks/useCachedScenarioSteps.ts b/web/ee/src/components/EvalRunDetails/hooks/useCachedScenarioSteps.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/hooks/useCachedScenarioSteps.ts
rename to web/ee/src/components/EvalRunDetails/hooks/useCachedScenarioSteps.ts
diff --git a/web/oss/src/components/EvalRunDetails/hooks/useMetricStepError.ts b/web/ee/src/components/EvalRunDetails/hooks/useMetricStepError.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/hooks/useMetricStepError.ts
rename to web/ee/src/components/EvalRunDetails/hooks/useMetricStepError.ts
diff --git a/web/oss/src/components/EvalRunDetails/index.tsx b/web/ee/src/components/EvalRunDetails/index.tsx
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/index.tsx
rename to web/ee/src/components/EvalRunDetails/index.tsx
diff --git a/web/oss/src/components/EvalRunDetails/state/evalType.ts b/web/ee/src/components/EvalRunDetails/state/evalType.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/state/evalType.ts
rename to web/ee/src/components/EvalRunDetails/state/evalType.ts
diff --git a/web/oss/src/components/EvalRunDetails/state/focusScenarioAtom.ts b/web/ee/src/components/EvalRunDetails/state/focusScenarioAtom.ts
similarity index 100%
rename from web/oss/src/components/EvalRunDetails/state/focusScenarioAtom.ts
rename to web/ee/src/components/EvalRunDetails/state/focusScenarioAtom.ts
diff --git a/web/oss/src/components/EvalRunDetails/state/urlState.ts b/web/ee/src/components/EvalRunDetails/state/urlState.ts
similarity index 97%
rename from web/oss/src/components/EvalRunDetails/state/urlState.ts
rename to web/ee/src/components/EvalRunDetails/state/urlState.ts
index aa4697994a..29a988ca50 100644
--- a/web/oss/src/components/EvalRunDetails/state/urlState.ts
+++ b/web/ee/src/components/EvalRunDetails/state/urlState.ts
@@ -1,7 +1,7 @@
 import {atom} from "jotai"
 import {atomWithImmer} from "jotai-immer"
 
-import {evalTypeAtom} from "./evalType"
+import {evalTypeAtom} from "../state/evalType"
 
 export interface EvalRunUrlState {
     view?:
diff --git a/web/oss/src/components/EvaluationTable/ABTestingEvaluationTable.tsx b/web/ee/src/components/EvaluationTable/ABTestingEvaluationTable.tsx
similarity index 100%
rename from web/oss/src/components/EvaluationTable/ABTestingEvaluationTable.tsx
rename to web/ee/src/components/EvaluationTable/ABTestingEvaluationTable.tsx
diff --git a/web/oss/src/components/EvaluationTable/SingleModelEvaluationTable.tsx b/web/ee/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
similarity index 99%
rename from web/oss/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
rename to web/ee/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
index 2bac4883a9..ad8e49c7cb 100644
--- a/web/oss/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
+++ b/web/ee/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
@@ -19,8 +19,8 @@
 import {getDefaultStore, useAtomValue} from "jotai"
 import debounce from "lodash/debounce"
 import {useRouter} from "next/router"
-import SaveTestsetModal from "@/oss/components/SaveTestsetModal/SaveTestsetModal"
 import SecondaryButton from "@/oss/components/SecondaryButton/SecondaryButton"
+import {useQueryParamState} from "@/oss/state/appState"
 import {EvaluationFlow} from "@/oss/lib/enums"
 import {exportSingleModelEvaluationData} from "@/oss/lib/helpers/evaluate"
 import {isBaseResponse, isFuncResponse} from "@/oss/lib/helpers/playgroundResp"
@@ -41,7 +41,6 @@ import {transformToRequestBody} from "@/oss/lib/shared/variant/transformer/trans
 import type {BaseResponse, EvaluationScenario, KeyValuePair, Variant} from "@/oss/lib/Types"
 import {callVariant} from "@/oss/services/api"
 import {updateEvaluation, updateEvaluationScenario} from "@/oss/services/human-evaluations/api"
-import {useQueryParamState} from "@/oss/state/appState"
 import {customPropertiesByRevisionAtomFamily} from "@/oss/state/newPlayground/core/customProperties"
 import {
     stablePromptVariablesAtomFamily,
@@ -52,6 +51,7 @@
 import {appUriInfoAtom, appSchemaAtom} from "@/oss/state/variant/atoms/fetcher"
 
 import EvaluationCardView from "../Evaluations/EvaluationCardView"
 import EvaluationVotePanel from "../Evaluations/EvaluationCardView/EvaluationVotePanel"
+import SaveTestsetModal from "../SaveTestsetModal/SaveTestsetModal"
 import {useSingleModelEvaluationTableStyles} from "./assets/styles"
 import ParamsFormWithRun from "./components/ParamsFormWithRun"
diff --git a/web/oss/src/components/EvaluationTable/assets/styles.ts b/web/ee/src/components/EvaluationTable/assets/styles.ts
similarity index 100%
rename from web/oss/src/components/EvaluationTable/assets/styles.ts
rename to web/ee/src/components/EvaluationTable/assets/styles.ts
diff --git a/web/oss/src/components/EvaluationTable/components/ParamsFormWithRun.tsx b/web/ee/src/components/EvaluationTable/components/ParamsFormWithRun.tsx
similarity index 100%
rename from web/oss/src/components/EvaluationTable/components/ParamsFormWithRun.tsx
rename to web/ee/src/components/EvaluationTable/components/ParamsFormWithRun.tsx
diff --git a/web/oss/src/components/EvaluationTable/types.d.ts b/web/ee/src/components/EvaluationTable/types.d.ts
similarity index 100%
rename from web/oss/src/components/EvaluationTable/types.d.ts
rename to web/ee/src/components/EvaluationTable/types.d.ts
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/EvaluationCard.tsx b/web/ee/src/components/Evaluations/EvaluationCardView/EvaluationCard.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/EvaluationCard.tsx
rename to web/ee/src/components/Evaluations/EvaluationCardView/EvaluationCard.tsx
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/EvaluationChatResponse.tsx b/web/ee/src/components/Evaluations/EvaluationCardView/EvaluationChatResponse.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/EvaluationChatResponse.tsx
rename to web/ee/src/components/Evaluations/EvaluationCardView/EvaluationChatResponse.tsx
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/EvaluationInputs.tsx b/web/ee/src/components/Evaluations/EvaluationCardView/EvaluationInputs.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/EvaluationInputs.tsx
rename to web/ee/src/components/Evaluations/EvaluationCardView/EvaluationInputs.tsx
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx b/web/ee/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx
rename to web/ee/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/EvaluationVotePanel.tsx b/web/ee/src/components/Evaluations/EvaluationCardView/EvaluationVotePanel.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/EvaluationVotePanel.tsx
rename to web/ee/src/components/Evaluations/EvaluationCardView/EvaluationVotePanel.tsx
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/VariantAlphabet.tsx b/web/ee/src/components/Evaluations/EvaluationCardView/VariantAlphabet.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/VariantAlphabet.tsx
rename to web/ee/src/components/Evaluations/EvaluationCardView/VariantAlphabet.tsx
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/assets/styles.ts b/web/ee/src/components/Evaluations/EvaluationCardView/assets/styles.ts
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/assets/styles.ts
rename to web/ee/src/components/Evaluations/EvaluationCardView/assets/styles.ts
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/index.tsx b/web/ee/src/components/Evaluations/EvaluationCardView/index.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/index.tsx
rename to web/ee/src/components/Evaluations/EvaluationCardView/index.tsx
diff --git a/web/oss/src/components/Evaluations/EvaluationCardView/types.d.ts b/web/ee/src/components/Evaluations/EvaluationCardView/types.d.ts
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationCardView/types.d.ts
rename to web/ee/src/components/Evaluations/EvaluationCardView/types.d.ts
diff --git a/web/oss/src/components/Evaluations/EvaluationErrorModal.tsx b/web/ee/src/components/Evaluations/EvaluationErrorModal.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/EvaluationErrorModal.tsx
rename to web/ee/src/components/Evaluations/EvaluationErrorModal.tsx
diff --git a/web/oss/src/components/Evaluations/HumanEvaluationResult.tsx b/web/ee/src/components/Evaluations/HumanEvaluationResult.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/HumanEvaluationResult.tsx
rename to web/ee/src/components/Evaluations/HumanEvaluationResult.tsx
diff --git a/web/oss/src/components/Evaluations/ShareEvaluationModal.tsx b/web/ee/src/components/Evaluations/ShareEvaluationModal.tsx
similarity index 100%
rename from web/oss/src/components/Evaluations/ShareEvaluationModal.tsx
rename to web/ee/src/components/Evaluations/ShareEvaluationModal.tsx
diff --git a/web/oss/src/components/Evaluators/assets/cells/EvaluatorTagsCell.tsx b/web/ee/src/components/Evaluators/assets/cells/EvaluatorTagsCell.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/assets/cells/EvaluatorTagsCell.tsx
rename to web/ee/src/components/Evaluators/assets/cells/EvaluatorTagsCell.tsx
diff --git a/web/oss/src/components/Evaluators/assets/cells/EvaluatorTypePill.tsx b/web/ee/src/components/Evaluators/assets/cells/EvaluatorTypePill.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/assets/cells/EvaluatorTypePill.tsx
rename to web/ee/src/components/Evaluators/assets/cells/EvaluatorTypePill.tsx
diff --git a/web/oss/src/components/Evaluators/assets/cells/TableDropdownMenu/index.tsx b/web/ee/src/components/Evaluators/assets/cells/TableDropdownMenu/index.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/assets/cells/TableDropdownMenu/index.tsx
rename to web/ee/src/components/Evaluators/assets/cells/TableDropdownMenu/index.tsx
diff --git a/web/oss/src/components/Evaluators/assets/cells/TableDropdownMenu/types.ts b/web/ee/src/components/Evaluators/assets/cells/TableDropdownMenu/types.ts
similarity index 100%
rename from web/oss/src/components/Evaluators/assets/cells/TableDropdownMenu/types.ts
rename to web/ee/src/components/Evaluators/assets/cells/TableDropdownMenu/types.ts
diff --git a/web/oss/src/components/Evaluators/assets/constants.ts b/web/ee/src/components/Evaluators/assets/constants.ts
similarity index 100%
rename from web/oss/src/components/Evaluators/assets/constants.ts
rename to web/ee/src/components/Evaluators/assets/constants.ts
diff --git a/web/oss/src/components/Evaluators/assets/getColumns.tsx b/web/ee/src/components/Evaluators/assets/getColumns.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/assets/getColumns.tsx
rename to web/ee/src/components/Evaluators/assets/getColumns.tsx
diff --git a/web/oss/src/components/Evaluators/assets/types.ts b/web/ee/src/components/Evaluators/assets/types.ts
similarity index 100%
rename from web/oss/src/components/Evaluators/assets/types.ts
rename to web/ee/src/components/Evaluators/assets/types.ts
diff --git a/web/oss/src/components/Evaluators/assets/utils.ts b/web/ee/src/components/Evaluators/assets/utils.ts
similarity index 100%
rename from web/oss/src/components/Evaluators/assets/utils.ts
rename to web/ee/src/components/Evaluators/assets/utils.ts
diff --git a/web/oss/src/components/Evaluators/components/ConfigureEvaluator/assets/ConfigureEvaluatorSkeleton.tsx b/web/ee/src/components/Evaluators/components/ConfigureEvaluator/assets/ConfigureEvaluatorSkeleton.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/components/ConfigureEvaluator/assets/ConfigureEvaluatorSkeleton.tsx
rename to web/ee/src/components/Evaluators/components/ConfigureEvaluator/assets/ConfigureEvaluatorSkeleton.tsx
diff --git a/web/oss/src/components/Evaluators/components/ConfigureEvaluator/index.tsx b/web/ee/src/components/Evaluators/components/ConfigureEvaluator/index.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/components/ConfigureEvaluator/index.tsx
rename to web/ee/src/components/Evaluators/components/ConfigureEvaluator/index.tsx
diff --git a/web/oss/src/components/Evaluators/components/DeleteEvaluatorsModal/assets/DeleteEvaluatorsModalContent/index.tsx b/web/ee/src/components/Evaluators/components/DeleteEvaluatorsModal/assets/DeleteEvaluatorsModalContent/index.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/components/DeleteEvaluatorsModal/assets/DeleteEvaluatorsModalContent/index.tsx
rename to web/ee/src/components/Evaluators/components/DeleteEvaluatorsModal/assets/DeleteEvaluatorsModalContent/index.tsx
diff --git a/web/oss/src/components/Evaluators/components/DeleteEvaluatorsModal/index.tsx b/web/ee/src/components/Evaluators/components/DeleteEvaluatorsModal/index.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/components/DeleteEvaluatorsModal/index.tsx
rename to web/ee/src/components/Evaluators/components/DeleteEvaluatorsModal/index.tsx
diff --git a/web/oss/src/components/Evaluators/components/DeleteEvaluatorsModal/types.ts b/web/ee/src/components/Evaluators/components/DeleteEvaluatorsModal/types.ts
similarity index 100%
rename from web/oss/src/components/Evaluators/components/DeleteEvaluatorsModal/types.ts
rename to web/ee/src/components/Evaluators/components/DeleteEvaluatorsModal/types.ts
diff --git a/web/oss/src/components/Evaluators/components/SelectEvaluatorModal/assets/SelectEvaluatorModalContent/index.tsx b/web/ee/src/components/Evaluators/components/SelectEvaluatorModal/assets/SelectEvaluatorModalContent/index.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/components/SelectEvaluatorModal/assets/SelectEvaluatorModalContent/index.tsx
rename to web/ee/src/components/Evaluators/components/SelectEvaluatorModal/assets/SelectEvaluatorModalContent/index.tsx
diff --git a/web/oss/src/components/Evaluators/components/SelectEvaluatorModal/index.tsx b/web/ee/src/components/Evaluators/components/SelectEvaluatorModal/index.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/components/SelectEvaluatorModal/index.tsx
rename to web/ee/src/components/Evaluators/components/SelectEvaluatorModal/index.tsx
diff --git a/web/oss/src/components/Evaluators/components/SelectEvaluatorModal/types.ts b/web/ee/src/components/Evaluators/components/SelectEvaluatorModal/types.ts
similarity index 100%
rename from web/oss/src/components/Evaluators/components/SelectEvaluatorModal/types.ts
rename to web/ee/src/components/Evaluators/components/SelectEvaluatorModal/types.ts
diff --git a/web/oss/src/components/Evaluators/hooks/useEvaluatorsRegistryData.ts b/web/ee/src/components/Evaluators/hooks/useEvaluatorsRegistryData.ts
similarity index 100%
rename from web/oss/src/components/Evaluators/hooks/useEvaluatorsRegistryData.ts
rename to web/ee/src/components/Evaluators/hooks/useEvaluatorsRegistryData.ts
diff --git a/web/oss/src/components/Evaluators/index.tsx b/web/ee/src/components/Evaluators/index.tsx
similarity index 100%
rename from web/oss/src/components/Evaluators/index.tsx
rename to web/ee/src/components/Evaluators/index.tsx
diff --git a/web/oss/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx b/web/ee/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx
rename to web/ee/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx
diff --git a/web/oss/src/components/HumanEvaluationModal/assets/styles.ts b/web/ee/src/components/HumanEvaluationModal/assets/styles.ts
similarity index 100%
rename from web/oss/src/components/HumanEvaluationModal/assets/styles.ts
rename to web/ee/src/components/HumanEvaluationModal/assets/styles.ts
diff --git a/web/oss/src/components/HumanEvaluationModal/types.d.ts b/web/ee/src/components/HumanEvaluationModal/types.d.ts
similarity index 100%
rename from web/oss/src/components/HumanEvaluationModal/types.d.ts
rename to web/ee/src/components/HumanEvaluationModal/types.d.ts
diff --git a/web/oss/src/components/HumanEvaluations/AbTestingEvaluation.tsx b/web/ee/src/components/HumanEvaluations/AbTestingEvaluation.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/AbTestingEvaluation.tsx
rename to web/ee/src/components/HumanEvaluations/AbTestingEvaluation.tsx
diff --git a/web/oss/src/components/HumanEvaluations/SingleModelEvaluation.tsx b/web/ee/src/components/HumanEvaluations/SingleModelEvaluation.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/SingleModelEvaluation.tsx
rename to web/ee/src/components/HumanEvaluations/SingleModelEvaluation.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/EvaluationStatusCell.tsx b/web/ee/src/components/HumanEvaluations/assets/EvaluationStatusCell.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/EvaluationStatusCell.tsx
rename to web/ee/src/components/HumanEvaluations/assets/EvaluationStatusCell.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/LegacyEvalResultCell.tsx b/web/ee/src/components/HumanEvaluations/assets/LegacyEvalResultCell.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/LegacyEvalResultCell.tsx
rename to web/ee/src/components/HumanEvaluations/assets/LegacyEvalResultCell.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartAxis.tsx b/web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartAxis.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartAxis.tsx
rename to web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartAxis.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartFrame.tsx b/web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartFrame.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartFrame.tsx
rename to web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartFrame.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveFrequencyChart.tsx b/web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveFrequencyChart.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveFrequencyChart.tsx
rename to web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveFrequencyChart.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveMetricChart.tsx b/web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveMetricChart.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveMetricChart.tsx
rename to web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveMetricChart.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/chartUtils.ts b/web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/chartUtils.ts
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/chartUtils.ts
rename to web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/chartUtils.ts
diff --git a/web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/utils.ts b/web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/utils.ts
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/utils.ts
rename to web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/utils.ts
diff --git a/web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/index.tsx b/web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/index.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/index.tsx
rename to web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/index.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/types.ts b/web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/types.ts
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/MetricDetailsPopover/types.ts
rename to web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/types.ts
diff --git a/web/oss/src/components/HumanEvaluations/assets/SingleModelEvaluationHeader/index.tsx b/web/ee/src/components/HumanEvaluations/assets/SingleModelEvaluationHeader/index.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/SingleModelEvaluationHeader/index.tsx
rename to web/ee/src/components/HumanEvaluations/assets/SingleModelEvaluationHeader/index.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx b/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx
rename to web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx
diff --git a/web/oss/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts b/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts
rename to web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts
diff --git a/web/oss/src/components/HumanEvaluations/assets/styles.ts b/web/ee/src/components/HumanEvaluations/assets/styles.ts
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/styles.ts
rename to web/ee/src/components/HumanEvaluations/assets/styles.ts
diff --git a/web/oss/src/components/HumanEvaluations/assets/utils.tsx b/web/ee/src/components/HumanEvaluations/assets/utils.tsx
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/assets/utils.tsx
rename to web/ee/src/components/HumanEvaluations/assets/utils.tsx
diff --git a/web/oss/src/components/HumanEvaluations/types.ts b/web/ee/src/components/HumanEvaluations/types.ts
similarity index 100%
rename from web/oss/src/components/HumanEvaluations/types.ts
rename to web/ee/src/components/HumanEvaluations/types.ts
diff --git a/web/oss/src/components/SaveTestsetModal/SaveTestsetModal.tsx b/web/ee/src/components/SaveTestsetModal/SaveTestsetModal.tsx
similarity index 100%
rename from web/oss/src/components/SaveTestsetModal/SaveTestsetModal.tsx
rename to web/ee/src/components/SaveTestsetModal/SaveTestsetModal.tsx
diff --git a/web/oss/src/components/SaveTestsetModal/types.d.ts b/web/ee/src/components/SaveTestsetModal/types.d.ts
similarity index 100%
rename from web/oss/src/components/SaveTestsetModal/types.d.ts
rename to web/ee/src/components/SaveTestsetModal/types.d.ts
diff --git a/web/oss/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorModal.tsx b/web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorModal.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorModal.tsx
rename to web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorModal.tsx
diff --git a/web/oss/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorPopover.tsx b/web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorPopover.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorPopover.tsx
rename to web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorPopover.tsx
diff --git a/web/oss/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx b/web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx
rename to web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx
diff --git a/web/oss/src/components/pages/evaluations/EvaluationsView.tsx b/web/ee/src/components/pages/evaluations/EvaluationsView.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/EvaluationsView.tsx
rename to web/ee/src/components/pages/evaluations/EvaluationsView.tsx
diff --git a/web/oss/src/components/pages/evaluations/FilterColumns/FilterColumns.tsx b/web/ee/src/components/pages/evaluations/FilterColumns/FilterColumns.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/FilterColumns/FilterColumns.tsx
rename to web/ee/src/components/pages/evaluations/FilterColumns/FilterColumns.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/AdvancedSettings.tsx b/web/ee/src/components/pages/evaluations/NewEvaluation/Components/AdvancedSettings.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/Components/AdvancedSettings.tsx
rename to web/ee/src/components/pages/evaluations/NewEvaluation/Components/AdvancedSettings.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalContent.tsx b/web/ee/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalContent.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalContent.tsx
rename to web/ee/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalContent.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectAppSection.tsx b/web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectAppSection.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectAppSection.tsx
rename to web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectAppSection.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/SelectEvaluatorSection.tsx b/web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/SelectEvaluatorSection.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/SelectEvaluatorSection.tsx
rename to web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/SelectEvaluatorSection.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectTestsetSection.tsx b/web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectTestsetSection.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectTestsetSection.tsx
rename to web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectTestsetSection.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectVariantSection.tsx b/web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectVariantSection.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectVariantSection.tsx
rename to web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectVariantSection.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/index.tsx b/web/ee/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/index.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/index.tsx
rename to web/ee/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/index.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/types.ts b/web/ee/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/types.ts
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/types.ts
rename to web/ee/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/types.ts
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/assets/constants.ts b/web/ee/src/components/pages/evaluations/NewEvaluation/assets/constants.ts
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/assets/constants.ts
rename to web/ee/src/components/pages/evaluations/NewEvaluation/assets/constants.ts
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/assets/styles.ts b/web/ee/src/components/pages/evaluations/NewEvaluation/assets/styles.ts
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/assets/styles.ts
rename to web/ee/src/components/pages/evaluations/NewEvaluation/assets/styles.ts
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/index.tsx b/web/ee/src/components/pages/evaluations/NewEvaluation/index.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/index.tsx
rename to web/ee/src/components/pages/evaluations/NewEvaluation/index.tsx
diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/types.ts b/web/ee/src/components/pages/evaluations/NewEvaluation/types.ts
similarity index 100%
rename from web/oss/src/components/pages/evaluations/NewEvaluation/types.ts
rename to web/ee/src/components/pages/evaluations/NewEvaluation/types.ts
diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx
rename to web/ee/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx
diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx
rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx
diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx
rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx
diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx
rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx
diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorTestcaseModal.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorTestcaseModal.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorTestcaseModal.tsx
rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorTestcaseModal.tsx
diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx
similarity index 100%
rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx
rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx
diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaEditor.tsx
b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaEditor.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaEditor.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaEditor.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaGenerator.ts b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaGenerator.ts similarity index 82% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaGenerator.ts rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaGenerator.ts index a56de11836..b6acddb008 100644 --- a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaGenerator.ts +++ b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/JSONSchemaGenerator.ts @@ -23,15 +23,15 @@ export function generateJSONSchema(config: SchemaConfig): GeneratedJSONSchema { const {responseFormat, includeReasoning, continuousConfig, categoricalOptions} = config const properties: Record<string, any> = {} - const required: string[] = ["score"] + const required: string[] = ["correctness"] // Base description is always "The grade results" const baseDescription = "The grade results" - // Add the main score field based on response format + // Add the main correctness field based on response format switch (responseFormat) { case "continuous": - properties.score = { + properties.correctness = { type: "number", description: baseDescription, minimum: continuousConfig?.minimum ?? 0, @@ -40,7 +40,7 @@ export function generateJSONSchema(config: SchemaConfig): GeneratedJSONSchema { break case "boolean": - properties.score = { + properties.correctness = { type: "boolean", description: baseDescription, } @@ -53,14 +53,14 @@ .map((opt) => `"${opt.name}": ${opt.description}`) .join("| ") - properties.score = { + properties.correctness = { type: "string", description: `${baseDescription}. Categories: ${categoryDescriptions}`, enum: enumValues, } } else { // Fallback if no categories defined - properties.score = { + properties.correctness = { type: "string", description: baseDescription, } @@ -97,43 +97,43 @@ export function parseJSONSchema(schemaString: string): SchemaConfig | null { // Handle both old format (direct schema) and new format (with name wrapper) const schema = parsed.schema || parsed - if (!schema.properties || !schema.properties.score) { + if (!schema.properties || !schema.properties.correctness) { return null } - const score = schema.properties.score + const correctness = schema.properties.correctness const hasReasoning = !!schema.properties.comment let responseFormat: SchemaConfig["responseFormat"] = "boolean" let continuousConfig: SchemaConfig["continuousConfig"] let categoricalOptions: SchemaConfig["categoricalOptions"] - if (score.type === "number") { + if (correctness.type === "number") { responseFormat = "continuous" continuousConfig = { - minimum: score.minimum ?? 0, - maximum: score.maximum ?? 10, + minimum: correctness.minimum ?? 
0, + maximum: correctness.maximum ?? 10, } - } else if (score.type === "boolean") { + } else if (correctness.type === "boolean") { responseFormat = "boolean" - } else if (score.type === "string" && score.enum) { + } else if (correctness.type === "string" && correctness.enum) { responseFormat = "categorical" // Parse category descriptions from the description field - const desc = score.description || "" + const desc = correctness.description || "" const categoriesMatch = desc.match(/Categories: (.+)/) if (categoriesMatch) { const categoriesStr = categoriesMatch[1] const categoryPairs = categoriesStr.split("| ") - categoricalOptions = score.enum.map((name: string) => { + categoricalOptions = correctness.enum.map((name: string) => { const pair = categoryPairs.find((p: string) => p.startsWith(`"${name}":`)) const description = pair ? pair.split(": ")[1] || "" : "" return {name, description} }) } else { - categoricalOptions = score.enum.map((name: string) => ({ + categoricalOptions = correctness.enum.map((name: string) => ({ name, description: "", })) diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/index.ts b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/index.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/index.ts rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/index.ts diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/types.ts b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/types.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/types.ts rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/JSONSchema/types.ts diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/Messages.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/Messages.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/Messages.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/Messages.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/assets/styles.ts b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/assets/styles.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/assets/styles.ts rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/assets/styles.ts diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx diff --git 
a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/types.ts b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/types.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/types.ts rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/types.ts diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/variantUtils.ts b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/variantUtils.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/variantUtils.ts rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/variantUtils.ts diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/DeleteModal.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/DeleteModal.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/DeleteModal.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/DeleteModal.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorCard.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorCard.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorCard.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorCard.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorList.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorList.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorList.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorList.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/index.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/index.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/index.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/index.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/EvaluatorsModal.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/EvaluatorsModal.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/EvaluatorsModal.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/EvaluatorsModal.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorCard.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorCard.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorCard.tsx rename to 
web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorCard.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorList.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorList.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorList.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorList.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/index.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/index.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/index.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/index.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/Filters/SearchFilter.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/Filters/SearchFilter.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/Filters/SearchFilter.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/Filters/SearchFilter.tsx diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/assets/AutoEvaluationHeader.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/assets/AutoEvaluationHeader.tsx similarity index 99% rename from web/oss/src/components/pages/evaluations/autoEvaluation/assets/AutoEvaluationHeader.tsx rename to web/ee/src/components/pages/evaluations/autoEvaluation/assets/AutoEvaluationHeader.tsx index 783fb6804f..e52e9227fb 100644 --- a/web/oss/src/components/pages/evaluations/autoEvaluation/assets/AutoEvaluationHeader.tsx +++ b/web/ee/src/components/pages/evaluations/autoEvaluation/assets/AutoEvaluationHeader.tsx @@ -25,9 +25,9 @@ import {getMetricConfig} from "@/oss/lib/metrics/utils" import {EvaluationStatus} from "@/oss/lib/Types" import {getAppValues} from "@/oss/state/app" -import {statusMapper} from "../../cellRenderers/cellRenderers" +import {statusMapper} from "../../../evaluations/cellRenderers/cellRenderers" import {buildEvaluationNavigationUrl} from "../../utils" -import {useStyles} from "./styles" +import {useStyles} from "../assets/styles" import {AutoEvaluationHeaderProps} from "./types" diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/assets/styles.ts b/web/ee/src/components/pages/evaluations/autoEvaluation/assets/styles.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/assets/styles.ts rename to web/ee/src/components/pages/evaluations/autoEvaluation/assets/styles.ts diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/assets/types.ts b/web/ee/src/components/pages/evaluations/autoEvaluation/assets/types.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/autoEvaluation/assets/types.ts rename to web/ee/src/components/pages/evaluations/autoEvaluation/assets/types.ts diff --git a/web/oss/src/components/pages/evaluations/cellRenderers/StatusRenderer.tsx b/web/ee/src/components/pages/evaluations/cellRenderers/StatusRenderer.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/cellRenderers/StatusRenderer.tsx rename to 
web/ee/src/components/pages/evaluations/cellRenderers/StatusRenderer.tsx diff --git a/web/oss/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx b/web/ee/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx rename to web/ee/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx diff --git a/web/oss/src/components/pages/evaluations/customEvaluation/CustomEvaluation.tsx b/web/ee/src/components/pages/evaluations/customEvaluation/CustomEvaluation.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/customEvaluation/CustomEvaluation.tsx rename to web/ee/src/components/pages/evaluations/customEvaluation/CustomEvaluation.tsx diff --git a/web/oss/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx b/web/ee/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx rename to web/ee/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx diff --git a/web/oss/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx b/web/ee/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx rename to web/ee/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/OnlineEvaluation.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/OnlineEvaluation.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/OnlineEvaluation.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/OnlineEvaluation.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/OnlineEvaluationDrawer.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/OnlineEvaluationDrawer.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/OnlineEvaluationDrawer.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/OnlineEvaluationDrawer.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/assets/helpers.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/assets/helpers.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/assets/helpers.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/assets/helpers.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/assets/state.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/assets/state.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/assets/state.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/assets/state.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/assets/styles.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/assets/styles.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/assets/styles.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/assets/styles.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/EvaluatorDetailsPreview.tsx 
b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/EvaluatorDetailsPreview.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/components/EvaluatorDetailsPreview.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/EvaluatorDetailsPreview.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/EvaluatorTypeTag.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/EvaluatorTypeTag.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/components/EvaluatorTypeTag.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/EvaluatorTypeTag.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/FiltersPreview.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/FiltersPreview.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/components/FiltersPreview.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/FiltersPreview.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/OnlineEvaluationRowActions.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/OnlineEvaluationRowActions.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/components/OnlineEvaluationRowActions.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/OnlineEvaluationRowActions.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/PromptPreview.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/PromptPreview.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/components/PromptPreview.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/PromptPreview.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/QueryFiltersCell.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/QueryFiltersCell.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/components/QueryFiltersCell.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/QueryFiltersCell.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/QueryFiltersSummaryCard.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/QueryFiltersSummaryCard.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/components/QueryFiltersSummaryCard.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/QueryFiltersSummaryCard.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/ReadOnlyBox.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/ReadOnlyBox.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/components/ReadOnlyBox.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/ReadOnlyBox.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/components/SamplingRateControl.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/components/SamplingRateControl.tsx similarity index 100% rename from 
web/oss/src/components/pages/evaluations/onlineEvaluation/components/SamplingRateControl.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/components/SamplingRateControl.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/constants.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/constants.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/constants.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/constants.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorDetails.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorDetails.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorDetails.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorDetails.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorSelection.tsx b/web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorSelection.tsx similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorSelection.tsx rename to web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorSelection.tsx diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeFromConfigs.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeFromConfigs.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeFromConfigs.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeFromConfigs.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeMeta.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeMeta.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeMeta.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeMeta.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useOnlineEvaluations.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useOnlineEvaluations.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/hooks/useOnlineEvaluations.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/hooks/useOnlineEvaluations.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/types.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/types.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/types.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/types.ts diff --git a/web/oss/src/components/pages/evaluations/onlineEvaluation/utils/evaluatorDetails.ts b/web/ee/src/components/pages/evaluations/onlineEvaluation/utils/evaluatorDetails.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/onlineEvaluation/utils/evaluatorDetails.ts rename to web/ee/src/components/pages/evaluations/onlineEvaluation/utils/evaluatorDetails.ts diff --git a/web/oss/src/components/pages/evaluations/utils.ts b/web/ee/src/components/pages/evaluations/utils.ts similarity index 100% rename from web/oss/src/components/pages/evaluations/utils.ts rename to 
web/ee/src/components/pages/evaluations/utils.ts diff --git a/web/oss/src/contexts/RunIdContext.tsx b/web/ee/src/contexts/RunIdContext.tsx similarity index 100% rename from web/oss/src/contexts/RunIdContext.tsx rename to web/ee/src/contexts/RunIdContext.tsx diff --git a/web/ee/src/lib/helpers/evaluate.ts b/web/ee/src/lib/helpers/evaluate.ts new file mode 100644 index 0000000000..49c631c561 --- /dev/null +++ b/web/ee/src/lib/helpers/evaluate.ts @@ -0,0 +1,469 @@ +import {EvaluationType} from "@agenta/oss/src/lib/enums" +import {convertToCsv, downloadCsv} from "@agenta/oss/src/lib/helpers/fileManipulations" +import {formatCurrency, formatLatency} from "@agenta/oss/src/lib/helpers/formatters" +import {isDemo} from "@agenta/oss/src/lib/helpers/utils" +import { + Evaluation, + GenericObject, + TypedValue, + Variant, + _Evaluation, + EvaluationScenario, +} from "@agenta/oss/src/lib/Types" +import dayjs from "dayjs" +import capitalize from "lodash/capitalize" +import round from "lodash/round" + +import AlertPopup from "@/oss/components/AlertPopup/AlertPopup" +import {runningStatuses} from "@/oss/components/pages/evaluations/cellRenderers/cellRenderers" +import { + HumanEvaluationListTableDataType, + SingleModelEvaluationListTableDataType, +} from "@/oss/lib/Types" +import {fetchEvaluatonIdsByResource} from "@/oss/services/evaluations/api" + +export const exportExactEvaluationData = (evaluation: Evaluation, rows: GenericObject[]) => { + const exportRow = rows.map((data, ix) => { + return { + ["Inputs"]: + evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn] || + data.inputs[0].input_value, + [`App Variant ${evaluation.variants[0].variantName} Output`]: data?.columnData0 + ? data?.columnData0 + : data.outputs[0]?.variant_output, + ["Correct answer"]: data.correctAnswer, + ["Evaluation"]: data.score, + } + }) + const exportCol = Object.keys(exportRow[0]) + + const csvData = convertToCsv(exportRow, exportCol) + const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.evaluationType}.csv` + downloadCsv(csvData, filename) +} + +export const exportSimilarityEvaluationData = (evaluation: Evaluation, rows: GenericObject[]) => { + const exportRow = rows.map((data, ix) => { + return { + ["Inputs"]: + evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn] || + data.inputs[0].input_value, + [`App Variant ${evaluation.variants[0].variantName} Output`]: data?.columnData0 + ? data?.columnData0 + : data.outputs[0]?.variant_output, + ["Correct answer"]: data.correctAnswer, + ["Score"]: data.score, + ["Evaluation"]: data.similarity, + } + }) + const exportCol = Object.keys(exportRow[0]) + + const csvData = convertToCsv(exportRow, exportCol) + const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.evaluationType}.csv` + downloadCsv(csvData, filename) +} + +export const exportAICritiqueEvaluationData = (evaluation: Evaluation, rows: GenericObject[]) => { + const exportRow = rows.map((data, ix) => { + return { + ["Inputs"]: + evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn] || + data.inputs[0].input_value, + [`App Variant ${evaluation.variants[0].variantName} Output`]: data?.columnData0 + ? 
data?.columnData0 + : data.outputs[0]?.variant_output, + ["Correct answer"]: data.correctAnswer, + ["Score"]: data.score, + } + }) + const exportCol = Object.keys(exportRow[0]) + + const csvData = convertToCsv(exportRow, exportCol) + const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.evaluationType}.csv` + downloadCsv(csvData, filename) +} + +export const exportABTestingEvaluationData = ( + evaluation: Evaluation, + scenarios: EvaluationScenario[], + rows: GenericObject[], +) => { + const exportRow = rows.map((data, ix) => { + const inputColumns = evaluation.testset.testsetChatColumn + ? {Input: evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn]} + : data.inputs.reduce( + (columns: any, input: {input_name: string; input_value: string}) => { + columns[`${input.input_name}`] = input.input_value + return columns + }, + {}, + ) + return { + ...inputColumns, + [`App Variant ${evaluation.variants[0].variantName} Output 0`]: data?.columnData0 + ? data?.columnData0 + : data.outputs[0]?.variant_output, + [`App Variant ${evaluation.variants[1].variantName} Output 1`]: data?.columnData1 + ? data?.columnData1 + : data.outputs[1]?.variant_output, + ["Vote"]: + evaluation.variants.find((v: Variant) => v.variantId === data.vote)?.variantName || + data.vote, + ["Expected Output"]: + scenarios[ix]?.correctAnswer || evaluation.testset.csvdata[ix].correct_answer, + ["Additional notes"]: scenarios[ix]?.note, + } + }) + const exportCol = Object.keys(exportRow[0]) + + const csvData = convertToCsv(exportRow, exportCol) + const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.variants[1].variantName}_${evaluation.evaluationType}.csv` + downloadCsv(csvData, filename) +} + +export const exportSingleModelEvaluationData = ( + evaluation: Evaluation, + scenarios: EvaluationScenario[], + rows: GenericObject[], +) => { + const exportRow = rows.map((data, ix) => { + const inputColumns = evaluation.testset.testsetChatColumn + ? {Input: evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn]} + : data.inputs.reduce( + (columns: any, input: {input_name: string; input_value: string}) => { + columns[`${input.input_name}`] = input.input_value + return columns + }, + {}, + ) + const numericScore = parseInt(data.score) + return { + ...inputColumns, + [`App Variant ${evaluation.variants[0].variantName} Output 0`]: data?.columnData0 + ? data?.columnData0 + : data.outputs[0]?.variant_output, + ["Score"]: isNaN(numericScore) ? "-" : numericScore, + ["Expected Output"]: + scenarios[ix]?.correctAnswer || evaluation.testset.csvdata[ix].correct_answer, + ["Additional notes"]: scenarios[ix]?.note, + } + }) + const exportCol = Object.keys(exportRow[0]) + + const csvData = convertToCsv(exportRow, exportCol) + const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.evaluationType}.csv` + downloadCsv(csvData, filename) +} + +export const exportRegexEvaluationData = ( + evaluation: Evaluation, + rows: GenericObject[], + settings: GenericObject, +) => { + const exportRow = rows.map((data, ix) => { + const isCorrect = data.score === "correct" + const isMatch = settings.regexShouldMatch ? isCorrect : !isCorrect + + return { + ["Inputs"]: + evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn] || + data.inputs[0].input_value, + [`App Variant ${evaluation.variants[0].variantName} Output`]: data?.columnData0 + ? 
data?.columnData0 + : data.outputs[0]?.variant_output, + ["Match / Mismatch"]: isMatch ? "Match" : "Mismatch", + ["Evaluation"]: data.score, + } + }) + const exportCol = Object.keys(exportRow[0]) + + const csvData = convertToCsv(exportRow, exportCol) + const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.evaluationType}.csv` + downloadCsv(csvData, filename) +} + +export const exportWebhookEvaluationData = (evaluation: Evaluation, rows: GenericObject[]) => { + const exportRow = rows.map((data, ix) => { + return { + ["Inputs"]: + evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn] || + data.inputs[0].input_value, + [`App Variant ${evaluation.variants[0].variantName} Output`]: data?.columnData0 + ? data?.columnData0 + : data.outputs[0]?.variant_output, + ["Correct answer"]: data.correctAnswer, + ["Score"]: data.score, + } + }) + const exportCol = Object.keys(exportRow[0]) + + const csvData = convertToCsv(exportRow, exportCol) + const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.evaluationType}.csv` + downloadCsv(csvData, filename) +} + +export const exportCustomCodeEvaluationData = (evaluation: Evaluation, rows: GenericObject[]) => { + const exportRow = rows.map((data, ix) => { + return { + ["Inputs"]: + evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn] || + data.inputs[0].input_value, + [`App Variant ${evaluation.variants[0].variantName} Output`]: data?.columnData0 + ? data?.columnData0 + : data.outputs[0]?.variant_output, + ["Correct answer"]: data.correctAnswer, + ["Score"]: data.score, + } + }) + const exportCol = Object.keys(exportRow[0]) + + const csvData = convertToCsv(exportRow, exportCol) + const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.evaluationType}.csv` + downloadCsv(csvData, filename) +} + +export const calculateResultsDataAvg = (resultsData: Record<string, number>, multiplier = 10) => { + const obj = {...resultsData} + Object.keys(obj).forEach((key) => { + if (isNaN(+key)) delete obj[key] + }) + + const count = Object.values(obj).reduce((acc, value) => acc + +value, 0) + const sum = Object.keys(obj).reduce((acc, key) => acc + (parseFloat(key) || 0) * +obj[key], 0) + return (sum / count) * multiplier +} + +export const getVotesPercentage = (record: HumanEvaluationListTableDataType, index: number) => { + const variant = record.votesData.variants[index] + return record.votesData.variants_votes_data[variant]?.percentage +} + +export const checkIfResourceValidForDeletion = async ( + data: Omit<Parameters<typeof fetchEvaluatonIdsByResource>[0], "appId">, +) => { + if (isDemo()) { + const response = await fetchEvaluatonIdsByResource(data) + if (response.data.length > 0) { + const name = + (data.resourceType === "testset" + ? "Testset" + : data.resourceType === "evaluator_config" + ? "Evaluator" + : "Variant") + (data.resourceIds.length > 1 ? "s" : "") + + const suffix = response.data.length > 1 ? "s" : "" + AlertPopup({ + title: `${name} is in use`, + message: `The ${name} is currently in use by ${response.data.length} evaluation${suffix}. 
Please delete the evaluation${suffix} first.`, + cancelText: null, + okText: "Ok", + }) + return false + } + } + return true +} + +export function getTypedValue(res?: TypedValue) { + const {value, type, error} = res || {} + if (type === "error") { + return error?.message + } + + if (value === undefined) return "-" + + switch (type) { + case "number": + return round(Number(value), 2) + case "boolean": + case "bool": + return capitalize(value?.toString()) + case "cost": + return formatCurrency(Number(value)) + case "latency": + return formatLatency(Number(value)) + case "string": + case "text": + return value?.toString() ?? "-" + case "code": + case "regex": + return value?.toString() ?? "-" + case "object": + return typeof value === "object" + ? JSON.stringify(value, null, 2) + : (value?.toString() ?? "-") + case "messages": + return Array.isArray(value) + ? value + .map((msg) => (typeof msg === "string" ? msg : JSON.stringify(msg))) + .join("\n") + : (value?.toString() ?? "-") + case "multiple_choice": + return Array.isArray(value) ? value.join(", ") : (value?.toString() ?? "-") + case "hidden": + return "-" + default: + return value?.toString() ?? "-" + } +} + +type CellDataType = "number" | "text" | "date" +export function getFilterParams(type: CellDataType) { + const filterParams: GenericObject = {} + if (type == "date") { + filterParams.comparator = function ( + filterLocalDateAtMidnight: Date, + cellValue: string | null, + ) { + if (cellValue == null) return -1 + const cellDate = dayjs(cellValue).startOf("day").toDate() + if (filterLocalDateAtMidnight.getTime() === cellDate.getTime()) { + return 0 + } + if (cellDate < filterLocalDateAtMidnight) { + return -1 + } + if (cellDate > filterLocalDateAtMidnight) { + return 1 + } + } + } + + return { + sortable: true, + floatingFilter: true, + filter: + type === "number" + ? "agNumberColumnFilter" + : type === "date" + ? "agDateColumnFilter" + : "agTextColumnFilter", + cellDataType: type === "number" ? "text" : type, + filterParams, + comparator: getCustomComparator(type), + } +} + +export const calcEvalDuration = (evaluation: _Evaluation) => { + return dayjs( + runningStatuses.includes(evaluation.status.value) ? Date.now() : evaluation.updated_at, + ).diff(dayjs(evaluation.created_at), "milliseconds") +} + +const getCustomComparator = (type: CellDataType) => (valueA: string, valueB: string) => { + const getNumber = (val: string) => { + const num = parseFloat(val || "0") + return isNaN(num) ? 
0 : num + } + + valueA = String(valueA) + valueB = String(valueB) + + switch (type) { + case "date": + return dayjs(valueA).diff(dayjs(valueB)) + case "text": + return valueA.localeCompare(valueB) + case "number": + return getNumber(valueA) - getNumber(valueB) + default: + return 0 + } +} + +export const removeCorrectAnswerPrefix = (str: string) => { + return str.replace(/^correctAnswer_/, "") +} + +export const mapTestcaseAndEvalValues = ( + settingsValues: Record<string, any>, + selectedTestcase: Record<string, any>, +) => { + const testcaseObj: Record<string, any> = {} + const evalMapObj: Record<string, any> = {} + + Object.entries(settingsValues).forEach(([key, value]) => { + if (typeof value === "string" && value.startsWith("testcase.")) { + testcaseObj[key] = selectedTestcase[value.split(".")[1]] + } else { + evalMapObj[key] = value + } + }) + + return {testcaseObj, evalMapObj} +} + +export const transformTraceKeysInSettings = ( + settingsValues: Record<string, any>, +): Record<string, any> => { + return Object.keys(settingsValues).reduce( + (acc, curr) => { + if ( + !acc[curr] && + typeof settingsValues[curr] === "string" && + settingsValues[curr].startsWith("trace.") + ) { + acc[curr] = settingsValues[curr].replace("trace.", "") + } else { + acc[curr] = settingsValues[curr] + } + + return acc + }, + {} as Record<string, any>, + ) +} + +export const getEvaluatorTags = () => { + const evaluatorTags = [ + { + label: "Classifiers", + value: "classifiers", + }, + { + label: "Similarity", + value: "similarity", + }, + { + label: "AI / LLM", + value: "ai_llm", + }, + { + label: "Functional", + value: "functional", + }, + ] + + if (isDemo()) { + evaluatorTags.unshift({ + label: "RAG", + value: "rag", + }) + } + + return evaluatorTags +} + +export const calculateAvgScore = (evaluation: SingleModelEvaluationListTableDataType) => { + let score = 0 + if (evaluation.scoresData) { + score = + ((evaluation.scoresData.correct?.length || evaluation.scoresData.true?.length || 0) / + evaluation.scoresData.nb_of_rows) * + 100 + } else if (evaluation.resultsData) { + const multiplier = { + [EvaluationType.auto_webhook_test]: 100, + [EvaluationType.single_model_test]: 1, + } + score = calculateResultsDataAvg( + evaluation.resultsData, + multiplier[evaluation.evaluationType as keyof typeof multiplier], + ) + score = isNaN(score) ? 
0 : score + } else if (evaluation.avgScore) { + score = evaluation.avgScore * 100 + } + + return score +} diff --git a/web/ee/src/lib/helpers/hashUtils.ts b/web/ee/src/lib/helpers/hashUtils.ts new file mode 100644 index 0000000000..5c66724e5a --- /dev/null +++ b/web/ee/src/lib/helpers/hashUtils.ts @@ -0,0 +1,73 @@ +// Utility to generate a hash ID for annotation/invocation steps, aligned with backend make_hash_id +// Uses blake2b if available, otherwise falls back to SHA-256 + +import blake from "blakejs" +// import { v4 as uuidv4 } from "uuid" // Use this for UUIDs if needed + +const REFERENCE_KEYS = [ + "application", + "application_variant", + "application_revision", + "testset", + "testcase", + "evaluator", +] + +// Recursively stable, whitespace-free JSON stringifier +function stableStringifyRecursive(obj: any): string { + if (obj === null || typeof obj !== "object") { + return JSON.stringify(obj) + } + if (Array.isArray(obj)) { + return `[${obj.map(stableStringifyRecursive).join(",")}]` + } + const keys = Object.keys(obj).sort() + const entries = keys.map( + (key) => `${JSON.stringify(key)}:${stableStringifyRecursive(obj[key])}`, + ) + return `{${entries.join(",")}}` +} + +export function makeHashId({ + references, + links, +}: { + references?: Record<string, any> + links?: Record<string, any> +}): string { + if (!references && !links) return "" + const payload: Record<string, any> = {} + + for (const k of Object.keys(references || {})) { + if (REFERENCE_KEYS.includes(k)) { + const v = references![k] + // Only include 'id' field, not 'slug' + if (v.id != null) { + payload[k] = {id: v.id} + } + } + } + for (const k of Object.keys(links || {})) { + const v = links![k] + payload[k] = { + span_id: v.span_id, + trace_id: v.trace_id, + } + } + // Stable, deep, whitespace-free JSON + const serialized = stableStringifyRecursive(payload) + + // blake2b hash (digest_size=16) + try { + // Use blakejs (same as backend example) + return blake.blake2bHex(serialized, null, 16) + } catch (e) { + // Fallback: SHA-256 + if (window.crypto?.subtle) { + throw new Error( + "blake2b not available and crypto.subtle is async. 
Provide a polyfill or use a sync fallback.", + ) + } + return btoa(serialized) + } +} diff --git a/web/oss/src/lib/helpers/serviceValidations.ts b/web/ee/src/lib/helpers/serviceValidations.ts similarity index 100% rename from web/oss/src/lib/helpers/serviceValidations.ts rename to web/ee/src/lib/helpers/serviceValidations.ts diff --git a/web/oss/src/lib/helpers/traceUtils.ts b/web/ee/src/lib/helpers/traceUtils.ts similarity index 98% rename from web/oss/src/lib/helpers/traceUtils.ts rename to web/ee/src/lib/helpers/traceUtils.ts index 536a7b5bc6..f232711598 100644 --- a/web/oss/src/lib/helpers/traceUtils.ts +++ b/web/ee/src/lib/helpers/traceUtils.ts @@ -1,6 +1,6 @@ import {uuidToTraceId} from "@/oss/lib/hooks/useAnnotations/assets/helpers" -import {TraceData, TraceTree} from "@agenta/oss/src/lib/hooks/useEvaluationRunScenarioSteps/types" +import {TraceData, TraceTree} from "../hooks/useEvaluationRunScenarioSteps/types" export function findTraceForStep(traces: any[] | undefined, traceId?: string): any | undefined { if (!traces?.length || !traceId) return undefined diff --git a/web/oss/src/lib/hooks/useEvalScenarioQueue/index.ts b/web/ee/src/lib/hooks/useEvalScenarioQueue/index.ts similarity index 99% rename from web/oss/src/lib/hooks/useEvalScenarioQueue/index.ts rename to web/ee/src/lib/hooks/useEvalScenarioQueue/index.ts index 4250637c7d..bfeead6e7b 100644 --- a/web/oss/src/lib/hooks/useEvalScenarioQueue/index.ts +++ b/web/ee/src/lib/hooks/useEvalScenarioQueue/index.ts @@ -16,7 +16,7 @@ import {evaluationRunStateFamily} from "@/oss/lib/hooks/useEvaluationRunData/ass import {useJwtRefresher} from "@/oss/lib/hooks/useJWT" import {EvaluationStatus} from "@/oss/lib/Types" import {slugify} from "@/oss/lib/utils/slugify" -import type {ConfigMessage, ResultMessage, RunEvalMessage} from "@/oss/lib/evalRunner/types" +import type {ConfigMessage, ResultMessage, RunEvalMessage} from "@/oss/lib/workers/evalRunner/types" import {getProjectValues} from "@/oss/state/project" // import {setOptimisticStepData} from "../../../components/EvalRunDetails/assets/optimisticUtils" @@ -169,7 +169,7 @@ export function useEvalScenarioQueue(options?: {concurrency?: number; runId?: st useEffect(() => { if (!sharedWorker) { sharedWorker = new Worker( - new URL("@/oss/lib/evalRunner/evalRunner.worker.ts", import.meta.url), + new URL("@/oss/lib/workers/evalRunner/evalRunner.worker.ts", import.meta.url), ) } diff --git a/web/oss/src/lib/hooks/useEvalScenarioQueue/responseQueue.ts b/web/ee/src/lib/hooks/useEvalScenarioQueue/responseQueue.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvalScenarioQueue/responseQueue.ts rename to web/ee/src/lib/hooks/useEvalScenarioQueue/responseQueue.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/bulkFetch.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/bulkFetch.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/bulkFetch.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/bulkFetch.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/cache.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/cache.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/cache.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/cache.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/index.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/index.ts similarity index 100% rename from 
web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/index.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/index.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/migrationHelper.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/migrationHelper.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/migrationHelper.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/migrationHelper.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedAtoms.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedAtoms.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedAtoms.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedAtoms.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts similarity index 99% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts index 4cea5caa76..41abde06a9 100644 --- a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts +++ b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts @@ -23,7 +23,7 @@ import {evalAtomStore} from "./store" // Re-export the atom families for external use export {runMetricsCacheFamily, runMetricsStatsCacheFamily} -import {fetchRunMetricsViaWorker} from "@/agenta-oss-common/lib/evalRunner/runMetricsWorker" +import {fetchRunMetricsViaWorker} from "@/agenta-oss-common/lib/workers/evalRunner/runMetricsWorker" // Helper: flatten acc object and nested metrics similar to legacy mergedMetricsAtom export function flattenMetrics(raw: Record<string, any>): Record<string, any> { diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedScenarios.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedScenarios.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedScenarios.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedScenarios.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/store.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/store.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/store.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/store.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/types.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/types.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/types.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/types.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/utils.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/utils.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/atoms/utils.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/utils.ts 
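For reviewers: a minimal usage sketch of the makeHashId helper added in web/ee/src/lib/helpers/hashUtils.ts earlier in this diff. The "@/ee" import alias and all ids below are assumptions for illustration, not part of the change. It demonstrates the two properties the helper is built around: stableStringifyRecursive sorts keys at every level, so key order never changes the digest, and only whitelisted REFERENCE_KEYS entries (reduced to their id) plus span_id/trace_id link pairs enter the hashed payload.

// Usage sketch only. Assumptions: the "@/ee" alias resolves to web/ee/src; all ids are made up.
import {makeHashId} from "@/ee/lib/helpers/hashUtils"

const a = makeHashId({
    references: {
        evaluator: {id: "ev-123", slug: "exact-match"}, // slug is dropped; only id is hashed
        not_a_reference: {id: "x"}, // ignored: not in REFERENCE_KEYS
    },
    links: {invocation: {trace_id: "tr-1", span_id: "sp-1"}},
})

const b = makeHashId({
    links: {invocation: {span_id: "sp-1", trace_id: "tr-1"}}, // same data, reordered keys
    references: {evaluator: {slug: "exact-match", id: "ev-123"}},
})

console.assert(a === b, "digest is independent of key order and non-whitelisted keys")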
diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/constants.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/constants.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/constants.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/constants.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioListViaWorker.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioListViaWorker.ts similarity index 93% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioListViaWorker.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioListViaWorker.ts index b39b1c86e2..ace3e7fa8e 100644 --- a/web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioListViaWorker.ts +++ b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioListViaWorker.ts @@ -7,7 +7,7 @@ let _worker: Worker | null = null function getWorker() { if (!_worker) { _worker = new Worker( - new URL("@/oss/lib/evalRunner/scenarioListWorker.ts", import.meta.url), + new URL("@/oss/lib/workers/evalRunner/scenarioListWorker.ts", import.meta.url), { type: "module", }, diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioViaWorker.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioViaWorker.ts similarity index 99% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioViaWorker.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioViaWorker.ts index 2b81ed1826..667b7858ba 100644 --- a/web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioViaWorker.ts +++ b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioViaWorker.ts @@ -85,7 +85,7 @@ const performFetch = async ( const {jwt, apiUrl, projectId} = await buildAuthContext() const {fetchStepsViaWorker} = await import( - "@/oss/lib/evalRunner/bulkWorker" + "@/agenta-oss-common/lib/workers/evalRunner/bulkWorker" ) const store = evalAtomStore() diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/scenarioFilters.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/scenarioFilters.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/scenarioFilters.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/scenarioFilters.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/index.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/index.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/index.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/index.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/types.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/types.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/types.ts rename to 
web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/types.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/index.ts b/web/ee/src/lib/hooks/useEvaluationRunData/index.ts similarity index 99% rename from web/oss/src/lib/hooks/useEvaluationRunData/index.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/index.ts index 7be13af806..df4bef1420 100644 --- a/web/oss/src/lib/hooks/useEvaluationRunData/index.ts +++ b/web/ee/src/lib/hooks/useEvaluationRunData/index.ts @@ -83,7 +83,7 @@ const useEvaluationRunData = (evaluationTableId: string | null, debug = false, r const projectId = useAtomValue(projectIdAtom) const setProjectVariantReferences = useSetAtom(setProjectVariantReferencesAtom) const user = useAtomValue(userAtom) - const requireUser = true + const requireUser = isDemo() const enrichRun = useEnrichEvaluationRun({debug, evalType}) const suppressLoadingRef = useRef(false) diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/refreshLiveRun.ts b/web/ee/src/lib/hooks/useEvaluationRunData/refreshLiveRun.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/refreshLiveRun.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/refreshLiveRun.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/types.ts b/web/ee/src/lib/hooks/useEvaluationRunData/types.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/types.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/types.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/useEvalRunScenarioData.tsx b/web/ee/src/lib/hooks/useEvaluationRunData/useEvalRunScenarioData.tsx similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/useEvalRunScenarioData.tsx rename to web/ee/src/lib/hooks/useEvaluationRunData/useEvalRunScenarioData.tsx diff --git a/web/oss/src/lib/hooks/useEvaluationRunData/useScenarioStepSnapshot.ts b/web/ee/src/lib/hooks/useEvaluationRunData/useScenarioStepSnapshot.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunData/useScenarioStepSnapshot.ts rename to web/ee/src/lib/hooks/useEvaluationRunData/useScenarioStepSnapshot.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunMetrics/assets/utils.ts b/web/ee/src/lib/hooks/useEvaluationRunMetrics/assets/utils.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunMetrics/assets/utils.ts rename to web/ee/src/lib/hooks/useEvaluationRunMetrics/assets/utils.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunMetrics/index.ts b/web/ee/src/lib/hooks/useEvaluationRunMetrics/index.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunMetrics/index.ts rename to web/ee/src/lib/hooks/useEvaluationRunMetrics/index.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunMetrics/types.ts b/web/ee/src/lib/hooks/useEvaluationRunMetrics/types.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunMetrics/types.ts rename to web/ee/src/lib/hooks/useEvaluationRunMetrics/types.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunScenarioSteps/types.ts b/web/ee/src/lib/hooks/useEvaluationRunScenarioSteps/types.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunScenarioSteps/types.ts rename to web/ee/src/lib/hooks/useEvaluationRunScenarioSteps/types.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunScenarios/index.ts b/web/ee/src/lib/hooks/useEvaluationRunScenarios/index.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunScenarios/index.ts rename to 
web/ee/src/lib/hooks/useEvaluationRunScenarios/index.ts diff --git a/web/oss/src/lib/hooks/useEvaluationRunScenarios/types.ts b/web/ee/src/lib/hooks/useEvaluationRunScenarios/types.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluationRunScenarios/types.ts rename to web/ee/src/lib/hooks/useEvaluationRunScenarios/types.ts diff --git a/web/oss/src/lib/hooks/useEvaluations.ts b/web/ee/src/lib/hooks/useEvaluations.ts similarity index 100% rename from web/oss/src/lib/hooks/useEvaluations.ts rename to web/ee/src/lib/hooks/useEvaluations.ts diff --git a/web/oss/src/lib/hooks/useInvocationResult/index.ts b/web/ee/src/lib/hooks/useInvocationResult/index.ts similarity index 99% rename from web/oss/src/lib/hooks/useInvocationResult/index.ts rename to web/ee/src/lib/hooks/useInvocationResult/index.ts index 201f709a48..a61fc59695 100644 --- a/web/oss/src/lib/hooks/useInvocationResult/index.ts +++ b/web/ee/src/lib/hooks/useInvocationResult/index.ts @@ -4,7 +4,7 @@ import {useAtomValue} from "jotai" import {renderChatMessages} from "@/oss/components/EvalRunDetails/assets/renderChatMessages" import {evalTypeAtom} from "@/oss/components/EvalRunDetails/state/evalType" -import {useRunId} from "@agenta/oss/src/contexts/RunIdContext" +import {useRunId} from "@/oss/contexts/RunIdContext" import axios from "@/oss/lib/api/assets/axiosConfig" import {snakeToCamelCaseKeys} from "@/oss/lib/helpers/casing" import {readInvocationResponse} from "@/oss/lib/helpers/traceUtils" diff --git a/web/oss/src/lib/hooks/useInvocationResult/types.ts b/web/ee/src/lib/hooks/useInvocationResult/types.ts similarity index 100% rename from web/oss/src/lib/hooks/useInvocationResult/types.ts rename to web/ee/src/lib/hooks/useInvocationResult/types.ts diff --git a/web/oss/src/lib/hooks/usePreviewEvaluations/assets/utils.ts b/web/ee/src/lib/hooks/usePreviewEvaluations/assets/utils.ts similarity index 100% rename from web/oss/src/lib/hooks/usePreviewEvaluations/assets/utils.ts rename to web/ee/src/lib/hooks/usePreviewEvaluations/assets/utils.ts diff --git a/web/oss/src/lib/hooks/usePreviewEvaluations/index.ts b/web/ee/src/lib/hooks/usePreviewEvaluations/index.ts similarity index 100% rename from web/oss/src/lib/hooks/usePreviewEvaluations/index.ts rename to web/ee/src/lib/hooks/usePreviewEvaluations/index.ts diff --git a/web/oss/src/lib/hooks/usePreviewEvaluations/projectVariantConfigs.ts b/web/ee/src/lib/hooks/usePreviewEvaluations/projectVariantConfigs.ts similarity index 100% rename from web/oss/src/lib/hooks/usePreviewEvaluations/projectVariantConfigs.ts rename to web/ee/src/lib/hooks/usePreviewEvaluations/projectVariantConfigs.ts diff --git a/web/oss/src/lib/hooks/usePreviewEvaluations/states/queryFilterAtoms.ts b/web/ee/src/lib/hooks/usePreviewEvaluations/states/queryFilterAtoms.ts similarity index 100% rename from web/oss/src/lib/hooks/usePreviewEvaluations/states/queryFilterAtoms.ts rename to web/ee/src/lib/hooks/usePreviewEvaluations/states/queryFilterAtoms.ts diff --git a/web/oss/src/lib/hooks/usePreviewEvaluations/types.ts b/web/ee/src/lib/hooks/usePreviewEvaluations/types.ts similarity index 100% rename from web/oss/src/lib/hooks/usePreviewEvaluations/types.ts rename to web/ee/src/lib/hooks/usePreviewEvaluations/types.ts diff --git a/web/oss/src/lib/hooks/usePreviewRunningEvaluations/index.ts b/web/ee/src/lib/hooks/usePreviewRunningEvaluations/index.ts similarity index 100% rename from web/oss/src/lib/hooks/usePreviewRunningEvaluations/index.ts rename to web/ee/src/lib/hooks/usePreviewRunningEvaluations/index.ts 
diff --git a/web/oss/src/lib/hooks/usePreviewRunningEvaluations/states/runningEvalAtom.ts b/web/ee/src/lib/hooks/usePreviewRunningEvaluations/states/runningEvalAtom.ts similarity index 100% rename from web/oss/src/lib/hooks/usePreviewRunningEvaluations/states/runningEvalAtom.ts rename to web/ee/src/lib/hooks/usePreviewRunningEvaluations/states/runningEvalAtom.ts diff --git a/web/oss/src/lib/hooks/useRunMetricsMap/index.ts b/web/ee/src/lib/hooks/useRunMetricsMap/index.ts similarity index 100% rename from web/oss/src/lib/hooks/useRunMetricsMap/index.ts rename to web/ee/src/lib/hooks/useRunMetricsMap/index.ts diff --git a/web/oss/src/lib/metricColumnFactory.tsx b/web/ee/src/lib/metricColumnFactory.tsx similarity index 100% rename from web/oss/src/lib/metricColumnFactory.tsx rename to web/ee/src/lib/metricColumnFactory.tsx diff --git a/web/oss/src/lib/metricSorter.ts b/web/ee/src/lib/metricSorter.ts similarity index 100% rename from web/oss/src/lib/metricSorter.ts rename to web/ee/src/lib/metricSorter.ts diff --git a/web/oss/src/lib/metricUtils.ts b/web/ee/src/lib/metricUtils.ts similarity index 100% rename from web/oss/src/lib/metricUtils.ts rename to web/ee/src/lib/metricUtils.ts diff --git a/web/oss/src/lib/metrics/utils.ts b/web/ee/src/lib/metrics/utils.ts similarity index 100% rename from web/oss/src/lib/metrics/utils.ts rename to web/ee/src/lib/metrics/utils.ts diff --git a/web/oss/src/lib/tableUtils.ts b/web/ee/src/lib/tableUtils.ts similarity index 100% rename from web/oss/src/lib/tableUtils.ts rename to web/ee/src/lib/tableUtils.ts diff --git a/web/oss/src/lib/types_ee.ts b/web/ee/src/lib/types_ee.ts similarity index 100% rename from web/oss/src/lib/types_ee.ts rename to web/ee/src/lib/types_ee.ts diff --git a/web/oss/src/lib/evalRunner/bulkWorker.ts b/web/ee/src/lib/workers/evalRunner/bulkWorker.ts similarity index 100% rename from web/oss/src/lib/evalRunner/bulkWorker.ts rename to web/ee/src/lib/workers/evalRunner/bulkWorker.ts diff --git a/web/oss/src/lib/evalRunner/evalRunner.worker.ts b/web/ee/src/lib/workers/evalRunner/evalRunner.worker.ts similarity index 100% rename from web/oss/src/lib/evalRunner/evalRunner.worker.ts rename to web/ee/src/lib/workers/evalRunner/evalRunner.worker.ts diff --git a/web/oss/src/lib/evalRunner/fetchRunMetrics.worker.ts b/web/ee/src/lib/workers/evalRunner/fetchRunMetrics.worker.ts similarity index 100% rename from web/oss/src/lib/evalRunner/fetchRunMetrics.worker.ts rename to web/ee/src/lib/workers/evalRunner/fetchRunMetrics.worker.ts diff --git a/web/oss/src/lib/evalRunner/fetchSteps.worker.ts b/web/ee/src/lib/workers/evalRunner/fetchSteps.worker.ts similarity index 100% rename from web/oss/src/lib/evalRunner/fetchSteps.worker.ts rename to web/ee/src/lib/workers/evalRunner/fetchSteps.worker.ts diff --git a/web/oss/src/lib/evalRunner/pureEnrichment.ts b/web/ee/src/lib/workers/evalRunner/pureEnrichment.ts similarity index 100% rename from web/oss/src/lib/evalRunner/pureEnrichment.ts rename to web/ee/src/lib/workers/evalRunner/pureEnrichment.ts diff --git a/web/oss/src/lib/evalRunner/runMetricsWorker.ts b/web/ee/src/lib/workers/evalRunner/runMetricsWorker.ts similarity index 100% rename from web/oss/src/lib/evalRunner/runMetricsWorker.ts rename to web/ee/src/lib/workers/evalRunner/runMetricsWorker.ts diff --git a/web/oss/src/lib/evalRunner/scenarioListWorker.ts b/web/ee/src/lib/workers/evalRunner/scenarioListWorker.ts similarity index 100% rename from web/oss/src/lib/evalRunner/scenarioListWorker.ts rename to 
web/ee/src/lib/workers/evalRunner/scenarioListWorker.ts diff --git a/web/oss/src/lib/evalRunner/types.ts b/web/ee/src/lib/workers/evalRunner/types.ts similarity index 89% rename from web/oss/src/lib/evalRunner/types.ts rename to web/ee/src/lib/workers/evalRunner/types.ts index 16f1173255..1b98796efd 100644 --- a/web/oss/src/lib/evalRunner/types.ts +++ b/web/ee/src/lib/workers/evalRunner/types.ts @@ -1,6 +1,6 @@ import {EvaluationStatus} from "@/oss/lib/Types" -import {IStepResponse} from "@agenta/oss/src/lib/hooks/useEvaluationRunScenarioSteps/types" +import {IStepResponse} from "../../hooks/useEvaluationRunScenarioSteps/types" export interface RunEvalMessage { type: "run-invocation" diff --git a/web/oss/src/lib/evalRunner/workerFetch.ts b/web/ee/src/lib/workers/evalRunner/workerFetch.ts similarity index 98% rename from web/oss/src/lib/evalRunner/workerFetch.ts rename to web/ee/src/lib/workers/evalRunner/workerFetch.ts index 2cf69dc70b..2e45a07575 100644 --- a/web/oss/src/lib/evalRunner/workerFetch.ts +++ b/web/ee/src/lib/workers/evalRunner/workerFetch.ts @@ -20,8 +20,8 @@ import {PreviewTestcase, PreviewTestset} from "@/oss/lib/Types" import { deserializeRunIndex, RunIndex, -} from "@agenta/oss/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex" -import {EvalRunDataContextType} from "@agenta/oss/src/lib/hooks/useEvaluationRunData/types" +} from "../../hooks/useEvaluationRunData/assets/helpers/buildRunIndex" +import {EvalRunDataContextType} from "../../hooks/useEvaluationRunData/types" import { buildScenarioCore, diff --git a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx index 97047a940b..df1b8461be 100644 --- a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx +++ b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx @@ -1,3 +1,20 @@ -import EvaluatorConfigureRoute from "@agenta/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id]" +import {useMemo} from "react" + +import {useRouter} from "next/router" + +import ConfigureEvaluatorPage from "@/oss/components/Evaluators/components/ConfigureEvaluator" + +const EvaluatorConfigureRoute = () => { + const router = useRouter() + const evaluatorId = useMemo(() => { + const id = router.query.evaluator_id + if (Array.isArray(id)) { + return id[0] + } + return id ?? 
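Editor's note: the worker-path updates above all touch the same pattern, a module-scoped, lazily created Worker whose entry point is a static `new URL(..., import.meta.url)` literal. Bundlers resolve that literal at build time to emit a separate worker chunk, which is why relocating the workers under `lib/workers/` requires editing every call site rather than a single alias. A minimal sketch of the pattern, with an illustrative path:

```ts
// Lazy, module-scoped Worker singleton, as used by fetchScenarioListViaWorker.
// The URL argument must stay a static string literal so the bundler can see it.
let _worker: Worker | null = null

function getWorker(): Worker {
    if (!_worker) {
        _worker = new Worker(
            // illustrative path; mirrors the post-move location in this diff
            new URL("@/oss/lib/workers/evalRunner/scenarioListWorker.ts", import.meta.url),
            {type: "module"},
        )
    }
    return _worker
}
```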
diff --git a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx
index 97047a940b..df1b8461be 100644
--- a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx
+++ b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx
@@ -1,3 +1,20 @@
-import EvaluatorConfigureRoute from "@agenta/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id]"
+import {useMemo} from "react"
+
+import {useRouter} from "next/router"
+
+import ConfigureEvaluatorPage from "@/oss/components/Evaluators/components/ConfigureEvaluator"
+
+const EvaluatorConfigureRoute = () => {
+    const router = useRouter()
+    const evaluatorId = useMemo(() => {
+        const id = router.query.evaluator_id
+        if (Array.isArray(id)) {
+            return id[0]
+        }
+        return id ?? null
+    }, [router.query.evaluator_id])
+
+    return <ConfigureEvaluatorPage evaluatorId={evaluatorId} />
+}
 
 export default EvaluatorConfigureRoute
diff --git a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/index.tsx b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/index.tsx
index a5cb6daf29..7996228a65 100644
--- a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/index.tsx
+++ b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluators/index.tsx
@@ -1,3 +1,7 @@
-import ProjectEvaluatorsPage from "@agenta/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/index"
+import EvaluatorsRegistry from "@/oss/components/Evaluators"
+
+const ProjectEvaluatorsPage = () => {
+    return <EvaluatorsRegistry />
+}
 
 export default ProjectEvaluatorsPage
diff --git a/web/oss/src/services/evaluationRuns/api/index.ts b/web/ee/src/services/evaluationRuns/api/index.ts
similarity index 100%
rename from web/oss/src/services/evaluationRuns/api/index.ts
rename to web/ee/src/services/evaluationRuns/api/index.ts
diff --git a/web/oss/src/services/evaluationRuns/api/types.ts b/web/ee/src/services/evaluationRuns/api/types.ts
similarity index 100%
rename from web/oss/src/services/evaluationRuns/api/types.ts
rename to web/ee/src/services/evaluationRuns/api/types.ts
diff --git a/web/ee/src/services/evaluationRuns/utils.ts b/web/ee/src/services/evaluationRuns/utils.ts
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/web/oss/src/services/evaluations/api/index.ts b/web/ee/src/services/evaluations/api/index.ts
similarity index 100%
rename from web/oss/src/services/evaluations/api/index.ts
rename to web/ee/src/services/evaluations/api/index.ts
diff --git a/web/oss/src/services/evaluations/api_ee/index.ts b/web/ee/src/services/evaluations/api_ee/index.ts
similarity index 97%
rename from web/oss/src/services/evaluations/api_ee/index.ts
rename to web/ee/src/services/evaluations/api_ee/index.ts
index cab7fe809b..4ae4376f4d 100644
--- a/web/oss/src/services/evaluations/api_ee/index.ts
+++ b/web/ee/src/services/evaluations/api_ee/index.ts
@@ -14,7 +14,7 @@ import {
     EvaluatorMappingInput,
     EvaluatorMappingOutput,
     EvaluatorOutputInterface,
-} from "@agenta/oss/src/lib/types_ee"
+} from "../../../lib/types_ee"
 
 export const createEvaluatorDataMapping = async (
     config: EvaluatorMappingInput,
diff --git a/web/oss/src/services/evaluations/workerUtils.ts b/web/ee/src/services/evaluations/workerUtils.ts
similarity index 100%
rename from web/oss/src/services/evaluations/workerUtils.ts
rename to web/ee/src/services/evaluations/workerUtils.ts
diff --git a/web/oss/src/services/human-evaluations/api/index.ts b/web/ee/src/services/human-evaluations/api/index.ts
similarity index 100%
rename from web/oss/src/services/human-evaluations/api/index.ts
rename to web/ee/src/services/human-evaluations/api/index.ts
diff --git a/web/oss/src/services/human-evaluations/hooks/useEvaluationResults.ts b/web/ee/src/services/human-evaluations/hooks/useEvaluationResults.ts
similarity index 100%
rename from web/oss/src/services/human-evaluations/hooks/useEvaluationResults.ts
rename to web/ee/src/services/human-evaluations/hooks/useEvaluationResults.ts
diff --git a/web/ee/src/services/onlineEvaluations/api.ts b/web/ee/src/services/onlineEvaluations/api.ts
new file mode 100644
index 0000000000..e0650b45d9
--- /dev/null
+++ b/web/ee/src/services/onlineEvaluations/api.ts
@@ -0,0 +1,188 @@
+import axios from "@/oss/lib/api/assets/axiosConfig"
+import {getAgentaApiUrl} from "@/oss/lib/helpers/api"
+import {getProjectValues} from "@/oss/state/project"
+
+type LogicalOperator = "and" | "or" | "not" | "nand" | "nor"
+
+export interface QueryConditionPayload {
+    field: string
+    key?: string
+    value?: unknown
+    operator?: string
+    options?: Record<string, unknown>
+}
+
+export interface QueryFilteringPayload {
+    operator?: LogicalOperator
+    conditions: (QueryConditionPayload | QueryFilteringPayload)[]
+}
+
+export interface QueryWindowingPayload {
+    newest?: string
+    oldest?: string
+    next?: string
+    limit?: number
+    order?: "ascending" | "descending"
+    interval?: number
+    rate?: number
+}
+
+export interface QueryRevisionDataPayload {
+    filtering?: QueryFilteringPayload
+    windowing?: QueryWindowingPayload
+}
+
+export interface SimpleQueryCreatePayload {
+    slug: string
+    name?: string
+    description?: string
+    flags?: Record<string, unknown>
+    tags?: Record<string, unknown>
+    meta?: Record<string, unknown>
+    data?: QueryRevisionDataPayload
+}
+
+export interface SimpleQueryCreateRequest {
+    query: SimpleQueryCreatePayload
+}
+
+export interface SimpleQueryResponse {
+    count: number
+    query?: {
+        id: string
+        slug?: string
+        data?: QueryRevisionDataPayload
+        meta?: Record<string, unknown>
+    } | null
+}
+
+export interface QueryRevisionRetrieveRequest {
+    query_ref?: {id?: string; slug?: string} | null
+    query_variant_ref?: {id?: string; slug?: string} | null
+    query_revision_ref?: {id?: string; slug?: string} | null
+}
+
+export interface QueryRevisionResponse {
+    count: number
+    query_revision?: {
+        id?: string
+        slug?: string
+        variant_id?: string
+        version?: string | number
+        data?: QueryRevisionDataPayload
+    } | null
+}
+
+export interface SimpleEvaluationFlagsPayload {
+    is_live?: boolean
+    is_closed?: boolean
+    is_active?: boolean
+}
+
+export interface SimpleEvaluationDataPayload {
+    status?: string
+    query_steps?: string[] | Record<string, unknown>
+    testset_steps?: Record<string, unknown>
+    application_steps?: Record<string, unknown>
+    evaluator_steps?: string[] | Record<string, unknown>
+    repeats?: number
+    // Structured references for online evaluations
+    query_ref?: {id?: string; slug?: string} | null
+    query_revision_ref?: {id?: string; slug?: string} | null
+    evaluator_ref?: {id?: string; slug?: string} | null
+    configuration?: Record<string, unknown>
+}
+
+export interface SimpleEvaluationCreatePayload {
+    name?: string
+    description?: string
+    flags?: SimpleEvaluationFlagsPayload
+    tags?: Record<string, unknown>
+    meta?: Record<string, unknown>
+    data: SimpleEvaluationDataPayload
+}
+
+export interface SimpleEvaluationCreateRequest {
+    evaluation: SimpleEvaluationCreatePayload
+}
+
+export interface SimpleEvaluationResponse {
+    count: number
+    evaluation?: SimpleEvaluationPayload | null
+}
+
+export interface SimpleEvaluationPayload {
+    id?: string
+    slug?: string
+    name?: string
+    description?: string
+    created_at?: string
+    updated_at?: string
+    created_by_id?: string
+    updated_by_id?: string
+    flags?: SimpleEvaluationFlagsPayload
+    data?: SimpleEvaluationDataPayload
+    meta?: Record<string, unknown>
+    tags?: Record<string, unknown>
+}
+
+export interface SimpleEvaluationsResponse {
+    count: number
+    evaluations: SimpleEvaluationPayload[]
+}
+
+export interface SimpleEvaluationsQueryRequest {
+    evaluation?: {
+        flags?: SimpleEvaluationFlagsPayload
+        ids?: string[]
+    }
+    tags?: Record<string, unknown>
+    meta?: Record<string, unknown>
+}
+
+const getProjectUrl = (path: string) => {
+    const {projectId} = getProjectValues()
+    return `${getAgentaApiUrl()}${path}?project_id=${projectId}`
+}
+
+export const createSimpleQuery = async (
+    payload: SimpleQueryCreateRequest,
+): Promise<SimpleQueryResponse> => {
+    const {data} = await axios.post(getProjectUrl("/preview/simple/queries/"), payload)
+    return data as SimpleQueryResponse
+}
+
+export const retrieveQueryRevision = async (
+    payload: QueryRevisionRetrieveRequest,
+): Promise<QueryRevisionResponse> => {
+    const {data} = await axios.post(getProjectUrl("/preview/queries/revisions/retrieve"), payload)
+    return data as QueryRevisionResponse
+}
+
+export const createSimpleEvaluation = async (
+    payload: SimpleEvaluationCreateRequest,
+): Promise<SimpleEvaluationResponse> => {
+    const {data} = await axios.post(getProjectUrl("/preview/simple/evaluations/"), payload)
+    return data as SimpleEvaluationResponse
+}
+
+export const querySimpleEvaluations = async (
+    payload?: SimpleEvaluationsQueryRequest,
+): Promise<SimpleEvaluationsResponse> => {
+    const url = getProjectUrl("/preview/simple/evaluations/query")
+    const body = payload ?? {}
+    const {data} = await axios.post(url, body)
+    return data as SimpleEvaluationsResponse
+}
+
+export const stopSimpleEvaluation = async (evaluationId: string) => {
+    const url = getProjectUrl(`/preview/simple/evaluations/${evaluationId}/stop`)
+    const {data} = await axios.post(url)
+    return data
+}
+
+export const startSimpleEvaluation = async (evaluationId: string) => {
+    const url = getProjectUrl(`/preview/simple/evaluations/${evaluationId}/start`)
+    const {data} = await axios.post(url)
+    return data
+}
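Editor's note: for orientation, here is a minimal sketch of how the simple-evaluation endpoints above compose into an online evaluation: create a query that selects traces, bind a live evaluation to it, then start it. The slug, filter condition, and evaluator reference are invented for illustration, and the import path assumes a sibling module:

```ts
import {createSimpleQuery, createSimpleEvaluation, startSimpleEvaluation} from "./api"

async function launchOnlineEvaluation(): Promise<void> {
    // 1. Create a query whose revision filters which traces get evaluated
    const {query} = await createSimpleQuery({
        query: {
            slug: "error-traces", // invented slug
            data: {
                filtering: {
                    operator: "and",
                    conditions: [{field: "status", value: "error", operator: "is"}],
                },
                windowing: {limit: 100, order: "descending"},
            },
        },
    })
    if (!query) throw new Error("Query creation failed")

    // 2. Create a live evaluation referencing that query and an evaluator
    const {evaluation} = await createSimpleEvaluation({
        evaluation: {
            name: "Error-trace judge", // invented name
            flags: {is_live: true},
            data: {
                query_ref: {id: query.id},
                evaluator_ref: {slug: "llm-judge"}, // invented evaluator slug
            },
        },
    })
    if (!evaluation?.id) throw new Error("Evaluation creation failed")

    // 3. Start (and later stop) the evaluation by id
    await startSimpleEvaluation(evaluation.id)
}
```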
diff --git a/web/ee/src/services/promptVersioning/api/index.ts b/web/ee/src/services/promptVersioning/api/index.ts
new file mode 100644
index 0000000000..d51cd8ac75
--- /dev/null
+++ b/web/ee/src/services/promptVersioning/api/index.ts
@@ -0,0 +1,41 @@
+import axios from "@/oss/lib/api/assets/axiosConfig"
+import {getAgentaApiUrl} from "@/oss/lib/helpers/api"
+import {getProjectValues} from "@/oss/state/project"
+
+//Prefix convention:
+//  - fetch: GET single entity from server
+//  - fetchAll: GET all entities from server
+//  - create: POST data to server
+//  - update: PUT data to server
+//  - delete: DELETE data from server
+
+// versioning
+export const fetchAllPromptVersioning = async (variantId: string, ignoreAxiosError = false) => {
+    const {projectId} = getProjectValues()
+
+    const {data} = await axios.get(
+        `${getAgentaApiUrl()}/variants/${variantId}/revisions?project_id=${projectId}`,
+        {
+            _ignoreError: ignoreAxiosError,
+        } as any,
+    )
+
+    return data
+}
+
+export const fetchPromptRevision = async (
+    variantId: string,
+    revisionNumber: number,
+    ignoreAxiosError = false,
+) => {
+    const {projectId} = getProjectValues()
+
+    const {data} = await axios.get(
+        `${getAgentaApiUrl()}/variants/${variantId}/revisions/${revisionNumber}?project_id=${projectId}`,
+        {
+            _ignoreError: ignoreAxiosError,
+        } as any,
+    )
+
+    return data
+}
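Editor's note: a short usage sketch for the prompt-versioning helpers above. The variant id and revision number are placeholders, and the trailing `true` suppresses the shared axios error handler via the project's `_ignoreError` convention:

```ts
import {fetchAllPromptVersioning, fetchPromptRevision} from "./api" // sibling-module import assumed

async function inspectRevisions(variantId: string): Promise<void> {
    // Full revision history for the variant
    const revisions = await fetchAllPromptVersioning(variantId)
    console.log(`variant ${variantId}: ${revisions.length} revisions`)

    // One specific revision, handling errors locally instead of the global handler
    try {
        const rev = await fetchPromptRevision(variantId, 1, true)
        console.log("revision 1:", rev)
    } catch (err) {
        console.error("failed to load revision", err)
    }
}
```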
diff --git a/web/oss/src/services/runMetrics/api/assets/contants.ts b/web/ee/src/services/runMetrics/api/assets/contants.ts
similarity index 100%
rename from web/oss/src/services/runMetrics/api/assets/contants.ts
rename to web/ee/src/services/runMetrics/api/assets/contants.ts
diff --git a/web/oss/src/services/runMetrics/api/index.ts b/web/ee/src/services/runMetrics/api/index.ts
similarity index 100%
rename from web/oss/src/services/runMetrics/api/index.ts
rename to web/ee/src/services/runMetrics/api/index.ts
diff --git a/web/oss/src/services/runMetrics/api/types.ts b/web/ee/src/services/runMetrics/api/types.ts
similarity index 100%
rename from web/oss/src/services/runMetrics/api/types.ts
rename to web/ee/src/services/runMetrics/api/types.ts
diff --git a/web/oss/src/services/variantConfigs/api/index.ts b/web/ee/src/services/variantConfigs/api/index.ts
similarity index 100%
rename from web/oss/src/services/variantConfigs/api/index.ts
rename to web/ee/src/services/variantConfigs/api/index.ts
diff --git a/web/ee/src/state/observability/dashboard.ts b/web/ee/src/state/observability/dashboard.ts
index 6d040b22d7..e8c0215a71 100644
--- a/web/ee/src/state/observability/dashboard.ts
+++ b/web/ee/src/state/observability/dashboard.ts
@@ -3,9 +3,9 @@ import {eagerAtom} from "jotai-eager"
 import {atomWithQuery} from "jotai-tanstack-query"
 
 import {GenerationDashboardData} from "@/oss/lib/types_ee"
-import {fetchGenerationsDashboardData} from "@/oss/services/tracing/api"
 import {routerAppIdAtom} from "@/oss/state/app/atoms/fetcher"
 import {projectIdAtom} from "@/oss/state/project"
+import {fetchGenerationsDashboardData} from "@/oss/services/tracing/api"
 
 const DEFAULT_RANGE = "30_days"
diff --git a/web/oss/src/state/url/focusDrawer.ts b/web/ee/src/state/url/focusDrawer.ts
similarity index 100%
rename from web/oss/src/state/url/focusDrawer.ts
rename to web/ee/src/state/url/focusDrawer.ts
diff --git a/web/oss/next.config.ts b/web/oss/next.config.ts
index fd65177200..be4f43f167 100644
--- a/web/oss/next.config.ts
+++ b/web/oss/next.config.ts
@@ -1,13 +1,7 @@
-import {createRequire} from "module"
 import path from "path"
 
 import type {NextConfig} from "next"
 
-const require = createRequire(import.meta.url)
-const reduxToolkitCjsEntry = path.join(
-    path.dirname(require.resolve("@reduxjs/toolkit/package.json")),
-    "dist/cjs/index.js",
-)
 const isDevelopment = process.env.NODE_ENV === "development"
 
 const COMMON_CONFIG: NextConfig = {
@@ -68,12 +62,6 @@ const COMMON_CONFIG: NextConfig = {
         "@ant-design/icons-svg",
     ],
     webpack: (config, {webpack, isServer}) => {
-        config.resolve ??= {}
-        config.resolve.alias = {
-            ...(config.resolve.alias ?? {}),
-            "@reduxjs/toolkit": reduxToolkitCjsEntry,
-        }
-
         const envs: Record<string, string> = {}
 
         config.cache = false
diff --git a/web/oss/package.json b/web/oss/package.json
index e5e89e0037..eebe2ffeba 100644
--- a/web/oss/package.json
+++ b/web/oss/package.json
@@ -1,6 +1,6 @@
 {
     "name": "@agenta/oss",
-    "version": "0.62.1",
+    "version": "0.61.0",
     "private": true,
     "engines": {
         "node": ">=18"
@@ -113,7 +113,6 @@
         "react-resizable": "^3.0.5",
         "react-syntax-highlighter": "^15.6.0",
         "react-window": "^1.8.11",
-        "recharts": "^3.1.0",
         "semver": "^7.7.2",
         "shiki": "^3.12.2",
         "stable-hash": "^0.0.6",
diff --git a/web/oss/src/components/Sidebar/hooks/useSidebarConfig/index.tsx b/web/oss/src/components/Sidebar/hooks/useSidebarConfig/index.tsx
index 2b52c69b21..dd8fa3cbbd 100644
--- a/web/oss/src/components/Sidebar/hooks/useSidebarConfig/index.tsx
+++ b/web/oss/src/components/Sidebar/hooks/useSidebarConfig/index.tsx
@@ -55,14 +55,14 @@ export const useSidebarConfig = () => {
             key: "project-evaluators-link",
             title: "Evaluators",
             link: `${projectURL}/evaluators`,
-            // isHidden: !isDemo(),
+            isHidden: !isDemo(),
             icon: ,
         },
         {
             key: "project-evaluations-link",
             title: "Evaluations",
             link: `${projectURL}/evaluations`,
-            // isHidden: !isDemo(),
+            isHidden: !isDemo(),
             icon: ,
         },
         {
@@ -96,7 +96,7 @@ export const useSidebarConfig = () => {
             key: "app-evaluations-link",
             title: "Evaluations",
             link: `${appURL || recentlyVisitedAppURL}/evaluations`,
-            isHidden: !currentApp && !recentlyVisitedAppId,
+            isHidden: (!currentApp && !recentlyVisitedAppId) || !isDemo(),
             icon: ,
         },
         {
diff --git a/web/oss/src/components/TestsetTable/TestsetTable.tsx b/web/oss/src/components/TestsetTable/TestsetTable.tsx
index 2235739473..17fa756fa9 100644
--- a/web/oss/src/components/TestsetTable/TestsetTable.tsx
+++ b/web/oss/src/components/TestsetTable/TestsetTable.tsx
@@ -11,8 +11,8 @@ import {
 import {type IHeaderParams} from "@ag-grid-community/core"
 import {CheckCircleFilled} from "@ant-design/icons"
-import {Link} from "@phosphor-icons/react"
 import {Button, Input, Typography, message, Space, Tag, Tooltip} from "antd"
+import {Link} from "@phosphor-icons/react"
 import {NoticeType} from "antd/es/message/interface"
 import {AxiosResponse} from "axios"
 import {useRouter} from "next/router"
diff --git a/web/oss/src/lib/helpers/evaluate.ts b/web/oss/src/lib/helpers/evaluate.ts
index c3c280f3e1..b172237700 100644
--- a/web/oss/src/lib/helpers/evaluate.ts
+++ b/web/oss/src/lib/helpers/evaluate.ts
@@ -1,341 +1,3 @@
-import {EvaluationType} from "@agenta/oss/src/lib/enums"
-import {convertToCsv, downloadCsv} from "@agenta/oss/src/lib/helpers/fileManipulations"
-import {formatCurrency, formatLatency} from "@agenta/oss/src/lib/helpers/formatters"
-import {isDemo} from "@agenta/oss/src/lib/helpers/utils"
-import {
-    Evaluation,
-    GenericObject,
-    TypedValue,
-    Variant,
-    _Evaluation,
-    EvaluationScenario,
-} from "@agenta/oss/src/lib/Types"
-import dayjs from "dayjs"
-import capitalize from "lodash/capitalize"
-import round from "lodash/round"
-
-import AlertPopup from "@/oss/components/AlertPopup/AlertPopup"
-import {runningStatuses} from "@/oss/components/pages/evaluations/cellRenderers/cellRenderers"
-import {
-    HumanEvaluationListTableDataType,
-    SingleModelEvaluationListTableDataType,
-} from "@/oss/lib/Types"
-import {fetchEvaluatonIdsByResource} from "@/oss/services/evaluations/api"
-
-export const exportABTestingEvaluationData = (
-    evaluation: Evaluation,
-    scenarios: EvaluationScenario[],
-    rows: GenericObject[],
-) => {
-    const exportRow = rows.map((data, ix) => {
-        const inputColumns = evaluation.testset.testsetChatColumn
-            ? {Input: evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn]}
-            : data.inputs.reduce(
-                  (columns: any, input: {input_name: string; input_value: string}) => {
-                      columns[`${input.input_name}`] = input.input_value
-                      return columns
-                  },
-                  {},
-              )
-        return {
-            ...inputColumns,
-            [`App Variant ${evaluation.variants[0].variantName} Output 0`]: data?.columnData0
-                ? data?.columnData0
-                : data.outputs[0]?.variant_output,
-            [`App Variant ${evaluation.variants[1].variantName} Output 1`]: data?.columnData1
-                ? data?.columnData1
-                : data.outputs[1]?.variant_output,
-            ["Vote"]:
-                evaluation.variants.find((v: Variant) => v.variantId === data.vote)?.variantName ||
-                data.vote,
-            ["Expected Output"]:
-                scenarios[ix]?.correctAnswer || evaluation.testset.csvdata[ix].correct_answer,
-            ["Additional notes"]: scenarios[ix]?.note,
-        }
-    })
-    const exportCol = Object.keys(exportRow[0])
-
-    const csvData = convertToCsv(exportRow, exportCol)
-    const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.variants[1].variantName}_${evaluation.evaluationType}.csv`
-    downloadCsv(csvData, filename)
-}
-
-export const exportSingleModelEvaluationData = (
-    evaluation: Evaluation,
-    scenarios: EvaluationScenario[],
-    rows: GenericObject[],
-) => {
-    const exportRow = rows.map((data, ix) => {
-        const inputColumns = evaluation.testset.testsetChatColumn
-            ? {Input: evaluation.testset.csvdata[ix]?.[evaluation.testset.testsetChatColumn]}
-            : data.inputs.reduce(
-                  (columns: any, input: {input_name: string; input_value: string}) => {
-                      columns[`${input.input_name}`] = input.input_value
-                      return columns
-                  },
-                  {},
-              )
-        const numericScore = parseInt(data.score)
-        return {
-            ...inputColumns,
-            [`App Variant ${evaluation.variants[0].variantName} Output 0`]: data?.columnData0
-                ? data?.columnData0
-                : data.outputs[0]?.variant_output,
-            ["Score"]: isNaN(numericScore) ? "-" : numericScore,
-            ["Expected Output"]:
-                scenarios[ix]?.correctAnswer || evaluation.testset.csvdata[ix].correct_answer,
-            ["Additional notes"]: scenarios[ix]?.note,
-        }
-    })
-    const exportCol = Object.keys(exportRow[0])
-
-    const csvData = convertToCsv(exportRow, exportCol)
-    const filename = `${evaluation.appName}_${evaluation.variants[0].variantName}_${evaluation.evaluationType}.csv`
-    downloadCsv(csvData, filename)
-}
-
-export const calculateResultsDataAvg = (resultsData: Record<string, any>, multiplier = 10) => {
-    const obj = {...resultsData}
-    Object.keys(obj).forEach((key) => {
-        if (isNaN(+key)) delete obj[key]
-    })
-
-    const count = Object.values(obj).reduce((acc, value) => acc + +value, 0)
-    const sum = Object.keys(obj).reduce((acc, key) => acc + (parseFloat(key) || 0) * +obj[key], 0)
-    return (sum / count) * multiplier
-}
-
-export const getVotesPercentage = (record: HumanEvaluationListTableDataType, index: number) => {
-    const variant = record.votesData.variants[index]
-    return record.votesData.variants_votes_data[variant]?.percentage
-}
-
-export const checkIfResourceValidForDeletion = async (
-    data: Omit<Parameters<typeof fetchEvaluatonIdsByResource>[0], "appId">,
-) => {
-    if (isDemo()) {
-        const response = await fetchEvaluatonIdsByResource(data)
-        if (response.data.length > 0) {
-            const name =
-                (data.resourceType === "testset"
-                    ? "Testset"
-                    : data.resourceType === "evaluator_config"
-                      ? "Evaluator"
-                      : "Variant") + (data.resourceIds.length > 1 ? "s" : "")
-
-            const suffix = response.data.length > 1 ? "s" : ""
-            AlertPopup({
-                title: `${name} is in use`,
-                message: `The ${name} is currently in used by ${response.data.length} evaluation${suffix}. Please delete the evaluation${suffix} first.`,
-                cancelText: null,
-                okText: "Ok",
-            })
-            return false
-        }
-    }
+export const checkIfResourceValidForDeletion = async (data: any) => {
     return true
 }
-
-export function getTypedValue(res?: TypedValue) {
-    const {value, type, error} = res || {}
-    if (type === "error") {
-        return error?.message
-    }
-
-    if (value === undefined) return "-"
-
-    switch (type) {
-        case "number":
-            return round(Number(value), 2)
-        case "boolean":
-        case "bool":
-            return capitalize(value?.toString())
-        case "cost":
-            return formatCurrency(Number(value))
-        case "latency":
-            return formatLatency(Number(value))
-        case "string":
-        case "text":
-            return value?.toString() ?? "-"
-        case "code":
-        case "regex":
-            return value?.toString() ?? "-"
-        case "object":
-            return typeof value === "object"
-                ? JSON.stringify(value, null, 2)
-                : (value?.toString() ?? "-")
-        case "messages":
-            return Array.isArray(value)
-                ? value
-                      .map((msg) => (typeof msg === "string" ? msg : JSON.stringify(msg)))
-                      .join("\n")
-                : (value?.toString() ?? "-")
-        case "multiple_choice":
-            return Array.isArray(value) ? value.join(", ") : (value?.toString() ?? "-")
-        case "hidden":
-            return "-"
-        default:
-            return value?.toString() ?? "-"
-    }
-}
-
-type CellDataType = "number" | "text" | "date"
-export function getFilterParams(type: CellDataType) {
-    const filterParams: GenericObject = {}
-    if (type == "date") {
-        filterParams.comparator = function (
-            filterLocalDateAtMidnight: Date,
-            cellValue: string | null,
-        ) {
-            if (cellValue == null) return -1
-            const cellDate = dayjs(cellValue).startOf("day").toDate()
-            if (filterLocalDateAtMidnight.getTime() === cellDate.getTime()) {
-                return 0
-            }
-            if (cellDate < filterLocalDateAtMidnight) {
-                return -1
-            }
-            if (cellDate > filterLocalDateAtMidnight) {
-                return 1
-            }
-        }
-    }
-
-    return {
-        sortable: true,
-        floatingFilter: true,
-        filter:
-            type === "number"
-                ? "agNumberColumnFilter"
-                : type === "date"
-                  ? "agDateColumnFilter"
-                  : "agTextColumnFilter",
-        cellDataType: type === "number" ? "text" : type,
-        filterParams,
-        comparator: getCustomComparator(type),
-    }
-}
-
-export const calcEvalDuration = (evaluation: _Evaluation) => {
-    return dayjs(
-        runningStatuses.includes(evaluation.status.value) ? Date.now() : evaluation.updated_at,
-    ).diff(dayjs(evaluation.created_at), "milliseconds")
-}
-
-const getCustomComparator = (type: CellDataType) => (valueA: string, valueB: string) => {
-    const getNumber = (val: string) => {
-        const num = parseFloat(val || "0")
-        return isNaN(num) ? 0 : num
-    }
-
-    valueA = String(valueA)
-    valueB = String(valueB)
-
-    switch (type) {
-        case "date":
-            return dayjs(valueA).diff(dayjs(valueB))
-        case "text":
-            return valueA.localeCompare(valueB)
-        case "number":
-            return getNumber(valueA) - getNumber(valueB)
-        default:
-            return 0
-    }
-}
-
-export const removeCorrectAnswerPrefix = (str: string) => {
-    return str.replace(/^correctAnswer_/, "")
-}
-
-export const mapTestcaseAndEvalValues = (
-    settingsValues: Record<string, any>,
-    selectedTestcase: Record<string, any>,
-) => {
-    const testcaseObj: Record<string, any> = {}
-    const evalMapObj: Record<string, any> = {}
-
-    Object.entries(settingsValues).forEach(([key, value]) => {
-        if (typeof value === "string" && value.startsWith("testcase.")) {
-            testcaseObj[key] = selectedTestcase[value.split(".")[1]]
-        } else {
-            evalMapObj[key] = value
-        }
-    })
-
-    return {testcaseObj, evalMapObj}
-}
-
-export const transformTraceKeysInSettings = (
-    settingsValues: Record<string, any>,
-): Record<string, any> => {
-    return Object.keys(settingsValues).reduce(
-        (acc, curr) => {
-            if (
-                !acc[curr] &&
-                typeof settingsValues[curr] === "string" &&
-                settingsValues[curr].startsWith("trace.")
-            ) {
-                acc[curr] = settingsValues[curr].replace("trace.", "")
-            } else {
-                acc[curr] = settingsValues[curr]
-            }
-
-            return acc
-        },
-        {} as Record<string, any>,
-    )
-}
-
-export const getEvaluatorTags = () => {
-    const evaluatorTags = [
-        {
-            label: "Classifiers",
-            value: "classifiers",
-        },
-        {
-            label: "Similarity",
-            value: "similarity",
-        },
-        {
-            label: "AI / LLM",
-            value: "ai_llm",
-        },
-        {
-            label: "Functional",
-            value: "functional",
-        },
-    ]
-
-    if (isDemo()) {
-        evaluatorTags.unshift({
-            label: "RAG",
-            value: "rag",
-        })
-    }
-
-    return evaluatorTags
-}
-export const calculateAvgScore = (evaluation: SingleModelEvaluationListTableDataType) => {
-    let score = 0
-    if (evaluation.scoresData) {
-        score =
-            ((evaluation.scoresData.correct?.length || evaluation.scoresData.true?.length || 0) /
-                evaluation.scoresData.nb_of_rows) *
-            100
-    } else if (evaluation.resultsData) {
-        const multiplier = {
-            [EvaluationType.auto_webhook_test]: 100,
-            [EvaluationType.single_model_test]: 1,
-        }
-        score = calculateResultsDataAvg(
-            evaluation.resultsData,
-            multiplier[evaluation.evaluationType as keyof typeof multiplier],
-        )
-        score = isNaN(score) ? 0 : score
-    } else if (evaluation.avgScore) {
-        score = evaluation.avgScore * 100
-    }
-
-    return score
-}
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/human_a_b_testing/[evaluation_id]/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/human_a_b_testing/[evaluation_id]/index.tsx
deleted file mode 100644
index 76e6526898..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/human_a_b_testing/[evaluation_id]/index.tsx
+++ /dev/null
@@ -1,115 +0,0 @@
-import {useEffect, useState} from "react"
-
-import {useAtom, useAtomValue} from "jotai"
-import dynamic from "next/dynamic"
-import {useRouter} from "next/router"
-
-// Avoid SSR for this heavy component to prevent server-side ReferenceErrors from client-only libs
-const ABTestingEvaluationTable = dynamic(
-    () => import("@/oss/components/EvaluationTable/ABTestingEvaluationTable"),
-    {ssr: false},
-)
-import useURL from "@/oss/hooks/useURL"
-import {evaluationAtom, evaluationScenariosAtom} from "@/oss/lib/atoms/evaluation"
-import {getTestsetChatColumn} from "@/oss/lib/helpers/testset"
-import {useBreadcrumbsEffect} from "@/oss/lib/hooks/useBreadcrumbs"
-import type {Evaluation} from "@/oss/lib/Types"
-import {
-    fetchLoadEvaluation,
-    fetchAllLoadEvaluationsScenarios,
-} from "@/oss/services/human-evaluations/api"
-import {fetchTestset} from "@/oss/services/testsets/api"
-import {projectIdAtom} from "@/oss/state/project"
-import {variantsAtom} from "@/oss/state/variant/atoms/fetcher"
-
-export default function Evaluation() {
-    const router = useRouter()
-    const projectId = useAtomValue(projectIdAtom)
-    const evaluationTableId = router.query.evaluation_id
-        ? router.query.evaluation_id.toString()
-        : ""
-    const [evaluationScenarios, setEvaluationScenarios] = useAtom(evaluationScenariosAtom)
-    const [evaluation, setEvaluation] = useAtom(evaluationAtom)
-    const [isLoading, setIsLoading] = useState(true)
-    const appId = router.query.app_id as string
-    const columnsCount = 2
-    const {baseAppURL} = useURL()
-    // variants from global store
-    const variantsStore = useAtomValue(variantsAtom)
-
-    useEffect(() => {
-        if (!evaluation || !projectId) {
-            return
-        }
-        const init = async () => {
-            setIsLoading(true)
-            try {
-                const data = await fetchAllLoadEvaluationsScenarios(evaluationTableId, evaluation)
-                setEvaluationScenarios(data)
-            } finally {
-                setTimeout(() => setIsLoading(false), 1000)
-            }
-        }
-        init()
-    }, [evaluation, projectId])
-
-    useEffect(() => {
-        if (!evaluationTableId) {
-            return
-        }
-        const init = async () => {
-            const evaluation: Evaluation = await fetchLoadEvaluation(evaluationTableId)
-            const backendVariants = variantsStore
-            const testset = await fetchTestset(evaluation.testset._id)
-            // Create a map for faster access to first array elements
-            const backendVariantsMap = new Map()
-            backendVariants.forEach((obj) => backendVariantsMap.set(obj.variantId, obj))
-
-            // Update variants in second object
-            evaluation.variants = evaluation.variants.map((variant) => {
-                const backendVariant = backendVariantsMap.get(variant.variantId)
-                return backendVariant ? backendVariant : variant
-            })
-            evaluation.testset = {
-                ...evaluation.testset,
-                ...testset,
-                testsetChatColumn: getTestsetChatColumn(testset.csvdata),
-            }
-            setEvaluation(evaluation)
-        }
-
-        init()
-    }, [evaluationTableId])
-
-    // breadcrumbs
-    useBreadcrumbsEffect(
-        {
-            breadcrumbs: {
-                appPage: {
-                    label: "human ab testing",
-                    href: `${baseAppURL}/${appId}/evaluations?selectedEvaluation=human_ab_testing`,
-                },
-                "eval-detail": {
-                    label: evaluationTableId,
-                    value: evaluationTableId,
-                },
-            },
-            type: "append",
-            condition: !!evaluationTableId,
-        },
-        [evaluationTableId],
-    )
-
-    return (
-        
-            {evaluationTableId && evaluationScenarios && evaluation && (
-                
-            )}
-        
-    )
-}
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/index.tsx
deleted file mode 100644
index 5f9c0ce406..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/index.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-import EvaluationsView from "@/oss/components/pages/evaluations/EvaluationsView"
-
-const AppEvaluationsPage = () => {
-    return <EvaluationsView />
-}
-
-export default AppEvaluationsPage
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/[evaluation_id]/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/[evaluation_id]/index.tsx
deleted file mode 100644
index 8a3e7e4523..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/[evaluation_id]/index.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import {useRouter} from "next/router"
-
-import EvalRunDetailsPage from "@/oss/components/EvalRunDetails"
-
-const AppEvaluationResultsPage = () => {
-    const router = useRouter()
-    const rawType =
-        (Array.isArray(router.query.eval_type)
-            ? router.query.eval_type[0]
-            : router.query.eval_type) ||
-        (Array.isArray(router.query.type) ? router.query.type[0] : router.query.type)
-    const normalized =
-        rawType === "online"
-            ? "online"
-            : rawType === "human"
-              ? "human"
-              : rawType === "custom"
-                ? "custom"
-                : "auto"
-    return 
-}
-
-export default AppEvaluationResultsPage
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/compare/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/compare/index.tsx
deleted file mode 100644
index 9a24e505d7..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/compare/index.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-import EvaluationCompare from "@/oss/components/pages/evaluations/evaluationCompare/EvaluationCompare"
-
-const EvaluationCompareDetails = () => {
-    return <EvaluationCompare />
-}
-
-export default EvaluationCompareDetails
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/single_model_test/[evaluation_id]/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/single_model_test/[evaluation_id]/index.tsx
deleted file mode 100644
index 209e1772ec..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/single_model_test/[evaluation_id]/index.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-import EvalRunDetailsPage from "@/oss/components/EvalRunDetails"
-
-const EvaluationPage = () => {
-    return <EvalRunDetailsPage />
-}
-
-export default EvaluationPage
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/overview/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/overview/index.tsx
index 9afd259da5..baa1f07530 100644
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/overview/index.tsx
+++ b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/overview/index.tsx
@@ -15,6 +15,7 @@ import {openEditAppModalAtom} from "@/oss/components/pages/app-management/modals
 import DeploymentOverview from "@/oss/components/pages/overview/deployments/DeploymentOverview"
 import VariantsOverview from "@/oss/components/pages/overview/variants/VariantsOverview"
 import useURL from "@/oss/hooks/useURL"
+import {isDemo} from "@/oss/lib/helpers/utils"
 import type {JSSTheme} from "@/oss/lib/Types"
 import {deleteApp} from "@/oss/services/app-selector/api"
 import {useEnvironments} from "@/oss/services/deployment/hooks/useEnvironments"
@@ -125,10 +126,13 @@ const OverviewPage = () => {
-                    
-                    
-                    
-                    
+                    {isDemo() && (
+                        <>
+                            
+                            
+                            
+                        </>
+                    )}
 {
-    return 
-}
-export default ProjectEvaluationsPage
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/[evaluation_id]/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/[evaluation_id]/index.tsx
deleted file mode 100644
index 8cabe50e3a..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/[evaluation_id]/index.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import {useRouter} from "next/router"
-
-import EvalRunDetailsPage from "@/oss/components/EvalRunDetails"
-
-const ProjectEvaluationResultsPage = () => {
-    const router = useRouter()
-    const rawType =
-        (Array.isArray(router.query.eval_type)
-            ? router.query.eval_type[0]
-            : router.query.eval_type) ||
-        (Array.isArray(router.query.type) ? router.query.type[0] : router.query.type)
-    const normalized =
-        rawType === "online"
-            ? "online"
-            : rawType === "human"
-              ? "human"
-              : rawType === "custom"
-                ? "custom"
-                : "auto"
-    return 
-}
-
-export default ProjectEvaluationResultsPage
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/compare/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/compare/index.tsx
deleted file mode 100644
index 4fc96755ce..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/compare/index.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-import EvaluationCompare from "@/oss/components/pages/evaluations/evaluationCompare/EvaluationCompare"
-
-const ProjectEvaluationCompareDetails = () => {
-    return <EvaluationCompare />
-}
-
-export default ProjectEvaluationCompareDetails
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/single_model_test/[evaluation_id]/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/single_model_test/[evaluation_id]/index.tsx
deleted file mode 100644
index 67c0827984..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluations/single_model_test/[evaluation_id]/index.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-import EvalRunDetailsPage from "@/oss/components/EvalRunDetails"
-
-const ProjectHumanEvaluationPage = () => {
-    return <EvalRunDetailsPage />
-}
-
-export default ProjectHumanEvaluationPage
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx
deleted file mode 100644
index df1b8461be..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/configure/[evaluator_id].tsx
+++ /dev/null
@@ -1,20 +0,0 @@
-import {useMemo} from "react"
-
-import {useRouter} from "next/router"
-
-import ConfigureEvaluatorPage from "@/oss/components/Evaluators/components/ConfigureEvaluator"
-
-const EvaluatorConfigureRoute = () => {
-    const router = useRouter()
-    const evaluatorId = useMemo(() => {
-        const id = router.query.evaluator_id
-        if (Array.isArray(id)) {
-            return id[0]
-        }
-        return id ?? null
-    }, [router.query.evaluator_id])
-
-    return <ConfigureEvaluatorPage evaluatorId={evaluatorId} />
-}
-
-export default EvaluatorConfigureRoute
diff --git a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/index.tsx b/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/index.tsx
deleted file mode 100644
index 7996228a65..0000000000
--- a/web/oss/src/pages/w/[workspace_id]/p/[project_id]/evaluators/index.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-import EvaluatorsRegistry from "@/oss/components/Evaluators"
-
-const ProjectEvaluatorsPage = () => {
-    return <EvaluatorsRegistry />
-}
-
-export default ProjectEvaluatorsPage
diff --git a/web/oss/src/services/onlineEvaluations/api.ts b/web/oss/src/services/onlineEvaluations/api.ts
index e0650b45d9..3ffa241891 100644
--- a/web/oss/src/services/onlineEvaluations/api.ts
+++ b/web/oss/src/services/onlineEvaluations/api.ts
@@ -2,179 +2,11 @@ import axios from "@/oss/lib/api/assets/axiosConfig"
 import {getAgentaApiUrl} from "@/oss/lib/helpers/api"
 import {getProjectValues} from "@/oss/state/project"
 
-type LogicalOperator = "and" | "or" | "not" | "nand" | "nor"
-
-export interface QueryConditionPayload {
-    field: string
-    key?: string
-    value?: unknown
-    operator?: string
-    options?: Record<string, unknown>
-}
-
-export interface QueryFilteringPayload {
-    operator?: LogicalOperator
-    conditions: (QueryConditionPayload | QueryFilteringPayload)[]
-}
-
-export interface QueryWindowingPayload {
-    newest?: string
-    oldest?: string
-    next?: string
-    limit?: number
-    order?: "ascending" | "descending"
-    interval?: number
-    rate?: number
-}
-
-export interface QueryRevisionDataPayload {
-    filtering?: QueryFilteringPayload
-    windowing?: QueryWindowingPayload
-}
-
-export interface SimpleQueryCreatePayload {
-    slug: string
-    name?: string
-    description?: string
-    flags?: Record<string, unknown>
-    tags?: Record<string, unknown>
-    meta?: Record<string, unknown>
-    data?: QueryRevisionDataPayload
-}
-
-export interface SimpleQueryCreateRequest {
-    query: SimpleQueryCreatePayload
-}
-
-export interface SimpleQueryResponse {
-    count: number
-    query?: {
-        id: string
-        slug?: string
-        data?: QueryRevisionDataPayload
-        meta?: Record<string, unknown>
-    } | null
-}
-
-export interface QueryRevisionRetrieveRequest {
-    query_ref?: {id?: string; slug?: string} | null
-    query_variant_ref?: {id?: string; slug?: string} | null
-    query_revision_ref?: {id?: string; slug?: string} | null
-}
-
-export interface QueryRevisionResponse {
-    count: number
-    query_revision?: {
-        id?: string
-        slug?: string
-        variant_id?: string
-        version?: string | number
-        data?: QueryRevisionDataPayload
-    } | null
-}
-
-export interface SimpleEvaluationFlagsPayload {
-    is_live?: boolean
-    is_closed?: boolean
-    is_active?: boolean
-}
-
-export interface SimpleEvaluationDataPayload {
-    status?: string
-    query_steps?: string[] | Record<string, unknown>
-    testset_steps?: Record<string, unknown>
-    application_steps?: Record<string, unknown>
-    evaluator_steps?: string[] | Record<string, unknown>
-    repeats?: number
-    // Structured references for online evaluations
-    query_ref?: {id?: string; slug?: string} | null
-    query_revision_ref?: {id?: string; slug?: string} | null
-    evaluator_ref?: {id?: string; slug?: string} | null
-    configuration?: Record<string, unknown>
-}
-
-export interface SimpleEvaluationCreatePayload {
-    name?: string
-    description?: string
-    flags?: SimpleEvaluationFlagsPayload
-    tags?: Record<string, unknown>
-    meta?: Record<string, unknown>
-    data: SimpleEvaluationDataPayload
-}
-
-export interface SimpleEvaluationCreateRequest {
-    evaluation: SimpleEvaluationCreatePayload
-}
-
-export interface SimpleEvaluationResponse {
-    count: number
-    evaluation?: SimpleEvaluationPayload | null
-}
-
-export interface SimpleEvaluationPayload {
-    id?: string
-    slug?: string
-    name?: string
-    description?: string
-    created_at?: string
-    updated_at?: string
-    created_by_id?: string
-    updated_by_id?: string
-    flags?: SimpleEvaluationFlagsPayload
-    data?: SimpleEvaluationDataPayload
-    meta?: Record<string, unknown>
-    tags?: Record<string, unknown>
-}
-
-export interface SimpleEvaluationsResponse {
-    count: number
-    evaluations: SimpleEvaluationPayload[]
-}
-
-export interface SimpleEvaluationsQueryRequest {
-    evaluation?: {
-        flags?: SimpleEvaluationFlagsPayload
-        ids?: string[]
-    }
-    tags?: Record<string, unknown>
-    meta?: Record<string, unknown>
-}
-
 const getProjectUrl = (path: string) => {
     const {projectId} = getProjectValues()
     return `${getAgentaApiUrl()}${path}?project_id=${projectId}`
 }
 
-export const createSimpleQuery = async (
-    payload: SimpleQueryCreateRequest,
-): Promise<SimpleQueryResponse> => {
-    const {data} = await axios.post(getProjectUrl("/preview/simple/queries/"), payload)
-    return data as SimpleQueryResponse
-}
-
-export const retrieveQueryRevision = async (
-    payload: QueryRevisionRetrieveRequest,
-): Promise<QueryRevisionResponse> => {
-    const {data} = await axios.post(getProjectUrl("/preview/queries/revisions/retrieve"), payload)
-    return data as QueryRevisionResponse
-}
-
-export const createSimpleEvaluation = async (
-    payload: SimpleEvaluationCreateRequest,
-): Promise<SimpleEvaluationResponse> => {
-    const {data} = await axios.post(getProjectUrl("/preview/simple/evaluations/"), payload)
-    return data as SimpleEvaluationResponse
-}
-
-export const querySimpleEvaluations = async (
-    payload?: SimpleEvaluationsQueryRequest,
-): Promise<SimpleEvaluationsResponse> => {
-    const url = getProjectUrl("/preview/simple/evaluations/query")
-    const body = payload ?? {}
-    const {data} = await axios.post(url, body)
-    return data as SimpleEvaluationsResponse
-}
-
 export const stopSimpleEvaluation = async (evaluationId: string) => {
     const url = getProjectUrl(`/preview/simple/evaluations/${evaluationId}/stop`)
     const {data} = await axios.post(url)
diff --git a/web/package.json b/web/package.json
index c7b4ea6ec9..8e5ab2468e 100644
--- a/web/package.json
+++ b/web/package.json
@@ -1,6 +1,6 @@
 {
     "name": "agenta-web",
-    "version": "0.62.1",
+    "version": "0.61.0",
     "workspaces": [
         "ee",
         "oss",
diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml
index 0057554c07..dee9870f08 100644
--- a/web/pnpm-lock.yaml
+++ b/web/pnpm-lock.yaml
@@ -84,7 +84,7 @@ importers:
         version: 0.1.13(prettier@3.6.0)
       ts-node:
         specifier: ^10.9.2
-        version: 10.9.2(@swc/core@1.11.8(@swc/helpers@0.5.17))(@types/node@20.19.19)(typescript@5.8.3)
+        version: 10.9.2(@swc/core@1.11.8(@swc/helpers@0.5.17))(@types/node@20.19.13)(typescript@5.8.3)
       tsconfig-paths:
         specifier: ^4.2.0
         version: 4.2.0
@@ -267,7 +267,7 @@ importers:
         version: 21.1.0
       swc-loader:
         specifier: ^0.2.6
-        version: 0.2.6(@swc/core@1.11.8(@swc/helpers@0.5.17))(webpack@5.98.0(@swc/core@1.11.8(@swc/helpers@0.5.17)))
+        version: 0.2.6(@swc/core@1.11.8(@swc/helpers@0.5.17))(webpack@5.98.0(@swc/core@1.11.8(@swc/helpers@0.5.17))(esbuild@0.25.10))
       swr:
         specifier: ^2.3.0
         version: 2.3.3(react@19.0.0)
@@ -593,9 +593,6 @@ importers:
       react-window:
         specifier: ^1.8.11
        version: 1.8.11(react-dom@19.0.0(react@19.0.0))(react@19.0.0)
-      recharts:
-        specifier: ^3.1.0
-        version: 3.1.0(@types/react@19.0.10)(react-dom@19.0.0(react@19.0.0))(react-is@18.3.1)(react@19.0.0)(redux@5.0.1)
       semver:
         specifier: ^7.7.2
         version: 7.7.2
@@ -613,7 +610,7 @@ importers:
         version: 21.1.0
       swc-loader:
         specifier: ^0.2.6
-        version: 0.2.6(@swc/core@1.11.8(@swc/helpers@0.5.17))(webpack@5.98.0(@swc/core@1.11.8(@swc/helpers@0.5.17))(esbuild@0.25.10))
+        version: 0.2.6(@swc/core@1.11.8(@swc/helpers@0.5.17))(webpack@5.98.0(@swc/core@1.11.8(@swc/helpers@0.5.17)))
       swr:
         specifier: ^2.3.0
         version: 2.3.3(react@19.0.0)
@@ -2054,6 +2051,9 @@ packages:
   '@types/node@20.19.11':
     resolution: {integrity: sha512-uug3FEEGv0r+jrecvUUpbY8lLisvIjg6AAic6a2bSP5OEOLeJsDSnvhCDov7ipFFMXS3orMpzlmi0ZcuGkBbow==}
 
+  '@types/node@20.19.13':
+    resolution: {integrity: sha512-yCAeZl7a0DxgNVteXFHt9+uyFbqXGy/ShC4BlcHkoE0AfGXYv/BUiplV72DjMYXHDBXFjhvr6DD1NiRVfB4j8g==}
+
   '@types/node@20.19.19':
     resolution: {integrity: sha512-pb1Uqj5WJP7wrcbLU7Ru4QtA0+3kAXrkutGiD26wUKzSMgNNaPARTUDQmElUXp64kh3cWdou3Q0C7qwwxqSFmg==}
@@ -3974,6 +3974,7 @@ packages:
     resolution: {integrity: sha512-Quz3MvAwHxVYNXsOByL7xI5EB2WYOeFswqaHIA3qOK3isRWTxiplBEocmmru6XmxDB2L7jDNYtYA4FyimoAFEw==}
     engines: {node: '>=8.17.0'}
     hasBin: true
+    bundledDependencies: []
 
   jsonpointer@5.0.1:
     resolution: {integrity: sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==}
@@ -7283,6 +7284,10 @@ snapshots:
     dependencies:
       undici-types: 6.21.0
 
+  '@types/node@20.19.13':
+    dependencies:
+      undici-types: 6.21.0
+
   '@types/node@20.19.19':
     dependencies:
       undici-types: 6.21.0
@@ -11368,14 +11373,14 @@ snapshots:
       '@swc/core': 1.11.8(@swc/helpers@0.5.17)
     optional: true
 
-  ts-node@10.9.2(@swc/core@1.11.8(@swc/helpers@0.5.17))(@types/node@20.19.19)(typescript@5.8.3):
+  ts-node@10.9.2(@swc/core@1.11.8(@swc/helpers@0.5.17))(@types/node@20.19.13)(typescript@5.8.3):
     dependencies:
       '@cspotcode/source-map-support': 0.8.1
       '@tsconfig/node10': 1.0.11
       '@tsconfig/node12': 1.0.11
       '@tsconfig/node14': 1.0.3
       '@tsconfig/node16': 1.0.4
-      '@types/node': 20.19.19
+      '@types/node': 20.19.13
       acorn: 8.15.0
       acorn-walk: 8.3.4
       arg: 4.1.3