diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
index a704ca9826..2c5a241acc 100644
--- a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
@@ -56,8 +56,7 @@ def create_default_project_for_workspaces(session: Connection):
for workspace in workspaces:
# Create a new default project for each workspace
get_or_create_workspace_default_project(
- session=session,
- workspace=workspace, # type: ignore
+ session=session, workspace=workspace # type: ignore
)
# Commit the changes for the current batch
diff --git a/api/ee/databases/postgres/migrations/core/utils.py b/api/ee/databases/postgres/migrations/core/utils.py
index 4691a38ec0..206e46db64 100644
--- a/api/ee/databases/postgres/migrations/core/utils.py
+++ b/api/ee/databases/postgres/migrations/core/utils.py
@@ -73,9 +73,7 @@ async def get_current_migration_head_from_db(engine: AsyncEngine):
async with engine.connect() as connection:
try:
- result = await connection.execute(
- text("SELECT version_num FROM alembic_version")
- ) # type: ignore
+ result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore
except (asyncpg.exceptions.UndefinedTableError, ProgrammingError):
# Note: If the alembic_version table does not exist, it will result in raising an UndefinedTableError exception.
# We need to suppress the error and return a list with the alembic_version table name to inform the user that there is a pending migration \
@@ -85,9 +83,9 @@ async def get_current_migration_head_from_db(engine: AsyncEngine):
return "alembic_version"
migration_heads = [row[0] for row in result.fetchall()]
- assert len(migration_heads) == 1, (
- "There can only be one migration head stored in the database."
- )
+ assert (
+ len(migration_heads) == 1
+ ), "There can only be one migration head stored in the database."
return migration_heads[0]
diff --git a/api/ee/databases/postgres/migrations/tracing/utils.py b/api/ee/databases/postgres/migrations/tracing/utils.py
index f0d62a3c3d..15f3e66b5f 100644
--- a/api/ee/databases/postgres/migrations/tracing/utils.py
+++ b/api/ee/databases/postgres/migrations/tracing/utils.py
@@ -66,9 +66,7 @@ async def get_current_migration_head_from_db(engine: AsyncEngine):
async with engine.connect() as connection:
try:
- result = await connection.execute(
- text("SELECT version_num FROM alembic_version")
- ) # type: ignore
+ result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore
except (asyncpg.exceptions.UndefinedTableError, ProgrammingError):
# Note: If the alembic_version table does not exist, it will result in raising an UndefinedTableError exception.
# We need to suppress the error and return a list with the alembic_version table name to inform the user that there is a pending migration \
@@ -78,9 +76,9 @@ async def get_current_migration_head_from_db(engine: AsyncEngine):
return "alembic_version"
migration_heads = [row[0] for row in result.fetchall()]
- assert len(migration_heads) == 1, (
- "There can only be one migration head stored in the database."
- )
+ assert (
+ len(migration_heads) == 1
+ ), "There can only be one migration head stored in the database."
return migration_heads[0]
diff --git a/api/ee/src/apis/fastapi/billing/router.py b/api/ee/src/apis/fastapi/billing/router.py
index ff8dbaf476..08762eaa76 100644
--- a/api/ee/src/apis/fastapi/billing/router.py
+++ b/api/ee/src/apis/fastapi/billing/router.py
@@ -35,7 +35,7 @@
stripe.api_key = environ.get("STRIPE_API_KEY")
-MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xFF:02x}" for ele in range(40, -1, -8))
+MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xff:02x}" for ele in range(40, -1, -8))
STRIPE_WEBHOOK_SECRET = environ.get("STRIPE_WEBHOOK_SECRET")
STRIPE_TARGET = environ.get("STRIPE_TARGET") or MAC_ADDRESS
AGENTA_PRICING = loads(environ.get("AGENTA_PRICING") or "{}")
diff --git a/api/ee/src/core/subscriptions/service.py b/api/ee/src/core/subscriptions/service.py
index 30e026eded..f69adcbd74 100644
--- a/api/ee/src/core/subscriptions/service.py
+++ b/api/ee/src/core/subscriptions/service.py
@@ -25,7 +25,7 @@
stripe.api_key = environ.get("STRIPE_SECRET_KEY")
-MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xFF:02x}" for ele in range(40, -1, -8))
+MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xff:02x}" for ele in range(40, -1, -8))
STRIPE_TARGET = environ.get("STRIPE_TARGET") or MAC_ADDRESS
AGENTA_PRICING = loads(environ.get("AGENTA_PRICING") or "{}")
diff --git a/api/ee/src/services/db_manager_ee.py b/api/ee/src/services/db_manager_ee.py
index f7cb117e65..bc174918b6 100644
--- a/api/ee/src/services/db_manager_ee.py
+++ b/api/ee/src/services/db_manager_ee.py
@@ -645,7 +645,9 @@ async def remove_user_from_workspace(
project = await db_manager.get_project_by_id(project_id=project_id)
async with engine.core_session() as session:
- if not user: # User is an invited user who has not yet created an account and therefore does not have a user object
+ if (
+ not user
+ ): # User is an invited user who has not yet created an account and therefore does not have a user object
pass
else:
# Ensure that a user can not remove the owner of the workspace
diff --git a/api/ee/src/services/workspace_manager.py b/api/ee/src/services/workspace_manager.py
index 49fe01b426..5bbebc78e6 100644
--- a/api/ee/src/services/workspace_manager.py
+++ b/api/ee/src/services/workspace_manager.py
@@ -317,9 +317,9 @@ async def accept_workspace_invitation(
invitation = await check_valid_invitation(project_id, user.email, token)
if invitation is not None:
- assert invitation.role is not None, (
- "Invitation does not have any workspace role"
- )
+ assert (
+ invitation.role is not None
+ ), "Invitation does not have any workspace role"
await db_manager_ee.add_user_to_workspace_and_org(
organization, workspace, user, project_id, invitation.role
)
diff --git a/api/ee/src/utils/entitlements.py b/api/ee/src/utils/entitlements.py
index 99614caec4..13360aad77 100644
--- a/api/ee/src/utils/entitlements.py
+++ b/api/ee/src/utils/entitlements.py
@@ -36,25 +36,25 @@ class EntitlementsException(Exception):
pass
-NOT_ENTITLED_RESPONSE: Callable[[Tracker], JSONResponse] = (
- lambda tracker=None: JSONResponse(
- status_code=403,
- content={
- "detail": (
- "You have reached your monthly quota limit. Please upgrade your plan to continue."
- if tracker == Tracker.COUNTERS
+NOT_ENTITLED_RESPONSE: Callable[
+ [Tracker], JSONResponse
+] = lambda tracker=None: JSONResponse(
+ status_code=403,
+ content={
+ "detail": (
+ "You have reached your monthly quota limit. Please upgrade your plan to continue."
+ if tracker == Tracker.COUNTERS
+ else (
+ "You have reached your quota limit. Please upgrade your plan to continue."
+ if tracker == Tracker.GAUGES
else (
- "You have reached your quota limit. Please upgrade your plan to continue."
- if tracker == Tracker.GAUGES
- else (
- "You do not have access to this feature. Please upgrade your plan to continue."
- if tracker == Tracker.FLAGS
- else "You do not have access to this feature."
- )
+ "You do not have access to this feature. Please upgrade your plan to continue."
+ if tracker == Tracker.FLAGS
+ else "You do not have access to this feature."
)
- ),
- },
- )
+ )
+ ),
+ },
)
@@ -163,7 +163,7 @@ async def check_entitlements(
# TODO: remove this line
log.info(
- f"adjusting: {organization_id} | {(('0' if (meter.month != 0 and meter.month < 10) else '') + str(meter.month)) if meter.month != 0 else ' '}.{meter.year if meter.year else ' '} | {'allow' if check else 'deny '} | {meter.key}: {meter.value - meter.synced} [{meter.value}]"
+ f"adjusting: {organization_id} | {(('0' if (meter.month != 0 and meter.month < 10) else '') + str(meter.month)) if meter.month != 0 else ' '}.{meter.year if meter.year else ' '} | {'allow' if check else 'deny '} | {meter.key}: {meter.value-meter.synced} [{meter.value}]"
)
return check is True, meter, _
diff --git a/api/ee/src/utils/permissions.py b/api/ee/src/utils/permissions.py
index 4454a0ac6b..312bcb05b6 100644
--- a/api/ee/src/utils/permissions.py
+++ b/api/ee/src/utils/permissions.py
@@ -218,17 +218,17 @@ async def check_rbac_permission(
bool: True if the user belongs to the workspace and has the specified permission, False otherwise.
"""
- assert project_id is not None, (
- "Project_ID is required to check object-level permissions"
- )
+ assert (
+ project_id is not None
+ ), "Project_ID is required to check object-level permissions"
# Assert that either permission or role is provided, but not both
- assert (permission is not None) or (role is not None), (
- "Either 'permission' or 'role' must be provided, but neither is provided"
- )
- assert not ((permission is not None) and (role is not None)), (
- "'permission' and 'role' cannot both be provided at the same time"
- )
+ assert (permission is not None) or (
+ role is not None
+ ), "Either 'permission' or 'role' must be provided, but neither is provided"
+ assert not (
+ (permission is not None) and (role is not None)
+ ), "'permission' and 'role' cannot both be provided at the same time"
if project_id is not None:
project = await db_manager.get_project_by_id(project_id)
@@ -281,9 +281,9 @@ async def check_project_has_role_or_permission(
if not check:
return True
- assert role is not None or permission is not None, (
- "Either role or permission must be provided"
- )
+ assert (
+ role is not None or permission is not None
+ ), "Either role or permission must be provided"
project_members = await db_manager_ee.get_project_members(
project_id=str(project.id)
diff --git a/api/oss/databases/postgres/migrations/core/data_migrations/projects.py b/api/oss/databases/postgres/migrations/core/data_migrations/projects.py
index e5db291b3b..5ed91ee5f9 100644
--- a/api/oss/databases/postgres/migrations/core/data_migrations/projects.py
+++ b/api/oss/databases/postgres/migrations/core/data_migrations/projects.py
@@ -131,13 +131,13 @@ def add_default_evaluators_to_project(session: Session, project_id: str):
}
for setting_name, default_value in settings_values.items():
- assert default_value != "", (
- f"Default value for ground truth key '{setting_name}' in Evaluator is empty"
- )
+ assert (
+ default_value != ""
+ ), f"Default value for ground truth key '{setting_name}' in Evaluator is empty"
- assert hasattr(evaluator, "name") and hasattr(evaluator, "key"), (
- f"'name' and 'key' does not exist in the evaluator: {evaluator}"
- )
+ assert hasattr(evaluator, "name") and hasattr(
+ evaluator, "key"
+ ), f"'name' and 'key' does not exist in the evaluator: {evaluator}"
evaluator_config = EvaluatorConfigDB(
project_id=uuid.UUID(project_id),
diff --git a/api/oss/databases/postgres/migrations/core/utils.py b/api/oss/databases/postgres/migrations/core/utils.py
index adeb34bd50..9994ce2e54 100644
--- a/api/oss/databases/postgres/migrations/core/utils.py
+++ b/api/oss/databases/postgres/migrations/core/utils.py
@@ -73,9 +73,7 @@ async def get_current_migration_head_from_db(engine: AsyncEngine):
async with engine.connect() as connection:
try:
- result = await connection.execute(
- text("SELECT version_num FROM alembic_version")
- ) # type: ignore
+ result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore
except (asyncpg.exceptions.UndefinedTableError, ProgrammingError):
# Note: If the alembic_version table does not exist, it will result in raising an UndefinedTableError exception.
# We need to suppress the error and return a list with the alembic_version table name to inform the user that there is a pending migration \
@@ -85,9 +83,9 @@ async def get_current_migration_head_from_db(engine: AsyncEngine):
return "alembic_version"
migration_heads = [row[0] for row in result.fetchall()]
- assert len(migration_heads) == 1, (
- "There can only be one migration head stored in the database."
- )
+ assert (
+ len(migration_heads) == 1
+ ), "There can only be one migration head stored in the database."
return migration_heads[0]
diff --git a/api/oss/databases/postgres/migrations/tracing/utils.py b/api/oss/databases/postgres/migrations/tracing/utils.py
index 6966d0e1c8..db61035477 100644
--- a/api/oss/databases/postgres/migrations/tracing/utils.py
+++ b/api/oss/databases/postgres/migrations/tracing/utils.py
@@ -65,9 +65,7 @@ async def get_current_migration_head_from_db(engine: AsyncEngine):
async with engine.connect() as connection:
try:
- result = await connection.execute(
- text("SELECT version_num FROM alembic_version")
- ) # type: ignore
+ result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore
except (asyncpg.exceptions.UndefinedTableError, ProgrammingError):
# Note: If the alembic_version table does not exist, it will result in raising an UndefinedTableError exception.
# We need to suppress the error and return a list with the alembic_version table name to inform the user that there is a pending migration \
@@ -77,9 +75,9 @@ async def get_current_migration_head_from_db(engine: AsyncEngine):
return "alembic_version"
migration_heads = [row[0] for row in result.fetchall()]
- assert len(migration_heads) == 1, (
- "There can only be one migration head stored in the database."
- )
+ assert (
+ len(migration_heads) == 1
+ ), "There can only be one migration head stored in the database."
return migration_heads[0]
diff --git a/api/oss/docker/Dockerfile.gh b/api/oss/docker/Dockerfile.gh
index e26bfb7c9a..a9bd7c8365 100644
--- a/api/oss/docker/Dockerfile.gh
+++ b/api/oss/docker/Dockerfile.gh
@@ -18,11 +18,11 @@ RUN pip install --upgrade pip \
#
COPY ./oss /app/oss/
COPY ./entrypoint.py ./pyproject.toml /app/
-# COPY ./sdk /sdk/
+COPY ./sdk /sdk/
RUN poetry config virtualenvs.create false \
- && poetry install --no-interaction --no-ansi
- # && pip install --force-reinstall --upgrade /sdk/
+ && poetry install --no-interaction --no-ansi \
+ && pip install --force-reinstall --upgrade /sdk/
#
diff --git a/api/oss/src/apis/fastapi/observability/extractors/adapters/default_agenta_adapter.py b/api/oss/src/apis/fastapi/observability/extractors/adapters/default_agenta_adapter.py
index 7203208ede..f9664d80ba 100644
--- a/api/oss/src/apis/fastapi/observability/extractors/adapters/default_agenta_adapter.py
+++ b/api/oss/src/apis/fastapi/observability/extractors/adapters/default_agenta_adapter.py
@@ -40,7 +40,9 @@ def process(self, attributes: CanonicalAttributes, features: SpanFeatures) -> No
# Exceptions - Rebuilt from attributes.events to match previous output structure
exception_events = attributes.get_events_by_name("exception")
- if exception_events: # Process the first one if multiple exist, or adapt if all should be processed
+ if (
+ exception_events
+ ): # Process the first one if multiple exist, or adapt if all should be processed
event_data = exception_events[0]
# Ensure timestamp is decoded and formatted as previously (likely to string by decode_value if it's datetime)
decoded_ts = decode_value(event_data.timestamp)
diff --git a/api/oss/src/apis/fastapi/observability/extractors/adapters/logfire_adapter.py b/api/oss/src/apis/fastapi/observability/extractors/adapters/logfire_adapter.py
index f71b18fa72..c1106e118d 100644
--- a/api/oss/src/apis/fastapi/observability/extractors/adapters/logfire_adapter.py
+++ b/api/oss/src/apis/fastapi/observability/extractors/adapters/logfire_adapter.py
@@ -95,9 +95,12 @@ def process(self, bag: CanonicalAttributes, features: SpanFeatures) -> None:
and transformed_attributes.get("ag.metrics.unit.tokens.completion")
and not transformed_attributes.get("ag.metrics.unit.tokens.total")
):
- transformed_attributes["ag.metrics.unit.tokens.total"] = (
- transformed_attributes.get("ag.metrics.unit.tokens.prompt")
- + transformed_attributes.get("ag.metrics.unit.tokens.completion")
+ transformed_attributes[
+ "ag.metrics.unit.tokens.total"
+ ] = transformed_attributes.get(
+ "ag.metrics.unit.tokens.prompt"
+ ) + transformed_attributes.get(
+ "ag.metrics.unit.tokens.completion"
)
if not has_logfire_data:
return
diff --git a/api/oss/src/apis/fastapi/observability/opentelemetry/traces_proto.py b/api/oss/src/apis/fastapi/observability/opentelemetry/traces_proto.py
index bbbc95d9a9..899bf0765c 100644
--- a/api/oss/src/apis/fastapi/observability/opentelemetry/traces_proto.py
+++ b/api/oss/src/apis/fastapi/observability/opentelemetry/traces_proto.py
@@ -3,7 +3,6 @@
# NO CHECKED-IN PROTOBUF GENCODE
# source: traces.proto
"""Generated protocol buffer code."""
-
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
diff --git a/api/oss/src/apis/fastapi/shared/utils.py b/api/oss/src/apis/fastapi/shared/utils.py
index 6982ddb8f8..b9accb73b9 100644
--- a/api/oss/src/apis/fastapi/shared/utils.py
+++ b/api/oss/src/apis/fastapi/shared/utils.py
@@ -12,11 +12,7 @@ def parse_metadata(
flags: Optional[str] = None,
tags: Optional[str] = None,
meta: Optional[str] = None,
-) -> Tuple[
- Optional[Flags],
- Optional[Tags],
- Optional[Meta],
-]:
+) -> Tuple[Optional[Flags], Optional[Tags], Optional[Meta],]:
_flags = None
try:
_flags = loads(flags) if flags else None
diff --git a/api/oss/src/apis/fastapi/testsets/router.py b/api/oss/src/apis/fastapi/testsets/router.py
index 3c8ba795c4..9fcd8bcab6 100644
--- a/api/oss/src/apis/fastapi/testsets/router.py
+++ b/api/oss/src/apis/fastapi/testsets/router.py
@@ -1162,23 +1162,19 @@ async def fetch_simple_testset(
if testset is None:
return SimpleTestsetResponse()
- testset_variant = (
- await self.simple_testsets_service.testsets_service.fetch_testset_variant(
- project_id=UUID(request.state.project_id),
- #
- testset_ref=Reference(id=testset.id),
- )
+ testset_variant = await self.simple_testsets_service.testsets_service.fetch_testset_variant(
+ project_id=UUID(request.state.project_id),
+ #
+ testset_ref=Reference(id=testset.id),
)
if testset_variant is None:
return SimpleTestsetResponse()
- testset_revision = (
- await self.simple_testsets_service.testsets_service.fetch_testset_revision(
- project_id=UUID(request.state.project_id),
- #
- testset_variant_ref=Reference(id=testset_variant.id),
- )
+ testset_revision = await self.simple_testsets_service.testsets_service.fetch_testset_revision(
+ project_id=UUID(request.state.project_id),
+ #
+ testset_variant_ref=Reference(id=testset_variant.id),
)
if testset_revision is None:
diff --git a/api/oss/src/core/annotations/service.py b/api/oss/src/core/annotations/service.py
index 3d9d734955..c7229ffbdc 100644
--- a/api/oss/src/core/annotations/service.py
+++ b/api/oss/src/core/annotations/service.py
@@ -146,12 +146,10 @@ async def create(
)
if simple_evaluator:
- evaluator_revision = (
- await self.evaluators_service.fetch_evaluator_revision(
- project_id=project_id,
- #
- evaluator_ref=Reference(id=simple_evaluator.id),
- )
+ evaluator_revision = await self.evaluators_service.fetch_evaluator_revision(
+ project_id=project_id,
+ #
+ evaluator_ref=Reference(id=simple_evaluator.id),
)
elif evaluator_revision.evaluator_id:
simple_evaluator = await self.simple_evaluators_service.fetch(
@@ -335,12 +333,10 @@ async def edit(
)
if simple_evaluator:
- evaluator_revision = (
- await self.evaluators_service.fetch_evaluator_revision(
- project_id=project_id,
- #
- evaluator_ref=Reference(id=simple_evaluator.id),
- )
+ evaluator_revision = await self.evaluators_service.fetch_evaluator_revision(
+ project_id=project_id,
+ #
+ evaluator_ref=Reference(id=simple_evaluator.id),
)
if not evaluator_revision or not evaluator_revision.data:
diff --git a/api/oss/src/core/evaluations/service.py b/api/oss/src/core/evaluations/service.py
index 5aa820cec6..b36a9d47ef 100644
--- a/api/oss/src/core/evaluations/service.py
+++ b/api/oss/src/core/evaluations/service.py
@@ -1804,12 +1804,10 @@ async def _make_evaluation_run_data(
)
return None
- testset_revision = (
- await self.testsets_service.fetch_testset_revision(
- project_id=project_id,
- #
- testset_ref=testset_ref,
- )
+ testset_revision = await self.testsets_service.fetch_testset_revision(
+ project_id=project_id,
+ #
+ testset_ref=testset_ref,
)
if (
@@ -2037,12 +2035,10 @@ async def _make_evaluation_run_data(
)
return None
- evaluator_revision = (
- await self.evaluators_service.fetch_evaluator_revision(
- project_id=project_id,
- #
- evaluator_ref=evaluator_ref,
- )
+ evaluator_revision = await self.evaluators_service.fetch_evaluator_revision(
+ project_id=project_id,
+ #
+ evaluator_ref=evaluator_ref,
)
if (
@@ -2062,12 +2058,10 @@ async def _make_evaluation_run_data(
for evaluator_revision_id, origin in (evaluator_steps or {}).items():
evaluator_revision_ref = Reference(id=evaluator_revision_id)
- evaluator_revision = (
- await self.evaluators_service.fetch_evaluator_revision(
- project_id=project_id,
- #
- evaluator_revision_ref=evaluator_revision_ref,
- )
+ evaluator_revision = await self.evaluators_service.fetch_evaluator_revision(
+ project_id=project_id,
+ #
+ evaluator_revision_ref=evaluator_revision_ref,
)
if not evaluator_revision or not evaluator_revision.slug:
@@ -2086,12 +2080,10 @@ async def _make_evaluation_run_data(
evaluator_variant_ref = Reference(id=evaluator_revision.variant_id)
- evaluator_variant = (
- await self.evaluators_service.fetch_evaluator_variant(
- project_id=project_id,
- #
- evaluator_variant_ref=evaluator_variant_ref,
- )
+ evaluator_variant = await self.evaluators_service.fetch_evaluator_variant(
+ project_id=project_id,
+ #
+ evaluator_variant_ref=evaluator_variant_ref,
)
if not evaluator_variant:
diff --git a/api/oss/src/core/testsets/service.py b/api/oss/src/core/testsets/service.py
index 6bc781ef85..0d34def73d 100644
--- a/api/oss/src/core/testsets/service.py
+++ b/api/oss/src/core/testsets/service.py
@@ -490,12 +490,10 @@ async def create_testset_revision(
)
if testset_revision.data and testset_revision.data.testcase_ids:
- testset_revision.data.testcases = (
- await self.testcases_service.fetch_testcases(
- project_id=project_id,
- #
- testcase_ids=testset_revision.data.testcase_ids,
- )
+ testset_revision.data.testcases = await self.testcases_service.fetch_testcases(
+ project_id=project_id,
+ #
+ testcase_ids=testset_revision.data.testcase_ids,
)
return testset_revision
@@ -558,12 +556,10 @@ async def fetch_testset_revision(
)
if testset_revision.data and testset_revision.data.testcase_ids:
- testset_revision.data.testcases = (
- await self.testcases_service.fetch_testcases(
- project_id=project_id,
- #
- testcase_ids=testset_revision.data.testcase_ids,
- )
+ testset_revision.data.testcases = await self.testcases_service.fetch_testcases(
+ project_id=project_id,
+ #
+ testcase_ids=testset_revision.data.testcase_ids,
)
return testset_revision
@@ -599,12 +595,10 @@ async def edit_testset_revision(
)
if testset_revision.data and testset_revision.data.testcase_ids:
- testset_revision.data.testcases = (
- await self.testcases_service.fetch_testcases(
- project_id=project_id,
- #
- testcase_ids=testset_revision.data.testcase_ids,
- )
+ testset_revision.data.testcases = await self.testcases_service.fetch_testcases(
+ project_id=project_id,
+ #
+ testcase_ids=testset_revision.data.testcase_ids,
)
return testset_revision
@@ -697,12 +691,10 @@ async def query_testset_revisions(
)
if testset_revision.data and testset_revision.data.testcase_ids:
- testset_revision.data.testcases = (
- await self.testcases_service.fetch_testcases(
- project_id=project_id,
- #
- testcase_ids=testset_revision.data.testcase_ids,
- )
+ testset_revision.data.testcases = await self.testcases_service.fetch_testcases(
+ project_id=project_id,
+ #
+ testcase_ids=testset_revision.data.testcase_ids,
)
testset_revisions.append(testset_revision)
@@ -758,12 +750,10 @@ async def commit_testset_revision(
)
if testset_revision.data and testset_revision.data.testcase_ids:
- testset_revision.data.testcases = (
- await self.testcases_service.fetch_testcases(
- project_id=project_id,
- #
- testcase_ids=testset_revision.data.testcase_ids,
- )
+ testset_revision.data.testcases = await self.testcases_service.fetch_testcases(
+ project_id=project_id,
+ #
+ testcase_ids=testset_revision.data.testcase_ids,
)
return testset_revision
@@ -795,12 +785,10 @@ async def log_testset_revisions(
)
if testset_revision.data and testset_revision.data.testcase_ids:
- testset_revision.data.testcases = (
- await self.testcases_service.fetch_testcases(
- project_id=project_id,
- #
- testcase_ids=testset_revision.data.testcase_ids,
- )
+ testset_revision.data.testcases = await self.testcases_service.fetch_testcases(
+ project_id=project_id,
+ #
+ testcase_ids=testset_revision.data.testcase_ids,
)
testset_revisions.append(testset_revision)
diff --git a/api/oss/src/core/workflows/dtos.py b/api/oss/src/core/workflows/dtos.py
index 880da2a44a..c6b71caccf 100644
--- a/api/oss/src/core/workflows/dtos.py
+++ b/api/oss/src/core/workflows/dtos.py
@@ -172,9 +172,9 @@ class WorkflowServiceVersion(BaseModel):
class WorkflowServiceInterface(WorkflowServiceVersion):
uri: Optional[str] = None # str (Enum) w/ validation
url: Optional[str] = None # str w/ validation
- headers: Optional[Dict[str, Union[Reference, str]]] = (
- None # either hardcoded or a secret
- )
+ headers: Optional[
+ Dict[str, Union[Reference, str]]
+ ] = None # either hardcoded or a secret
# handler: Optional[Callable] = None
schemas: Optional[Dict[str, Schema]] = None # json-schema instead of pydantic
diff --git a/api/oss/src/core/workflows/service.py b/api/oss/src/core/workflows/service.py
index 866fddf38e..18e4835b97 100644
--- a/api/oss/src/core/workflows/service.py
+++ b/api/oss/src/core/workflows/service.py
@@ -732,10 +732,7 @@ async def invoke_workflow(
request: WorkflowServiceRequest,
#
**kwargs,
- ) -> Union[
- WorkflowServiceBatchResponse,
- WorkflowServiceStreamResponse,
- ]:
+ ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse,]:
project = await get_project_by_id(
project_id=str(project_id),
)
diff --git a/api/oss/src/dbs/postgres/git/dao.py b/api/oss/src/dbs/postgres/git/dao.py
index 8d275d6dae..f1bb94f45f 100644
--- a/api/oss/src/dbs/postgres/git/dao.py
+++ b/api/oss/src/dbs/postgres/git/dao.py
@@ -342,9 +342,7 @@ async def query_artifacts(
if artifact_query.description:
stmt = stmt.filter(
- self.ArtifactDBE.description.ilike(
- f"%{artifact_query.description}%"
- ), # type: ignore
+ self.ArtifactDBE.description.ilike(f"%{artifact_query.description}%"), # type: ignore
)
if include_archived is not True:
@@ -1175,9 +1173,7 @@ async def query_revisions(
if revision_query.description:
stmt = stmt.filter(
- self.RevisionDBE.description.ilike(
- f"%{revision_query.description}%"
- ) # type: ignore
+ self.RevisionDBE.description.ilike(f"%{revision_query.description}%") # type: ignore
)
if include_archived is not True:
diff --git a/api/oss/src/models/converters.py b/api/oss/src/models/converters.py
index b7e9080dbf..768fded064 100644
--- a/api/oss/src/models/converters.py
+++ b/api/oss/src/models/converters.py
@@ -143,8 +143,7 @@ async def environment_db_to_output(
) -> EnvironmentOutput:
deployed_app_variant_id = (
str(environment_db.deployed_app_variant_id)
- if environment_db.deployed_app_variant_id
- and isinstance(environment_db.deployed_app_variant_id, uuid.UUID) # type: ignore
+ if environment_db.deployed_app_variant_id and isinstance(environment_db.deployed_app_variant_id, uuid.UUID) # type: ignore
else None
)
if deployed_app_variant_id:
diff --git a/api/oss/src/models/db/models.py b/api/oss/src/models/db/models.py
index da7a99c099..4e78e945ac 100644
--- a/api/oss/src/models/db/models.py
+++ b/api/oss/src/models/db/models.py
@@ -58,14 +58,4 @@
]
if is_ee():
- models.extend(
- [
- OrganizationDB,
- WorkspaceDB,
- APIKeyDB,
- InvitationDB,
- OrganizationMemberDB,
- ProjectMemberDB,
- WorkspaceMemberDB,
- ]
- ) # type: ignore
+ models.extend([OrganizationDB, WorkspaceDB, APIKeyDB, InvitationDB, OrganizationMemberDB, ProjectMemberDB, WorkspaceMemberDB]) # type: ignore
diff --git a/api/oss/src/models/deprecated_models.py b/api/oss/src/models/deprecated_models.py
index 85ac4be0c4..070536fa13 100644
--- a/api/oss/src/models/deprecated_models.py
+++ b/api/oss/src/models/deprecated_models.py
@@ -442,7 +442,9 @@ class DeprecatedEvaluationDB(DeprecatedBase):
)
average_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # type: ignore # Result
total_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # type: ignore # Result
- average_latency = Column(mutable_json_type(dbtype=JSONB, nested=True)) # type: ignore # Result
+ average_latency = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # type: ignore # Result
created_at = Column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
diff --git a/api/oss/src/resources/evaluators/evaluators.py b/api/oss/src/resources/evaluators/evaluators.py
index cbca48d4fc..53a2d48542 100644
--- a/api/oss/src/resources/evaluators/evaluators.py
+++ b/api/oss/src/resources/evaluators/evaluators.py
@@ -229,12 +229,12 @@
"description": "Extract information from the user's response.",
"type": "object",
"properties": {
- "score": {
+ "correctness": {
"type": "boolean",
"description": "The grade results",
}
},
- "required": ["score"],
+ "required": ["correctness"],
"strict": True,
},
},
@@ -264,12 +264,12 @@
"description": "Extract information from the user's response.",
"type": "object",
"properties": {
- "score": {
+ "correctness": {
"type": "boolean",
"description": "The hallucination detection result",
}
},
- "required": ["score"],
+ "required": ["correctness"],
"strict": True,
},
},
@@ -339,12 +339,12 @@
"description": "Extract information from the user's response.",
"type": "object",
"properties": {
- "score": {
+ "correctness": {
"type": "boolean",
"description": "The grade results",
}
},
- "required": ["score"],
+ "required": ["correctness"],
"strict": True,
},
},
diff --git a/api/oss/src/routers/app_router.py b/api/oss/src/routers/app_router.py
index d0a60affca..52e724ee83 100644
--- a/api/oss/src/routers/app_router.py
+++ b/api/oss/src/routers/app_router.py
@@ -389,9 +389,7 @@ async def list_apps(
"""
if is_ee():
- user_org_workspace_data = await get_user_org_and_workspace_id(
- request.state.user_id
- ) # type: ignore
+ user_org_workspace_data = await get_user_org_and_workspace_id(request.state.user_id) # type: ignore
has_permission = await check_rbac_permission( # type: ignore
user_org_workspace_data=user_org_workspace_data,
project_id=request.state.project_id,
diff --git a/api/oss/src/routers/configs_router.py b/api/oss/src/routers/configs_router.py
index 2271fc41bc..8c432b31d6 100644
--- a/api/oss/src/routers/configs_router.py
+++ b/api/oss/src/routers/configs_router.py
@@ -94,9 +94,9 @@ async def get_config(
"parameters": found_variant.config_parameters,
}
- assert "name" and "parameters" in config, (
- "'name' and 'parameters' not found in configuration"
- )
+ assert (
+ "name" and "parameters" in config
+ ), "'name' and 'parameters' not found in configuration"
return GetConfigResponse(
config_name=config["name"], # type: ignore
current_version=variant_revision, # type: ignore
diff --git a/api/oss/src/routers/user_profile.py b/api/oss/src/routers/user_profile.py
index 0b082467b6..67a66f21ae 100644
--- a/api/oss/src/routers/user_profile.py
+++ b/api/oss/src/routers/user_profile.py
@@ -39,9 +39,9 @@ async def user_profile(request: Request):
user = await db_manager.get_user_with_id(user_id=request.state.user_id)
- assert user is not None, (
- "User not found. Please ensure that the user_id is specified correctly."
- )
+ assert (
+ user is not None
+ ), "User not found. Please ensure that the user_id is specified correctly."
user = User(
id=str(user.id),
diff --git a/api/oss/src/routers/variants_router.py b/api/oss/src/routers/variants_router.py
index 405910d540..8edeef5d3c 100644
--- a/api/oss/src/routers/variants_router.py
+++ b/api/oss/src/routers/variants_router.py
@@ -393,9 +393,9 @@ async def get_variant_revision(
revision_number: int,
request: Request,
):
- assert variant_id != "undefined", (
- "Variant id is required to retrieve variant revision"
- )
+ assert (
+ variant_id != "undefined"
+ ), "Variant id is required to retrieve variant revision"
app_variant = await db_manager.fetch_app_variant_by_id(app_variant_id=variant_id)
if is_ee():
diff --git a/api/oss/src/services/app_manager.py b/api/oss/src/services/app_manager.py
index 7128b2f54e..f644399ebd 100644
--- a/api/oss/src/services/app_manager.py
+++ b/api/oss/src/services/app_manager.py
@@ -135,12 +135,12 @@ async def terminate_and_remove_app_variant(
Exception: Any other exception raised during the operation.
"""
- assert app_variant_id or app_variant_db, (
- "Either app_variant_id or app_variant_db must be provided"
- )
- assert not (app_variant_id and app_variant_db), (
- "Only one of app_variant_id or app_variant_db must be provided"
- )
+ assert (
+ app_variant_id or app_variant_db
+ ), "Either app_variant_id or app_variant_db must be provided"
+ assert not (
+ app_variant_id and app_variant_db
+ ), "Only one of app_variant_id or app_variant_db must be provided"
if app_variant_id:
app_variant_db = await db_manager.fetch_app_variant_by_id(app_variant_id)
diff --git a/api/oss/src/services/auth_helper.py b/api/oss/src/services/auth_helper.py
index fb1bd08bd2..9f49188cb3 100644
--- a/api/oss/src/services/auth_helper.py
+++ b/api/oss/src/services/auth_helper.py
@@ -483,9 +483,9 @@ async def verify_bearer_token(
else:
workspaces = await db_manager.get_workspaces()
- assert len(workspaces) == 1, (
- "You can only have a single workspace in OSS."
- )
+ assert (
+ len(workspaces) == 1
+ ), "You can only have a single workspace in OSS."
workspace_id = str(workspaces[0].id)
project_id = await db_manager.get_default_project_id_from_workspace(
diff --git a/api/oss/src/services/converters.py b/api/oss/src/services/converters.py
index ad9cb64169..8ead9b7df4 100644
--- a/api/oss/src/services/converters.py
+++ b/api/oss/src/services/converters.py
@@ -13,6 +13,7 @@
HumanEvaluationScenario,
EvaluationScenarioOutput,
)
+from oss.src.services import db_manager
from oss.src.models.db_models import (
EvaluationDB,
HumanEvaluationDB,
diff --git a/api/oss/src/services/db_manager.py b/api/oss/src/services/db_manager.py
index 09e4ef2a0a..9833c104dc 100644
--- a/api/oss/src/services/db_manager.py
+++ b/api/oss/src/services/db_manager.py
@@ -223,12 +223,8 @@ async def fetch_app_variant_by_id(app_variant_id: str) -> Optional[AppVariantDB]
assert app_variant_id is not None, "app_variant_id cannot be None"
async with engine.core_session() as session:
query = select(AppVariantDB).options(
- joinedload(AppVariantDB.app.of_type(AppDB)).load_only(
- AppDB.id, AppDB.app_name
- ), # type: ignore
- joinedload(AppVariantDB.base.of_type(VariantBaseDB))
- .joinedload(VariantBaseDB.deployment.of_type(DeploymentDB))
- .load_only(DeploymentDB.id, DeploymentDB.uri), # type: ignore
+ joinedload(AppVariantDB.app.of_type(AppDB)).load_only(AppDB.id, AppDB.app_name), # type: ignore
+ joinedload(AppVariantDB.base.of_type(VariantBaseDB)).joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)).load_only(DeploymentDB.id, DeploymentDB.uri), # type: ignore
)
result = await session.execute(
@@ -568,9 +564,9 @@ async def create_new_app_variant(
AppVariantDB: The created variant.
"""
- assert config.parameters == {}, (
- "Parameters should be empty when calling create_new_app_variant (otherwise revision should not be set to 0)"
- )
+ assert (
+ config.parameters == {}
+ ), "Parameters should be empty when calling create_new_app_variant (otherwise revision should not be set to 0)"
async with engine.core_session() as session:
variant = AppVariantDB(
@@ -1873,9 +1869,7 @@ async def add_variant_from_base_and_config(
app_variant_for_base = await list_variants_for_base(base_db)
already_exists = any(
- av
- for av in app_variant_for_base
- if av.config_name == new_config_name # type: ignore
+ av for av in app_variant_for_base if av.config_name == new_config_name # type: ignore
)
if already_exists:
raise ValueError("App variant with the same name already exists")
@@ -1960,12 +1954,8 @@ async def list_app_variants(app_id: str):
result = await session.execute(
select(AppVariantDB)
.options(
- joinedload(AppVariantDB.app.of_type(AppDB)).load_only(
- AppDB.id, AppDB.app_name
- ), # type: ignore
- joinedload(AppVariantDB.base.of_type(VariantBaseDB))
- .joinedload(VariantBaseDB.deployment.of_type(DeploymentDB))
- .load_only(DeploymentDB.uri), # type: ignore
+ joinedload(AppVariantDB.app.of_type(AppDB)).load_only(AppDB.id, AppDB.app_name), # type: ignore
+ joinedload(AppVariantDB.base.of_type(VariantBaseDB)).joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)).load_only(DeploymentDB.uri), # type: ignore
)
.where(AppVariantDB.hidden.is_not(True))
.filter_by(app_id=uuid.UUID(app_uuid))
@@ -2263,9 +2253,7 @@ async def fetch_app_environment_revision_by_app_variant_revision_id(
)
if is_ee():
query = query.options(
- joinedload(
- AppEnvironmentRevisionDB.deployed_app_variant.of_type(AppVariantDB)
- ), # type: ignore
+ joinedload(AppEnvironmentRevisionDB.deployed_app_variant.of_type(AppVariantDB)), # type: ignore
)
result = await session.execute(query)
app_environment = result.scalars().one_or_none()
@@ -2288,9 +2276,7 @@ async def fetch_app_variant_revision_by_id(
result = await session.execute(
select(AppVariantRevisionsDB)
.options(
- joinedload(AppVariantRevisionsDB.base.of_type(VariantBaseDB))
- .joinedload(VariantBaseDB.deployment.of_type(DeploymentDB))
- .load_only(DeploymentDB.id, DeploymentDB.uri), # type: ignore
+ joinedload(AppVariantRevisionsDB.base.of_type(VariantBaseDB)).joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)).load_only(DeploymentDB.id, DeploymentDB.uri), # type: ignore
)
.filter_by(id=uuid.UUID(variant_revision_id))
)
@@ -2317,7 +2303,9 @@ async def fetch_environment_revisions_for_environment(environment: AppEnvironmen
query = query.options(
joinedload(
AppEnvironmentRevisionDB.modified_by.of_type(UserDB)
- ).load_only(UserDB.username) # type: ignore
+ ).load_only(
+ UserDB.username
+ ) # type: ignore
)
else:
query = query.options(
@@ -2507,9 +2495,9 @@ async def create_environment_revision(
)
if kwargs:
- assert "deployed_app_variant_revision" in kwargs, (
- "Deployed app variant revision is required"
- )
+ assert (
+ "deployed_app_variant_revision" in kwargs
+ ), "Deployed app variant revision is required"
assert (
isinstance(
kwargs.get("deployed_app_variant_revision"), AppVariantRevisionsDB
@@ -2524,9 +2512,9 @@ async def create_environment_revision(
)
deployment = kwargs.get("deployment")
- assert isinstance(deployment, DeploymentDB) == True, (
- "Type of deployment in kwargs is not correct"
- )
+ assert (
+ isinstance(deployment, DeploymentDB) == True
+ ), "Type of deployment in kwargs is not correct"
if deployment is not None:
environment_revision.deployment_id = deployment.id # type: ignore
@@ -2594,7 +2582,9 @@ async def fetch_app_variant_revision(
)
else:
query = base_query.options(
- joinedload(AppVariantRevisionsDB.modified_by).load_only(UserDB.username) # type: ignore
+ joinedload(AppVariantRevisionsDB.modified_by).load_only(
+ UserDB.username
+ ) # type: ignore
)
result = await session.execute(query)
app_variant_revision = result.scalars().first()
@@ -2787,12 +2777,8 @@ async def get_app_variant_instance_by_id(
result = await session.execute(
select(AppVariantDB)
.options(
- joinedload(AppVariantDB.app.of_type(AppDB)).load_only(
- AppDB.id, AppDB.app_name
- ), # type: ignore
- joinedload(AppVariantDB.base.of_type(VariantBaseDB))
- .joinedload(VariantBaseDB.deployment.of_type(DeploymentDB))
- .load_only(DeploymentDB.uri), # type: ignore
+ joinedload(AppVariantDB.app.of_type(AppDB)).load_only(AppDB.id, AppDB.app_name), # type: ignore
+ joinedload(AppVariantDB.base.of_type(VariantBaseDB)).joinedload(VariantBaseDB.deployment.of_type(DeploymentDB)).load_only(DeploymentDB.uri), # type: ignore
)
.filter_by(id=uuid.UUID(variant_id), project_id=uuid.UUID(project_id)),
)
@@ -3280,9 +3266,9 @@ async def get_object_uuid(object_id: str, table_name: str) -> str:
# Use the object_id directly if it is not a valid MongoDB ObjectId
object_uuid_as_str = object_id
- assert object_uuid_as_str is not None, (
- f"{table_name} Object UUID cannot be none. Is the object_id {object_id} a valid MongoDB ObjectId?"
- )
+ assert (
+ object_uuid_as_str is not None
+ ), f"{table_name} Object UUID cannot be none. Is the object_id {object_id} a valid MongoDB ObjectId?"
return object_uuid_as_str
@@ -3406,19 +3392,13 @@ async def fetch_evaluation_by_id(
id=uuid.UUID(evaluation_id),
)
query = base_query.options(
- joinedload(EvaluationDB.testset.of_type(TestsetDB)).load_only(
- TestsetDB.id, TestsetDB.name
- ), # type: ignore
+ joinedload(EvaluationDB.testset.of_type(TestsetDB)).load_only(TestsetDB.id, TestsetDB.name), # type: ignore
)
result = await session.execute(
query.options(
- joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only(
- AppVariantDB.id, AppVariantDB.variant_name
- ), # type: ignore
- joinedload(
- EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB)
- ).load_only(AppVariantRevisionsDB.revision), # type: ignore
+ joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.revision), # type: ignore
joinedload(
EvaluationDB.aggregated_results.of_type(
EvaluationAggregatedResultDB
@@ -3445,9 +3425,7 @@ async def list_human_evaluations(app_id: str, project_id: str):
.filter(HumanEvaluationDB.testset_id.isnot(None))
)
query = base_query.options(
- joinedload(HumanEvaluationDB.testset.of_type(TestsetDB)).load_only(
- TestsetDB.id, TestsetDB.name
- ), # type: ignore
+ joinedload(HumanEvaluationDB.testset.of_type(TestsetDB)).load_only(TestsetDB.id, TestsetDB.name), # type: ignore
)
result = await session.execute(query)
@@ -3510,12 +3488,8 @@ async def fetch_human_evaluation_variants(human_evaluation_id: str):
human_evaluation_id=uuid.UUID(human_evaluation_id)
)
query = base_query.options(
- joinedload(
- HumanEvaluationVariantDB.variant.of_type(AppVariantDB)
- ).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
- joinedload(
- HumanEvaluationVariantDB.variant_revision.of_type(AppVariantRevisionsDB)
- ).load_only(AppVariantRevisionsDB.id, AppVariantRevisionsDB.revision), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.id, AppVariantRevisionsDB.revision), # type: ignore
)
result = await session.execute(query)
@@ -3544,9 +3518,7 @@ async def create_human_evaluation_variants(
variants_revisions_dict = {}
for variant_id, variant in variants_dict.items():
variant_revision = await fetch_app_variant_revision_by_variant(
- app_variant_id=str(variant.id),
- project_id=str(variant.project_id),
- revision=variant.revision, # type: ignore
+ app_variant_id=str(variant.id), project_id=str(variant.project_id), revision=variant.revision # type: ignore
)
if variant_revision:
variants_revisions_dict[variant_id] = variant_revision
@@ -3585,9 +3557,7 @@ async def fetch_human_evaluation_by_id(
async with engine.core_session() as session:
base_query = select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
query = base_query.options(
- joinedload(HumanEvaluationDB.testset.of_type(TestsetDB)).load_only(
- TestsetDB.id, TestsetDB.name
- ), # type: ignore
+ joinedload(HumanEvaluationDB.testset.of_type(TestsetDB)).load_only(TestsetDB.id, TestsetDB.name), # type: ignore
)
result = await session.execute(query)
evaluation = result.scalars().first()
@@ -3863,19 +3833,13 @@ async def list_evaluations(app_id: str, project_id: str):
app_id=uuid.UUID(app_id), project_id=uuid.UUID(project_id)
)
query = base_query.options(
- joinedload(EvaluationDB.testset.of_type(TestsetDB)).load_only(
- TestsetDB.id, TestsetDB.name
- ), # type: ignore
+ joinedload(EvaluationDB.testset.of_type(TestsetDB)).load_only(TestsetDB.id, TestsetDB.name), # type: ignore
)
result = await session.execute(
query.options(
- joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only(
- AppVariantDB.id, AppVariantDB.variant_name
- ), # type: ignore
- joinedload(
- EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB)
- ).load_only(AppVariantRevisionsDB.revision), # type: ignore
+ joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.revision), # type: ignore
joinedload(
EvaluationDB.aggregated_results.of_type(
EvaluationAggregatedResultDB
diff --git a/api/oss/src/services/evaluation_service.py b/api/oss/src/services/evaluation_service.py
index ba0e146678..ca40a70cef 100644
--- a/api/oss/src/services/evaluation_service.py
+++ b/api/oss/src/services/evaluation_service.py
@@ -71,7 +71,7 @@ async def prepare_csvdata_and_create_evaluation_scenario(
msg = f"""
Columns in the testset should match the names of the inputs in the variant.
Inputs names in variant are: {[variant_input for variant_input in payload_inputs]} while
- columns in testset are: {[col for col in datum.keys() if col != "correct_answer"]}
+ columns in testset are: {[col for col in datum.keys() if col != 'correct_answer']}
"""
raise HTTPException(
status_code=400,
@@ -404,9 +404,9 @@ async def create_new_evaluation(
variant_revision_id=revision_id
)
- assert variant_revision and variant_revision.revision is not None, (
- f"Variant revision with {revision_id} cannot be None"
- )
+ assert (
+ variant_revision and variant_revision.revision is not None
+ ), f"Variant revision with {revision_id} cannot be None"
assert testset is not None, f"Testset with id {testset_id} does not exist"
diff --git a/api/oss/src/services/evaluator_manager.py b/api/oss/src/services/evaluator_manager.py
index 375eec953a..68f44ee3d4 100644
--- a/api/oss/src/services/evaluator_manager.py
+++ b/api/oss/src/services/evaluator_manager.py
@@ -138,13 +138,13 @@ async def create_ready_to_use_evaluators(project_id: str):
}
for setting_name, default_value in settings_values.items():
- assert default_value != "", (
- f"Default value for ground truth key '{setting_name}' in Evaluator is empty"
- )
+ assert (
+ default_value != ""
+ ), f"Default value for ground truth key '{setting_name}' in Evaluator is empty"
- assert hasattr(evaluator, "name") and hasattr(evaluator, "key"), (
- f"'name' and 'key' does not exist in the evaluator: {evaluator}"
- )
+ assert hasattr(evaluator, "name") and hasattr(
+ evaluator, "key"
+ ), f"'name' and 'key' does not exist in the evaluator: {evaluator}"
await db_manager.create_evaluator_config(
project_id=project_id,
name=evaluator.name,
diff --git a/api/oss/src/services/evaluators_service.py b/api/oss/src/services/evaluators_service.py
index cbb4643c13..13e1538bc4 100644
--- a/api/oss/src/services/evaluators_service.py
+++ b/api/oss/src/services/evaluators_service.py
@@ -734,10 +734,9 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac
raise e
if (
- (input.settings.get("version") == "4")
- and ( # this check is used when running in the background (celery)
- type(input.settings.get("prompt_template", "")) is not str
- )
+ input.settings.get("version") == "4"
+ ) and ( # this check is used when running in the background (celery)
+ type(input.settings.get("prompt_template", "")) is not str
): # this check is used when running in the frontend (since in that case we'll always have version 2)
try:
parameters = input.settings or dict()
@@ -931,10 +930,9 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac
except Exception as e:
raise RuntimeError(f"Evaluation failed: {str(e)}")
elif (
- (input.settings.get("version") == "3")
- and ( # this check is used when running in the background (celery)
- type(input.settings.get("prompt_template", "")) is not str
- )
+ input.settings.get("version") == "3"
+ ) and ( # this check is used when running in the background (celery)
+ type(input.settings.get("prompt_template", "")) is not str
): # this check is used when running in the frontend (since in that case we'll always have version 2)
try:
parameters = input.settings or dict()
@@ -1091,10 +1089,9 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac
except Exception as e:
raise RuntimeError(f"Evaluation failed: {str(e)}")
elif (
- (input.settings.get("version") == "2")
- and ( # this check is used when running in the background (celery)
- type(input.settings.get("prompt_template", "")) is not str
- )
+ input.settings.get("version") == "2"
+ ) and ( # this check is used when running in the background (celery)
+ type(input.settings.get("prompt_template", "")) is not str
): # this check is used when running in the frontend (since in that case we'll always have version 2)
try:
prompt_template = input.settings.get("prompt_template", "")
@@ -1550,9 +1547,9 @@ async def json_diff(input: EvaluatorInputInterface) -> EvaluatorOutputInterface:
# 1. extract llm app output if app output format is v2+
app_output = input.inputs["prediction"]
- assert isinstance(app_output, (str, dict)), (
- "App output is expected to be a string or a JSON object"
- )
+ assert isinstance(
+ app_output, (str, dict)
+ ), "App output is expected to be a string or a JSON object"
app_output = (
app_output.get("data", "") if isinstance(app_output, dict) else app_output
)
@@ -1560,7 +1557,9 @@ async def json_diff(input: EvaluatorInputInterface) -> EvaluatorOutputInterface:
try:
app_output = json.loads(app_output)
except json.JSONDecodeError:
- app_output = {} # we will return 0 score for json diff in case we cannot parse the output as json
+ app_output = (
+ {}
+ ) # we will return 0 score for json diff in case we cannot parse the output as json
score = compare_jsons(
ground_truth=ground_truth,
diff --git a/api/oss/src/services/llm_apps_service.py b/api/oss/src/services/llm_apps_service.py
index d5ba69f965..b1d8ab5995 100644
--- a/api/oss/src/services/llm_apps_service.py
+++ b/api/oss/src/services/llm_apps_service.py
@@ -146,9 +146,9 @@ async def make_payload(
payload["ag_config"] = parameters
elif param["type"] == "input":
item = datapoint.get(param["name"], parameters.get(param["name"], ""))
- assert param["name"] != "ag_config", (
- "ag_config should be handled separately"
- )
+ assert (
+ param["name"] != "ag_config"
+ ), "ag_config should be handled separately"
payload[param["name"]] = item
# in case of dynamic inputs (as in our templates)
diff --git a/api/oss/src/tasks/evaluations/legacy.py b/api/oss/src/tasks/evaluations/legacy.py
index e034439691..d3bc69f9cc 100644
--- a/api/oss/src/tasks/evaluations/legacy.py
+++ b/api/oss/src/tasks/evaluations/legacy.py
@@ -292,9 +292,9 @@ async def setup_evaluation(
testset_id=UUID(testset_id),
)
- assert testset_response.count != 0, (
- f"Testset with id {testset_id} not found!"
- )
+ assert (
+ testset_response.count != 0
+ ), f"Testset with id {testset_id} not found!"
testset = testset_response.testset
testcases = testset.data.testcases
@@ -334,9 +334,9 @@ async def setup_evaluation(
query_ref=query_ref,
)
- assert query_revision is not None, (
- f"Query revision with id {query_id} not found!"
- )
+ assert (
+ query_revision is not None
+ ), f"Query revision with id {query_id} not found!"
query_revision_ref = Reference(
id=query_revision.id,
@@ -352,9 +352,9 @@ async def setup_evaluation(
),
)
- assert query_variant is not None, (
- f"Query variant with id {query_revision.variant_id} not found!"
- )
+ assert (
+ query_variant is not None
+ ), f"Query variant with id {query_revision.variant_id} not found!"
query_variant_ref = Reference(
id=query_variant.id,
@@ -374,9 +374,9 @@ async def setup_evaluation(
if revision_id:
revision = await fetch_app_variant_revision_by_id(revision_id)
- assert revision is not None, (
- f"App revision with id {revision_id} not found!"
- )
+ assert (
+ revision is not None
+ ), f"App revision with id {revision_id} not found!"
application_references["revision"] = Reference(
id=UUID(str(revision.id)),
@@ -384,9 +384,9 @@ async def setup_evaluation(
variant = await fetch_app_variant_by_id(str(revision.variant_id))
- assert variant is not None, (
- f"App variant with id {revision.variant_id} not found!"
- )
+ assert (
+ variant is not None
+ ), f"App variant with id {revision.variant_id} not found!"
application_references["variant"] = Reference(
id=UUID(str(variant.id)),
@@ -402,9 +402,9 @@ async def setup_evaluation(
deployment = await get_deployment_by_id(str(revision.base.deployment_id))
- assert deployment is not None, (
- f"Deployment with id {revision.base.deployment_id} not found!"
- )
+ assert (
+ deployment is not None
+ ), f"Deployment with id {revision.base.deployment_id} not found!"
uri = parse_url(url=deployment.uri)
@@ -412,9 +412,9 @@ async def setup_evaluation(
revision_parameters = revision.config_parameters
- assert revision_parameters is not None, (
- f"Revision parameters for variant {variant.id} not found!"
- )
+ assert (
+ revision_parameters is not None
+ ), f"Revision parameters for variant {variant.id} not found!"
invocation_steps_keys.append(
get_slug_from_name_and_id(app.app_name, revision.id)
@@ -498,18 +498,18 @@ async def setup_evaluation(
workflow_ref=workflow_ref,
)
- assert workflow_revision is not None, (
- f"Workflow revision with id {workflow_ref.id} not found!"
- )
+ assert (
+ workflow_revision is not None
+ ), f"Workflow revision with id {workflow_ref.id} not found!"
workflow_revision_ref = Reference(
id=workflow_revision.id,
slug=workflow_revision.slug,
)
- evaluator_references[annotation_step_key]["revision"] = (
- workflow_revision_ref
- )
+ evaluator_references[annotation_step_key][
+ "revision"
+ ] = workflow_revision_ref
evaluators[annotation_step_key]["revision"] = workflow_revision
@@ -520,9 +520,9 @@ async def setup_evaluation(
),
)
- assert workflow_variant is not None, (
- f"Workflow variant with id {workflow_revision.variant_id} not found!"
- )
+ assert (
+ workflow_variant is not None
+ ), f"Workflow variant with id {workflow_revision.variant_id} not found!"
workflow_variant_ref = Reference(
id=workflow_variant.id,
@@ -893,9 +893,9 @@ def annotate(
fetch_app_variant_by_id(str(revision.variant_id)),
)
- assert variant is not None, (
- f"App variant with id {revision.variant_id} not found!"
- )
+ assert (
+ variant is not None
+ ), f"App variant with id {revision.variant_id} not found!"
app = loop.run_until_complete(
fetch_app_by_id(str(variant.app_id)),
@@ -907,9 +907,9 @@ def annotate(
get_deployment_by_id(str(revision.base.deployment_id)),
)
- assert deployment is not None, (
- f"Deployment with id {revision.base.deployment_id} not found!"
- )
+ assert (
+ deployment is not None
+ ), f"Deployment with id {revision.base.deployment_id} not found!"
uri = parse_url(url=deployment.uri)
@@ -917,9 +917,9 @@ def annotate(
revision_parameters = revision.config_parameters
- assert revision_parameters is not None, (
- f"Revision parameters for variant {variant.id} not found!"
- )
+ assert (
+ revision_parameters is not None
+ ), f"Revision parameters for variant {variant.id} not found!"
# ----------------------------------------------------------------------
# fetch evaluators -----------------------------------------------------
@@ -997,9 +997,9 @@ def annotate(
)
)
- assert len(scenarios) == nof_testcases, (
- f"Failed to create evaluation scenarios for run {run_id}!"
- )
+ assert (
+ len(scenarios) == nof_testcases
+ ), f"Failed to create evaluation scenarios for run {run_id}!"
# ----------------------------------------------------------------------
# create input steps ---------------------------------------------------
@@ -1025,9 +1025,9 @@ def annotate(
)
)
- assert len(steps) == nof_testcases, (
- f"Failed to create evaluation steps for run {run_id}!"
- )
+ assert (
+ len(steps) == nof_testcases
+ ), f"Failed to create evaluation steps for run {run_id}!"
# ----------------------------------------------------------------------
# flatten testcases ----------------------------------------------------
@@ -1101,9 +1101,9 @@ def annotate(
)
)
- assert len(steps) == nof_testcases, (
- f"Failed to create evaluation steps for run {run_id}!"
- )
+ assert (
+ len(steps) == nof_testcases
+ ), f"Failed to create evaluation steps for run {run_id}!"
# ----------------------------------------------------------------------
run_has_errors = 0
@@ -1431,9 +1431,9 @@ def annotate(
)
)
- assert len(steps) == 1, (
- f"Failed to create evaluation step for scenario with id {scenario.id}!"
- )
+ assert (
+ len(steps) == 1
+ ), f"Failed to create evaluation step for scenario with id {scenario.id}!"
# ------------------------------------------------------------------
scenario_edit = EvaluationScenarioEdit(
@@ -1452,9 +1452,9 @@ def annotate(
)
)
- assert scenario, (
- f"Failed to edit evaluation scenario with id {scenario.id}!"
- )
+ assert (
+ scenario
+ ), f"Failed to edit evaluation scenario with id {scenario.id}!"
if scenario_status != EvaluationStatus.FAILURE:
try:
diff --git a/api/oss/src/tasks/evaluations/live.py b/api/oss/src/tasks/evaluations/live.py
index 932ae57ebd..43208bd42d 100644
--- a/api/oss/src/tasks/evaluations/live.py
+++ b/api/oss/src/tasks/evaluations/live.py
@@ -253,9 +253,7 @@ def evaluate(
steps = run.data.steps
input_steps = {
- step.key: step
- for step in steps
- if step.type == "input" # --------
+ step.key: step for step in steps if step.type == "input" # --------
}
invocation_steps = {
step.key: step for step in steps if step.type == "invocation"
@@ -492,9 +490,9 @@ def evaluate(
)
)
- assert len(results) == nof_traces, (
- f"Failed to create evaluation results for run {run_id}!"
- )
+ assert (
+ len(results) == nof_traces
+ ), f"Failed to create evaluation results for run {run_id}!"
# ------------------------------------------------------------------
scenario_has_errors: Dict[int, int] = dict()
@@ -774,9 +772,9 @@ def evaluate(
)
)
- assert len(results) == 1, (
- f"Failed to create evaluation result for scenario with id {scenario.id}!"
- )
+ assert (
+ len(results) == 1
+ ), f"Failed to create evaluation result for scenario with id {scenario.id}!"
# --------------------------------------------------------------
scenario_edit = EvaluationScenarioEdit(
diff --git a/api/oss/tests/legacy/admin/tests.py b/api/oss/tests/legacy/admin/tests.py
index 27d4aeb883..356aa29581 100644
--- a/api/oss/tests/legacy/admin/tests.py
+++ b/api/oss/tests/legacy/admin/tests.py
@@ -16,9 +16,9 @@ async def test_api_authentication_missing_token(self, http_client):
response = await http_client.get("admin/accounts", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -35,9 +35,9 @@ async def test_api_authentication_unsupported_token(self, http_client):
response = await http_client.get("admin/accounts", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -54,6 +54,6 @@ async def test_api_authentication_invalid_token(self, http_client):
response = await http_client.get("admin/accounts", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
diff --git a/api/oss/tests/legacy/apps/tests.py b/api/oss/tests/legacy/apps/tests.py
index 59afc1b7d7..d1be36c456 100644
--- a/api/oss/tests/legacy/apps/tests.py
+++ b/api/oss/tests/legacy/apps/tests.py
@@ -39,12 +39,12 @@ async def test_create_without_default_params(self, http_client):
response_data = response.json()
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
- assert response_data["app_name"] == app_data["app_name"], (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
+ assert (
+ response_data["app_name"] == app_data["app_name"]
+ ), f"Failed for case: {description}"
# Cleanup: Remove application
await delete_application(http_client, response_data["app_id"], headers)
@@ -64,9 +64,9 @@ async def test_create_invalid_params(self, http_client):
response = await http_client.post("/apps", json=app_data, headers=headers)
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -86,9 +86,9 @@ async def test_create_conflicts(self, http_client):
response = await http_client.post("/apps", json=app_data, headers=headers)
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
# Cleanup: Remove application
app_cleanup_response = await http_client.get("/apps", headers=headers)
@@ -120,9 +120,9 @@ async def test_permissions_principal_not_in_scope_post(self, http_client):
)
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -213,9 +213,9 @@ async def test_list_query_filter_no_element(self, http_client):
response_data = response.json()
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == len(elements), f"Failed for case: {description}"
@pytest.mark.asyncio
@@ -236,9 +236,9 @@ async def test_list_query_filter_one_element(self, http_client):
response_data = response.json()
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 1, f"Failed for case: {description}"
# Cleanup: Remove application
@@ -263,9 +263,9 @@ async def test_list_query_filter_many_elements_small_data(self, http_client):
response_data = response.json()
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 3, f"Failed for case: {description}"
# Cleanup: Remove applications
@@ -291,9 +291,9 @@ async def test_list_query_filter_many_elements_big_data(self, http_client):
response_data = response.json()
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 6, f"Failed for case: {description}"
# Cleanup: Remove applications
@@ -326,9 +326,9 @@ async def test_permissions_principal_not_in_scope(self, http_client):
)
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
# Cleanup: Delete the application with valid principal
await delete_application(http_client, app["app_id"], owner_headers)
@@ -361,9 +361,9 @@ async def test_permissions_allowed(self, http_client):
list_of_status_codes.append(response.status_code)
# Assert: Verify the response
- assert list_of_status_codes.count(expected_status) == 3, (
- f"Failed for case: {description}"
- )
+ assert (
+ list_of_status_codes.count(expected_status) == 3
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
diff --git a/api/oss/tests/legacy/auth/tests.py b/api/oss/tests/legacy/auth/tests.py
index 1a61f0a034..b09b4b4c61 100644
--- a/api/oss/tests/legacy/auth/tests.py
+++ b/api/oss/tests/legacy/auth/tests.py
@@ -16,9 +16,9 @@ async def test_api_authentication_missing_token(self, http_client):
response = await http_client.get("apps", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -35,9 +35,9 @@ async def test_api_authentication_unsupported_token(self, http_client):
response = await http_client.get("apps", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -54,6 +54,6 @@ async def test_api_authentication_invalid_token(self, http_client):
response = await http_client.get("apps", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
diff --git a/api/oss/tests/legacy/old_tests/models.py b/api/oss/tests/legacy/old_tests/models.py
index 8e24e032d9..5c1d0ad49b 100644
--- a/api/oss/tests/legacy/old_tests/models.py
+++ b/api/oss/tests/legacy/old_tests/models.py
@@ -62,14 +62,4 @@
]
if is_ee():
- models.extend(
- [
- OrganizationDB,
- WorkspaceDB,
- APIKeyDB,
- InvitationDB,
- OrganizationMemberDB,
- ProjectMemberDB,
- WorkspaceMemberDB,
- ]
- ) # type: ignore
+ models.extend([OrganizationDB, WorkspaceDB, APIKeyDB, InvitationDB, OrganizationMemberDB, ProjectMemberDB, WorkspaceMemberDB]) # type: ignore
diff --git a/api/oss/tests/legacy/old_tests/unit/test_llm_apps_service.py b/api/oss/tests/legacy/old_tests/unit/test_llm_apps_service.py
index 8ad085a396..af2757cf09 100644
--- a/api/oss/tests/legacy/old_tests/unit/test_llm_apps_service.py
+++ b/api/oss/tests/legacy/old_tests/unit/test_llm_apps_service.py
@@ -20,16 +20,14 @@ async def test_batch_invoke_success():
to simulate successful invocations. It verifies that the batch_invoke
function correctly returns the expected results for the given test data.
"""
- with (
- patch(
- "src.services.llm_apps_service.get_parameters_from_openapi",
- new_callable=AsyncMock,
- ) as mock_get_parameters_from_openapi,
- patch(
- "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock
- ) as mock_invoke_app,
- patch("asyncio.sleep", new_callable=AsyncMock) as mock_sleep,
- ):
+ with patch(
+ "src.services.llm_apps_service.get_parameters_from_openapi",
+ new_callable=AsyncMock,
+ ) as mock_get_parameters_from_openapi, patch(
+ "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock
+ ) as mock_invoke_app, patch(
+ "asyncio.sleep", new_callable=AsyncMock
+ ) as mock_sleep:
mock_get_parameters_from_openapi.return_value = [
{"name": "param1", "type": "input"},
{"name": "param2", "type": "input"},
@@ -92,16 +90,14 @@ async def test_batch_invoke_retries_and_failure():
function correctly retries the specified number of times and returns an error
result after reaching the maximum retries.
"""
- with (
- patch(
- "src.services.llm_apps_service.get_parameters_from_openapi",
- new_callable=AsyncMock,
- ) as mock_get_parameters_from_openapi,
- patch(
- "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock
- ) as mock_invoke_app,
- patch("asyncio.sleep", new_callable=AsyncMock) as mock_sleep,
- ):
+ with patch(
+ "src.services.llm_apps_service.get_parameters_from_openapi",
+ new_callable=AsyncMock,
+ ) as mock_get_parameters_from_openapi, patch(
+ "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock
+ ) as mock_invoke_app, patch(
+ "asyncio.sleep", new_callable=AsyncMock
+ ) as mock_sleep:
mock_get_parameters_from_openapi.return_value = [
{"name": "param1", "type": "input"},
{"name": "param2", "type": "input"},
@@ -159,16 +155,14 @@ async def test_batch_invoke_generic_exception():
batch_invoke function correctly handles the exception and returns an error
result with the appropriate error message.
"""
- with (
- patch(
- "src.m_apps_service.get_parameters_from_openapi",
- new_callable=AsyncMock,
- ) as mock_get_parameters_from_openapi,
- patch(
- "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock
- ) as mock_invoke_app,
- patch("asyncio.sleep", new_callable=AsyncMock) as mock_sleep,
- ):
+ with patch(
+ "src.m_apps_service.get_parameters_from_openapi",
+ new_callable=AsyncMock,
+ ) as mock_get_parameters_from_openapi, patch(
+ "src.services.llm_apps_service.invoke_app", new_callable=AsyncMock
+ ) as mock_invoke_app, patch(
+ "asyncio.sleep", new_callable=AsyncMock
+ ) as mock_sleep:
mock_get_parameters_from_openapi.return_value = [
{"name": "param1", "type": "input"},
{"name": "param2", "type": "input"},
diff --git a/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_evaluators_router.py b/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_evaluators_router.py
index dc670b8589..90e18ea27f 100644
--- a/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_evaluators_router.py
+++ b/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_evaluators_router.py
@@ -183,9 +183,9 @@ async def wait_for_evaluation_to_finish(evaluation_id):
return
await asyncio.sleep(intervals)
- assert False, (
- f"Evaluation status did not become '{EvaluationStatusEnum.EVALUATION_FINISHED}' within the specified polling time"
- )
+ assert (
+ False
+ ), f"Evaluation status did not become '{EvaluationStatusEnum.EVALUATION_FINISHED}' within the specified polling time"
async def create_evaluation_with_evaluator(evaluator_config_name):
diff --git a/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_versioning_deployment.py b/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_versioning_deployment.py
index 004b54e48f..9f9a87672b 100644
--- a/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_versioning_deployment.py
+++ b/api/oss/tests/legacy/old_tests/variants_main_router/test_variant_versioning_deployment.py
@@ -80,6 +80,6 @@ async def test_deploy_to_environment(deploy_to_environment_payload):
)
list_of_response_status_codes.append(response.status_code)
- assert list_of_response_status_codes.count(200) == 3, (
- "The list does not contain 3 occurrences of 200 status code"
- )
+ assert (
+ list_of_response_status_codes.count(200) == 3
+ ), "The list does not contain 3 occurrences of 200 status code"
diff --git a/api/oss/tests/legacy/sdk/apps/tests.py b/api/oss/tests/legacy/sdk/apps/tests.py
index d53a001e99..3d379d5d5f 100644
--- a/api/oss/tests/legacy/sdk/apps/tests.py
+++ b/api/oss/tests/legacy/sdk/apps/tests.py
@@ -44,9 +44,9 @@ async def test_create_app_successfully(self, http_client, setup_class_fixture):
# ASSERT
assert response.app_name == app_name
- assert isinstance(response.model_dump(), dict), (
- "Response data is not a dictionary."
- )
+ assert isinstance(
+ response.model_dump(), dict
+ ), "Response data is not a dictionary."
# CLEANUP
await delete_application(
diff --git a/api/oss/tests/legacy/testsets/tests.py b/api/oss/tests/legacy/testsets/tests.py
index 68931fd204..edf1474f15 100644
--- a/api/oss/tests/legacy/testsets/tests.py
+++ b/api/oss/tests/legacy/testsets/tests.py
@@ -77,9 +77,9 @@ async def test_upload_file_validation_failure(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
# @pytest.mark.asyncio
# @pytest.mark.typical
@@ -148,9 +148,9 @@ async def test_get_testset_owner_access(self, http_client):
response = await http_client.get(f"/testsets/{testset['id']}", headers=headers)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert "id" in response.json(), f"Failed for case: {description}"
# Cleanup
@@ -191,9 +191,9 @@ async def test_create_testset_success(self, http_client):
await delete_testset(http_client, response_data["id"], headers)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert "id" in response_data, f"Failed for case: {description}"
@pytest.mark.asyncio
@@ -213,9 +213,9 @@ async def test_create_testset_validation_failure(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -241,9 +241,9 @@ async def test_create_testset_non_member_access(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -260,9 +260,9 @@ async def test_no_element(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 0, f"Failed for case: {description}"
@pytest.mark.asyncio
@@ -282,9 +282,9 @@ async def test_one_element(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 1, f"Failed for case: {description}"
# Cleanup
@@ -308,9 +308,9 @@ async def test_many_elements_small_data(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 3, f"Failed for case: {description}"
# Cleanup
@@ -335,9 +335,9 @@ async def test_many_elements_big_data(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 6, f"Failed for case: {description}"
# Cleanup
@@ -368,9 +368,9 @@ async def test_permissions_principal_not_in_scope(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
# Cleanup
await delete_testset(http_client, testset["id"], owner_headers)
@@ -391,9 +391,9 @@ async def test_permissions_allowed(self, http_client):
response = await http_client.get("/testsets", headers=owner_headers)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -410,9 +410,9 @@ async def test_no_element(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 0, f"Failed for case: {description}"
@pytest.mark.asyncio
@@ -432,9 +432,9 @@ async def test_one_element(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 1, f"Failed for case: {description}"
# Cleanup
@@ -458,9 +458,9 @@ async def test_many_elements_small_data(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 3, f"Failed for case: {description}"
# Cleanup
@@ -485,9 +485,9 @@ async def test_many_elements_big_data(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert len(response_data) == 6, f"Failed for case: {description}"
# Cleanup
@@ -518,9 +518,9 @@ async def test_permissions_principal_not_in_scope(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
# Cleanup
await delete_testset(http_client, testset["id"], owner_headers)
@@ -541,9 +541,9 @@ async def test_permissions_allowed(self, http_client):
response = await http_client.get("/testsets", headers=owner_headers)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -565,9 +565,9 @@ async def test_update_success(self, http_client):
response_data = response.json()
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
assert response_data["_id"] == testset["id"], f"Failed for case: {description}"
# Cleanup
@@ -592,9 +592,9 @@ async def test_update_validation_failure(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
# Cleanup
await delete_testset(http_client, testset["id"], headers)
@@ -622,9 +622,9 @@ async def test_update_non_member_access(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
# Cleanup
await delete_testset(http_client, testset["id"], member_headers)
@@ -650,9 +650,9 @@ async def test_delete_success(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -674,9 +674,9 @@ async def test_delete_validation_failure(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -698,6 +698,6 @@ async def test_delete_non_existent(self, http_client):
)
# Assert
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
diff --git a/api/oss/tests/legacy/vault_router/test_vault_secrets_apis.py b/api/oss/tests/legacy/vault_router/test_vault_secrets_apis.py
index 0de50397f6..245b5affc1 100644
--- a/api/oss/tests/legacy/vault_router/test_vault_secrets_apis.py
+++ b/api/oss/tests/legacy/vault_router/test_vault_secrets_apis.py
@@ -41,9 +41,9 @@ async def test_create_secret_with_viewer_role(
json=valid_secret_payload,
)
- assert create_response.status_code == 403, (
- "Secret creation cannot be successful. Given that apikey belongs to a user with 'viewer' role."
- )
+ assert (
+ create_response.status_code == 403
+ ), "Secret creation cannot be successful. Given that apikey belongs to a user with 'viewer' role."
created_secret_message = create_response.json()["detail"]
assert (
@@ -84,9 +84,9 @@ async def test_create_secret_with_invalid_secret_kind(self, async_client):
"secrets",
json=invalid_payload,
)
- assert response.status_code == 422, (
- "Should reject payload with invalid secret kind"
- )
+ assert (
+ response.status_code == 422
+ ), "Should reject payload with invalid secret kind"
@pytest.mark.asyncio
@pytest.mark.secret_creation
@@ -104,9 +104,9 @@ async def test_create_secret_with_invalid_provider_kind(self, async_client):
"secrets",
json=invalid_payload,
)
- assert response.status_code == 422, (
- "Should reject payload with invalid secret provider kind"
- )
+ assert (
+ response.status_code == 422
+ ), "Should reject payload with invalid secret provider kind"
@pytest.mark.asyncio
@pytest.mark.secret_retrieval
@@ -204,9 +204,9 @@ async def test_update_secret_with_viewer_role(
json=update_payload,
)
- assert update_response.status_code == 403, (
- "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role."
- )
+ assert (
+ update_response.status_code == 403
+ ), "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role."
update_response_message = update_response.json()["detail"]
assert (
@@ -233,9 +233,9 @@ async def test_delete_secret(self, async_client, valid_secret_payload):
get_response = await async_client.get(
f"secrets/{secret_id}",
)
- assert get_response.status_code == 404, (
- "Deleted secret should not be retrievable"
- )
+ assert (
+ get_response.status_code == 404
+ ), "Deleted secret should not be retrievable"
@pytest.mark.asyncio
@pytest.mark.secret_deletion
@@ -254,9 +254,9 @@ async def test_delete_secret_with_viewer_role(
f"secrets/{secret_id}",
headers={"Authorization": f"ApiKey {os.environ.get('VIEWER_API_KEY', '')}"},
)
- assert delete_response.status_code == 403, (
- "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role."
- )
+ assert (
+ delete_response.status_code == 403
+ ), "Secret update cannot be successful. Given that apikey belongs to a user with 'viewer' role."
delete_response_message = delete_response.json()["detail"]
assert (
@@ -272,6 +272,6 @@ async def test_delete_nonexistent_secret(self, async_client):
response = await async_client.delete(
f"secrets/{non_existent_id}",
)
- assert response.status_code == 204, (
- "Should always return 204 since the endpoint is idempotent"
- )
+ assert (
+ response.status_code == 204
+ ), "Should always return 204 since the endpoint is idempotent"
diff --git a/api/oss/tests/legacy/workflows/admin/tests.py b/api/oss/tests/legacy/workflows/admin/tests.py
index 3b695fe6d5..efa0a31025 100644
--- a/api/oss/tests/legacy/workflows/admin/tests.py
+++ b/api/oss/tests/legacy/workflows/admin/tests.py
@@ -17,9 +17,9 @@ async def test_api_authentication_missing_token(self, http_client):
response = await http_client.get("admin/accounts", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -37,9 +37,9 @@ async def test_api_authentication_unsupported_token(self, http_client):
response = await http_client.get("admin/accounts", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -57,6 +57,6 @@ async def test_api_authentication_invalid_token(self, http_client):
response = await http_client.get("admin/accounts", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
diff --git a/api/oss/tests/legacy/workflows/auth/tests.py b/api/oss/tests/legacy/workflows/auth/tests.py
index 5dbfeb795e..32dee3c697 100644
--- a/api/oss/tests/legacy/workflows/auth/tests.py
+++ b/api/oss/tests/legacy/workflows/auth/tests.py
@@ -17,9 +17,9 @@ async def test_api_authentication_missing_token(self, http_client):
response = await http_client.get("apps", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -37,9 +37,9 @@ async def test_api_authentication_unsupported_token(self, http_client):
response = await http_client.get("apps", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
@pytest.mark.asyncio
@pytest.mark.typical
@@ -57,6 +57,6 @@ async def test_api_authentication_invalid_token(self, http_client):
response = await http_client.get("apps", headers=headers)
# ASSERT: verify response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
diff --git a/api/oss/tests/legacy/workflows/observability/tests.py b/api/oss/tests/legacy/workflows/observability/tests.py
index d0bc109555..8b6bf6b47a 100644
--- a/api/oss/tests/legacy/workflows/observability/tests.py
+++ b/api/oss/tests/legacy/workflows/observability/tests.py
@@ -70,6 +70,6 @@ async def test_completion_generate_observability_tree(
)
is_match = exact_match(workflow_nodes, observability_nodes)
- assert is_match is True, (
- "Workflow nodes does not match nodes from observability"
- )
+ assert (
+ is_match is True
+ ), "Workflow nodes does not match nodes from observability"
diff --git a/api/oss/tests/legacy/workflows/permissions/tests.py b/api/oss/tests/legacy/workflows/permissions/tests.py
index 541b8e67a5..50a98eef6e 100644
--- a/api/oss/tests/legacy/workflows/permissions/tests.py
+++ b/api/oss/tests/legacy/workflows/permissions/tests.py
@@ -67,9 +67,9 @@ async def test_permissions_principal_not_in_scope(
response_data = response.json()
# Assert: Verify the response
- assert response.status_code == expected_status, (
- f"Failed for case: {description}"
- )
- assert response.json().get("detail") == "Service execution not allowed.", (
- f"Failed for case: {description}"
- )
+ assert (
+ response.status_code == expected_status
+ ), f"Failed for case: {description}"
+ assert (
+ response.json().get("detail") == "Service execution not allowed."
+ ), f"Failed for case: {description}"
diff --git a/api/oss/tests/manual/tracing/ingestion/agenta_streaming_response.py b/api/oss/tests/manual/tracing/ingestion/agenta_streaming_response.py
index 751011e23c..3106a372b8 100644
--- a/api/oss/tests/manual/tracing/ingestion/agenta_streaming_response.py
+++ b/api/oss/tests/manual/tracing/ingestion/agenta_streaming_response.py
@@ -9,7 +9,6 @@
2. Batch size configuration issues with OpenTelemetry environment variables
"""
-
from dotenv import load_dotenv
import asyncio
diff --git a/api/oss/tests/manual/tracing/ingestion/openinference_dspy.py b/api/oss/tests/manual/tracing/ingestion/openinference_dspy.py
index 53d193eda2..d5a828df80 100644
--- a/api/oss/tests/manual/tracing/ingestion/openinference_dspy.py
+++ b/api/oss/tests/manual/tracing/ingestion/openinference_dspy.py
@@ -72,10 +72,9 @@ def forward(self, topic: str):
outline = self.build_outline(topic=topic)
sections = []
for heading, subheadings in outline.section_subheadings.items():
- section, subheadings = (
- f"## {heading}",
- [f"### {subheading}" for subheading in subheadings],
- )
+ section, subheadings = f"## {heading}", [
+ f"### {subheading}" for subheading in subheadings
+ ]
section = self.draft_section(
topic=outline.title,
section_heading=section,
diff --git a/api/oss/tests/pytest/workflows/test_workflow_revisions_queries.py b/api/oss/tests/pytest/workflows/test_workflow_revisions_queries.py
index 2f4121c8e3..abf22c9dc7 100644
--- a/api/oss/tests/pytest/workflows/test_workflow_revisions_queries.py
+++ b/api/oss/tests/pytest/workflows/test_workflow_revisions_queries.py
@@ -221,7 +221,7 @@ def test_query_paginated_workflow_revisions(
# ACT ------------------------------------------------------------------
response = authed_api(
"GET",
- "/preview/workflows/revisions/?include_archived=true&limit=1",
+ "/preview/workflows/revisions/?include_archived=true" "&limit=1",
)
# ----------------------------------------------------------------------
diff --git a/api/oss/tests/pytest/workflows/test_workflow_variants_queries.py b/api/oss/tests/pytest/workflows/test_workflow_variants_queries.py
index b294b45116..19eae82dbd 100644
--- a/api/oss/tests/pytest/workflows/test_workflow_variants_queries.py
+++ b/api/oss/tests/pytest/workflows/test_workflow_variants_queries.py
@@ -185,7 +185,7 @@ def test_query_paginated_workflow_variants(
# ACT ------------------------------------------------------------------
response = authed_api(
"GET",
- "/preview/workflows/variants/?include_archived=true&limit=1",
+ "/preview/workflows/variants/?include_archived=true" "&limit=1",
)
# ----------------------------------------------------------------------
diff --git a/api/oss/tests/pytest/workflows/test_workflows_queries.py b/api/oss/tests/pytest/workflows/test_workflows_queries.py
index 91de434e0b..57163be02b 100644
--- a/api/oss/tests/pytest/workflows/test_workflows_queries.py
+++ b/api/oss/tests/pytest/workflows/test_workflows_queries.py
@@ -144,7 +144,7 @@ def test_query_paginated_workflows(
# ACT ------------------------------------------------------------------
response = authed_api(
"GET",
- "/preview/workflows/?include_archived=true&limit=1",
+ "/preview/workflows/?include_archived=true" "&limit=1",
)
# ----------------------------------------------------------------------
diff --git a/api/oss/tests/pytest/workflows/test_workflows_retrieve.py b/api/oss/tests/pytest/workflows/test_workflows_retrieve.py
index fa6df8ea4b..8d824dc30f 100644
--- a/api/oss/tests/pytest/workflows/test_workflows_retrieve.py
+++ b/api/oss/tests/pytest/workflows/test_workflows_retrieve.py
@@ -201,7 +201,8 @@ def test_retrieve_by_revision_id(self, authed_api, mock_data):
response = authed_api(
"GET",
- f"/preview/workflows/revisions/retrieve?workflow_revision_id={revision_id}",
+ f"/preview/workflows/revisions/retrieve"
+ f"?workflow_revision_id={revision_id}",
)
assert response.status_code == 200
@@ -288,7 +289,8 @@ def test_retrieve_by_variant_id(self, authed_api, mock_data):
response = authed_api(
"GET",
- f"/preview/workflows/revisions/retrieve?workflow_variant_id={variant_id}",
+ f"/preview/workflows/revisions/retrieve"
+ f"?workflow_variant_id={variant_id}",
)
assert response.status_code == 200
diff --git a/api/pyproject.toml b/api/pyproject.toml
index e8c38e1e19..0b922bbfb8 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "api"
-version = "0.62.1"
+version = "0.62.0"
description = "Agenta API"
authors = [
{ name = "Mahmoud Mabrouk", email = "mahmoud@agenta.ai" },
diff --git a/docs/blog/entries/customize-llm-as-a-judge-output-schemas.mdx b/docs/blog/entries/customize-llm-as-a-judge-output-schemas.mdx
deleted file mode 100644
index 033e29371b..0000000000
--- a/docs/blog/entries/customize-llm-as-a-judge-output-schemas.mdx
+++ /dev/null
@@ -1,71 +0,0 @@
----
-title: "Customize LLM-as-a-Judge Output Schemas"
-slug: customize-llm-as-a-judge-output-schemas
-date: 2025-11-10
-tags: [v0.62.0]
-description: "Learn how to customize LLM-as-a-Judge evaluator output schemas with binary, multiclass, or custom JSON formats. Enable reasoning for better evaluation quality and structure feedback to match your workflow needs."
----
-
-import Image from "@theme/IdealImage";
-
-The LLM-as-a-Judge evaluator now supports custom output schemas. You can define exactly what feedback structure you need for your evaluations.
-
-
-
-
-
-
-
-## What's New
-
-### **Flexible Output Types**
-Configure the evaluator to return different types of outputs:
-- **Binary**: Return a simple yes/no or pass/fail score
-- **Multiclass**: Choose from multiple predefined categories
-- **Custom JSON**: Define any structure that fits your use case
-
-### **Include Reasoning for Better Quality**
-Enable the reasoning option to have the LLM explain its evaluation. This improves prediction quality because the model thinks through its assessment before providing a score.
-
-When you include reasoning, the evaluator returns both the score and a detailed explanation of how it arrived at that judgment.
-
-### **Advanced: Raw JSON Schema**
-For complete control, provide a raw JSON schema. The evaluator will return responses that match your exact structure.
-
-This lets you capture multiple scores, categorical labels, confidence levels, and custom fields in a single evaluation pass. You can structure the output however your workflow requires.
-
-### **Use Custom Schemas in Evaluation**
-Once configured, your custom schemas work seamlessly in the evaluation workflow. The results display in the evaluation dashboard with all your custom fields visible.
-
-This makes it easy to analyze multiple dimensions of quality in a single evaluation run.
-
-## Example Use Cases
-
-**Binary Score with Reasoning:**
-Return a simple correct/incorrect judgment along with an explanation of why the output succeeded or failed.
-
-**Multi-dimensional Feedback:**
-Capture separate scores for accuracy, relevance, completeness, and tone in one evaluation. Include reasoning for each dimension.
-
-**Structured Classification:**
-Return categorical labels (excellent/good/fair/poor) along with specific issues found and suggestions for improvement.
-
-## Getting Started
-
-To use custom output schemas with LLM-as-a-Judge:
-
-1. Open the evaluator configuration
-2. Select your desired output type (binary, multiclass, or custom)
-3. Enable reasoning if you want explanations
-4. For advanced use, provide your JSON schema
-5. Run your evaluation
-
-Learn more in the [LLM-as-a-Judge documentation](/evaluation/configure-evaluators/llm-as-a-judge).
diff --git a/docs/blog/main.mdx b/docs/blog/main.mdx
index 66a0256cb0..e55eed8a9c 100644
--- a/docs/blog/main.mdx
+++ b/docs/blog/main.mdx
@@ -10,33 +10,6 @@ import Image from "@theme/IdealImage";
-### [Customize LLM-as-a-Judge Output Schemas](/changelog/customize-llm-as-a-judge-output-schemas)
-
-_10 November 2025_
-
-**v0.62.0**
-
-
+
+ Welcome to Agenta Cloud! Our platform helps you build, deploy, and manage AI
+ applications with ease. Take this quick 2-minute tour to discover how to create your
+ first AI app, set up deployments, and monitor performance, all in one place. Or, feel
+ free to skip and explore at your own pace.
+
+