diff --git a/.all-contributorsrc b/.all-contributorsrc index 0bbf050227..df1ad81797 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -512,6 +512,15 @@ "doc", "example" ] + }, + { + "login": "adityadewan22-hub", + "name": "adityadewan22-hub", + "avatar_url": "https://avatars.githubusercontent.com/u/225586510?v=4", + "profile": "https://github.com/adityadewan22-hub", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, diff --git a/AGENTS.md b/AGENTS.md index e76a9c540d..63dd92b381 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -377,6 +377,119 @@ export const createItemAtom = atom( --- +### Entity Controller Pattern + +For entities requiring CRUD operations with draft state, loading indicators, and cache management, use the **Entity Controller Pattern**. This provides a unified API that abstracts multiple atoms into a single cohesive interface. + +**Full documentation:** `web/oss/src/state/entities/shared/README.md` + +**Quick Decision - Which API to Use:** + +| Need | API | Returns | +|------|-----|---------| +| Full state + actions | `entity.controller(id)` | `[state, dispatch]` | +| Data only | `entity.selectors.data(id)` | `T \| null` | +| Loading/error | `entity.selectors.query(id)` | `QueryState` | +| Dirty indicator | `entity.selectors.isDirty(id)` | `boolean` | +| Single cell (tables) | `entity.selectors.cell({id, col})` | `unknown` | +| Dispatch in atoms | `entity.actions.update/discard` | Write atom | + +**Basic Usage:** + +```typescript +import {testcase} from "@/oss/state/entities/testcase" + +// Full controller - state + dispatch +function TestcaseEditor({testcaseId}: {testcaseId: string}) { + const [state, dispatch] = useAtom(testcase.controller(testcaseId)) + + if (state.isPending) return + if (!state.data) return + + return ( + dispatch({ + type: "update", + changes: {input: e.target.value} + })} + /> + ) +} + +// Fine-grained selector - only re-renders on data change +function TestcaseDisplay({testcaseId}: {testcaseId: string}) { + const data = useAtomValue(testcase.selectors.data(testcaseId)) + if (!data) return null + return
<div>{data.input}</div>
+} +``` + +**Reading Multiple Entities:** + +```typescript +// Create a derived atom that subscribes to all selected entities +const useMultipleTestcases = (ids: string[]) => { + const dataAtom = useMemo( + () => atom((get) => ids.map(id => get(testcase.selectors.data(id))).filter(Boolean)), + [ids.join(",")] + ) + return useAtomValue(dataAtom) +} +``` + +**Anti-Patterns to Avoid:** + +```typescript +// BAD - No reactivity, snapshot read +const globalStore = getDefaultStore() +const data = globalStore.get(testcase.selectors.data(id)) + +// GOOD - Proper subscription +const data = useAtomValue(testcase.selectors.data(id)) +``` + +```typescript +// BAD - Variable shadowing +import {testcase} from "@/oss/state/entities/testcase" +const {testcase, ...rest} = entity // Shadows import! + +// GOOD - Rename destructured variable +const {testcase: testcaseField, ...rest} = entity +``` + +**Available Controllers:** + +| Entity | Import | Description | +|--------|--------|-------------| +| Testcase | `testcase` from `@/oss/state/entities/testcase` | Testcase with cell subscriptions + drill-in | +| Trace Span | `traceSpan` from `@/oss/state/entities/trace` | Trace span with attribute drill-in | +| Revision | `revision` from `@/oss/state/entities/testset` | Revision with column management | +| Testset | `testset` from `@/oss/state/entities/testset` | Testset with list/detail queries | + +**Architecture:** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Controller │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ Query │ │ Draft │ │ isDirty │ │ +│ │ (server) │→ │ (local) │→ │ (derived) │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ ↓ ↓ │ +│ ┌─────────────────────────────────────────────────────────────┐│ +│ │ Entity Atom (merged) ││ +│ └─────────────────────────────────────────────────────────────┘│ +└─────────────────────────────────────────────────────────────────┘ +``` + +- **Query atoms** are the single source of truth for server data +- **Draft atoms** store local changes only +- **Entity atoms** merge: `query.data + draft → merged entity` +- **Dirty detection** compares draft to server data + +--- + **Legacy: SWR Pattern (avoid for new code)** We previously used SWR with Axios for data fetching. This pattern is still present in older code but should not be used for new features. diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000000..f6097ac718 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,7 @@ +# Instructions for Claude + +Please read and follow all instructions in: + +@AGENTS.md + +Project conventions, guidelines, and best practices are documented there. diff --git a/README.md b/README.md index d87e7616aa..26322bb662 100644 --- a/README.md +++ b/README.md @@ -191,7 +191,7 @@ We welcome contributions of all kinds — from filing issues and sharing ideas t ## Contributors ✨ -[![All Contributors](https://img.shields.io/badge/all_contributors-54-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-55-orange.svg?style=flat-square)](#contributors-) Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): @@ -270,6 +270,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Vahant Sharma
📖
Muhammad Muzammil
💻
Sirous Namjoo
📖 💡
+ adityadewan22-hub
💻 diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py b/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py index ac96ece37d..3734374db4 100644 --- a/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py +++ b/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py @@ -38,8 +38,8 @@ ) # Redis client and TracingWorker for publishing spans to Redis Streams -if env.REDIS_URI_DURABLE: - redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False) +if env.redis.uri_durable: + redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False) tracing_worker = TracingWorker( service=tracing_service, redis_client=redis_client, @@ -81,9 +81,10 @@ async def _fetch_project_owner( WorkspaceMemberDBE.role == "owner", ProjectDBE.id == project_id, ) + .order_by(WorkspaceMemberDBE.created_at.asc()) ) result = await connection.execute(workspace_owner_query) - owner = result.scalar_one_or_none() + owner = result.scalars().first() return owner diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py b/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py index 90dca62adf..a3fbfa164c 100644 --- a/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py +++ b/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py @@ -69,9 +69,10 @@ async def _fetch_project_owner( WorkspaceMemberDBE.role == "owner", ProjectDBE.id == project_id, ) + .order_by(WorkspaceMemberDBE.created_at.asc()) ) result = await connection.execute(workspace_owner_query) - owner = result.scalar_one_or_none() + owner = result.scalars().first() return owner diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py index cca11d88e0..22c34367a4 100644 --- a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py +++ b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py @@ -28,7 +28,7 @@ def get_or_create_workspace_default_project( if project is None: statement = insert(ProjectDB).values( - project_name="Default Project", + project_name="Default", is_default=True, workspace_id=workspace.id, organization_id=workspace.organization_id, diff --git a/api/ee/databases/postgres/migrations/core/utils.py b/api/ee/databases/postgres/migrations/core/utils.py index 58e4b75fb8..11d92b2114 100644 --- a/api/ee/databases/postgres/migrations/core/utils.py +++ b/api/ee/databases/postgres/migrations/core/utils.py @@ -118,7 +118,9 @@ async def get_pending_migration_head(): def run_alembic_migration(): """ - Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users. + Applies migration for first-time users and also checks the environment variable + "ALEMBIC_AUTO_MIGRATIONS" (legacy: "AGENTA_AUTO_MIGRATIONS") to determine whether + to apply migrations for returning users. 
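+
+    Example (illustrative; the exact value semantics follow the legacy variable):
+    setting ALEMBIC_AUTO_MIGRATIONS=true in the container environment opts
+    returning deployments into automatic migrations.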
""" try: diff --git a/api/ee/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py b/api/ee/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py new file mode 100644 index 0000000000..a22123f1e7 --- /dev/null +++ b/api/ee/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py @@ -0,0 +1,51 @@ +"""add slug to organizations + +Revision ID: 12d23a8f7dde +Revises: 59b85eb7516c +Create Date: 2025-12-25 00:00:00.000000+00:00 + +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision: str = "12d23a8f7dde" +down_revision: Union[str, None] = "59b85eb7516c" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Add slug column to organizations table + op.add_column( + "organizations", + sa.Column( + "slug", + sa.String(), + nullable=True, + ), + ) + + # Add unique constraint on slug + op.create_unique_constraint( + "uq_organizations_slug", + "organizations", + ["slug"], + ) + + # Add index for faster lookups + op.create_index( + "ix_organizations_slug", + "organizations", + ["slug"], + ) + + +def downgrade() -> None: + # Drop in reverse order + op.drop_index("ix_organizations_slug", table_name="organizations") + op.drop_constraint("uq_organizations_slug", "organizations", type_="unique") + op.drop_column("organizations", "slug") diff --git a/api/ee/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py b/api/ee/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py new file mode 100644 index 0000000000..5a9743f1d3 --- /dev/null +++ b/api/ee/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py @@ -0,0 +1,361 @@ +"""add sso oidc tables + +Revision ID: 59b85eb7516c +Revises: 80910d2fa9a4 +Create Date: 2025-12-10 08:53:56.000000+00:00 + +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision: str = "59b85eb7516c" +down_revision: Union[str, None] = "80910d2fa9a4" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # 1. 
user_identities table + op.create_table( + "user_identities", + sa.Column( + "id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "user_id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "method", + sa.String(), + nullable=False, + ), + sa.Column( + "subject", + sa.String(), + nullable=False, + ), + sa.Column( + "domain", + sa.String(), + nullable=True, + ), + sa.Column( + "created_at", + sa.TIMESTAMP(timezone=True), + server_default=sa.text("CURRENT_TIMESTAMP"), + nullable=False, + ), + sa.Column( + "updated_at", + sa.TIMESTAMP(timezone=True), + nullable=True, + ), + sa.Column( + "deleted_at", + sa.TIMESTAMP(timezone=True), + nullable=True, + ), + sa.Column( + "created_by_id", + sa.UUID(), + nullable=True, + ), + sa.Column( + "updated_by_id", + sa.UUID(), + nullable=True, + ), + sa.Column( + "deleted_by_id", + sa.UUID(), + nullable=True, + ), + sa.PrimaryKeyConstraint("id"), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.id"], + ondelete="CASCADE", + ), + sa.UniqueConstraint( + "method", + "subject", + name="uq_user_identities_method_subject", + ), + sa.Index( + "ix_user_identities_user_method", + "user_id", + "method", + ), + sa.Index( + "ix_user_identities_domain", + "domain", + ), + ) + + # 2. organization_domains table + op.create_table( + "organization_domains", + sa.Column( + "id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "organization_id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "slug", + sa.String(), + nullable=False, + ), + sa.Column( + "name", + sa.String(), + nullable=True, + ), + sa.Column( + "description", + sa.String(), + nullable=True, + ), + sa.Column( + "token", + sa.String(), + nullable=True, + ), + sa.Column( + "flags", + postgresql.JSONB(none_as_null=True), + nullable=True, + ), + sa.Column( + "tags", + postgresql.JSONB(none_as_null=True), + nullable=True, + ), + sa.Column( + "meta", + postgresql.JSONB(none_as_null=True), + nullable=True, + ), + sa.Column( + "created_at", + sa.TIMESTAMP(timezone=True), + server_default=sa.text("CURRENT_TIMESTAMP"), + nullable=False, + ), + sa.Column( + "updated_at", + sa.TIMESTAMP(timezone=True), + nullable=True, + ), + sa.Column( + "deleted_at", + sa.TIMESTAMP(timezone=True), + nullable=True, + ), + sa.Column( + "created_by_id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "updated_by_id", + sa.UUID(), + nullable=True, + ), + sa.Column( + "deleted_by_id", + sa.UUID(), + nullable=True, + ), + sa.PrimaryKeyConstraint("id"), + sa.ForeignKeyConstraint( + ["organization_id"], + ["organizations.id"], + ondelete="CASCADE", + ), + sa.Index( + "ix_organization_domains_org", + "organization_id", + ), + sa.Index( + "ix_organization_domains_flags", + "flags", + postgresql_using="gin", + ), + ) + op.create_index( + "uq_organization_domains_slug_verified", + "organization_domains", + ["slug"], + unique=True, + postgresql_where=sa.text("(flags->>'is_verified') = 'true'"), + ) + + # 3. 
organization_providers table + op.create_table( + "organization_providers", + sa.Column( + "id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "organization_id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "slug", + sa.String(), + nullable=False, + ), + sa.Column( + "name", + sa.String(), + nullable=True, + ), + sa.Column( + "description", + sa.String(), + nullable=True, + ), + sa.Column( + "secret_id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "flags", + postgresql.JSONB(none_as_null=True), + nullable=True, + ), + sa.Column( + "tags", + postgresql.JSONB(none_as_null=True), + nullable=True, + ), + sa.Column( + "meta", + postgresql.JSONB(none_as_null=True), + nullable=True, + ), + sa.Column( + "created_at", + sa.TIMESTAMP(timezone=True), + server_default=sa.text("CURRENT_TIMESTAMP"), + nullable=False, + ), + sa.Column( + "updated_at", + sa.TIMESTAMP(timezone=True), + nullable=True, + ), + sa.Column( + "deleted_at", + sa.TIMESTAMP(timezone=True), + nullable=True, + ), + sa.Column( + "created_by_id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "updated_by_id", + sa.UUID(), + nullable=True, + ), + sa.Column( + "deleted_by_id", + sa.UUID(), + nullable=True, + ), + sa.PrimaryKeyConstraint("id"), + sa.ForeignKeyConstraint( + ["secret_id"], + ["secrets.id"], + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["organization_id"], + ["organizations.id"], + ondelete="CASCADE", + ), + sa.UniqueConstraint( + "organization_id", + "slug", + name="uq_organization_providers_org_slug", + ), + sa.Index( + "ix_organization_providers_org", + "organization_id", + ), + sa.Index( + "ix_organization_providers_flags", + "flags", + postgresql_using="gin", + ), + ) + + # 4. Add is_active to users table + op.add_column( + "users", + sa.Column( + "is_active", + sa.Boolean(), + nullable=False, + server_default="true", + ), + ) + + +def downgrade() -> None: + # Drop in reverse order + op.drop_column("users", "is_active") + + op.drop_index( + "ix_organization_providers_flags", + table_name="organization_providers", + ) + op.drop_index( + "ix_organization_providers_org", + table_name="organization_providers", + ) + op.drop_table("organization_providers") + + op.drop_index( + "uq_organization_domains_slug_verified", + table_name="organization_domains", + ) + op.drop_index( + "ix_organization_domains_flags", + table_name="organization_domains", + ) + op.drop_index( + "ix_organization_domains_org", + table_name="organization_domains", + ) + op.drop_table("organization_domains") + + op.drop_index( + "ix_user_identities_domain", + table_name="user_identities", + ) + op.drop_index( + "ix_user_identities_user_method", + table_name="user_identities", + ) + op.drop_table("user_identities") diff --git a/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py b/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py index 1edfdda8cc..57582fe92b 100644 --- a/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py +++ b/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py @@ -14,6 +14,7 @@ from alembic import context from sqlalchemy import Connection, func, insert, select, update +from sqlalchemy.orm import load_only import stripe @@ -21,10 +22,10 @@ from oss.src.utils.env import env from oss.src.models.db_models import UserDB from oss.src.models.db_models import AppDB -from oss.src.models.db_models import OrganizationDB from ee.src.models.db_models import OrganizationMemberDB from 
oss.src.models.db_models import ProjectDB from ee.src.models.db_models import ProjectMemberDB +from ee.src.models.extended.deprecated_models import DeprecatedOrganizationDB from ee.src.dbs.postgres.subscriptions.dbes import SubscriptionDBE from ee.src.dbs.postgres.meters.dbes import MeterDBE from ee.src.core.subscriptions.types import FREE_PLAN @@ -48,7 +49,7 @@ def upgrade() -> None: now = datetime.now(timezone.utc) # --> GET ORGANIZATION COUNT - query = select(func.count()).select_from(OrganizationDB) + query = select(func.count()).select_from(DeprecatedOrganizationDB) nof_organizations = session.execute(query).scalar() # <-- GET ORGANIZATION COUNT @@ -60,7 +61,12 @@ def upgrade() -> None: while True: # --> GET ORGANIZATION BATCH query = ( - select(OrganizationDB) + select(DeprecatedOrganizationDB) + .options( + load_only( + DeprecatedOrganizationDB.id, DeprecatedOrganizationDB.owner + ) + ) .limit(organization_batch_size) .offset(organization_batch_index * organization_batch_size) ) diff --git a/api/ee/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py b/api/ee/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py new file mode 100644 index 0000000000..727e3b362e --- /dev/null +++ b/api/ee/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py @@ -0,0 +1,620 @@ +"""clean up organizations + +Revision ID: a9f3e8b7c5d1 +Revises: 12d23a8f7dde +Create Date: 2025-12-26 00:00:00.000000 + +""" + +from typing import Sequence, Union +from alembic import op +import sqlalchemy as sa +from sqlalchemy import text +from oss.src.utils.env import env +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision: str = "a9f3e8b7c5d1" +down_revision: Union[str, None] = "12d23a8f7dde" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """ + Clean up organizations table and introduce new schema. 
+ + Changes: + - Add flags (JSONB, nullable) with is_personal and is_demo fields + - Migrate type='view-only' to flags.is_demo=true + - Drop type column + - Convert owner (String) to owner_id (UUID, NOT NULL) + - Add created_by_id (UUID, NOT NULL) + - Ensure created_at is NOT NULL, remove default from updated_at + - Add updated_by_id (UUID, nullable) + - Add deleted_at (DateTime, nullable) + - Add deleted_by_id (UUID, nullable) + - Add role field to organization_members (String, default="member") + - Populate role='owner' for organization owners + - Add LegacyLifecycle fields to organization_members (created_at, updated_at, updated_by_id - all nullable) + - Add updated_by_id to workspace_members (nullable) + - Add updated_by_id to project_members (nullable) + - Drop user_organizations table (replaced by organization_members) + - Drop invitations table (obsolete) + + EE Mode: + - Organizations with >1 member → is_personal=false + - Organizations with =1 member and user owns it → is_personal=true + - Create missing personal orgs for users without one + - Normalize names: personal orgs → "Personal", slug → NULL + """ + conn = op.get_bind() + + # Step 1: Add JSONB columns (flags, tags, meta - all nullable) + op.add_column( + "organizations", + sa.Column( + "flags", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + ) + op.add_column( + "organizations", + sa.Column( + "tags", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + ) + op.add_column( + "organizations", + sa.Column( + "meta", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + ) + + # Step 2: Add new UUID columns (all nullable initially for migration) + op.add_column( + "organizations", + sa.Column("owner_id", postgresql.UUID(as_uuid=True), nullable=True), + ) + op.add_column( + "organizations", + sa.Column("created_by_id", postgresql.UUID(as_uuid=True), nullable=True), + ) + op.add_column( + "organizations", + sa.Column("updated_by_id", postgresql.UUID(as_uuid=True), nullable=True), + ) + op.add_column( + "organizations", + sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True), + ) + op.add_column( + "organizations", + sa.Column("deleted_by_id", postgresql.UUID(as_uuid=True), nullable=True), + ) + + # Step 3: Get member counts for all organizations + conn.execute( + text(""" + CREATE TEMP TABLE org_member_counts AS + SELECT + o.id as org_id, + COUNT(om.id) as member_count, + o.owner as owner_str + FROM organizations o + LEFT JOIN organization_members om ON om.organization_id = o.id + GROUP BY o.id, o.owner + """) + ) + + # Step 4: Migrate type='view-only' to is_demo=true for all orgs + # and mark multi-member orgs as is_personal=false + conn.execute( + text(""" + UPDATE organizations o + SET flags = jsonb_build_object( + 'is_demo', CASE WHEN o.type = 'view-only' THEN true ELSE false END, + 'is_personal', false + ) + FROM org_member_counts omc + WHERE o.id = omc.org_id + AND omc.member_count > 1 + """) + ) + + # Step 5: Mark single-member orgs owned by that member as personal + # NOTE: owner is String type, needs casting for comparison + conn.execute( + text(""" + UPDATE organizations o + SET flags = jsonb_build_object( + 'is_demo', CASE WHEN o.type = 'view-only' THEN true ELSE false END, + 'is_personal', true + ) + FROM org_member_counts omc + WHERE o.id = omc.org_id + AND omc.member_count = 1 + AND EXISTS ( + SELECT 1 FROM organization_members om + WHERE om.organization_id = o.id + AND om.user_id::text = o.owner + ) + """) + ) + + # Step 6: Mark remaining single-member orgs 
as collaborative (is_personal=false) + conn.execute( + text(""" + UPDATE organizations o + SET flags = jsonb_build_object( + 'is_demo', CASE WHEN o.type = 'view-only' THEN true ELSE false END, + 'is_personal', false + ) + FROM org_member_counts omc + WHERE o.id = omc.org_id + AND omc.member_count = 1 + AND (o.flags IS NULL OR o.flags = '{}'::jsonb) + """) + ) + + # Step 7: Migrate owner (String) to owner_id (UUID) + # Set owner_id = owner::uuid for existing orgs + conn.execute( + text(""" + UPDATE organizations + SET owner_id = owner::uuid + WHERE owner IS NOT NULL + """) + ) + + # Step 8: Set created_by_id = owner_id for existing orgs + conn.execute( + text(""" + UPDATE organizations + SET created_by_id = owner_id + WHERE owner_id IS NOT NULL + """) + ) + + # Step 9: Set updated_by_id = owner_id for existing orgs + conn.execute( + text(""" + UPDATE organizations + SET updated_by_id = owner_id + WHERE owner_id IS NOT NULL + """) + ) + + # Step 10: Create missing personal organizations for users without one + conn.execute( + text(""" + INSERT INTO organizations ( + id, + name, + slug, + description, + owner, + owner_id, + created_at, + created_by_id, + updated_at, + updated_by_id, + flags + ) + SELECT + gen_random_uuid(), + 'Personal', + NULL, + NULL, + u.id::text, + u.id, + NOW(), + u.id, + NOW(), + u.id, + '{"is_demo": false, "is_personal": true}'::jsonb + FROM users u + WHERE NOT EXISTS ( + SELECT 1 FROM organizations o + WHERE o.owner_id = u.id + AND o.flags->>'is_personal' = 'true' + ) + """) + ) + + # Step 10b: Add role column to organization_members + op.add_column( + "organization_members", + sa.Column( + "role", + sa.String(), + nullable=False, + server_default="member", + ), + ) + + # Step 10c: Set role='owner' for organization owners based on owner_id + conn.execute( + text(""" + UPDATE organization_members om + SET role = 'owner' + FROM organizations o + WHERE om.organization_id = o.id + AND om.user_id = o.owner_id + """) + ) + + # Step 10d: Add LegacyLifecycle fields to organization_members + op.add_column( + "organization_members", + sa.Column("created_at", sa.TIMESTAMP(timezone=True), nullable=True), + ) + op.add_column( + "organization_members", + sa.Column("updated_at", sa.TIMESTAMP(timezone=True), nullable=True), + ) + op.add_column( + "organization_members", + sa.Column("updated_by_id", sa.UUID(), nullable=True), + ) + + # Step 10e: Add updated_by_id to workspace_members + op.add_column( + "workspace_members", + sa.Column("updated_by_id", sa.UUID(), nullable=True), + ) + + # Step 10f: Add updated_by_id to project_members + op.add_column( + "project_members", + sa.Column("updated_by_id", sa.UUID(), nullable=True), + ) + + # Step 11: Add users as members to their new personal orgs + conn.execute( + text(""" + INSERT INTO organization_members (id, user_id, organization_id, role) + SELECT + gen_random_uuid(), + o.owner_id, + o.id, + 'owner' + FROM organizations o + WHERE o.flags->>'is_personal' = 'true' + AND NOT EXISTS ( + SELECT 1 FROM organization_members om + WHERE om.organization_id = o.id + AND om.user_id = o.owner_id + ) + """) + ) + + # Step 12: Normalize personal organizations + conn.execute( + text(""" + UPDATE organizations + SET + name = 'Personal', + slug = NULL + WHERE flags->>'is_personal' = 'true' + """) + ) + + # Step 13: Ensure any remaining orgs have flags set + conn.execute( + text(""" + UPDATE organizations + SET flags = jsonb_build_object( + 'is_demo', CASE WHEN type = 'view-only' THEN true ELSE false END, + 'is_personal', false + ) + WHERE flags IS NULL 
OR flags = '{}'::jsonb + """) + ) + + # Step 13b: Ensure all organizations have complete flag defaults + # This ensures all auth and access control flags are set with defaults + allow_email_default = "true" if env.auth.email_enabled else "false" + allow_social_default = "true" if env.auth.oidc_enabled else "false" + allow_sso_default = "false" + allow_root_default = "false" + + conn.execute( + text(f""" + UPDATE organizations + SET flags = flags || + jsonb_build_object( + 'allow_email', COALESCE((flags->>'allow_email')::boolean, {allow_email_default}), + 'allow_social', COALESCE((flags->>'allow_social')::boolean, {allow_social_default}), + 'allow_sso', COALESCE((flags->>'allow_sso')::boolean, {allow_sso_default}), + 'allow_root', COALESCE((flags->>'allow_root')::boolean, {allow_root_default}), + 'domains_only', COALESCE((flags->>'domains_only')::boolean, false), + 'auto_join', COALESCE((flags->>'auto_join')::boolean, false) + ) + WHERE flags IS NOT NULL + """) + ) + + # Step 13c: Add unique constraint: one personal org per owner + op.create_index( + "uq_organizations_owner_personal", + "organizations", + ["owner_id"], + unique=True, + postgresql_where=sa.text("(flags->>'is_personal') = 'true'"), + ) + + # Clean up temp table + conn.execute(text("DROP TABLE IF EXISTS org_member_counts")) + + # Step 14: Ensure created_at has a value for all existing records + conn.execute( + text(""" + UPDATE organizations + SET created_at = COALESCE(created_at, NOW()) + WHERE created_at IS NULL + """) + ) + + # Step 15: Make owner_id, created_by_id, and created_at NOT NULL; remove updated_at default + op.alter_column("organizations", "owner_id", nullable=False) + op.alter_column("organizations", "created_by_id", nullable=False) + op.alter_column("organizations", "created_at", nullable=False) + op.alter_column("organizations", "updated_at", server_default=None) + + # Step 16: Add foreign key constraints + op.create_foreign_key( + "fk_organizations_owner_id_users", + "organizations", + "users", + ["owner_id"], + ["id"], + ondelete="RESTRICT", + ) + op.create_foreign_key( + "fk_organizations_created_by_id_users", + "organizations", + "users", + ["created_by_id"], + ["id"], + ondelete="RESTRICT", + ) + op.create_foreign_key( + "fk_organizations_updated_by_id_users", + "organizations", + "users", + ["updated_by_id"], + ["id"], + ondelete="SET NULL", + ) + op.create_foreign_key( + "fk_organizations_deleted_by_id_users", + "organizations", + "users", + ["deleted_by_id"], + ["id"], + ondelete="SET NULL", + ) + + # Step 16b: Ensure organization_members cascade on organization delete + op.drop_constraint( + "organization_members_organization_id_fkey", + "organization_members", + type_="foreignkey", + ) + op.create_foreign_key( + "organization_members_organization_id_fkey", + "organization_members", + "organizations", + ["organization_id"], + ["id"], + ondelete="CASCADE", + ) + + # Step 16c: Ensure workspaces cascade on organization delete + try: + op.drop_constraint( + "workspaces_organization_id_fkey", + "workspaces", + type_="foreignkey", + ) + except Exception: + pass # Constraint might not exist yet + op.create_foreign_key( + "workspaces_organization_id_fkey", + "workspaces", + "organizations", + ["organization_id"], + ["id"], + ondelete="CASCADE", + ) + + # Step 16c2: Ensure workspace_members cascade on workspace delete + try: + op.drop_constraint( + "workspace_members_workspace_id_fkey", + "workspace_members", + type_="foreignkey", + ) + except Exception: + pass # Constraint might not exist yet + 
op.create_foreign_key( + "workspace_members_workspace_id_fkey", + "workspace_members", + "workspaces", + ["workspace_id"], + ["id"], + ondelete="CASCADE", + ) + + # Step 16d: Ensure projects cascade on organization delete + try: + op.drop_constraint( + "projects_organization_id_fkey", + "projects", + type_="foreignkey", + ) + except Exception: + pass # Constraint might not exist yet + op.create_foreign_key( + "projects_organization_id_fkey", + "projects", + "organizations", + ["organization_id"], + ["id"], + ondelete="CASCADE", + ) + + # Note: Other tables (testsets, evaluations, scenarios, etc.) are linked to + # organizations via projects, so they will cascade delete through projects. + # They should keep SET NULL on organization_id for direct references. + + # Step 17: Drop type and owner columns + op.drop_column("organizations", "type") + op.drop_column("organizations", "owner") + + # Step 18: Drop obsolete tables + conn.execute(text("DROP TABLE IF EXISTS user_organizations CASCADE")) + conn.execute(text("DROP TABLE IF EXISTS invitations CASCADE")) + + +def downgrade() -> None: + """Restore organizations type and owner columns and revert schema changes.""" + conn = op.get_bind() + + # Drop foreign key constraints + op.drop_constraint( + "fk_organizations_deleted_by_id_users", "organizations", type_="foreignkey" + ) + op.drop_constraint( + "fk_organizations_updated_by_id_users", "organizations", type_="foreignkey" + ) + op.drop_constraint( + "fk_organizations_created_by_id_users", "organizations", type_="foreignkey" + ) + op.drop_constraint( + "fk_organizations_owner_id_users", "organizations", type_="foreignkey" + ) + op.drop_constraint( + "organization_members_organization_id_fkey", + "organization_members", + type_="foreignkey", + ) + + # Recreate type column + op.add_column("organizations", sa.Column("type", sa.String(), nullable=True)) + + # Migrate flags back to type + conn.execute( + text(""" + UPDATE organizations + SET type = CASE + WHEN flags->>'is_demo' = 'true' THEN 'view-only' + ELSE 'default' + END + """) + ) + + op.alter_column("organizations", "type", nullable=False) + + # Recreate owner column + op.add_column("organizations", sa.Column("owner", sa.String(), nullable=True)) + + # Migrate owner_id back to owner (UUID to String) + conn.execute( + text(""" + UPDATE organizations + SET owner = owner_id::text + WHERE owner_id IS NOT NULL + """) + ) + + # Restore updated_at default + conn.execute( + text(""" + UPDATE organizations + SET updated_at = COALESCE(updated_at, NOW()) + WHERE updated_at IS NULL + """) + ) + op.alter_column( + "organizations", + "updated_at", + server_default=sa.text("NOW()"), + nullable=False, + ) + + # Restore organization_members FK without cascade + op.create_foreign_key( + "organization_members_organization_id_fkey", + "organization_members", + "organizations", + ["organization_id"], + ["id"], + ) + + # Restore workspaces FK without cascade + op.drop_constraint( + "workspaces_organization_id_fkey", + "workspaces", + type_="foreignkey", + ) + op.create_foreign_key( + "workspaces_organization_id_fkey", + "workspaces", + "organizations", + ["organization_id"], + ["id"], + ) + + # Restore projects FK without cascade + op.drop_constraint( + "projects_organization_id_fkey", + "projects", + type_="foreignkey", + ) + op.create_foreign_key( + "projects_organization_id_fkey", + "projects", + "organizations", + ["organization_id"], + ["id"], + ) + + # Drop unique constraint for personal orgs + op.drop_index( + "uq_organizations_owner_personal", + 
table_name="organizations", + ) + + # Drop role column from organization_members + op.drop_column("organization_members", "role") + + # Drop LegacyLifecycle columns from organization_members + op.drop_column("organization_members", "updated_by_id") + op.drop_column("organization_members", "updated_at") + op.drop_column("organization_members", "created_at") + + # Drop updated_by_id from workspace_members + op.drop_column("workspace_members", "updated_by_id") + + # Drop updated_by_id from project_members + op.drop_column("project_members", "updated_by_id") + + # Drop new columns + op.drop_column("organizations", "deleted_by_id") + op.drop_column("organizations", "deleted_at") + op.drop_column("organizations", "updated_by_id") + op.drop_column("organizations", "created_by_id") + op.drop_column("organizations", "owner_id") + op.drop_column("organizations", "meta") + op.drop_column("organizations", "tags") + op.drop_column("organizations", "flags") + + # Note: We don't recreate user_organizations and invitations tables + # as they contain no data at this point diff --git a/api/ee/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py b/api/ee/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py new file mode 100644 index 0000000000..3a97162a1d --- /dev/null +++ b/api/ee/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py @@ -0,0 +1,161 @@ +"""add organization scope to secrets and link sso providers + +Revision ID: c3b2a1d4e5f6 +Revises: a9f3e8b7c5d1 +Create Date: 2025-01-10 00:00:00.000000 + +""" + +from typing import Sequence, Union +import json + +import sqlalchemy as sa +from alembic import op +from sqlalchemy import text +import uuid_utils.compat as uuid + +from oss.src.utils.env import env + + +# revision identifiers, used by Alembic. +revision: str = "c3b2a1d4e5f6" +down_revision: Union[str, None] = "a9f3e8b7c5d1" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + connection = op.get_bind() + + op.execute("ALTER TYPE secretkind_enum ADD VALUE IF NOT EXISTS 'SSO_PROVIDER'") + + inspector = sa.inspect(connection) + secrets_columns = {col["name"] for col in inspector.get_columns("secrets")} + + if "organization_id" not in secrets_columns: + op.add_column("secrets", sa.Column("organization_id", sa.UUID(), nullable=True)) + + op.alter_column("secrets", "project_id", nullable=True) + + secrets_fks = {fk["name"] for fk in inspector.get_foreign_keys("secrets")} + if "secrets_organization_id_fkey" not in secrets_fks: + op.create_foreign_key( + "secrets_organization_id_fkey", + "secrets", + "organizations", + ["organization_id"], + ["id"], + ondelete="CASCADE", + ) + + org_providers_columns = { + col["name"] for col in inspector.get_columns("organization_providers") + } + if "secret_id" not in org_providers_columns: + op.add_column( + "organization_providers", sa.Column("secret_id", sa.UUID(), nullable=True) + ) + + org_providers_fks = { + fk["name"] for fk in inspector.get_foreign_keys("organization_providers") + } + if "organization_providers_secret_id_fkey" not in org_providers_fks: + op.create_foreign_key( + "organization_providers_secret_id_fkey", + "organization_providers", + "secrets", + ["secret_id"], + ["id"], + ondelete="CASCADE", + ) + + if "settings" in org_providers_columns: + encryption_key = env.agenta.crypt_key + if not encryption_key: + raise RuntimeError( + "Encryption key not found. 
Cannot migrate organization provider secrets." + ) + + providers = connection.execute( + sa.text( + """ + SELECT id, organization_id, slug, name, description, settings, created_at, updated_at + FROM organization_providers + WHERE secret_id IS NULL + """ + ) + ).fetchall() + + for provider in providers: + settings = provider.settings or {} + settings.setdefault("client_id", "") + settings.setdefault("client_secret", "") + settings.setdefault("issuer_url", "") + settings.setdefault("scopes", []) + settings.setdefault("extra", {}) + + secret_data = json.dumps({"provider": settings}) + secret_id = uuid.uuid7() + + connection.execute( + text( + """ + INSERT INTO secrets ( + id, kind, data, organization_id, project_id, created_at, updated_at, name, description + ) + VALUES ( + :id, + 'SSO_PROVIDER', + pgp_sym_encrypt(:data, :key), + :organization_id, + NULL, + :created_at, + :updated_at, + :name, + :description + ) + """ + ), + { + "id": secret_id, + "data": secret_data, + "key": encryption_key, + "organization_id": provider.organization_id, + "created_at": provider.created_at, + "updated_at": provider.updated_at, + "name": provider.slug, + "description": provider.description, + }, + ) + + connection.execute( + sa.text( + "UPDATE organization_providers SET secret_id = :secret_id WHERE id = :provider_id" + ), + {"secret_id": secret_id, "provider_id": provider.id}, + ) + + op.drop_column("organization_providers", "settings") + + op.alter_column("organization_providers", "secret_id", nullable=False) + + +def downgrade() -> None: + op.drop_constraint( + "organization_providers_secret_id_fkey", + "organization_providers", + type_="foreignkey", + ) + op.add_column( + "organization_providers", + sa.Column( + "settings", + sa.JSON(), + nullable=True, + ), + ) + op.drop_column("organization_providers", "secret_id") + + op.drop_constraint("secrets_organization_id_fkey", "secrets", type_="foreignkey") + op.drop_column("secrets", "organization_id") + op.alter_column("secrets", "project_id", nullable=False) diff --git a/api/ee/databases/postgres/migrations/tracing/utils.py b/api/ee/databases/postgres/migrations/tracing/utils.py index 00f55a7315..13a32a0679 100644 --- a/api/ee/databases/postgres/migrations/tracing/utils.py +++ b/api/ee/databases/postgres/migrations/tracing/utils.py @@ -110,7 +110,9 @@ async def get_pending_migration_head(): def run_alembic_migration(): """ - Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users. + Applies migration for first-time users and also checks the environment variable + "ALEMBIC_AUTO_MIGRATIONS" (legacy: "AGENTA_AUTO_MIGRATIONS") to determine whether + to apply migrations for returning users. """ try: diff --git a/api/ee/docs/ORGANIZATION_FLAGS.md b/api/ee/docs/ORGANIZATION_FLAGS.md new file mode 100644 index 0000000000..34234deae4 --- /dev/null +++ b/api/ee/docs/ORGANIZATION_FLAGS.md @@ -0,0 +1,76 @@ +# Organization Flags Reference + +This document defines the canonical default values for all organization flags in the system. 
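+
+A minimal Python sketch of the default-resolution rule described below (the helper name is hypothetical; the real logic lives in `api/oss/src/core/auth/service.py`):
+
+```python
+def resolve_flags(flags: dict | None, *, email_enabled: bool, oidc_enabled: bool) -> dict:
+    """Merge stored organization flags over the canonical defaults."""
+    defaults = {
+        "is_demo": False,
+        "is_personal": False,
+        "allow_email": email_enabled,
+        "allow_social": oidc_enabled,
+        "allow_sso": False,
+        "allow_root": False,
+        "domains_only": False,
+        "auto_join": False,
+    }
+    stored = flags or {}
+    # A flag that is missing or explicitly set to null falls back to its default.
+    return {key: default if stored.get(key) is None else stored[key] for key, default in defaults.items()}
+```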
+ +## Flag Definitions + +### Identity Flags +- **`is_demo`**: `false` - Marks the organization as a demo organization +- **`is_personal`**: `false` - Marks the organization as a personal organization (single-user) + +### Authentication Method Flags +- **`allow_email`**: defaults to `env.auth.email_enabled` - Allow email/password or email/OTP authentication +- **`allow_social`**: defaults to `env.auth.oidc_enabled` - Allow social authentication (Google, GitHub, etc.) +- **`allow_sso`**: `false` - Allow SSO/OIDC authentication + +### Access Control Flags +- **`allow_root`**: `false` - Allow organization owner to bypass authentication restrictions +- **`domains_only`**: `false` - Restrict access to verified email domains only +- **`auto_join`**: `false` - Allow users with verified email domains to automatically join the organization (when `true`) + +## Default Behavior + +### When flags is `null` or missing +All flags default to their specified default values above. + +### When flags is partially populated +- Flags explicitly set to `null` use the default value +- Flags with non-null values use those values +- Missing flags use the default value + +### Example +```json +{ + "flags": { + "is_demo": true, + "is_personal": false + // All other flags default as specified above + } +} +``` +This would result in: +- `is_demo`: `true` (explicit) +- `is_personal`: `false` (explicit) +- `allow_email`: defaults to `env.auth.email_enabled` +- `allow_social`: defaults to `env.auth.oidc_enabled` +- `allow_sso`: `false` (default) +- `allow_root`: `false` (default) +- `domains_only`: `false` (default) +- `auto_join`: `false` (default) + +## Implementation Notes + +### Backend +- Auth service uses `.get(key, default_value)` pattern for all flags +- See: `api/oss/src/core/auth/service.py` + +### Frontend +- UI components use `?? 
default_value` pattern for all flags +- See: `web/oss/src/components/pages/settings/Organization/index.tsx` + +### Safety Mechanisms +- If all authentication methods are disabled (`allow_email`, `allow_social`, `allow_sso` all `false`), the system automatically enables `allow_root` to prevent complete lockout +- A confirmation dialog warns users when attempting to disable all auth methods + +## Related Files + +### Backend +- `api/ee/src/models/api/organization_models.py` - API models +- `api/oss/src/core/auth/service.py` - Authentication service with flag logic +- `api/ee/src/services/db_manager_ee.py` - Organization update logic with validation +- `api/ee/src/routers/organization_router.py` - Organization API endpoints + +### Frontend +- `web/oss/src/components/pages/settings/Organization/index.tsx` - Organization settings UI +- `web/oss/src/services/organization/api/index.ts` - API client functions +- `web/oss/src/lib/Types.ts` - TypeScript type definitions diff --git a/api/ee/src/apis/fastapi/billing/router.py b/api/ee/src/apis/fastapi/billing/router.py index 6917870490..0c1f2ff64c 100644 --- a/api/ee/src/apis/fastapi/billing/router.py +++ b/api/ee/src/apis/fastapi/billing/router.py @@ -263,7 +263,7 @@ async def handle_events( organization_id = metadata.get("organization_id") log.info( - "Stripe event: %s | %s | %s", + "[billing] [stripe] %s | %s | %s", organization_id, stripe_event.type, target, @@ -446,7 +446,7 @@ async def create_checkout( ) user = await get_user_with_id( - user_id=organization.owner, + user_id=str(organization.owner_id), ) if not user: diff --git a/api/oss/src/services/security/__init__.py b/api/ee/src/apis/fastapi/organizations/__init__.py similarity index 100% rename from api/oss/src/services/security/__init__.py rename to api/ee/src/apis/fastapi/organizations/__init__.py diff --git a/api/ee/src/apis/fastapi/organizations/models.py b/api/ee/src/apis/fastapi/organizations/models.py new file mode 100644 index 0000000000..d113d0cbb7 --- /dev/null +++ b/api/ee/src/apis/fastapi/organizations/models.py @@ -0,0 +1,84 @@ +"""API models for organization security features (domains and SSO providers).""" + +from typing import Optional +from datetime import datetime +from pydantic import BaseModel, Field + + +# Domain Verification Models +class OrganizationDomainCreate(BaseModel): + """Request model for creating a domain.""" + + domain: str = Field(..., description="Domain name to verify (e.g., 'company.com')") + name: Optional[str] = Field(None, description="Friendly name for the domain") + description: Optional[str] = Field(None, description="Optional description") + + +class OrganizationDomainVerify(BaseModel): + """Request model for verifying a domain.""" + + domain_id: str = Field(..., description="ID of the domain to verify") + + +class OrganizationDomainResponse(BaseModel): + """Response model for a domain.""" + + id: str + organization_id: str + slug: str # The actual domain (e.g., "company.com") + name: Optional[str] + description: Optional[str] + token: Optional[str] # Verification token + flags: dict # Contains is_verified flag + created_at: datetime + updated_at: Optional[datetime] + + class Config: + from_attributes = True + + +# SSO Provider Models +class OrganizationProviderCreate(BaseModel): + """Request model for creating an SSO provider.""" + + slug: str = Field( + ..., + description="Provider slug (lowercase letters and hyphens only)", + pattern="^[a-z-]+$", + ) + name: Optional[str] = Field(None, description="Friendly name for the provider") + 
description: Optional[str] = Field(None, description="Optional description") + settings: dict = Field( + ..., + description="Provider settings (client_id, client_secret, issuer_url, scopes)", + ) + flags: Optional[dict] = Field( + default=None, description="Provider flags (is_active, is_valid)" + ) + + +class OrganizationProviderUpdate(BaseModel): + """Request model for updating an SSO provider.""" + + slug: Optional[str] = Field(None, description="Provider slug", pattern="^[a-z-]+$") + name: Optional[str] = None + description: Optional[str] = None + settings: Optional[dict] = None + flags: Optional[dict] = None + + +class OrganizationProviderResponse(BaseModel): + """Response model for an SSO provider.""" + + id: str + organization_id: str + slug: str # Provider identifier + name: Optional[str] + description: Optional[str] + settings: dict # Contains client_id, client_secret, issuer_url, scopes + flags: dict # Contains is_valid, is_active + created_at: datetime + updated_at: Optional[datetime] + + class Config: + from_attributes = True diff --git a/api/ee/src/apis/fastapi/organizations/router.py b/api/ee/src/apis/fastapi/organizations/router.py new file mode 100644 index 0000000000..458e2abe4b --- /dev/null +++ b/api/ee/src/apis/fastapi/organizations/router.py @@ -0,0 +1,283 @@ +"""FastAPI router for organization security features.""" + +from typing import List +from fastapi import APIRouter, Request, HTTPException +from fastapi.responses import JSONResponse, Response + +from ee.src.apis.fastapi.organizations.models import ( + OrganizationDomainCreate, + OrganizationDomainVerify, + OrganizationDomainResponse, + OrganizationProviderCreate, + OrganizationProviderUpdate, + OrganizationProviderResponse, +) +from ee.src.services.organization_security_service import ( + DomainVerificationService, + SSOProviderService, +) +from ee.src.services import db_manager_ee +from ee.src.utils.permissions import check_user_org_access +from ee.src.services.selectors import get_user_org_and_workspace_id + + +router = APIRouter() +domain_service = DomainVerificationService() +provider_service = SSOProviderService() + + +async def verify_user_org_access(user_id: str, organization_id: str) -> None: + """Helper to verify user has access to organization.""" + user_org_data = await get_user_org_and_workspace_id(user_id) + has_access = await check_user_org_access(user_org_data, organization_id) + if not has_access: + raise HTTPException( + status_code=403, detail="You do not have access to this organization" + ) + + +async def require_email_or_social_or_root_enabled(organization_id: str) -> None: + """Block domain/provider changes when SSO is the only allowed method.""" + organization = await db_manager_ee.get_organization(organization_id) + flags = organization.flags or {} + allow_email = flags.get("allow_email", False) + allow_social = flags.get("allow_social", False) + allow_root = flags.get("allow_root", False) + if not (allow_email or allow_social or allow_root): + raise HTTPException( + status_code=400, + detail=( + "To modify domains or SSO providers, enable email or social authentication " + "for this organization, or enable root access for owners." 
+ ), + ) + + +async def require_domains_and_auto_join_disabled(organization_id: str) -> None: + """Block edits to verified domains when domains-only or auto-join is enabled.""" + organization = await db_manager_ee.get_organization(organization_id) + flags = organization.flags or {} + if flags.get("domains_only") or flags.get("auto_join"): + raise HTTPException( + status_code=400, + detail=( + "Disable domains-only and auto-join before modifying verified domains." + ), + ) + + +# Domain Verification Endpoints + + +@router.post("/domains", response_model=OrganizationDomainResponse, status_code=201) +async def create_domain( + payload: OrganizationDomainCreate, + request: Request, +): + """ + Create a new domain for verification. + + This endpoint initiates the domain verification process by: + 1. Creating a domain record + 2. Generating a unique verification token + 3. Returning DNS configuration instructions + + The user must add a DNS TXT record to verify ownership. + """ + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + + domain = await domain_service.create_domain(organization_id, payload, user_id) + + return JSONResponse( + status_code=201, + content=domain.model_dump(mode="json"), + ) + + +@router.post("/domains/verify", response_model=OrganizationDomainResponse) +async def verify_domain( + payload: OrganizationDomainVerify, + request: Request, +): + """ + Verify domain ownership via DNS TXT record. + + This endpoint checks for the presence of the verification TXT record + and marks the domain as verified if found. + """ + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + await require_domains_and_auto_join_disabled(organization_id) + + return await domain_service.verify_domain( + organization_id, payload.domain_id, user_id + ) + + +@router.get("/domains", response_model=List[OrganizationDomainResponse]) +async def list_domains( + request: Request, +): + """List all domains for the organization.""" + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + + return await domain_service.list_domains(organization_id) + + +@router.post("/domains/{domain_id}/refresh", response_model=OrganizationDomainResponse) +async def refresh_domain_token( + domain_id: str, + request: Request, +): + """ + Refresh the verification token for an unverified domain. + + Generates a new token and resets the 48-hour expiry window. + This is useful when the original token has expired. + """ + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + await require_domains_and_auto_join_disabled(organization_id) + + return await domain_service.refresh_token(organization_id, domain_id, user_id) + + +@router.post("/domains/{domain_id}/reset", response_model=OrganizationDomainResponse) +async def reset_domain( + domain_id: str, + request: Request, +): + """ + Reset a verified domain to unverified state for re-verification. + + Generates a new token and marks the domain as unverified. + This allows re-verification of already verified domains. 
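+
+    Typical flow (illustrative): reset the domain, add the newly generated token
+    to the DNS TXT record, then call POST /domains/verify to re-verify ownership.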
+ """ + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + await require_domains_and_auto_join_disabled(organization_id) + + return await domain_service.reset_domain(organization_id, domain_id, user_id) + + +@router.delete("/domains/{domain_id}", status_code=204) +async def delete_domain( + domain_id: str, + request: Request, +): + """Delete a domain.""" + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + await require_domains_and_auto_join_disabled(organization_id) + + await domain_service.delete_domain(organization_id, domain_id, user_id) + return Response(status_code=204) + + +# SSO Provider Endpoints + + +@router.post("/providers", response_model=OrganizationProviderResponse, status_code=201) +async def create_provider( + payload: OrganizationProviderCreate, + request: Request, +): + """ + Create a new SSO provider configuration. + + Supported provider types: + - oidc: OpenID Connect + - saml: SAML 2.0 (coming soon) + """ + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + await require_email_or_social_or_root_enabled(organization_id) + + return await provider_service.create_provider(organization_id, payload, user_id) + + +@router.patch("/providers/{provider_id}", response_model=OrganizationProviderResponse) +async def update_provider( + provider_id: str, + payload: OrganizationProviderUpdate, + request: Request, +): + """Update an SSO provider configuration.""" + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + await require_email_or_social_or_root_enabled(organization_id) + + return await provider_service.update_provider( + organization_id, provider_id, payload, user_id + ) + + +@router.get("/providers", response_model=List[OrganizationProviderResponse]) +async def list_providers( + request: Request, +): + """List all SSO providers for the organization.""" + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + + return await provider_service.list_providers(organization_id) + + +@router.post( + "/providers/{provider_id}/test", response_model=OrganizationProviderResponse +) +async def test_provider( + provider_id: str, + request: Request, +): + """ + Test SSO provider connection. + + This endpoint tests the OIDC provider configuration by fetching the + discovery document and validating required endpoints exist. + If successful, marks the provider as valid (is_valid=true). + If failed, marks as invalid and deactivates (is_valid=false, is_active=false). 
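+
+    Example (illustrative): for an issuer_url of https://idp.example.com, the
+    discovery document is fetched from
+    https://idp.example.com/.well-known/openid-configuration.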
+ """ + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + await require_email_or_social_or_root_enabled(organization_id) + + return await provider_service.test_provider(organization_id, provider_id, user_id) + + +@router.delete("/providers/{provider_id}", status_code=204) +async def delete_provider( + provider_id: str, + request: Request, +): + """Delete an SSO provider configuration.""" + organization_id = request.state.organization_id + user_id = request.state.user_id + + await verify_user_org_access(user_id, organization_id) + await require_email_or_social_or_root_enabled(organization_id) + + await provider_service.delete_provider(organization_id, provider_id, user_id) + return Response(status_code=204) diff --git a/api/ee/src/core/organizations/__init__.py b/api/ee/src/core/organizations/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/ee/src/core/organizations/types.py b/api/ee/src/core/organizations/types.py new file mode 100644 index 0000000000..639ba5065f --- /dev/null +++ b/api/ee/src/core/organizations/types.py @@ -0,0 +1,79 @@ +from datetime import datetime +from uuid import UUID +from pydantic import BaseModel +from typing import Optional, Dict, Any + + +# ============================================================================ +# ORGANIZATION DOMAINS +# ============================================================================ + + +class OrganizationDomain(BaseModel): + id: UUID + organization_id: UUID + slug: str + name: Optional[str] = None + description: Optional[str] = None + token: Optional[str] = None + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + created_at: datetime + updated_at: Optional[datetime] = None + + class Config: + from_attributes = True + + +class OrganizationDomainCreate(BaseModel): + organization_id: UUID + slug: str + name: Optional[str] = None + description: Optional[str] = None + token: Optional[str] = None + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + + +# ============================================================================ +# ORGANIZATION PROVIDERS +# ============================================================================ + + +class OrganizationProvider(BaseModel): + id: UUID + organization_id: UUID + slug: str + name: Optional[str] = None + description: Optional[str] = None + settings: Dict[str, Any] + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + created_at: datetime + updated_at: Optional[datetime] = None + + class Config: + from_attributes = True + + +class OrganizationProviderCreate(BaseModel): + organization_id: UUID + slug: str + name: Optional[str] = None + description: Optional[str] = None + settings: Dict[str, Any] + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + + +class OrganizationProviderUpdate(BaseModel): + name: Optional[str] = None + description: Optional[str] = None + settings: Optional[Dict[str, Any]] = None + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None diff --git a/api/ee/src/core/subscriptions/service.py b/api/ee/src/core/subscriptions/service.py index 5247cc283f..ffdc35c3c6 100644 --- 
a/api/ee/src/core/subscriptions/service.py +++ b/api/ee/src/core/subscriptions/service.py @@ -153,6 +153,39 @@ async def start_reverse_trial( return subscription + async def start_free_plan( + self, + *, + organization_id: str, + ) -> Optional[SubscriptionDTO]: + """Start a free/hobby plan for an organization without trial. + + Args: + organization_id: The organization ID + + Returns: + SubscriptionDTO: The created subscription or None if already exists + """ + now = datetime.now(tz=timezone.utc) + + subscription = await self.read(organization_id=organization_id) + + if subscription: + return None + + subscription = await self.create( + subscription=SubscriptionDTO( + organization_id=organization_id, + plan=FREE_PLAN, + active=True, + anchor=now.day, + ) + ) + + log.info("✓ Free plan started for organization %s", organization_id) + + return subscription + async def process_event( self, *, @@ -165,7 +198,7 @@ async def process_event( **kwargs, ) -> SubscriptionDTO: log.info( - "Billing event: %s | %s | %s", + "[billing] [internal] %s | %s | %s", organization_id, event, plan, diff --git a/api/ee/src/crons/meters.sh b/api/ee/src/crons/meters.sh index e5144d6947..f04812643d 100644 --- a/api/ee/src/crons/meters.sh +++ b/api/ee/src/crons/meters.sh @@ -1,7 +1,8 @@ #!/bin/sh set -eu -AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2-) +AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2- || true) +AGENTA_AUTH_KEY="${AGENTA_AUTH_KEY:-replace-me}" echo "--------------------------------------------------------" echo "[$(date)] meters.sh running from cron" >> /proc/1/fd/1 @@ -36,4 +37,4 @@ else fi fi -echo "[$(date)] meters.sh done" >> /proc/1/fd/1 \ No newline at end of file +echo "[$(date)] meters.sh done" >> /proc/1/fd/1 diff --git a/api/ee/src/dbs/postgres/organizations/__init__.py b/api/ee/src/dbs/postgres/organizations/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/ee/src/dbs/postgres/organizations/dao.py b/api/ee/src/dbs/postgres/organizations/dao.py new file mode 100644 index 0000000000..faf8acad6d --- /dev/null +++ b/api/ee/src/dbs/postgres/organizations/dao.py @@ -0,0 +1,399 @@ +"""Data Access Objects for organization domains and SSO providers.""" + +from typing import Optional, List +from sqlalchemy import select, and_ +from sqlalchemy.ext.asyncio import AsyncSession + +from oss.src.dbs.postgres.shared.engine import engine +from ee.src.dbs.postgres.organizations.dbes import ( + OrganizationDomainDBE, + OrganizationProviderDBE, +) + + +class OrganizationDomainsDAO: + """DAO for organization_domains table. + + Can be used in two ways: + 1. With a session (for service layer): OrganizationDomainsDAO(session) + 2. 
Without a session (creates own sessions): OrganizationDomainsDAO() + """ + + def __init__(self, session: Optional[AsyncSession] = None): + self.session = session + + async def create( + self, + organization_id: str, + slug: str, + name: Optional[str], + description: Optional[str], + token: str, + created_by_id: str, + ) -> OrganizationDomainDBE: + """Create a new domain for an organization.""" + if self.session: + domain = OrganizationDomainDBE( + organization_id=organization_id, + slug=slug, + name=name, + description=description, + token=token, + flags={"is_verified": False}, + created_by_id=created_by_id, + ) + self.session.add(domain) + await self.session.flush() + await self.session.refresh(domain) + return domain + else: + async with engine.core_session() as session: + domain = OrganizationDomainDBE( + organization_id=organization_id, + slug=slug, + name=name, + description=description, + token=token, + flags={"is_verified": False}, + created_by_id=created_by_id, + ) + session.add(domain) + await session.commit() + await session.refresh(domain) + return domain + + async def get_by_id( + self, domain_id: str, organization_id: str + ) -> Optional[OrganizationDomainDBE]: + """Get a domain by ID.""" + if self.session: + result = await self.session.execute( + select(OrganizationDomainDBE).where( + and_( + OrganizationDomainDBE.id == domain_id, + OrganizationDomainDBE.organization_id == organization_id, + ) + ) + ) + return result.scalars().first() + else: + async with engine.core_session() as session: + result = await session.execute( + select(OrganizationDomainDBE).where( + and_( + OrganizationDomainDBE.id == domain_id, + OrganizationDomainDBE.organization_id == organization_id, + ) + ) + ) + return result.scalars().first() + + async def get_by_slug( + self, slug: str, organization_id: str + ) -> Optional[OrganizationDomainDBE]: + """Get a domain by slug (domain name).""" + if self.session: + result = await self.session.execute( + select(OrganizationDomainDBE).where( + and_( + OrganizationDomainDBE.slug == slug, + OrganizationDomainDBE.organization_id == organization_id, + ) + ) + ) + return result.scalars().first() + else: + async with engine.core_session() as session: + result = await session.execute( + select(OrganizationDomainDBE).where( + and_( + OrganizationDomainDBE.slug == slug, + OrganizationDomainDBE.organization_id == organization_id, + ) + ) + ) + return result.scalars().first() + + async def get_verified_by_slug(self, slug: str) -> Optional[OrganizationDomainDBE]: + """Get a verified domain by slug (domain name), across organizations.""" + is_verified = OrganizationDomainDBE.flags["is_verified"].astext == "true" + if self.session: + result = await self.session.execute( + select(OrganizationDomainDBE).where( + and_( + OrganizationDomainDBE.slug == slug, + is_verified, + ) + ) + ) + return result.scalars().first() + else: + async with engine.core_session() as session: + result = await session.execute( + select(OrganizationDomainDBE).where( + and_( + OrganizationDomainDBE.slug == slug, + is_verified, + ) + ) + ) + return result.scalars().first() + + async def list_by_organization( + self, organization_id: str + ) -> List[OrganizationDomainDBE]: + """List all domains for an organization.""" + if self.session: + result = await self.session.execute( + select(OrganizationDomainDBE).where( + OrganizationDomainDBE.organization_id == organization_id + ) + ) + return list(result.scalars().all()) + else: + async with engine.core_session() as session: + result = await session.execute( + 
select(OrganizationDomainDBE).where(
+                        OrganizationDomainDBE.organization_id == organization_id
+                    )
+                )
+                return list(result.scalars().all())
+
+    async def update_flags(
+        self, domain_id: str, flags: dict, updated_by_id: str
+    ) -> Optional[OrganizationDomainDBE]:
+        """Update domain flags (e.g., mark as verified)."""
+        # Fetch by primary key; flag updates are not organization-scoped here.
+        if self.session:
+            domain = await self.session.get(OrganizationDomainDBE, domain_id)
+            if domain:
+                domain.flags = flags
+                domain.updated_by_id = updated_by_id
+                await self.session.flush()
+                await self.session.refresh(domain)
+            return domain
+        else:
+            async with engine.core_session() as session:
+                domain = await session.get(OrganizationDomainDBE, domain_id)
+                if domain:
+                    domain.flags = flags
+                    domain.updated_by_id = updated_by_id
+                    await session.commit()
+                    await session.refresh(domain)
+                return domain
+
+    async def delete(self, domain_id: str, deleted_by_id: str) -> bool:
+        """Hard delete a domain."""
+        if self.session:
+            domain = await self.session.get(OrganizationDomainDBE, domain_id)
+            if domain:
+                await self.session.delete(domain)
+                await self.session.flush()
+                return True
+            return False
+        else:
+            async with engine.core_session() as session:
+                domain = await session.get(OrganizationDomainDBE, domain_id)
+                if domain:
+                    await session.delete(domain)
+                    await session.commit()
+                    return True
+                return False
+
+
+class OrganizationProvidersDAO:
+    """DAO for organization_providers table.
+
+    Can be used in two ways:
+    1. With a session (for service layer): OrganizationProvidersDAO(session)
+    2. Without a session (creates own sessions): OrganizationProvidersDAO()
+    """
+
+    def __init__(self, session: Optional[AsyncSession] = None):
+        self.session = session
+
+    async def create(
+        self,
+        organization_id: str,
+        slug: str,
+        secret_id: str,
+        created_by_id: str,
+        name: Optional[str],
+        description: Optional[str] = None,
+        flags: Optional[dict] = None,
+    ) -> OrganizationProviderDBE:
+        """Create a new SSO provider for an organization."""
+        if self.session:
+            provider = OrganizationProviderDBE(
+                organization_id=organization_id,
+                slug=slug,
+                name=name,
+                description=description,
+                secret_id=secret_id,
+                flags=flags or {"is_active": True, "is_valid": False},
+                created_by_id=created_by_id,
+            )
+            self.session.add(provider)
+            await self.session.flush()
+            await self.session.refresh(provider)
+            return provider
+        else:
+            async with engine.core_session() as session:
+                provider = OrganizationProviderDBE(
+                    organization_id=organization_id,
+                    slug=slug,
+                    name=name,
+                    description=description,
+                    secret_id=secret_id,
+                    flags=flags or {"is_active": True, "is_valid": False},
+                    created_by_id=created_by_id,
+                )
+                session.add(provider)
+                await session.commit()
+                await session.refresh(provider)
+                return provider
+
+    async def get_by_id(
+        self, provider_id: str, organization_id: str
+    ) -> Optional[OrganizationProviderDBE]:
+        """Get a provider by ID."""
+        if self.session:
+            result = await self.session.execute(
+                select(OrganizationProviderDBE).where(
+                    and_(
+                        OrganizationProviderDBE.id == provider_id,
+                        OrganizationProviderDBE.organization_id == organization_id,
+                    )
+                )
+            )
+            return result.scalars().first()
+        else:
+            async with engine.core_session() as session:
+                result = await session.execute(
+                    select(OrganizationProviderDBE).where(
+                        and_(
+                            OrganizationProviderDBE.id == provider_id,
+                            OrganizationProviderDBE.organization_id == organization_id,
+                        )
+                    )
+                )
+                return result.scalars().first()
+
+    async def get_by_id_any(
+        self, provider_id: str
+    ) ->
Optional[OrganizationProviderDBE]: + """Get a provider by ID without organization scoping.""" + if self.session: + result = await self.session.execute( + select(OrganizationProviderDBE).where( + OrganizationProviderDBE.id == provider_id + ) + ) + return result.scalars().first() + else: + async with engine.core_session() as session: + result = await session.execute( + select(OrganizationProviderDBE).where( + OrganizationProviderDBE.id == provider_id + ) + ) + return result.scalars().first() + + async def get_by_slug( + self, slug: str, organization_id: str + ) -> Optional[OrganizationProviderDBE]: + """Get a provider by slug.""" + if self.session: + result = await self.session.execute( + select(OrganizationProviderDBE).where( + and_( + OrganizationProviderDBE.slug == slug, + OrganizationProviderDBE.organization_id == organization_id, + ) + ) + ) + return result.scalars().first() + else: + async with engine.core_session() as session: + result = await session.execute( + select(OrganizationProviderDBE).where( + and_( + OrganizationProviderDBE.slug == slug, + OrganizationProviderDBE.organization_id == organization_id, + ) + ) + ) + return result.scalars().first() + + async def list_by_organization( + self, organization_id: str + ) -> List[OrganizationProviderDBE]: + """List all SSO providers for an organization.""" + if self.session: + result = await self.session.execute( + select(OrganizationProviderDBE).where( + OrganizationProviderDBE.organization_id == organization_id + ) + ) + return list(result.scalars().all()) + else: + async with engine.core_session() as session: + result = await session.execute( + select(OrganizationProviderDBE).where( + OrganizationProviderDBE.organization_id == organization_id + ) + ) + return list(result.scalars().all()) + + async def update( + self, + provider_id: str, + secret_id: Optional[str] = None, + flags: Optional[dict] = None, + updated_by_id: Optional[str] = None, + ) -> Optional[OrganizationProviderDBE]: + """Update a provider's secret reference or flags.""" + if self.session: + provider = await self.session.get(OrganizationProviderDBE, provider_id) + if provider: + if secret_id is not None: + provider.secret_id = secret_id + if flags is not None: + provider.flags = flags + if updated_by_id: + provider.updated_by_id = updated_by_id + await self.session.flush() + await self.session.refresh(provider) + return provider + else: + async with engine.core_session() as session: + provider = await session.get(OrganizationProviderDBE, provider_id) + if provider: + if secret_id is not None: + provider.secret_id = secret_id + if flags is not None: + provider.flags = flags + if updated_by_id: + provider.updated_by_id = updated_by_id + await session.commit() + await session.refresh(provider) + return provider + + async def delete(self, provider_id: str, deleted_by_id: str) -> bool: + """Hard delete a provider.""" + if self.session: + provider = await self.session.get(OrganizationProviderDBE, provider_id) + if provider: + await self.session.delete(provider) + await self.session.flush() + return True + return False + else: + async with engine.core_session() as session: + provider = await session.get(OrganizationProviderDBE, provider_id) + if provider: + await session.delete(provider) + await session.commit() + return True + return False diff --git a/api/ee/src/dbs/postgres/organizations/dbas.py b/api/ee/src/dbs/postgres/organizations/dbas.py new file mode 100644 index 0000000000..31fcc1cd5f --- /dev/null +++ b/api/ee/src/dbs/postgres/organizations/dbas.py @@ -0,0 +1,81 
@@ +import uuid_utils.compat as uuid +from sqlalchemy import Column, String, UUID +from sqlalchemy.dialects.postgresql import JSONB + +from oss.src.dbs.postgres.shared.dbas import ( + LifecycleDBA, + HeaderDBA, + OrganizationScopeDBA, +) + + +class OrganizationDomainDBA(OrganizationScopeDBA, LifecycleDBA): + __abstract__ = True + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + slug = Column( + String, + nullable=False, + ) + name = Column( + String, + nullable=False, + ) + description = Column( + String, + nullable=True, + ) + token = Column( + String, + nullable=True, + ) + flags = Column( + JSONB(none_as_null=True), + nullable=True, + ) + tags = Column( + JSONB(none_as_null=True), + nullable=True, + ) + meta = Column( + JSONB(none_as_null=True), + nullable=True, + ) + + +class OrganizationProviderDBA(OrganizationScopeDBA, HeaderDBA, LifecycleDBA): + __abstract__ = True + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + slug = Column( + String, + nullable=False, + ) + secret_id = Column( + UUID(as_uuid=True), + nullable=False, + ) + flags = Column( + JSONB(none_as_null=True), + nullable=True, + ) + tags = Column( + JSONB(none_as_null=True), + nullable=True, + ) + meta = Column( + JSONB(none_as_null=True), + nullable=True, + ) diff --git a/api/ee/src/dbs/postgres/organizations/dbes.py b/api/ee/src/dbs/postgres/organizations/dbes.py new file mode 100644 index 0000000000..84abab7703 --- /dev/null +++ b/api/ee/src/dbs/postgres/organizations/dbes.py @@ -0,0 +1,70 @@ +from sqlalchemy import ( + ForeignKeyConstraint, + UniqueConstraint, + Index, + text, +) + +from oss.src.dbs.postgres.shared.base import Base +from ee.src.dbs.postgres.organizations.dbas import ( + OrganizationDomainDBA, + OrganizationProviderDBA, +) + + +class OrganizationDomainDBE(Base, OrganizationDomainDBA): + __tablename__ = "organization_domains" + + __table_args__ = ( + ForeignKeyConstraint( + ["organization_id"], + ["organizations.id"], + ondelete="CASCADE", + ), + Index( + "uq_organization_domains_slug_verified", + "slug", + unique=True, + postgresql_where=text("(flags->>'is_verified') = 'true'"), + ), + Index( + "ix_organization_domains_org", + "organization_id", + ), + Index( + "ix_organization_domains_flags", + "flags", + postgresql_using="gin", + ), + ) + + +class OrganizationProviderDBE(Base, OrganizationProviderDBA): + __tablename__ = "organization_providers" + + __table_args__ = ( + ForeignKeyConstraint( + ["organization_id"], + ["organizations.id"], + ondelete="CASCADE", + ), + ForeignKeyConstraint( + ["secret_id"], + ["secrets.id"], + ondelete="CASCADE", + ), + UniqueConstraint( + "organization_id", + "slug", + name="uq_organization_providers_org_slug", + ), + Index( + "ix_organization_providers_org", + "organization_id", + ), + Index( + "ix_organization_providers_flags", + "flags", + postgresql_using="gin", + ), + ) diff --git a/api/ee/src/main.py b/api/ee/src/main.py index 036bda6f0f..499ef9137d 100644 --- a/api/ee/src/main.py +++ b/api/ee/src/main.py @@ -11,6 +11,10 @@ from ee.src.core.subscriptions.service import SubscriptionsService from ee.src.apis.fastapi.billing.router import SubscriptionsRouter +from ee.src.apis.fastapi.organizations.router import ( + router as organization_security_router, +) +from oss.src.apis.fastapi.auth.router import auth_router # DBS -------------------------------------------------------------------------- @@ -56,6 +60,14 @@ def 
extend_main(app: FastAPI): # ROUTES (more) ------------------------------------------------------------ + # Register security router BEFORE organization router to avoid route conflicts + # (specific routes must come before catch-all /{organization_id} route) + app.include_router( + organization_security_router, + prefix="/organizations", + tags=["Organizations", "Security"], + ) + app.include_router( organization_router.router, prefix="/organizations", @@ -66,6 +78,14 @@ def extend_main(app: FastAPI): prefix="/workspaces", ) + # Auth router at root level (no /api prefix) for OAuth callbacks + app.include_router( + auth_router, + prefix="/auth", + tags=["Auth"], + include_in_schema=False, + ) + # -------------------------------------------------------------------------- return app diff --git a/api/ee/src/models/api/organization_models.py b/api/ee/src/models/api/organization_models.py index 1002d81b5a..7d1a60735f 100644 --- a/api/ee/src/models/api/organization_models.py +++ b/api/ee/src/models/api/organization_models.py @@ -1,32 +1,51 @@ -from typing import Optional, List +from typing import Optional, List, Dict, Any +from uuid import UUID -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, ConfigDict class Organization(BaseModel): - id: str - name: str - description: str - type: Optional[str] = None - owner: str - workspaces: List[str] = Field(default_factory=list) + model_config = ConfigDict(from_attributes=True) + id: UUID + slug: Optional[str] = None + # + name: Optional[str] = None + description: Optional[str] = None + # + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + # + owner_id: UUID + # members: List[str] = Field(default_factory=list) invitations: List = Field(default_factory=list) + workspaces: List[str] = Field(default_factory=list) class CreateOrganization(BaseModel): - name: str - owner: str + name: Optional[str] = None description: Optional[str] = None - type: Optional[str] = None + # + is_demo: bool = False + is_personal: bool = False + # + owner_id: UUID class OrganizationUpdate(BaseModel): + slug: Optional[str] = None name: Optional[str] = None description: Optional[str] = None + flags: Optional[Dict[str, Any]] = None updated_at: Optional[str] = None class OrganizationOutput(BaseModel): id: str - name: str + name: Optional[str] = None + + +class CreateCollaborativeOrganization(BaseModel): + name: Optional[str] = None + description: Optional[str] = None diff --git a/api/ee/src/models/api/workspace_models.py b/api/ee/src/models/api/workspace_models.py index 56218eb38a..56f5768681 100644 --- a/api/ee/src/models/api/workspace_models.py +++ b/api/ee/src/models/api/workspace_models.py @@ -25,7 +25,7 @@ class WorkspaceMemberResponse(BaseModel): class Workspace(BaseModel): id: Optional[str] = None - name: str + name: Optional[str] = None description: Optional[str] = None type: Optional[str] members: Optional[List[WorkspaceMember]] = None @@ -33,7 +33,7 @@ class Workspace(BaseModel): class WorkspaceResponse(TimestampModel): id: str - name: str + name: Optional[str] = None description: Optional[str] = None type: Optional[str] organization: str @@ -41,7 +41,7 @@ class WorkspaceResponse(TimestampModel): class CreateWorkspace(BaseModel): - name: str + name: Optional[str] = None description: Optional[str] = None type: Optional[str] = None diff --git a/api/ee/src/models/db_models.py b/api/ee/src/models/db_models.py index fb57520c92..bc78201a61 100644 --- a/api/ee/src/models/db_models.py 
+++ b/api/ee/src/models/db_models.py @@ -28,6 +28,11 @@ class OrganizationMemberDB(Base): UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="CASCADE"), ) + role = Column( + String, + nullable=False, + server_default="member", + ) user = relationship( "oss.src.models.db_models.UserDB", diff --git a/api/ee/src/models/extended/deprecated_models.py b/api/ee/src/models/extended/deprecated_models.py index c68a07e851..79993c6089 100644 --- a/api/ee/src/models/extended/deprecated_models.py +++ b/api/ee/src/models/extended/deprecated_models.py @@ -78,6 +78,32 @@ class UserOrganizationDB(DeprecatedBase): organization_id = Column(UUID(as_uuid=True), ForeignKey("organizations.id")) +class DeprecatedOrganizationDB(DeprecatedBase): + """ + Deprecated OrganizationDB model with 'owner' field. + Used by migrations that ran before the schema was changed to use 'owner_id'. + """ + + __tablename__ = "organizations" + __table_args__ = {"extend_existing": True} + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + name = Column(String) + owner = Column(String) # Deprecated: replaced by owner_id (UUID) + created_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + updated_at = Column( + DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + ) + + class OldInvitationDB(DeprecatedBase): __tablename__ = "invitations" __table_args__ = {"extend_existing": True} diff --git a/api/ee/src/routers/organization_router.py b/api/ee/src/routers/organization_router.py index 91302c9b7e..2f205dd04d 100644 --- a/api/ee/src/routers/organization_router.py +++ b/api/ee/src/routers/organization_router.py @@ -21,12 +21,24 @@ check_rbac_permission, ) from ee.src.models.api.organization_models import ( + Organization, OrganizationUpdate, OrganizationOutput, + CreateCollaborativeOrganization, ) from ee.src.services.organization_service import ( update_an_organization, get_organization_details, + transfer_organization_ownership as transfer_ownership_service, +) +from ee.src.services.organization_security_service import SSOProviderService +from ee.src.dbs.postgres.organizations.dao import ( + OrganizationDomainsDAO, +) +from ee.src.core.organizations.types import ( + OrganizationDomainCreate, + OrganizationProviderCreate, + OrganizationProviderUpdate, ) @@ -68,30 +80,46 @@ async def fetch_organization_details( """ try: - workspace_id = await db_manager_ee.get_default_workspace_id_from_organization( - organization_id=organization_id - ) + # Get workspace and project IDs for permission checking + workspace_id = None + project_id = None + try: + workspace_id = ( + await db_manager_ee.get_default_workspace_id_from_organization( + organization_id=organization_id + ) + ) + project_id = await db_manager.get_default_project_id_from_workspace( + workspace_id=workspace_id + ) + except Exception: + # Organization has no workspace or project - check org-level permission directly + log.warning( + f"Organization {organization_id} has no workspace or project, checking org-level access", + exc_info=True, + ) - project_id = await db_manager.get_default_project_id_from_workspace( - workspace_id=workspace_id - ) + # If we have a project, check project membership + if project_id: + project_memberships = ( + await db_manager_ee.fetch_project_memberships_by_user_id( + user_id=str(request.state.user_id) + ) + ) - project_memberships = await db_manager_ee.fetch_project_memberships_by_user_id( - user_id=str(request.state.user_id) - ) + 
membership = None + for project_membership in project_memberships: + if str(project_membership.project_id) == project_id: + membership = project_membership + break - membership = None - for project_membership in project_memberships: - if str(project_membership.project_id) == project_id: - membership = project_membership - break - - if not membership: - return JSONResponse( - status_code=403, - content={"detail": "You do not have access to this organization"}, - ) + if not membership: + return JSONResponse( + status_code=403, + content={"detail": "You do not have access to this organization"}, + ) + # Check org-level access user_org_workspace_data = await get_user_org_and_workspace_id( request.state.user_id ) @@ -118,15 +146,29 @@ async def fetch_organization_details( ) -@router.put("/{organization_id}/", operation_id="update_organization") +@router.put( + "/{organization_id}/", + operation_id="update_organization", + response_model=Organization, +) +@router.patch( + "/{organization_id}/", + operation_id="patch_organization", + response_model=Organization, +) async def update_organization( organization_id: str, payload: OrganizationUpdate, request: Request, ): - if not payload.name and not payload.description: + if ( + not payload.slug + and not payload.name + and not payload.description + and not payload.flags + ): return JSONResponse( - {"detail": "Please provide a name or description to update"}, + {"detail": "Please provide a field to update"}, status_code=400, ) @@ -147,7 +189,23 @@ async def update_organization( return organization + except ValueError as e: + # Slug validation errors (format, immutability, personal org, etc.) + return JSONResponse( + {"detail": str(e)}, + status_code=400, + ) except Exception as e: + # Check for unique constraint violation (duplicate slug) + from sqlalchemy.exc import IntegrityError + + if isinstance(e, IntegrityError) and "uq_organizations_slug" in str(e): + return JSONResponse( + { + "detail": "Slug already in use. Please select another slug or contact your administrator." 
+ }, + status_code=409, + ) raise HTTPException( status_code=500, detail=str(e), @@ -234,3 +292,614 @@ async def update_workspace( status_code=500, detail=str(e), ) + + +@router.post( + "/{organization_id}/transfer/{new_owner_id}", + operation_id="transfer_organization_ownership", +) +async def transfer_organization_ownership( + organization_id: str, + new_owner_id: str, + request: Request, +): + """Transfer organization ownership to another member.""" + try: + user_id = request.state.user_id + + # Check if current user is the owner of the organization + user_org_workspace_data: dict = await get_user_org_and_workspace_id(user_id) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id, check_owner=True + ) + if not has_permission: + return JSONResponse( + {"detail": "Only the organization owner can transfer ownership"}, + status_code=403, + ) + + # Transfer ownership via service layer + organization = await transfer_ownership_service( + organization_id=organization_id, + new_owner_id=new_owner_id, + current_user_id=str(user_id), + ) + + return JSONResponse( + { + "organization_id": str(organization.id), + "owner_id": str(organization.owner_id), + }, + status_code=200, + ) + + except ValueError as e: + # New owner not a member or organization not found + return JSONResponse( + {"detail": str(e)}, + status_code=400, + ) + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException( + status_code=500, + detail=str(e), + ) + + +@router.post("/", operation_id="create_collaborative_organization") +async def create_collaborative_organization( + payload: CreateCollaborativeOrganization, + request: Request, +): + """Create a new collaborative organization.""" + try: + from uuid import UUID + from ee.src.services.commoners import create_organization_with_subscription + + user = await db_manager.get_user(request.state.user_id) + if not user: + return JSONResponse( + {"detail": "User not found"}, + status_code=404, + ) + + organization = await create_organization_with_subscription( + user_id=UUID(str(user.id)), + organization_email=user.email, + organization_name=payload.name, + organization_description=payload.description, + is_personal=False, # Collaborative organization + use_reverse_trial=False, # Use hobby plan instead + ) + + log.info( + "[organization] collaborative organization created", + organization_id=organization.id, + user_id=user.id, + ) + + return JSONResponse( + { + "id": str(organization.id), + "name": organization.name, + "description": organization.description, + }, + status_code=201, + ) + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException( + status_code=500, + detail=str(e), + ) + + +@router.delete("/{organization_id}/", operation_id="delete_organization") +async def delete_organization( + organization_id: str, + request: Request, +): + """Delete an organization (owner only).""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id, check_owner=True + ) + if not has_permission: + return JSONResponse( + {"detail": "You do not have permission to perform this action"}, + status_code=403, + ) + + await db_manager_ee.delete_organization(organization_id) + + log.info( + "[organization] organization deleted", + organization_id=organization_id, + user_id=request.state.user_id, + ) + + return JSONResponse( + {"detail": "Organization deleted 
successfully"}, + status_code=200, + ) + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException( + status_code=500, + detail=str(e), + ) + + +# ============================================================================ +# Domain Verification Endpoints +# ============================================================================ + + +@router.get("/{organization_id}/domains/", operation_id="list_organization_domains") +async def list_organization_domains( + organization_id: str, + request: Request, +): + """List all domains for an organization.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id + ) + if not has_permission: + return JSONResponse( + {"detail": "You do not have access to this organization"}, + status_code=403, + ) + + from uuid import UUID + + domains_dao = OrganizationDomainsDAO() + domains = await domains_dao.list_by_organization(UUID(organization_id)) + + return [ + { + "id": str(domain.id), + "slug": domain.slug, + "organization_id": str(domain.organization_id), + "flags": domain.flags, + "created_at": domain.created_at.isoformat() + if domain.created_at + else None, + "updated_at": domain.updated_at.isoformat() + if domain.updated_at + else None, + } + for domain in domains + ] + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +@router.post("/{organization_id}/domains/", operation_id="create_organization_domain") +async def create_organization_domain( + organization_id: str, + request: Request, + domain: str, +): + """Add a new domain to an organization.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id, check_owner=True + ) + if not has_permission: + return JSONResponse( + {"detail": "Only organization owners can add domains"}, + status_code=403, + ) + + from uuid import UUID + + domains_dao = OrganizationDomainsDAO() + domain_create = OrganizationDomainCreate( + slug=domain, + organization_id=UUID(organization_id), + ) + created_domain = await domains_dao.create(domain_create) + + return { + "id": str(created_domain.id), + "slug": created_domain.slug, + "organization_id": str(created_domain.organization_id), + "flags": created_domain.flags, + "created_at": created_domain.created_at.isoformat() + if created_domain.created_at + else None, + "updated_at": created_domain.updated_at.isoformat() + if created_domain.updated_at + else None, + } + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +@router.get( + "/{organization_id}/domains/{domain_id}", operation_id="get_organization_domain" +) +async def get_organization_domain( + organization_id: str, + domain_id: str, + request: Request, +): + """Get a single domain by ID.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id + ) + if not has_permission: + return JSONResponse( + {"detail": "You do not have access to this organization"}, + status_code=403, + ) + + from uuid import UUID + + domains_dao = OrganizationDomainsDAO() + domain = await domains_dao.get_by_id(UUID(domain_id)) + + if not 
domain: + return JSONResponse( + {"detail": "Domain not found"}, + status_code=404, + ) + + return { + "id": str(domain.id), + "slug": domain.slug, + "organization_id": str(domain.organization_id), + "flags": domain.flags, + "created_at": domain.created_at.isoformat() if domain.created_at else None, + "updated_at": domain.updated_at.isoformat() if domain.updated_at else None, + } + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +@router.delete( + "/{organization_id}/domains/{domain_id}", operation_id="delete_organization_domain" +) +async def delete_organization_domain( + organization_id: str, + domain_id: str, + request: Request, +): + """Delete a domain from an organization.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id, check_owner=True + ) + if not has_permission: + return JSONResponse( + {"detail": "Only organization owners can delete domains"}, + status_code=403, + ) + + from uuid import UUID + + domains_dao = OrganizationDomainsDAO() + # TODO: Implement delete method in DAO + # await domains_dao.delete(UUID(domain_id)) + + return JSONResponse( + {"detail": "Domain deleted successfully"}, + status_code=200, + ) + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +@router.post( + "/{organization_id}/domains/{domain_id}/verify", + operation_id="verify_organization_domain", +) +async def verify_organization_domain( + organization_id: str, + domain_id: str, + request: Request, +): + """Verify a domain (marks it as verified).""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id, check_owner=True + ) + if not has_permission: + return JSONResponse( + {"detail": "Only organization owners can verify domains"}, + status_code=403, + ) + + from uuid import UUID + + domains_dao = OrganizationDomainsDAO() + verified_domain = await domains_dao.mark_verified(UUID(domain_id)) + + if not verified_domain: + return JSONResponse( + {"detail": "Domain not found"}, + status_code=404, + ) + + return { + "id": str(verified_domain.id), + "slug": verified_domain.slug, + "organization_id": str(verified_domain.organization_id), + "flags": verified_domain.flags, + "created_at": verified_domain.created_at.isoformat() + if verified_domain.created_at + else None, + "updated_at": verified_domain.updated_at.isoformat() + if verified_domain.updated_at + else None, + } + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +# ============================================================================ +# SSO/OIDC Provider Endpoints +# ============================================================================ + + +@router.get("/{organization_id}/providers/", operation_id="list_organization_providers") +async def list_organization_providers( + organization_id: str, + request: Request, +): + """List all SSO providers for an organization.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id + ) + if not has_permission: + return JSONResponse( + 
{"detail": "You do not have access to this organization"}, + status_code=403, + ) + + from uuid import UUID + + provider_service = SSOProviderService() + return await provider_service.list_providers(organization_id) + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +@router.post( + "/{organization_id}/providers/", operation_id="create_organization_provider" +) +async def create_organization_provider( + organization_id: str, + request: Request, + payload: dict, +): + """Add a new SSO provider to an organization.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id, check_owner=True + ) + if not has_permission: + return JSONResponse( + {"detail": "Only organization owners can add SSO providers"}, + status_code=403, + ) + + from uuid import UUID + + provider_service = SSOProviderService() + provider_create = OrganizationProviderCreate( + slug=payload.get("slug"), + organization_id=UUID(organization_id), + name=payload.get("name"), + description=payload.get("description"), + settings=payload.get("settings"), + flags=payload.get("flags"), + tags=payload.get("tags"), + meta=payload.get("meta"), + ) + created_provider = await provider_service.create_provider( + organization_id=organization_id, + payload=provider_create, + user_id=str(request.state.user_id), + ) + + return created_provider + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +@router.get( + "/{organization_id}/providers/{provider_id}", + operation_id="get_organization_provider", +) +async def get_organization_provider( + organization_id: str, + provider_id: str, + request: Request, +): + """Get a single SSO provider by ID.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id + ) + if not has_permission: + return JSONResponse( + {"detail": "You do not have access to this organization"}, + status_code=403, + ) + + from uuid import UUID + + provider_service = SSOProviderService() + return await provider_service.get_provider( + organization_id=organization_id, + provider_id=provider_id, + ) + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +@router.patch( + "/{organization_id}/providers/{provider_id}", + operation_id="update_organization_provider", +) +async def update_organization_provider( + organization_id: str, + provider_id: str, + request: Request, + payload: dict, +): + """Update an SSO provider.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id, check_owner=True + ) + if not has_permission: + return JSONResponse( + {"detail": "Only organization owners can update SSO providers"}, + status_code=403, + ) + + from uuid import UUID + + provider_service = SSOProviderService() + provider_update = OrganizationProviderUpdate( + name=payload.get("name"), + description=payload.get("description"), + settings=payload.get("settings"), + flags=payload.get("flags"), + tags=payload.get("tags"), + meta=payload.get("meta"), + ) + updated_provider = await 
provider_service.update_provider( + organization_id=organization_id, + provider_id=provider_id, + payload=provider_update, + user_id=str(request.state.user_id), + ) + + return updated_provider + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) + + +@router.delete( + "/{organization_id}/providers/{provider_id}", + operation_id="delete_organization_provider", +) +async def delete_organization_provider( + organization_id: str, + provider_id: str, + request: Request, +): + """Delete an SSO provider from an organization.""" + try: + user_org_workspace_data: dict = await get_user_org_and_workspace_id( + request.state.user_id + ) + has_permission = await check_user_org_access( + user_org_workspace_data, organization_id, check_owner=True + ) + if not has_permission: + return JSONResponse( + {"detail": "Only organization owners can delete SSO providers"}, + status_code=403, + ) + + from uuid import UUID + + provider_service = SSOProviderService() + await provider_service.delete_provider( + organization_id=organization_id, + provider_id=provider_id, + user_id=str(request.state.user_id), + ) + + return JSONResponse( + {"detail": "Provider deleted successfully"}, + status_code=200, + ) + + except Exception as e: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=str(e)) diff --git a/api/ee/src/services/admin_manager.py b/api/ee/src/services/admin_manager.py index ec3b9bd61b..1716f4c7ce 100644 --- a/api/ee/src/services/admin_manager.py +++ b/api/ee/src/services/admin_manager.py @@ -60,21 +60,27 @@ class UserRequest(BaseModel): class OrganizationRequest(BaseModel): - name: str - description: str + name: Optional[str] = None + description: Optional[str] = None + # + is_demo: bool = False + is_personal: bool = False + # + owner_id: UUID class WorkspaceRequest(BaseModel): - name: str - description: str + name: Optional[str] = None + description: Optional[str] = None + # is_default: bool # organization_ref: Reference class ProjectRequest(BaseModel): - name: str - description: str + name: Optional[str] = None + description: Optional[str] = None is_default: bool # workspace_ref: Reference @@ -169,13 +175,13 @@ async def create_user( session.add(user_db) + await session.commit() + log.info( "[scopes] user created", user_id=user_db.id, ) - await session.commit() - response = Reference(id=user_db.id) return response @@ -186,24 +192,25 @@ async def create_organization( ) -> Reference: async with engine.core_session() as session: organization_db = OrganizationDB( - # id=uuid7() # use default - # name=request.name, description=request.description, - # - owner="", # move 'owner' from here to membership 'role' - # type=... 
# remove 'type' + flags={ + "is_demo": False, + "is_personal": request.is_personal, + }, + owner_id=request.owner_id, + created_by_id=request.owner_id, ) session.add(organization_db) + await session.commit() + log.info( "[scopes] organization created", organization_id=organization_db.id, ) - await session.commit() - response = Reference(id=organization_db.id) return response @@ -225,14 +232,14 @@ async def create_workspace( session.add(workspace_db) + await session.commit() + log.info( "[scopes] workspace created", organization_id=workspace_db.organization_id, workspace_id=workspace_db.id, ) - await session.commit() - response = Reference(id=workspace_db.id) return response @@ -255,6 +262,8 @@ async def create_project( session.add(project_db) + await session.commit() + log.info( "[scopes] project created", organization_id=project_db.organization_id, @@ -262,8 +271,6 @@ async def create_project( project_id=project_db.id, ) - await session.commit() - response = Reference(id=project_db.id) return response @@ -285,6 +292,8 @@ async def create_organization_membership( session.add(membership_db) + await session.commit() + log.info( "[scopes] organization membership created", organization_id=request.organization_ref.id, @@ -292,8 +301,6 @@ async def create_organization_membership( membership_id=membership_db.id, ) - await session.commit() - if request.role == "owner": result = await session.execute( select(OrganizationDB).filter_by( @@ -303,7 +310,7 @@ async def create_organization_membership( organization_db = result.scalars().first() - organization_db.owner = str(request.user_ref.id) + organization_db.owner_id = request.user_ref.id await session.commit() @@ -335,6 +342,8 @@ async def create_workspace_membership( session.add(membership_db) + await session.commit() + log.info( "[scopes] workspace membership created", organization_id=workspace_db.organization_id, @@ -343,8 +352,6 @@ async def create_workspace_membership( membership_id=membership_db.id, ) - await session.commit() - response = Reference(id=membership_db.id) return response @@ -373,6 +380,8 @@ async def create_project_membership( session.add(membership_db) + await session.commit() + log.info( "[scopes] project membership created", organization_id=project_db.organization_id, @@ -382,8 +391,6 @@ async def create_project_membership( membership_id=membership_db.id, ) - await session.commit() - response = Reference(id=membership_db.id) return response diff --git a/api/ee/src/services/commoners.py b/api/ee/src/services/commoners.py index 2f76c7facc..6aa65877af 100644 --- a/api/ee/src/services/commoners.py +++ b/api/ee/src/services/commoners.py @@ -1,7 +1,8 @@ from os import getenv from json import loads -from typing import List +from typing import List, Optional from traceback import format_exc +from uuid import UUID from pydantic import BaseModel @@ -19,7 +20,12 @@ user_exists, ) from ee.src.models.api.organization_models import CreateOrganization -from oss.src.services.user_service import create_new_user +from oss.src.services.user_service import ( + create_new_user, + check_user_exists, + delete_user, +) +from oss.src.models.db_models import UserDB, OrganizationDB from ee.src.services.email_helper import ( add_contact_to_loops, ) @@ -30,6 +36,8 @@ from ee.src.core.subscriptions.service import SubscriptionsService from ee.src.dbs.postgres.meters.dao import MetersDAO from ee.src.core.meters.service import MetersService +from oss.src.utils.caching import set_cache, get_cache +from sqlalchemy.exc import IntegrityError subscription_service = 
SubscriptionsService( subscriptions_dao=SubscriptionsDAO(), @@ -108,12 +116,20 @@ async def add_user_to_demos(user_id: str) -> None: raise exc # TODO: handle exceptions -async def create_accounts(payload: dict): +async def create_accounts( + payload: dict, + organization_name: Optional[str] = None, + is_personal: bool = True, + use_reverse_trial: bool = True, +): """Creates a user account and an associated organization based on the provided payload. Arguments: payload (dict): The required payload. It consists of; user_id and user_email + organization_name (str): Name for the organization. Default: "Personal" + is_personal (bool): Whether this is a personal org. Default: True + use_reverse_trial (bool): Use reverse trial (True) or hobby plan (False). Default: True """ # Only keep fields expected by UserDB to avoid TypeErrors (e.g., organization_id) @@ -123,59 +139,214 @@ async def create_accounts(payload: dict): "username": payload["email"].split("@")[0], } - user = await db_manager.get_user_with_email(email=user_dict["email"]) - if user is None: - log.info("[scopes] Yey! A new user is signing up!") + email = user_dict["email"] - # Create user first - user = await create_new_user(user_dict) + # Atomically acquire a distributed lock to prevent race conditions + # where multiple concurrent requests create duplicate accounts + from oss.src.utils.caching import acquire_lock, release_lock - log.info("[scopes] User [%s] created", user.id) + lock_acquired = await acquire_lock( + namespace="account-creation", + key=email, + ) - # Prepare payload to create organization - create_org_payload = CreateOrganization( - name=user_dict["username"], - description="Default Organization", - owner=str(user.id), - type="default", - ) + if not lock_acquired: + # Another request is already creating this account - just return the existing user + log.info("[scopes] account creation lock already taken") + user = await db_manager.get_user_with_email(email=email) + return user + + # We have the lock - proceed with account creation + log.info("[scopes] account creation lock acquired") + + try: + # Get or create user + user = await db_manager.get_user_with_email(email=user_dict["email"]) + user_is_new = user is None + + if user is None: + # Create user (idempotent - returns existing if found) + user = await create_new_user(user_dict) + log.info("[scopes] User [%s] created", user.id) + + # Check if user already has organizations (to detect if setup already ran) + user_organizations = await db_manager.get_user_organizations(str(user.id)) + user_has_organization = len(user_organizations) > 0 + + # Only run setup if user is new AND doesn't have organizations + if user_is_new and not user_has_organization: + # We successfully created the user and they have no orgs, proceed with setup + # If setup fails, delete the user to avoid orphaned records + try: + # Add the user to demos + await add_user_to_demos(str(user.id)) + + # Create organization with workspace and subscription + await create_organization_with_subscription( + user_id=UUID(str(user.id)), + organization_email=user_dict["email"], + organization_name="Personal", + organization_description=None, + is_personal=is_personal, + use_reverse_trial=use_reverse_trial, + ) + except Exception as e: + # Setup failed - delete the user to avoid orphaned state + log.error( + "[scopes] setup failed for user [%s], deleting user: %s", + user.id, + str(e), + ) + try: + await delete_user(str(user.id)) + except Exception as delete_error: + log.error( + "[scopes] failed to delete user 
[%s]: %s", + user.id, + str(delete_error), + ) + # Re-raise the original error + raise + else: + # User already has organization(s) - skip setup + if user_has_organization: + log.info( + "[scopes] User [%s] already has organization, skipping setup", + user.id, + ) + + log.info("[scopes] User [%s] authenticated", user.id) + + try: + from oss.src.core.auth.service import AuthService + + await AuthService().enforce_domain_policies( + email=user_dict["email"], + user_id=user.id, + ) + except Exception as e: + log.debug("Error enforcing domain policies after signup: %s", e) - # Create the user's default organization and workspace + if is_ee(): + try: + # Adds contact to loops for marketing emails. TODO: Add opt-in checkbox to supertokens + add_contact_to_loops(user_dict["email"]) # type: ignore + except ConnectionError as ex: + log.warn("error adding contact to loops %s", ex) + + return user + + finally: + # Always release the lock when done (or on error) + released = await release_lock( + namespace="account-creation", + key=email, + ) + if released: + log.info("[scopes] account creation lock released") + else: + log.warn("[scopes] account creation lock already expired") + + +async def create_organization_with_subscription( + user_id: UUID, + organization_email: str, + organization_name: Optional[str] = None, + organization_description: Optional[str] = None, + is_personal: bool = False, + use_reverse_trial: bool = False, +) -> OrganizationDB: + """Create an organization with workspace and subscription for an existing user. + + Args: + user_id: The user's UUID + organization_email: The user's email for subscription + organization_name: Name for the organization + organization_description: Optional description + is_personal: Whether this is a personal org (default: False for collaborative) + use_reverse_trial: Use reverse trial (True) or hobby plan (False) + + Returns: + OrganizationDB: The created organization + """ + # Get user object + user = await db_manager.get_user(str(user_id)) + if not user: + raise ValueError(f"User {user_id} not found") + + if is_personal: + existing_orgs = await db_manager.get_user_organizations(str(user_id)) + existing_personal = next( + (org for org in existing_orgs if (org.flags or {}).get("is_personal")), + None, + ) + if existing_personal: + log.info( + "[scopes] Personal organization already exists", + organization_id=existing_personal.id, + user_id=user_id, + ) + return existing_personal + + # Prepare payload to create organization + create_org_payload = CreateOrganization( + name=organization_name, + description=organization_description, + is_demo=False, + is_personal=is_personal, + owner_id=user_id, + ) + + # Create organization and workspace + try: organization = await create_organization( payload=create_org_payload, user=user, ) + except IntegrityError: + if is_personal: + existing_orgs = await db_manager.get_user_organizations(str(user_id)) + existing_personal = next( + (org for org in existing_orgs if (org.flags or {}).get("is_personal")), + None, + ) + if existing_personal: + log.info( + "[scopes] Personal organization already exists (race)", + organization_id=existing_personal.id, + user_id=user_id, + ) + return existing_personal + raise - log.info("[scopes] Organization [%s] created", organization.id) - - # Add the user to demos - await add_user_to_demos(str(user.id)) + log.info("[scopes] Organization [%s] created", organization.id) - # Start reverse trial - try: + # Start subscription based on type + try: + if use_reverse_trial: await 
subscription_service.start_reverse_trial( organization_id=str(organization.id), organization_name=organization.name, - organization_email=user_dict["email"], + organization_email=organization_email, ) - - except Exception as exc: - raise exc # TODO: handle exceptions - # await subscription_service.start_free_plan( - # organization_id=str(organization.id), - # ) - - await check_entitlements( - organization_id=str(organization.id), - key=Gauge.USERS, - delta=1, + else: + # Start hobby/free plan + await subscription_service.start_free_plan( + organization_id=str(organization.id), + ) + except Exception as exc: + log.error( + "[scopes] Failed to create subscription for organization [%s]: %s", + organization.id, + exc, ) + raise exc - log.info("[scopes] User [%s] authenticated", user.id) + # Check entitlements + await check_entitlements( + organization_id=str(organization.id), + key=Gauge.USERS, + delta=1, + ) - if is_ee(): - try: - # Adds contact to loops for marketing emails. TODO: Add opt-in checkbox to supertokens - add_contact_to_loops(user_dict["email"]) # type: ignore - except ConnectionError as ex: - log.warn("Error adding contact to loops %s", ex) + return organization diff --git a/api/ee/src/services/db_manager_ee.py b/api/ee/src/services/db_manager_ee.py index 55f3e55470..239cd4806c 100644 --- a/api/ee/src/services/db_manager_ee.py +++ b/api/ee/src/services/db_manager_ee.py @@ -42,6 +42,10 @@ UserDB, InvitationDB, ) +from ee.src.dbs.postgres.organizations.dao import ( + OrganizationProvidersDAO, + OrganizationDomainsDAO, +) from ee.src.services.converters import get_workspace_in_format from ee.src.services.selectors import get_org_default_workspace @@ -127,7 +131,9 @@ async def get_organization_workspaces(organization_id: str): result = await session.execute( select(WorkspaceDB) .filter_by(organization_id=uuid.UUID(organization_id)) - .options(load_only(WorkspaceDB.organization_id)) # type: ignore + .options( # type: ignore + load_only(WorkspaceDB.id, WorkspaceDB.organization_id) + ) ) workspaces = result.scalars().all() return workspaces @@ -203,6 +209,8 @@ async def create_project( session.add(project_db) + await session.commit() + log.info( "[scopes] project created", organization_id=organization_id, @@ -210,8 +218,6 @@ async def create_project( project_id=project_db.id, ) - await session.commit() - return project_db @@ -231,7 +237,7 @@ async def create_default_project( """ project_db = await create_project( - "Default Project", + "Default", workspace_id=workspace_id, organization_id=organization_id, session=session, @@ -296,13 +302,11 @@ async def _sync(db_session: AsyncSession) -> None: member.user_id: member for member in existing_members_result.scalars().all() } - updated = False for member in workspace_members: project_member = existing_members.get(member.user_id) if project_member: if project_member.role != member.role: project_member.role = member.role - updated = True continue project_member = ProjectMemberDB( @@ -311,6 +315,9 @@ async def _sync(db_session: AsyncSession) -> None: role=member.role, ) db_session.add(project_member) + + await db_session.commit() + log.info( "[scopes] project membership created", organization_id=str(project.organization_id), @@ -319,10 +326,6 @@ async def _sync(db_session: AsyncSession) -> None: user_id=str(member.user_id), membership_id=project_member.id, ) - updated = True - - if updated: - await db_session.commit() if session is not None: await _sync(session) @@ -422,6 +425,8 @@ async def create_project_member( session.add(project_member) + 
await session.commit() + log.info( "[scopes] project membership created", organization_id=project.organization_id, @@ -431,8 +436,6 @@ async def create_project_member( membership_id=project_member.id, ) - await session.commit() - async def fetch_project_memberships_by_user_id( user_id: str, @@ -478,14 +481,14 @@ async def create_workspace_db_object( session.add(workspace) + await session.commit() + log.info( "[scopes] workspace created", organization_id=organization.id, workspace_id=workspace.id, ) - await session.commit() - # add user as a member to the workspace with the owner role workspace_member = WorkspaceMemberDB( user_id=user.id, @@ -494,6 +497,10 @@ async def create_workspace_db_object( ) session.add(workspace_member) + + await session.commit() + await session.refresh(workspace, attribute_names=["organization"]) + log.info( "[scopes] workspace membership created", organization_id=workspace.organization_id, @@ -502,10 +509,6 @@ async def create_workspace_db_object( membership_id=workspace_member.id, ) - await session.commit() - - await session.refresh(workspace, attribute_names=["organization"]) - project_db = await create_default_project( organization_id=str(organization.id), workspace_id=str(workspace.id), @@ -741,8 +744,11 @@ async def add_user_to_workspace_and_org( user_organization = OrganizationMemberDB( user_id=user.id, organization_id=organization.id ) + session.add(user_organization) + await session.commit() + log.info( "[scopes] organization membership created", organization_id=organization.id, @@ -759,6 +765,8 @@ async def add_user_to_workspace_and_org( session.add(workspace_member) + await session.commit() + log.info( "[scopes] workspace membership created", organization_id=organization.id, @@ -793,8 +801,11 @@ async def add_user_to_workspace_and_org( project_id=project.id, role=role, ) + session.add(project_member) + await session.commit() + log.info( "[scopes] project membership created", organization_id=str(project.organization_id), @@ -804,7 +815,6 @@ async def add_user_to_workspace_and_org( membership_id=project_member.id, ) - await session.commit() return True @@ -969,46 +979,58 @@ async def create_organization( async with engine.core_session() as session: create_org_data = payload.model_dump(exclude_unset=True) - if "owner" not in create_org_data: - create_org_data["owner"] = str(user.id) + + is_demo = create_org_data.pop("is_demo", False) + is_personal = create_org_data.pop("is_personal", False) + + create_org_data["flags"] = { + "is_demo": is_demo, + "is_personal": is_personal, + "allow_email": env.auth.email_enabled, + "allow_social": env.auth.oidc_enabled, + "allow_sso": False, + "allow_root": False, + "domains_only": False, + "auto_join": False, + } + + # Set required audit fields + create_org_data["owner_id"] = user.id + create_org_data["created_by_id"] = user.id # create organization organization_db = OrganizationDB(**create_org_data) session.add(organization_db) + await session.commit() + log.info( "[scopes] organization created", organization_id=organization_db.id, ) - await session.commit() - # create joined organization for user user_organization = OrganizationMemberDB( - user_id=user.id, organization_id=organization_db.id + user_id=user.id, + organization_id=organization_db.id, + role="owner", ) session.add(user_organization) + await session.commit() + log.info( "[scopes] organization membership created", organization_id=organization_db.id, user_id=user.id, + role="owner", membership_id=user_organization.id, ) - await session.commit() - # construct 
workspace payload workspace_payload = CreateWorkspace( - name=payload.name, - type=payload.type if payload.type else "", - description=( - "Default Workspace" - if payload.type == "default" - else payload.description - if payload.description - else "" - ), + name="Default", + type="default", ) # create workspace @@ -1055,7 +1077,137 @@ async def update_organization( if not organization: raise NoResultFound(f"Organization with id {organization_id} not found") - for key, value in payload.model_dump(exclude_unset=True).items(): + # Validate slug updates before applying + payload_dict = payload.model_dump(exclude_unset=True) + if "slug" in payload_dict: + new_slug = payload_dict["slug"] + + # Slug format validation: only lowercase letters and hyphens, max 64 characters + if new_slug is not None: + import re + + if len(new_slug) > 64: + raise ValueError("Organization slug cannot exceed 64 characters.") + if not re.match(r"^[a-z-]+$", new_slug): + raise ValueError( + "Organization slug can only contain lowercase letters (a-z) and hyphens (-)." + ) + + # Personal organizations cannot have slugs + is_personal = organization.flags and organization.flags.get( + "is_personal", False + ) + if is_personal: + raise ValueError( + "Personal organizations cannot have slugs. " + "Slugs are only available for collaborative organizations." + ) + + # Slug immutability: once set, cannot be changed + if organization.slug is not None and new_slug != organization.slug: + raise ValueError( + f"Organization slug cannot be changed once set. " + f"Current slug: '{organization.slug}'" + ) + + # Special handling for flags: merge instead of replace + if "flags" in payload_dict: + new_flags = payload_dict["flags"] + if new_flags is not None: + # Get existing flags or initialize with defaults + existing_flags = organization.flags or {} + + # Start with complete defaults + default_flags = { + "is_demo": False, + "is_personal": False, + "allow_email": env.auth.email_enabled, + "allow_social": env.auth.oidc_enabled, + "allow_sso": False, + "allow_root": False, + "domains_only": False, + "auto_join": False, + } + + # Merge: defaults <- existing <- new + merged_flags = {**default_flags, **existing_flags, **new_flags} + + # VALIDATION: Ensure at least one auth method is enabled OR allow_root is true + # This prevents organizations from being locked out + allow_email = merged_flags.get("allow_email", False) + allow_social = merged_flags.get("allow_social", False) + allow_sso = merged_flags.get("allow_sso", False) + allow_root = merged_flags.get("allow_root", False) + + changing_auth_flags = any( + key in new_flags + for key in ("allow_email", "allow_social", "allow_sso") + ) + changing_auto_join = "auto_join" in new_flags + changing_domains_only = "domains_only" in new_flags + + if changing_auth_flags and allow_sso: + providers_dao = OrganizationProvidersDAO(session) + providers = await providers_dao.list_by_organization( + organization_id + ) + active_valid = [ + provider + for provider in providers + if (provider.flags or {}).get("is_active") + and (provider.flags or {}).get("is_valid") + ] + if not active_valid: + raise ValueError( + "SSO cannot be enabled until at least one SSO provider is " + "active and verified." + ) + if not allow_email and not allow_social: + if not active_valid: + raise ValueError( + "SSO-only authentication requires at least one SSO provider to " + "be active and verified." 
+ ) + + if changing_auto_join and merged_flags.get("auto_join", False): + domains_dao = OrganizationDomainsDAO(session) + domains = await domains_dao.list_by_organization(organization_id) + has_verified_domain = any( + (domain.flags or {}).get("is_verified") for domain in domains + ) + if not has_verified_domain: + raise ValueError( + "Auto-join requires at least one verified domain." + ) + + if changing_domains_only and merged_flags.get("domains_only", False): + domains_dao = OrganizationDomainsDAO(session) + domains = await domains_dao.list_by_organization(organization_id) + has_verified_domain = any( + (domain.flags or {}).get("is_verified") for domain in domains + ) + if not has_verified_domain: + raise ValueError( + "Domains-only requires at least one verified domain." + ) + + # Check if all auth methods are disabled + all_auth_disabled = not (allow_email or allow_social or allow_sso) + + if all_auth_disabled and not allow_root: + # Auto-enable allow_root to prevent lockout + merged_flags["allow_root"] = True + log.warning( + f"All authentication methods disabled for organization {organization_id}. " + f"Auto-enabling allow_root to prevent lockout." + ) + + organization.flags = merged_flags + # Remove flags from payload_dict to avoid setting it again below + del payload_dict["flags"] + + # Set all other attributes + for key, value in payload_dict.items(): if hasattr(organization, key): setattr(organization, key, value) @@ -1064,6 +1216,33 @@ async def update_organization( return organization +async def delete_organization(organization_id: str) -> bool: + """ + Delete an organization and all its related data. + + Args: + organization_id (str): The organization ID to delete. + + Returns: + bool: True if deletion was successful. + + Raises: + NoResultFound: If organization not found. + """ + async with engine.core_session() as session: + result = await session.execute( + select(OrganizationDB).filter_by(id=uuid.UUID(organization_id)) + ) + organization = result.scalars().first() + + if not organization: + raise NoResultFound(f"Organization with id {organization_id} not found") + + await session.delete(organization) + await session.commit() + return True + + async def delete_invitation(invitation_id: str) -> bool: """ Delete an invitation from an organization. @@ -1171,10 +1350,11 @@ async def get_org_details(organization: Organization) -> dict: sample_organization = { "id": str(organization.id), + "slug": organization.slug, "name": organization.name, "description": organization.description, - "type": organization.type, - "owner": organization.owner, + "flags": organization.flags, + "owner_id": str(organization.owner_id), "workspaces": [str(workspace.id) for workspace in workspaces], "default_workspace": default_workspace, } @@ -1206,22 +1386,6 @@ async def get_workspace_details(workspace: WorkspaceDB) -> WorkspaceResponse: raise e -async def get_organization_invitations(organization_id: str): - """ - Gets the organization invitations. - - Args: - organization_id (str): The ID of the organization - """ - - async with engine.core_session() as session: - result = await session.execute( - select(InvitationDB).filter_by(organization_id=organization_id) - ) - invitations = result.scalars().all() - return invitations - - async def get_project_invitations(project_id: str, **kwargs): """ Gets the project invitations. 
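
To make the flag-merge and lockout-guard semantics above concrete, here is a minimal, self-contained sketch. `DEFAULT_FLAGS` stands in for the env-derived defaults (`env.auth.email_enabled`, `env.auth.oidc_enabled`), and the function name is illustrative, not part of this PR:

```python
# Minimal sketch of update_organization's flag handling (illustrative names).
DEFAULT_FLAGS = {
    "is_demo": False,
    "is_personal": False,
    "allow_email": True,   # env.auth.email_enabled in the real code
    "allow_social": True,  # env.auth.oidc_enabled in the real code
    "allow_sso": False,
    "allow_root": False,
    "domains_only": False,
    "auto_join": False,
}


def merge_flags(existing: dict | None, new: dict) -> dict:
    # Merge order matters: defaults <- existing <- new.
    merged = {**DEFAULT_FLAGS, **(existing or {}), **new}
    # Lockout guard: if every auth method ends up disabled, auto-enable
    # allow_root so the organization owner can still sign in.
    if not (merged["allow_email"] or merged["allow_social"] or merged["allow_sso"]):
        merged["allow_root"] = True
    return merged


# Disabling all auth methods trips the guard instead of locking the org out.
assert merge_flags(
    {"allow_sso": True},
    {"allow_email": False, "allow_social": False, "allow_sso": False},
)["allow_root"] is True
```
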
@@ -1376,25 +1540,28 @@ async def get_all_workspace_roles() -> List[WorkspaceRole]: async def add_user_to_organization( organization_id: str, user_id: str, + role: str = "member", # is_demo: bool = False, ) -> None: async with engine.core_session() as session: organization_member = OrganizationMemberDB( user_id=user_id, organization_id=organization_id, + role=role, ) session.add(organization_member) + await session.commit() + log.info( "[scopes] organization membership created", organization_id=organization_id, user_id=user_id, + role=role, membership_id=organization_member.id, ) - await session.commit() - async def add_user_to_workspace( workspace_id: str, @@ -1419,7 +1586,8 @@ async def add_user_to_workspace( session.add(workspace_member) - # TODO: add organization_id + await session.commit() + log.info( "[scopes] workspace membership created", organization_id=workspace.organization_id, @@ -1428,8 +1596,6 @@ async def add_user_to_workspace( membership_id=workspace_member.id, ) - await session.commit() - async def add_user_to_project( project_id: str, @@ -1454,6 +1620,8 @@ async def add_user_to_project( session.add(project_member) + await session.commit() + log.info( "[scopes] project membership created", organization_id=project.organization_id, @@ -1463,4 +1631,148 @@ async def add_user_to_project( membership_id=project_member.id, ) + +async def transfer_organization_ownership( + organization_id: str, + new_owner_id: str, + current_user_id: str, +) -> OrganizationDB: + """Transfer organization ownership to another member. + + Args: + organization_id: The ID of the organization + new_owner_id: The UUID of the new owner + current_user_id: The UUID of the current user (initiating the transfer) + + Returns: + OrganizationDB: The updated organization + + Raises: + ValueError: If new owner is not a member of the organization + """ + from datetime import datetime, timezone + from ee.src.models.db_models import OrganizationMemberDB, WorkspaceMemberDB + + async with engine.core_session() as session: + # Verify organization exists + org_result = await session.execute( + select(OrganizationDB).filter_by(id=uuid.UUID(organization_id)) + ) + organization = org_result.scalars().first() + if not organization: + raise ValueError(f"Organization {organization_id} not found") + + # Check if new owner is a member + member_result = await session.execute( + select(OrganizationMemberDB).filter_by( + user_id=uuid.UUID(new_owner_id), + organization_id=uuid.UUID(organization_id), + ) + ) + member = member_result.scalars().first() + if not member: + raise ValueError("The new owner must be a member of the organization") + + # Swap organization roles between current owner and new owner + current_owner_org_member_result = await session.execute( + select(OrganizationMemberDB).filter_by( + user_id=uuid.UUID(current_user_id), + organization_id=uuid.UUID(organization_id), + ) + ) + current_owner_org_member = current_owner_org_member_result.scalars().first() + + if current_owner_org_member: + # Swap org roles + current_owner_org_old_role = current_owner_org_member.role + new_owner_org_old_role = member.role + + current_owner_org_member.role = new_owner_org_old_role + member.role = current_owner_org_old_role + + log.info( + "[organization] roles swapped", + organization_id=organization_id, + current_owner_id=current_user_id, + current_owner_old_role=current_owner_org_old_role, + current_owner_new_role=new_owner_org_old_role, + new_owner_id=new_owner_id, + new_owner_old_role=new_owner_org_old_role, + 
new_owner_new_role=current_owner_org_old_role, + ) + + # Get all workspaces in this organization + workspaces_result = await session.execute( + select(WorkspaceDB).filter_by(organization_id=uuid.UUID(organization_id)) + ) + workspaces = workspaces_result.scalars().all() + + # Update workspace roles for both users in all workspaces - swap their roles + for workspace in workspaces: + # Get both members' workspace roles + current_owner_member_result = await session.execute( + select(WorkspaceMemberDB).filter_by( + user_id=uuid.UUID(current_user_id), + workspace_id=workspace.id, + ) + ) + current_owner_member = current_owner_member_result.scalars().first() + + new_owner_member_result = await session.execute( + select(WorkspaceMemberDB).filter_by( + user_id=uuid.UUID(new_owner_id), + workspace_id=workspace.id, + ) + ) + new_owner_member = new_owner_member_result.scalars().first() + + # Swap roles between the two users + if current_owner_member and new_owner_member: + current_owner_old_role = current_owner_member.role + new_owner_old_role = new_owner_member.role + + # Swap the roles + current_owner_member.role = new_owner_old_role + new_owner_member.role = current_owner_old_role + + log.info( + "[workspace] roles swapped", + workspace_id=str(workspace.id), + current_owner_id=current_user_id, + current_owner_old_role=current_owner_old_role, + current_owner_new_role=new_owner_old_role, + new_owner_id=new_owner_id, + new_owner_old_role=new_owner_old_role, + new_owner_new_role=current_owner_old_role, + ) + elif current_owner_member: + # Only current owner is a member - keep their role + log.info( + "[workspace] new owner not a member", + workspace_id=str(workspace.id), + user_id=new_owner_id, + ) + elif new_owner_member: + # Only new owner is a member - keep their role + log.info( + "[workspace] current owner not a member", + workspace_id=str(workspace.id), + user_id=current_user_id, + ) + + # Transfer ownership + organization.owner_id = uuid.UUID(new_owner_id) + organization.updated_at = datetime.now(timezone.utc) + organization.updated_by_id = uuid.UUID(current_user_id) + await session.commit() + await session.refresh(organization) + + log.info( + "[organization] ownership transferred", + organization_id=organization_id, + old_owner_id=current_user_id, + new_owner_id=new_owner_id, + ) + + return organization diff --git a/api/ee/src/services/organization_security_service.py b/api/ee/src/services/organization_security_service.py new file mode 100644 index 0000000000..885ab264dc --- /dev/null +++ b/api/ee/src/services/organization_security_service.py @@ -0,0 +1,729 @@ +"""Service layer for organization security features (domains and SSO providers).""" + +import secrets +import hashlib +import logging +from typing import List, Optional +from uuid import UUID +from fastapi import HTTPException + +from oss.src.dbs.postgres.shared.engine import engine +from oss.src.core.secrets.dtos import ( + CreateSecretDTO, + UpdateSecretDTO, + SecretDTO, + SecretKind, + SSOProviderDTO, + SSOProviderSettingsDTO, +) +from oss.src.core.secrets.services import VaultService +from oss.src.dbs.postgres.secrets.dao import SecretsDAO +from oss.src.core.shared.dtos import Header +from ee.src.dbs.postgres.organizations.dao import ( + OrganizationDomainsDAO, + OrganizationProvidersDAO, +) +from ee.src.apis.fastapi.organizations.models import ( + OrganizationDomainCreate, + OrganizationDomainResponse, + OrganizationProviderCreate, + OrganizationProviderUpdate, + OrganizationProviderResponse, +) +from ee.src.services import 
db_manager_ee
+
+logger = logging.getLogger(__name__)
+
+
+class DomainVerificationService:
+    """Service for managing domain verification."""
+
+    TOKEN_EXPIRY_HOURS = 48
+
+    @staticmethod
+    def generate_verification_token() -> str:
+        """Generate a unique verification token."""
+        # Generate a cryptographically secure random token (16 bytes = 32 hex chars)
+        random_part = secrets.token_hex(16)
+
+        # Return the bare token; the "_agenta-verification=" prefix belongs to
+        # the DNS TXT record value, not to the token itself.
+        return random_part
+
+    @staticmethod
+    async def verify_domain_dns(domain: str, expected_token: str) -> bool:
+        """Verify domain ownership via DNS TXT record."""
+        import dns.resolver
+
+        try:
+            txt_record_name = f"_agenta-verification.{domain}"
+            logger.info(f"Attempting DNS verification for {txt_record_name}")
+            resolvers = [
+                ("system", None),
+                ("cloudflare+google", ["1.1.1.1", "8.8.8.8"]),
+            ]
+
+            def _resolve_txt(resolver_label: str, nameservers: list[str] | None):
+                resolver = dns.resolver.Resolver()
+                if nameservers:
+                    resolver.nameservers = nameservers
+                logger.info(
+                    f"DNS lookup using {resolver_label} resolver for {txt_record_name}"
+                )
+                return resolver.resolve(txt_record_name, "TXT")
+
+            for resolver_label, nameservers in resolvers:
+                try:
+                    answers = _resolve_txt(resolver_label, nameservers)
+                except Exception as exc:
+                    logger.warning(
+                        f"DNS lookup failed via {resolver_label} resolver: {exc}"
+                    )
+                    continue
+
+                logger.info(f"Found {len(answers)} TXT records for {txt_record_name}")
+
+                for rdata in answers:
+                    txt_value = rdata.to_text().strip('"')
+                    logger.info(f"TXT record value: {txt_value}")
+
+                    # Extract the token value from "_agenta-verification=TOKEN" format
+                    if txt_value.startswith("_agenta-verification="):
+                        token = txt_value.split("=", 1)[1]
+                        logger.info(f"Extracted token from DNS: {token}")
+                        logger.info(f"Expected token from DB: {expected_token}")
+                        logger.info(f"Tokens match: {token == expected_token}")
+                        if token == expected_token:
+                            logger.info(f"Domain verification successful for {domain}")
+                            return True
+                        else:
+                            logger.warning(
+                                f"Token mismatch for {domain}. Expected length: {len(expected_token)}, Got length: {len(token)}"
+                            )
+                            logger.warning(f"Expected: {expected_token}")
+                            logger.warning(f"Got: {token}")
+
+            logger.warning(
+                f"No matching verification token found in DNS records for {domain}"
+            )
+            return False
+        except dns.resolver.NXDOMAIN:
+            logger.warning(f"DNS record not found (NXDOMAIN) for {txt_record_name}")
+            return False
+        except dns.resolver.NoAnswer:
+            logger.warning(f"No TXT records found (NoAnswer) for {txt_record_name}")
+            return False
+        except dns.resolver.Timeout:
+            logger.error(f"DNS lookup timeout for {txt_record_name}")
+            return False
+        except Exception as e:
+            logger.error(
+                f"Unexpected error during DNS verification for {domain}: {e}",
+                exc_info=True,
+            )
+            return False
+
+    async def create_domain(
+        self,
+        organization_id: str,
+        payload: OrganizationDomainCreate,
+        user_id: str,
+    ) -> OrganizationDomainResponse:
+        """Create a new domain for verification.
+
+        Token expires after 48 hours and can be refreshed.
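+
+        The customer proves ownership by publishing a DNS TXT record of the
+        form (host and token values are illustrative):
+
+            _agenta-verification.acme.com  TXT  "_agenta-verification=<token>"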
+ """ + async with engine.core_session() as session: + dao = OrganizationDomainsDAO(session) + + # Block if a verified domain already exists anywhere + existing_verified = await dao.get_verified_by_slug(payload.domain) + if existing_verified: + raise HTTPException( + status_code=409, + detail=f"Domain {payload.domain} is already verified", + ) + + # Reuse existing unverified domain for this organization, if any + existing = await dao.get_by_slug(payload.domain, organization_id) + if existing and not (existing.flags or {}).get("is_verified"): + from datetime import datetime, timezone + + token = self.generate_verification_token() + existing.token = token + existing.created_at = datetime.now(timezone.utc) + existing.flags = {"is_verified": False} + existing.updated_by_id = user_id + await session.commit() + await session.refresh(existing) + domain = existing + else: + # Generate verification token + token = self.generate_verification_token() + + # Create domain with token + domain = await dao.create( + organization_id=organization_id, + slug=payload.domain, + name=payload.name, + description=payload.description, + token=token, + created_by_id=user_id, + ) + + await session.commit() + await session.refresh(domain) + + return OrganizationDomainResponse( + id=str(domain.id), + organization_id=str(domain.organization_id), + slug=domain.slug, + name=domain.name, + description=domain.description, + token=token, + flags=domain.flags or {}, + created_at=domain.created_at, + updated_at=domain.updated_at, + ) + + async def verify_domain( + self, organization_id: str, domain_id: str, user_id: str + ) -> OrganizationDomainResponse: + """Verify a domain via DNS check.""" + from datetime import datetime, timezone, timedelta + + async with engine.core_session() as session: + dao = OrganizationDomainsDAO(session) + + domain = await dao.get_by_id(domain_id, organization_id) + if not domain: + raise HTTPException(status_code=404, detail="Domain not found") + + # Check if already verified by this organization + if domain.flags and domain.flags.get("is_verified"): + raise HTTPException(status_code=400, detail="Domain already verified") + + # Check if domain is already verified by another organization + verified_by_other = await dao.get_verified_by_slug(domain.slug) + if ( + verified_by_other + and str(verified_by_other.organization_id) != organization_id + ): + raise HTTPException( + status_code=409, + detail=f"Domain {domain.slug} is already verified by another organization", + ) + + # Check if token has expired (48 hours from creation) + token_age = datetime.now(timezone.utc) - domain.created_at + if token_age > timedelta(hours=self.TOKEN_EXPIRY_HOURS): + raise HTTPException( + status_code=400, + detail=f"Verification token expired after {self.TOKEN_EXPIRY_HOURS} hours. Please refresh the token.", + ) + + # Perform DNS verification + is_valid = await self.verify_domain_dns(domain.slug, domain.token) + + if not is_valid: + raise HTTPException( + status_code=400, + detail="Domain verification failed. 
Please ensure the DNS TXT record is correctly configured.", + ) + + # Mark as verified and clear the token (one-time use) + domain.flags = {"is_verified": True} + domain.token = None + domain.updated_by_id = user_id + await session.commit() + await session.refresh(domain) + + return OrganizationDomainResponse( + id=str(domain.id), + organization_id=str(domain.organization_id), + slug=domain.slug, + name=domain.name, + description=domain.description, + token=None, + flags=domain.flags or {}, + created_at=domain.created_at, + updated_at=domain.updated_at, + ) + + async def list_domains( + self, organization_id: str + ) -> List[OrganizationDomainResponse]: + """List all domains for an organization. + + Tokens are returned for unverified domains (within expiry period). + Verified domains have token=None (cleared after verification). + """ + async with engine.core_session() as session: + dao = OrganizationDomainsDAO(session) + domains = await dao.list_by_organization(organization_id) + + return [ + OrganizationDomainResponse( + id=str(d.id), + organization_id=str(d.organization_id), + slug=d.slug, + name=d.name, + description=d.description, + token=d.token, # Token available for unverified domains, None for verified + flags=d.flags or {}, + created_at=d.created_at, + updated_at=d.updated_at, + ) + for d in domains + ] + + async def refresh_token( + self, organization_id: str, domain_id: str, user_id: str + ) -> OrganizationDomainResponse: + """Refresh the verification token for a domain. + + Generates a new token and resets the 48-hour expiry window. + For verified domains, this marks them as unverified for re-verification. + """ + async with engine.core_session() as session: + dao = OrganizationDomainsDAO(session) + + domain = await dao.get_by_id(domain_id, organization_id) + if not domain: + raise HTTPException(status_code=404, detail="Domain not found") + + # Generate new token + new_token = self.generate_verification_token() + + # Update domain with new token and reset created_at to restart the 48-hour expiry window + # If domain was verified, mark as unverified for re-verification + from datetime import datetime, timezone + + domain.token = new_token + domain.created_at = datetime.now(timezone.utc) + domain.flags = {"is_verified": False} + domain.updated_by_id = user_id + await session.commit() + await session.refresh(domain) + + return OrganizationDomainResponse( + id=str(domain.id), + organization_id=str(domain.organization_id), + slug=domain.slug, + name=domain.name, + description=domain.description, + token=new_token, + flags=domain.flags or {}, + created_at=domain.created_at, + updated_at=domain.updated_at, + ) + + async def reset_domain( + self, organization_id: str, domain_id: str, user_id: str + ) -> OrganizationDomainResponse: + """Reset a verified domain to unverified state for re-verification. + + Generates a new token and marks the domain as unverified. 
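+
+        Like refresh_token, this resets created_at to restart the 48-hour
+        expiry window; the previous token stops working immediately.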
+ """ + async with engine.core_session() as session: + dao = OrganizationDomainsDAO(session) + + domain = await dao.get_by_id(domain_id, organization_id) + if not domain: + raise HTTPException(status_code=404, detail="Domain not found") + + # Generate new token + new_token = self.generate_verification_token() + + # Reset domain to unverified state with new token + from datetime import datetime, timezone + + domain.token = new_token + domain.created_at = datetime.now(timezone.utc) + domain.flags = {"is_verified": False} + domain.updated_by_id = user_id + await session.commit() + await session.refresh(domain) + + return OrganizationDomainResponse( + id=str(domain.id), + organization_id=str(domain.organization_id), + slug=domain.slug, + name=domain.name, + description=domain.description, + token=new_token, + flags=domain.flags or {}, + created_at=domain.created_at, + updated_at=domain.updated_at, + ) + + async def delete_domain( + self, organization_id: str, domain_id: str, user_id: str + ) -> bool: + """Delete a domain.""" + async with engine.core_session() as session: + dao = OrganizationDomainsDAO(session) + + domain = await dao.get_by_id(domain_id, organization_id) + if not domain: + raise HTTPException(status_code=404, detail="Domain not found") + + deleted = await dao.delete(domain_id, user_id) + await session.commit() + return deleted + + +class SSOProviderService: + """Service for managing SSO providers.""" + + @staticmethod + def _vault_service() -> VaultService: + return VaultService(SecretsDAO()) + + @staticmethod + def mask_secret(secret: str) -> str: + """Mask a secret for display.""" + if len(secret) <= 8: + return "***" + return f"{secret[:4]}...{secret[-4:]}" + + @staticmethod + async def test_oidc_connection( + issuer_url: str, + client_id: str, + client_secret: str, + ) -> bool: + """Test OIDC provider connection by fetching discovery document.""" + import httpx + + try: + # Try to fetch OIDC discovery document + discovery_url = f"{issuer_url.rstrip('/')}/.well-known/openid-configuration" + + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(discovery_url) + + if response.status_code != 200: + return False + + config = response.json() + + # Verify required OIDC endpoints exist + required_fields = [ + "authorization_endpoint", + "token_endpoint", + "userinfo_endpoint", + ] + if not all(field in config for field in required_fields): + return False + + return True + except Exception: + return False + + async def create_provider( + self, + organization_id: str, + payload: OrganizationProviderCreate, + user_id: str, + ) -> OrganizationProviderResponse: + """Create a new SSO provider.""" + async with engine.core_session() as session: + dao = OrganizationProvidersDAO(session) + + # Use the slug from payload (already validated to be lowercase letters and hyphens) + slug = payload.slug + + # Check if provider with this slug already exists + existing = await dao.get_by_slug(slug, organization_id) + if existing: + raise HTTPException( + status_code=400, + detail=f"Provider with slug '{payload.slug}' already exists", + ) + + # Merge provided settings with defaults + settings = { + **payload.settings, + } + + # Ensure scopes have default if not provided + if "scopes" not in settings or not settings["scopes"]: + settings["scopes"] = ["openid", "profile", "email"] + + secret_payload = CreateSecretDTO( + header=Header(name=slug, description=payload.description), + secret=SecretDTO( + kind=SecretKind.SSO_PROVIDER, + data=SSOProviderDTO( + 
provider=SSOProviderSettingsDTO( + client_id=settings.get("client_id", ""), + client_secret=settings.get("client_secret", ""), + issuer_url=settings.get("issuer_url", ""), + scopes=settings.get("scopes", []), + extra=settings.get("extra", {}) or {}, + ) + ), + ), + ) + + secret_dto = await self._vault_service().create_secret( + organization_id=UUID(organization_id), + create_secret_dto=secret_payload, + ) + + # Merge provided flags with defaults + flags = payload.flags or {} + if "is_valid" not in flags: + flags["is_valid"] = False + if "is_active" not in flags: + flags["is_active"] = False + + # Create provider + provider = await dao.create( + organization_id=organization_id, + slug=slug, + name=payload.name, + description=payload.description, + secret_id=str(secret_dto.id), + created_by_id=user_id, + flags=flags, + ) + + await session.commit() + await session.refresh(provider) + + return await self._to_response(provider, organization_id) + + async def update_provider( + self, + organization_id: str, + provider_id: str, + payload: OrganizationProviderUpdate, + user_id: str, + ) -> OrganizationProviderResponse: + """Update an SSO provider.""" + async with engine.core_session() as session: + dao = OrganizationProvidersDAO(session) + + provider = await dao.get_by_id(provider_id, organization_id) + if not provider: + raise HTTPException(status_code=404, detail="Provider not found") + + # Update settings if provided + settings = await self._get_provider_settings( + organization_id, str(provider.secret_id) + ) + settings_changed = False + + if payload.settings is not None: + settings.update(payload.settings) + settings_changed = True + + # Update flags if provided + flags = provider.flags.copy() if provider.flags else {} + + if payload.flags is not None: + flags.update(payload.flags) + + # If settings changed, invalidate the provider (needs re-testing) + if settings_changed: + flags["is_valid"] = False + flags["is_active"] = False + + # Update slug if provided + if payload.slug is not None: + # Check if new slug already exists + existing = await dao.get_by_slug(payload.slug, organization_id) + if existing and existing.id != provider_id: + raise HTTPException( + status_code=400, + detail=f"Provider with slug '{payload.slug}' already exists", + ) + # Update slug in the provider + provider.slug = payload.slug + + # Update name if provided + if payload.name is not None: + provider.name = payload.name + + # Update description if provided + if payload.description is not None: + provider.description = payload.description + + if settings_changed: + updated_secret = UpdateSecretDTO( + header=Header(name=provider.slug, description=provider.description), + secret=SecretDTO( + kind=SecretKind.SSO_PROVIDER, + data=SSOProviderDTO( + provider=SSOProviderSettingsDTO( + client_id=settings.get("client_id", ""), + client_secret=settings.get("client_secret", ""), + issuer_url=settings.get("issuer_url", ""), + scopes=settings.get("scopes", []), + extra=settings.get("extra", {}) or {}, + ) + ), + ), + ) + await self._vault_service().update_secret( + secret_id=provider.secret_id, + organization_id=organization_id, + update_secret_dto=updated_secret, + ) + + provider = await dao.update( + provider_id=provider_id, + flags=flags, + updated_by_id=user_id, + ) + + await session.commit() + await session.refresh(provider) + + return await self._to_response(provider, organization_id) + + async def list_providers( + self, organization_id: str + ) -> List[OrganizationProviderResponse]: + """List all SSO providers for an 
organization.""" + async with engine.core_session() as session: + dao = OrganizationProvidersDAO(session) + providers = await dao.list_by_organization(organization_id) + + responses: List[OrganizationProviderResponse] = [] + for provider in providers: + responses.append(await self._to_response(provider, organization_id)) + return responses + + async def get_provider( + self, organization_id: str, provider_id: str + ) -> OrganizationProviderResponse: + """Get a single SSO provider by ID.""" + async with engine.core_session() as session: + dao = OrganizationProvidersDAO(session) + provider = await dao.get_by_id(provider_id, organization_id) + if not provider: + raise HTTPException(status_code=404, detail="Provider not found") + return await self._to_response(provider, organization_id) + + async def test_provider( + self, organization_id: str, provider_id: str, user_id: str + ) -> OrganizationProviderResponse: + """Test SSO provider connection and mark as valid if successful.""" + async with engine.core_session() as session: + dao = OrganizationProvidersDAO(session) + + provider = await dao.get_by_id(provider_id, organization_id) + if not provider: + raise HTTPException(status_code=404, detail="Provider not found") + + settings = await self._get_provider_settings( + organization_id, str(provider.secret_id) + ) + + # Test OIDC connection + is_valid = await self.test_oidc_connection( + issuer_url=settings.get("issuer_url", ""), + client_id=settings.get("client_id", ""), + client_secret=settings.get("client_secret", ""), + ) + + # Update flags based on test result + flags = provider.flags.copy() if provider.flags else {} + flags["is_valid"] = is_valid + if is_valid: + flags["is_active"] = True + + # If validation failed, deactivate the provider + if not is_valid: + flags["is_active"] = False + + provider = await dao.update( + provider_id=provider_id, + flags=flags, + updated_by_id=user_id, + ) + + await session.commit() + await session.refresh(provider) + + return await self._to_response(provider, organization_id) + + async def delete_provider( + self, organization_id: str, provider_id: str, user_id: str + ) -> bool: + """Delete an SSO provider.""" + async with engine.core_session() as session: + dao = OrganizationProvidersDAO(session) + + provider = await dao.get_by_id(provider_id, organization_id) + if not provider: + raise HTTPException(status_code=404, detail="Provider not found") + + organization = await db_manager_ee.get_organization(organization_id) + flags = organization.flags or {} + if flags.get("allow_sso"): + providers = await dao.list_by_organization(organization_id) + remaining = [ + p + for p in providers + if str(p.id) != str(provider_id) + and (p.flags or {}).get("is_active") + and (p.flags or {}).get("is_valid") + ] + if not remaining: + raise HTTPException( + status_code=400, + detail=( + "Cannot delete the last active and verified SSO provider while " + "SSO is enabled." 
+ ), + ) + + await self._vault_service().delete_secret( + secret_id=provider.secret_id, + organization_id=organization_id, + ) + deleted = await dao.delete(provider_id, user_id) + await session.commit() + return deleted + + async def _get_provider_settings( + self, organization_id: str, secret_id: str + ) -> dict: + secret = await self._vault_service().get_secret( + secret_id=UUID(secret_id), + organization_id=UUID(organization_id), + ) + if not secret: + raise HTTPException(status_code=404, detail="Provider secret not found") + + data = secret.data + if hasattr(data, "provider"): + return data.provider.model_dump() + if isinstance(data, dict): + provider = data.get("provider") or {} + if isinstance(provider, dict): + return provider + raise HTTPException(status_code=500, detail="Invalid provider secret format") + + async def _to_response( + self, provider, organization_id: str + ) -> OrganizationProviderResponse: + """Convert DBE to response model.""" + settings = await self._get_provider_settings( + organization_id, str(provider.secret_id) + ) + + return OrganizationProviderResponse( + id=str(provider.id), + organization_id=str(provider.organization_id), + slug=provider.slug, + name=provider.name, + description=provider.description, + settings=settings, + flags=provider.flags or {}, + created_at=provider.created_at, + updated_at=provider.updated_at, + ) diff --git a/api/ee/src/services/organization_service.py b/api/ee/src/services/organization_service.py index 9c66c0c22f..463e528de9 100644 --- a/api/ee/src/services/organization_service.py +++ b/api/ee/src/services/organization_service.py @@ -12,6 +12,9 @@ ) from oss.src.utils.env import env +from oss.src.utils.logging import get_module_logger + +log = get_module_logger(__name__) async def update_an_organization( @@ -19,8 +22,8 @@ async def update_an_organization( ) -> OrganizationDB: org = await db_manager_ee.get_organization(organization_id) if org is not None: - await db_manager_ee.update_organization(str(org.id), payload) - return org + updated_org = await db_manager_ee.update_organization(str(org.id), payload) + return updated_org raise NotFound("Organization not found") @@ -127,3 +130,30 @@ async def notify_org_admin_invitation(workspace: WorkspaceDB, user: UserDB) -> b async def get_organization_details(organization_id: str) -> dict: organization = await db_manager_ee.get_organization(organization_id) return await db_manager_ee.get_org_details(organization) + + +async def transfer_organization_ownership( + organization_id: str, + new_owner_id: str, + current_user_id: str, +) -> OrganizationDB: + """Transfer organization ownership to another member. 
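+
+    Organization-level and workspace-level roles are swapped between the two
+    users before owner_id is reassigned (see
+    db_manager_ee.transfer_organization_ownership for the mechanics).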
+ + Args: + organization_id: The ID of the organization + new_owner_id: The UUID of the new owner + current_user_id: The UUID of the current user (initiating the transfer) + + Returns: + OrganizationDB: The updated organization + + Raises: + NotFound: If organization or new owner member not found + ValueError: If new owner is not a member of the organization + """ + # Delegate to db_manager_ee + return await db_manager_ee.transfer_organization_ownership( + organization_id=organization_id, + new_owner_id=new_owner_id, + current_user_id=current_user_id, + ) diff --git a/api/ee/src/services/workspace_manager.py b/api/ee/src/services/workspace_manager.py index c252b19f66..dd34bfecae 100644 --- a/api/ee/src/services/workspace_manager.py +++ b/api/ee/src/services/workspace_manager.py @@ -30,6 +30,7 @@ check_valid_invitation, ) from ee.src.services.organization_service import send_invitation_email +from ee.src.dbs.postgres.organizations.dao import OrganizationDomainsDAO log = get_module_logger(__name__) @@ -155,6 +156,30 @@ async def invite_user_to_workspace( organization = await db_manager_ee.get_organization(organization_id) user_performing_action = await db_manager.get_user(user_uid) + # Check if domains_only is enabled for this organization + org_flags = organization.flags or {} + domains_only = org_flags.get("domains_only", False) + + # If domains_only is enabled, get the list of verified domains + verified_domain_slugs = set() + if domains_only: + domains_dao = OrganizationDomainsDAO() + org_domains = await domains_dao.list_by_organization(organization_id) + verified_domain_slugs = { + d.slug.lower() + for d in org_domains + if d.flags and d.flags.get("is_verified", False) + } + + # If domains_only is enabled but no verified domains exist, block all invitations + if not verified_domain_slugs: + return JSONResponse( + status_code=400, + content={ + "error": "Cannot send invitations: domains_only is enabled but no verified domains exist" + }, + ) + for payload_invite in payload: # Check that the user is not inviting themselves if payload_invite.email == user_performing_action.email: @@ -163,6 +188,17 @@ async def invite_user_to_workspace( content={"error": "You cannot invite yourself to a workspace"}, ) + # Check if domains_only is enabled and validate the email domain + if domains_only: + email_domain = payload_invite.email.split("@")[-1].lower() + if email_domain not in verified_domain_slugs: + return JSONResponse( + status_code=400, + content={ + "error": f"Cannot invite {payload_invite.email}: domain '{email_domain}' is not a verified domain for this organization" + }, + ) + # Check if the user is already a member of the workspace if await db_manager_ee.check_user_in_workspace_with_email( payload_invite.email, str(workspace.id) diff --git a/api/ee/src/utils/permissions.py b/api/ee/src/utils/permissions.py index 5c575ef0cf..13a320bb8b 100644 --- a/api/ee/src/utils/permissions.py +++ b/api/ee/src/utils/permissions.py @@ -93,7 +93,7 @@ async def check_user_org_access( if not organization: log.error("Organization not found") raise Exception("Organization not found") - return organization.owner == str(user.id) # type: ignore + return organization.owner_id == user.id # type: ignore else: user_organizations: List = kwargs["organization_ids"] user_exists_in_organizations = organization_id in user_organizations @@ -290,7 +290,8 @@ async def check_rbac_permission( if project_id is not None: project = await db_manager.get_project_by_id(project_id) if project is None: - raise Exception("Project not 
found") + log.warning(f"Project {project_id} not found during permission check") + return False workspace = await db_manager.get_workspace(str(project.workspace_id)) organization = await db_manager_ee.get_organization( diff --git a/api/ee/tests/manual/auth/00-setup-verification.http b/api/ee/tests/manual/auth/00-setup-verification.http new file mode 100644 index 0000000000..c430272175 --- /dev/null +++ b/api/ee/tests/manual/auth/00-setup-verification.http @@ -0,0 +1,471 @@ +### +# Database Setup & Verification +# SQL commands to set up test data and verify schema +# +# Run these in psql or your database client BEFORE running other tests +### + +### +# 1. Verify Migrations Applied +### + +# Check user_identities table exists +# \d user_identities + +# Expected columns: +# - id (uuid) +# - user_id (uuid, FK to users.id) +# - method (text) +# - subject (text) +# - domain (text, nullable) +# - created_at (timestamp) +# - updated_at (timestamp) +# +# Expected constraints: +# - UNIQUE (method, subject) +# - INDEX on (user_id, method) +# - INDEX on (domain) + + +### +# 2. Verify Organization Schema +### + +# Check organizations table has updated schema +# \d organizations + +# Expected columns: +# - slug (text, unique, nullable) +# - flags (jsonb, nullable) - contains is_personal, is_demo, auth policy flags +# - owner_id (uuid, FK to users.id, NOT NULL) +# - created_by_id, updated_by_id, deleted_by_id (uuid, nullable) +# - created_at, updated_at, deleted_at (timestamp) +# +# Flags structure: +# { +# "is_personal": bool, +# "is_demo": bool, +# "allow_email": bool, +# "allow_social": bool, +# "allow_sso": bool, +# "allow_root": bool, +# "domains_only": bool, +# "auto_join": bool +# } + + +### +# 3. Setup Test Collaborative Organization (EE Mode) +### + +-- Create test collaborative organization with slug +-- INSERT INTO organizations ( +-- id, +-- name, +-- slug, +-- description, +-- flags, +-- owner_id, +-- created_by_id, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- 'ACME Corporation', +-- 'acme', +-- 'Test collaborative organization for SSO', +-- '{"is_personal": false, "allow_email": true, "allow_social": true, "allow_sso": true}'::jsonb, +-- '', -- Replace with actual user ID who will own this org +-- '', +-- now() +-- ) +-- RETURNING id; +-- Save the returned ID as @testOrgId + + +### +# 4. Setup Test Personal Organization (EE Mode) +### + +-- Create test personal organization +-- INSERT INTO organizations ( +-- id, +-- name, +-- slug, +-- description, +-- flags, +-- owner_id, +-- created_by_id, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- 'Personal', +-- NULL, -- Personal orgs have no slug +-- NULL, +-- '{"is_personal": true}'::jsonb, +-- '', -- Replace with actual user ID +-- '', +-- now() +-- ) +-- RETURNING id; +-- Save the returned ID as @testPersonalOrgId + +-- Add user as member to their personal org +-- INSERT INTO organization_members ( +-- id, +-- user_id, +-- organization_id, +-- role, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- '', +-- '', +-- 'owner', +-- now() +-- ); + + +### +# 5. 
Setup Organization Auth Flags (EE Mode) +### + +-- Update organization flags to set authentication policies +-- UPDATE organizations +-- SET flags = jsonb_set( +-- jsonb_set( +-- jsonb_set( +-- jsonb_set( +-- jsonb_set( +-- flags, +-- '{allow_email}', 'false' +-- ), +-- '{allow_social}', 'false' +-- ), +-- '{allow_sso}', 'true' +-- ), +-- '{auto_join}', 'true' +-- ), +-- '{domains_only}', 'true' +-- ) +-- WHERE id = ''; + +# Auth policy flags in organizations.flags: +# - allow_email (boolean, default: true) - Allow email authentication (OTP/password) +# - allow_social (boolean, default: true) - Allow social authentication (Google, GitHub) +# - allow_sso (boolean, default: false) - Allow SSO/OIDC authentication +# - allow_root (boolean, default: true) - Allow owner bypass of auth restrictions +# - domains_only (boolean, default: false) - Only allow users with verified domain emails +# - auto_join (boolean, default: false) - Auto-add users with verified domain emails + + +### +# 6. Setup Verified Domain (EE Mode - Collaborative Org) +### + +-- Add verified domain for SSO on collaborative org +-- INSERT INTO organization_domains ( +-- id, +-- organization_id, +-- slug, +-- name, +-- description, +-- token, +-- flags, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- '', -- Collaborative org ID +-- 'acme.com', +-- 'ACME Domain', +-- 'Primary domain for ACME Corporation', +-- NULL, -- Token only needed during verification +-- '{"is_verified": true}'::jsonb, +-- now() +-- ) +-- RETURNING id; +-- Save as @testDomainId + +# Note: domain column renamed to slug, verified moved to flags.is_verified + + +### +# 7. Test Domain Verification Restrictions (EE Mode) +### + +-- Attempt to add domain to personal org (verification should fail) +-- INSERT INTO organization_domains ( +-- id, +-- organization_id, +-- slug, +-- name, +-- token, +-- flags, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- '', -- Personal org ID +-- 'personal-test.com', +-- 'Personal Test Domain', +-- 'test-token-123', +-- '{"is_verified": false}'::jsonb, +-- now() +-- ) +-- RETURNING id; +-- Save as @personalOrgDomainId + +# NOTE: Attempting to verify this domain should fail with: +# "Personal organizations cannot verify domains" + + +### +# 8. Test Domain Exclusivity (EE Mode) +### + +-- Create second collaborative org to test exclusivity +-- INSERT INTO organizations ( +-- id, +-- name, +-- slug, +-- description, +-- flags, +-- owner_id, +-- created_by_id, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- 'Second Corp', +-- 'second', +-- 'Test domain exclusivity', +-- '{"is_personal": false}'::jsonb, +-- '', +-- '', +-- now() +-- ) +-- RETURNING id; +-- Save as @secondOrgId + +-- Attempt to verify same domain as first org +-- INSERT INTO organization_domains ( +-- id, +-- organization_id, +-- slug, +-- name, +-- token, +-- flags, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- '', +-- 'acme.com', -- Same domain as testOrgId +-- 'Conflicting ACME Domain', +-- 'conflict-token-456', +-- '{"is_verified": false}'::jsonb, +-- now() +-- ) +-- RETURNING id; +-- Save as @conflictingDomainId + +# NOTE: Attempting to verify this domain should fail with: +# "Domain 'acme.com' is already verified by another organization" + + +### +# 9. 
Setup OIDC Provider (EE Mode) +### + +-- Add OIDC provider configuration +-- INSERT INTO organization_providers ( +-- id, +-- organization_id, +-- slug, +-- name, +-- description, +-- settings, +-- flags, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- '', +-- 'okta', +-- 'Okta SSO', +-- 'ACME Okta integration', +-- '{ +-- "issuer": "https://dev-12345.okta.com", +-- "client_id": "0oa...", +-- "client_secret": "xxx", +-- "scopes": ["openid", "profile", "email"], +-- "authorization_endpoint": "https://dev-12345.okta.com/oauth2/v1/authorize", +-- "token_endpoint": "https://dev-12345.okta.com/oauth2/v1/token", +-- "userinfo_endpoint": "https://dev-12345.okta.com/oauth2/v1/userinfo" +-- }'::jsonb, +-- '{"is_active": true}'::jsonb, +-- now() +-- ) +-- RETURNING id; +-- Save as @testProviderId + +# Note: +# - config renamed to settings +# - enabled moved to flags.is_active +# - domain_id removed (SSO provider can handle multiple domains) + + +### +# 10. Create Test User +### + +-- Create test user +-- INSERT INTO users (id, uid, username, email, created_at) +-- VALUES ( +-- gen_random_uuid(), +-- 'st_user_123', -- SuperTokens user ID +-- 'Test User', +-- 'test@acme.com', +-- now() +-- ) +-- RETURNING id; +-- Save as @testUserId + + +### +# 11. Add User to Organization (EE Mode) +### + +-- Create organization membership +-- INSERT INTO organization_members ( +-- id, +-- user_id, +-- organization_id, +-- role, +-- created_at +-- ) +-- VALUES ( +-- gen_random_uuid(), +-- '', +-- '', +-- 'member', +-- now() +-- ); + +# Role can be: 'owner', 'member' + + +### +# 12. Verification Queries +### + +-- Check all test data created successfully + +-- Verify organizations (check flags) +-- SELECT id, name, slug, flags FROM organizations ORDER BY flags->>'is_personal'; + +-- Verify collaborative organization +-- SELECT id, name, slug, flags FROM organizations WHERE slug = 'acme'; + +-- Verify personal organization +-- SELECT id, name, slug, flags FROM organizations WHERE flags->>'is_personal' = 'true'; + +-- Verify organization auth flags +-- SELECT +-- id, +-- name, +-- flags->'allow_email' as allow_email, +-- flags->'allow_social' as allow_social, +-- flags->'allow_sso' as allow_sso, +-- flags->'auto_join' as auto_join, +-- flags->'domains_only' as domains_only, +-- flags->'allow_root' as allow_root +-- FROM organizations +-- WHERE id = ''; + +-- Verify domains +-- SELECT +-- od.id, +-- od.slug as domain, +-- od.flags->>'is_verified' as verified, +-- o.name as org_name, +-- o.flags->>'is_personal' as is_personal +-- FROM organization_domains od +-- JOIN organizations o ON o.id = od.organization_id +-- ORDER BY o.flags->>'is_personal', od.slug; + +-- Verify provider +-- SELECT +-- slug, +-- name, +-- flags->>'is_active' as is_active, +-- settings->>'issuer' as issuer +-- FROM organization_providers +-- WHERE organization_id = ''; + +-- Verify user +-- SELECT id, email FROM users WHERE email = 'test@acme.com'; + +-- Verify membership +-- SELECT +-- om.id, +-- u.email, +-- o.name, +-- om.role, +-- o.flags->>'is_personal' as is_personal +-- FROM organization_members om +-- JOIN users u ON u.id = om.user_id +-- JOIN organizations o ON o.id = om.organization_id +-- WHERE u.email = 'test@acme.com' +-- ORDER BY o.flags->>'is_personal'; + + +### +# 13. 
Cleanup (Run after testing) +### + +-- Clean up test data +-- DELETE FROM organization_members WHERE user_id = ''; +-- DELETE FROM organization_providers WHERE organization_id IN ('', ''); +-- DELETE FROM organization_domains WHERE organization_id IN ('', '', ''); +-- DELETE FROM user_identities WHERE user_id = ''; +-- DELETE FROM users WHERE id = ''; +-- DELETE FROM organizations WHERE id IN ('', '', ''); + + +### +# Quick Setup Script (OSS Mode - Email OTP Only) +### + +-- For OSS mode testing, verify migrations: +-- SELECT EXISTS ( +-- SELECT FROM information_schema.tables +-- WHERE table_name = 'user_identities' +-- ) as user_identities_exists; + +-- SELECT EXISTS ( +-- SELECT FROM information_schema.columns +-- WHERE table_name = 'organizations' AND column_name = 'slug' +-- ) as org_slug_exists; + +-- SELECT EXISTS ( +-- SELECT FROM information_schema.columns +-- WHERE table_name = 'organizations' AND column_name = 'flags' +-- ) as org_flags_exists; + +-- Verify OSS has exactly 1 collaborative organization +-- SELECT +-- COUNT(*) as org_count, +-- flags->>'is_personal' as is_personal +-- FROM organizations +-- GROUP BY flags->>'is_personal'; +-- Expected: 1 row with is_personal=false (or null), count=1 + +-- Verify no personal organizations exist in OSS +-- SELECT COUNT(*) as personal_org_count +-- FROM organizations +-- WHERE flags->>'is_personal' = 'true'; +-- Expected: 0 diff --git a/api/ee/tests/manual/auth/00-setup-verification.md b/api/ee/tests/manual/auth/00-setup-verification.md new file mode 100644 index 0000000000..2bae988343 --- /dev/null +++ b/api/ee/tests/manual/auth/00-setup-verification.md @@ -0,0 +1,576 @@ +# Database Setup & Verification + +SQL commands to set up test data and verify schema. + +Run these in psql or your database client BEFORE running other tests. + +--- + +## 1. Verify Migrations Applied + +Check user_identities table exists: + +```sql +\d user_identities +``` + +**Expected columns:** +- `id` (uuid) +- `user_id` (uuid, FK to users.id) +- `method` (text) +- `subject` (text) +- `domain` (text, nullable) +- `created_at` (timestamp) +- `updated_at` (timestamp) +- `updated_by_id` (uuid, nullable) + +**Expected constraints:** +- UNIQUE (method, subject) +- INDEX on (user_id, method) +- INDEX on (domain) + +--- + +## 2. Verify Organization Schema + +Check organizations table has updated schema: + +```sql +\d organizations +``` + +**Expected columns:** +- `slug` (text, unique, nullable) +- `flags` (jsonb, nullable) - contains `is_personal`, `is_demo`, auth policy flags +- `tags` (jsonb, nullable) +- `meta` (jsonb, nullable) +- `owner_id` (uuid, FK to users.id, NOT NULL) +- `created_by_id` (uuid, FK to users.id, NOT NULL) +- `updated_by_id` (uuid, nullable) +- `deleted_by_id` (uuid, nullable) +- `created_at` (timestamp, NOT NULL) +- `updated_at` (timestamp, nullable) +- `deleted_at` (timestamp, nullable) + +**Expected indexes:** +- Unique index on `slug` +- GIN index on `flags` + +--- + +## 3. Setup Test Collaborative Organization (EE Mode) + +Create test collaborative organization with slug: + +```sql +INSERT INTO organizations ( + id, + name, + slug, + description, + flags, + owner_id, + created_by_id, + created_at +) +VALUES ( + gen_random_uuid(), + 'ACME Corporation', + 'acme', + 'Test collaborative organization for SSO', + '{"is_personal": false, "allow_email": true, "allow_social": true, "allow_sso": true}'::jsonb, + '', -- Replace with actual user ID who will own this org + '', + now() +) +RETURNING id; +``` + +Save the returned ID as `@testOrgId`. 
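+
+The slug used above must pass the slug validation added in this PR (lowercase letters and hyphens only, max 64 characters). A quick local check; the helper name is hypothetical, not part of the codebase:
+
+```python
+import re
+
+
+def is_valid_org_slug(slug: str) -> bool:  # hypothetical helper
+    # Mirrors the API rule: lowercase letters and hyphens, at most 64 chars.
+    return len(slug) <= 64 and re.match(r"^[a-z-]+$", slug) is not None
+
+
+assert is_valid_org_slug("acme")
+assert not is_valid_org_slug("ACME Corp")  # uppercase and spaces are rejected
+```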
+ +--- + +## 4. Setup Test Personal Organization (EE Mode) + +Create test personal organization: + +```sql +INSERT INTO organizations ( + id, + name, + slug, + description, + flags, + owner_id, + created_by_id, + created_at +) +VALUES ( + gen_random_uuid(), + 'Personal', + NULL, -- Personal orgs have no slug + NULL, + '{"is_personal": true}'::jsonb, + '', -- Replace with actual user ID + '', + now() +) +RETURNING id; +``` + +Save the returned ID as `@testPersonalOrgId`. + +Add user as member to their personal org: + +```sql +INSERT INTO organization_members ( + id, + user_id, + organization_id, + role, + created_at +) +VALUES ( + gen_random_uuid(), + '', + '', + 'owner', + now() +); +``` + +--- + +## 5. Setup Organization Policy Flags (EE Mode) + +Update organization flags to set authentication policies: + +```sql +-- Allow only SSO, enforce verified domains, allow auto-join +UPDATE organizations +SET flags = jsonb_set( + jsonb_set( + jsonb_set( + jsonb_set( + jsonb_set( + flags, + '{allow_email}', 'false' + ), + '{allow_social}', 'false' + ), + '{allow_sso}', 'true' + ), + '{auto_join}', 'true' + ), + '{domains_only}', 'true' +) +WHERE id = ''; +``` + +**Policy flags in `organizations.flags`:** +- `allow_email` (boolean, default: true) - Allow email authentication (OTP/password) +- `allow_social` (boolean, default: true) - Allow social authentication (Google, GitHub, etc.) +- `allow_sso` (boolean, default: true) - Allow SSO/OIDC authentication +- `auto_join` (boolean, default: false) - Allow users with verified domains to automatically join +- `domains_only` (boolean, default: false) - Only allow users with verified domain emails +- `allow_root` (boolean, default: true) - Allow organization owner to bypass auth restrictions + +--- + +## 6. Setup Verified Domain (EE Mode - Collaborative Org) + +Add verified domain for SSO on collaborative org: + +```sql +INSERT INTO organization_domains ( + id, + organization_id, + slug, + name, + description, + token, + flags, + created_at +) +VALUES ( + gen_random_uuid(), + '', -- Collaborative org ID + 'acme.com', + 'ACME Domain', + 'Primary domain for ACME Corporation', + NULL, -- Token only needed during verification + '{"is_verified": true}'::jsonb, + now() +) +RETURNING id; +``` + +Save as `@testDomainId`. + +**Note:** `domain` field renamed to `slug`, `verified` moved to `flags.is_verified`, `verification_token` renamed to `token`. + +--- + +## 7. Test Domain Verification Restrictions (EE Mode) + +Attempt to add domain to personal org (verification should fail): + +```sql +INSERT INTO organization_domains ( + id, + organization_id, + slug, + name, + token, + flags, + created_at +) +VALUES ( + gen_random_uuid(), + '', -- Personal org ID + 'personal-test.com', + 'Personal Test Domain', + 'test-token-123', + '{"is_verified": false}'::jsonb, + now() +) +RETURNING id; +``` + +Save as `@personalOrgDomainId`. + +**NOTE:** Attempting to verify this domain should fail with: +> "Personal organizations cannot verify domains" + +--- + +## 8. Test Domain Exclusivity (EE Mode) + +Create second collaborative org to test exclusivity: + +```sql +INSERT INTO organizations ( + id, + name, + slug, + description, + flags, + owner_id, + created_by_id, + created_at +) +VALUES ( + gen_random_uuid(), + 'Second Corp', + 'second', + 'Test domain exclusivity', + '{"is_personal": false}'::jsonb, + '', + '', + now() +) +RETURNING id; +``` + +Save as `@secondOrgId`. 
+ +Attempt to verify same domain as first org: + +```sql +INSERT INTO organization_domains ( + id, + organization_id, + slug, + name, + token, + flags, + created_at +) +VALUES ( + gen_random_uuid(), + '', + 'acme.com', -- Same domain as testOrgId + 'Conflicting ACME Domain', + 'conflict-token-456', + '{"is_verified": false}'::jsonb, + now() +) +RETURNING id; +``` + +Save as `@conflictingDomainId`. + +**NOTE:** Attempting to verify this domain should fail with: +> "Domain 'acme.com' is already verified by another organization" + +--- + +## 9. Setup OIDC Provider (EE Mode) + +Add OIDC provider configuration: + +```sql +INSERT INTO organization_providers ( + id, + organization_id, + slug, + name, + description, + settings, + flags, + created_at +) +VALUES ( + gen_random_uuid(), + '', + 'okta', + 'Okta SSO', + 'ACME Okta integration', + '{ + "issuer": "https://dev-12345.okta.com", + "client_id": "0oa...", + "client_secret": "xxx", + "scopes": ["openid", "profile", "email"], + "authorization_endpoint": "https://dev-12345.okta.com/oauth2/v1/authorize", + "token_endpoint": "https://dev-12345.okta.com/oauth2/v1/token", + "userinfo_endpoint": "https://dev-12345.okta.com/oauth2/v1/userinfo" + }'::jsonb, + '{"is_active": true}'::jsonb, + now() +) +RETURNING id; +``` + +Save as `@testProviderId`. + +**Note:** +- `config` renamed to `settings` +- `enabled` moved to `flags.is_active` +- `domain_id` removed (SSO provider can handle multiple domains) + +--- + +## 10. Create Test User + +Create test user: + +```sql +INSERT INTO users (id, uid, username, email, created_at) +VALUES ( + gen_random_uuid(), + 'st_user_123', -- SuperTokens user ID + 'Test User', + 'test@acme.com', + now() +) +RETURNING id; +``` + +Save as `@testUserId`. + +--- + +## 11. Add User to Organization (EE Mode) + +Create organization membership: + +```sql +INSERT INTO organization_members ( + id, + user_id, + organization_id, + role, + created_at +) +VALUES ( + gen_random_uuid(), + '', + '', + 'member', + now() +); +``` + +**Note:** Added `role` field (default: "member", can be "owner"). + +--- + +## 12. 
Verification Queries + +Check all test data created successfully: + +### Verify organizations (check flags) + +```sql +SELECT id, name, slug, flags +FROM organizations +ORDER BY flags->>'is_personal'; +``` + +### Verify collaborative organization + +```sql +SELECT id, name, slug, flags +FROM organizations +WHERE slug = 'acme'; +``` + +### Verify personal organization + +```sql +SELECT id, name, slug, flags +FROM organizations +WHERE flags->>'is_personal' = 'true'; +``` + +### Verify organization policy flags + +```sql +SELECT + id, + name, + flags->'allow_email' as allow_email, + flags->'allow_social' as allow_social, + flags->'allow_sso' as allow_sso, + flags->'auto_join' as auto_join, + flags->'domains_only' as domains_only, + flags->'allow_root' as allow_root +FROM organizations +WHERE id = ''; +``` + +### Verify domains + +```sql +SELECT + od.id, + od.slug as domain, + od.flags->>'is_verified' as verified, + o.name as org_name, + o.flags->>'is_personal' as is_personal +FROM organization_domains od +JOIN organizations o ON o.id = od.organization_id +ORDER BY o.flags->>'is_personal', od.slug; +``` + +### Verify provider + +```sql +SELECT + slug, + name, + flags->>'is_active' as enabled, + settings->>'issuer' as issuer +FROM organization_providers +WHERE organization_id = ''; +``` + +### Verify user + +```sql +SELECT id, email +FROM users +WHERE email = 'test@acme.com'; +``` + +### Verify membership + +```sql +SELECT + om.id, + u.email, + o.name, + om.role, + o.flags->>'is_personal' as is_personal +FROM organization_members om +JOIN users u ON u.id = om.user_id +JOIN organizations o ON o.id = om.organization_id +WHERE u.email = 'test@acme.com' +ORDER BY o.flags->>'is_personal'; +``` + +--- + +## 13. Cleanup (Run after testing) + +Clean up test data: + +```sql +DELETE FROM organization_members WHERE user_id = ''; +DELETE FROM organization_providers WHERE organization_id IN ('', ''); +DELETE FROM organization_domains WHERE organization_id IN ('', '', ''); +DELETE FROM user_identities WHERE user_id = ''; +DELETE FROM users WHERE id = ''; +DELETE FROM organizations WHERE id IN ('', '', ''); +``` + +**Note:** No need to delete from `organization_policies` table (removed). 
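+
+To rehearse the cleanup before making it permanent, the deletes can be wrapped in a transaction and rolled back. A sketch using the same placeholder convention as above:
+
+```sql
+BEGIN;
+
+-- Run the DELETE statements above in the listed order (members, providers,
+-- domains, identities, users, organizations) so FK references are removed first
+
+-- Inspect the effect before deciding
+SELECT COUNT(*) AS remaining_orgs FROM organizations;
+
+ROLLBACK;  -- switch to COMMIT once the counts look right
+```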
+ +--- + +## Quick Setup Script (OSS Mode - Email OTP Only) + +For OSS mode testing, verify migrations: + +```sql +-- Check user_identities table exists +SELECT EXISTS ( + SELECT FROM information_schema.tables + WHERE table_name = 'user_identities' +) as user_identities_exists; + +-- Check organizations.slug exists +SELECT EXISTS ( + SELECT FROM information_schema.columns + WHERE table_name = 'organizations' AND column_name = 'slug' +) as org_slug_exists; + +-- Check organizations.flags exists +SELECT EXISTS ( + SELECT FROM information_schema.columns + WHERE table_name = 'organizations' AND column_name = 'flags' +) as org_flags_exists; + +-- Verify OSS has exactly 1 collaborative organization +SELECT + COUNT(*) as org_count, + flags->>'is_personal' as is_personal +FROM organizations +GROUP BY flags->>'is_personal'; +-- Expected: 1 row with is_personal=false (or null), count=1 + +-- Verify no personal organizations exist in OSS +SELECT COUNT(*) as personal_org_count +FROM organizations +WHERE flags->>'is_personal' = 'true'; +-- Expected: 0 +``` + +--- + +## Schema Changes Summary + +### Organizations Table +- **Removed:** `type`, `owner` (string), `kind` +- **Added:** `owner_id` (UUID FK), `created_by_id`, `updated_by_id`, `deleted_by_id`, `deleted_at`, `flags`, `tags`, `meta` +- **Flags structure:** `{"is_personal": bool, "is_demo": bool, "allow_email": bool, "allow_social": bool, "allow_sso": bool, "auto_join": bool, "domains_only": bool, "allow_root": bool}` + +### Organization Domains Table +- **Renamed:** `domain` → `slug`, `verification_token` → `token` +- **Removed:** `verified` (boolean column) +- **Added:** `name`, `description`, `flags`, `tags`, `meta` +- **Flags structure:** `{"is_verified": bool}` +- **Lifecycle:** Changed from `LegacyLifecycle` to `Lifecycle` (added `deleted_at`, `created_by_id`, `deleted_by_id`) + +### Organization Providers Table +- **Renamed:** `config` → `settings` +- **Removed:** `enabled` (boolean), `domain_id` (FK) +- **Added:** `name`, `description`, `flags`, `tags`, `meta` +- **Flags structure:** `{"is_active": bool}` +- **Lifecycle:** Changed from `LegacyLifecycle` to `Lifecycle` + +### Organization Members Table +- **Added:** `role` (string, default: "member"), `created_at`, `updated_at`, `updated_by_id` (nullable) + +### Removed Tables +- **organization_policies** - moved to `organizations.flags` diff --git a/api/ee/tests/manual/auth/01-discovery.http b/api/ee/tests/manual/auth/01-discovery.http new file mode 100644 index 0000000000..33d8e54b24 --- /dev/null +++ b/api/ee/tests/manual/auth/01-discovery.http @@ -0,0 +1,105 @@ +### +# Auth Discovery Tests +# Tests the /auth/discover endpoint which determines available authentication methods +# +# Setup: +# 1. Start backend: cd vibes/api && uvicorn main:app --reload +# 2. Run migrations: alembic upgrade head +# 3. 
Update @apiUrl if needed +### + +@baseUrl = http://localhost +@apiUrl = {{baseUrl}}/api +@contentType = application/json + +### Test 1: New User Discovery (No Account) +# Expected: Returns available auth methods (email:[...], social:[...], oidc:[...]) +# Should show: exists=false, all globally enabled methods +POST {{apiUrl}}/auth/discover +Content-Type: {{contentType}} + +{ + "email": "newuser@example.com" +} + +### Test 2: Existing User Discovery (Email OTP) +# Prerequisites: User exists with email:otp identity +# Expected: Returns exists=true, methods based on their org policies +POST {{apiUrl}}/auth/discover +Content-Type: {{contentType}} + +{ + "email": "existing@example.com" +} + +### Test 3: SSO Required Organization Member +# Prerequisites: +# - User exists and is member of org with sso_only policy +# - Organization has verified domain +# - OIDC provider configured +# Expected: sso_required_by_some=true, shows SSO providers +POST {{apiUrl}}/auth/discover +Content-Type: {{contentType}} + +{ + "email": "user@acme.com" +} + +### Test 4: Multi-Org User Discovery +# Prerequisites: User is member of multiple orgs with different policies +# Expected: Returns union of all allowed methods across all orgs +POST {{apiUrl}}/auth/discover +Content-Type: {{contentType}} + +{ + "email": "multiorg@example.com" +} + +### Test 5: Domain with SSO Provider +# Prerequisites: +# - Domain "verified.com" is verified in organization_domains +# - SSO provider linked to this domain +# Expected: Returns SSO provider information +POST {{apiUrl}}/auth/discover +Content-Type: {{contentType}} + +{ + "email": "user@verified.com" +} + +### Test 6: Invalid Email Format +# Expected: 400 Bad Request or validation error +POST {{apiUrl}}/auth/discover +Content-Type: {{contentType}} + +{ + "email": "notanemail" +} + +### Test 7: Empty Email +# Expected: 422 Validation error +POST {{apiUrl}}/auth/discover +Content-Type: {{contentType}} + +{ + "email": "" +} + +### +# Expected Response Structure: +# { +# "exists": boolean, +# "methods": { +# "email:otp": boolean, +# "email:password": boolean, +# "social:google": boolean, +# "social:github": boolean, +# "sso": [ +# { +# "slug": string, +# "name": string, +# } +# ] +# } +# } +### diff --git a/api/ee/tests/manual/auth/02-oidc-authorize.http b/api/ee/tests/manual/auth/02-oidc-authorize.http new file mode 100644 index 0000000000..e8b6e35e6d --- /dev/null +++ b/api/ee/tests/manual/auth/02-oidc-authorize.http @@ -0,0 +1,99 @@ +### +# OIDC Authorization Tests (EE Only) +# Tests the OIDC authorization flow initiation +# +# Setup: +# 1. Ensure AGENTA_LICENSE=ee +# 2. Create organization with SSO provider: +# - Add organization_providers record with OIDC settings +# - Configure issuer, client_id, client_secret in settings +# - Set flags.is_active=true +# 3. 
Update variables below with actual UUIDs +### + +@baseUrl = http://localhost +@apiUrl = {{baseUrl}}/api +@contentType = application/json + +# REPLACE THESE WITH ACTUAL VALUES FROM YOUR DATABASE +@organizationId = 00000000-0000-0000-0000-000000000000 +@providerId = 00000000-0000-0000-0000-000000000000 + +### Test 1: Initiate OIDC Authorization (Valid Provider) +# Expected: 302 redirect to IdP authorization endpoint +# Should include state parameter and redirect_uri +GET {{apiUrl}}/auth/authorize/oidc?provider_id={{providerId}}&redirect=/ +Accept: application/json + +### Test 2: OIDC Authorization with Custom Redirect +# Expected: 302 redirect, redirect_uri includes custom path +GET {{apiUrl}}/auth/authorize/oidc?provider_id={{providerId}}&redirect=/dashboard +Accept: application/json + +### Test 3: Invalid Provider ID (Not Found) +# Expected: 404 Not Found +GET {{apiUrl}}/auth/authorize/oidc?provider_id=99999999-9999-9999-9999-999999999999&redirect=/ +Accept: application/json + +### Test 4: Disabled Provider (flags.is_active=false) +# Prerequisites: Provider exists but flags.is_active=false +# Expected: 403 Forbidden or 404 +GET {{apiUrl}}/auth/authorize/oidc?provider_id={{providerId}}&redirect=/ +Accept: application/json + +### Test 5: Missing Provider ID +# Expected: 422 Validation error +GET {{apiUrl}}/auth/authorize/oidc?redirect=/ +Accept: application/json + +### Test 6: OIDC in OSS Mode (Should Fail) +# Prerequisites: AGENTA_LICENSE=oss +# Expected: 404 Not Found with message "SSO/OIDC is only available in Enterprise Edition" +GET {{apiUrl}}/auth/authorize/oidc?provider_id={{providerId}}&redirect=/ +Accept: application/json + +### +# OIDC Flow Overview: +# 1. User clicks SSO provider button +# 2. Frontend calls GET /auth/authorize/oidc?provider_id=xxx +# 3. Backend generates state, stores in state_store +# 4. Backend redirects (302) to IdP authorization endpoint +# 5. User authenticates at IdP +# 6. IdP redirects back to /auth/callback with code + state +# 7. Backend exchanges code for tokens +# 8. Backend creates/links user_identity +# 9. Backend creates session with identities array +# 10. 
Backend redirects to application +### + +### +# Testing OIDC Provider Configuration: +# You can verify provider config is loaded correctly by checking: +# - Provider record exists in organization_providers table +# - settings JSONB contains: issuer, client_id, client_secret, scopes +# - Provider is active (flags.is_active=true) +# - Organization exists and has allow_sso=true in flags +### + +### +# Example Provider Configuration (organization_providers table): +# { +# "id": "uuid", +# "organization_id": "uuid", +# "slug": "okta", +# "name": "Okta SSO", +# "settings": { +# "issuer": "https://dev-12345.okta.com", +# "client_id": "0oa...", +# "client_secret": "xxx", +# "scopes": ["openid", "profile", "email"] +# }, +# "flags": { +# "is_active": true +# } +# } +# +# Note: +# - 'config' renamed to 'settings' +# - 'enabled' moved to 'flags.is_active' +### diff --git a/api/ee/tests/manual/auth/03-domain-verification.http b/api/ee/tests/manual/auth/03-domain-verification.http new file mode 100644 index 0000000000..76d091d3f5 --- /dev/null +++ b/api/ee/tests/manual/auth/03-domain-verification.http @@ -0,0 +1,291 @@ +### +# Domain Verification Testing +# Tests for organization type restrictions and domain exclusivity +# +# Prerequisites: +# - Run 00-setup-verification.http first +# - EE mode enabled (AGENTA_LICENSE=ee) +# - Test organizations created (collaborative + personal) +### + +@baseUrl = http://localhost +@apiUrl = {{baseUrl}}/api +@testOrgId = +@testPersonalOrgId = +@secondOrgId = +@personalOrgDomainId = +@conflictingDomainId = + + +### +# Test 1: Verify Domain on Collaborative Organization (Should Succeed) +### + +# This should succeed - collaborative orgs can verify domains +POST {{apiUrl}}/organizations/{{testOrgId}}/domains/verify +Content-Type: application/json + +{ + "domain_id": "" +} + +# Expected: 200 OK +# { +# "id": "...", +# "organization_id": "{{testOrgId}}", +# "slug": "acme.com", +# "flags": {"is_verified": true} +# } + + +### +# Test 2: Attempt to Verify Domain on Personal Organization (Should Fail) +### + +# This should fail - personal orgs cannot verify domains +POST {{apiUrl}}/organizations/{{testPersonalOrgId}}/domains/verify +Content-Type: application/json + +{ + "domain_id": "{{personalOrgDomainId}}" +} + +# Expected: 400 Bad Request +# { +# "error": "Personal organizations cannot verify domains. Domain verification is only available for collaborative organizations." +# } + + +### +# Test 3: Attempt to Verify Already-Verified Domain (Should Fail) +### + +# This should fail - domain already verified by another org +POST {{apiUrl}}/organizations/{{secondOrgId}}/domains/verify +Content-Type: application/json + +{ + "domain_id": "{{conflictingDomainId}}" +} + +# Expected: 400 Bad Request +# { +# "error": "Domain 'acme.com' is already verified by another organization. Each domain can only be verified by one organization at a time." +# } + + +### +# Test 4: Add Domain to Collaborative Organization (Should Succeed) +### + +POST {{apiUrl}}/organizations/{{testOrgId}}/domains +Content-Type: application/json + +{ + "slug": "newdomain.com", + "name": "New Domain" +} + +# Expected: 201 Created +# { +# "id": "...", +# "organization_id": "{{testOrgId}}", +# "slug": "newdomain.com", +# "flags": {"is_verified": false}, +# "token": "..." 
+# } + + +### +# Test 5: List Domains for Collaborative Organization +### + +GET {{apiUrl}}/organizations/{{testOrgId}}/domains + +# Expected: 200 OK +# [ +# { +# "id": "...", +# "slug": "acme.com", +# "flags": {"is_verified": true} +# }, +# { +# "id": "...", +# "slug": "newdomain.com", +# "flags": {"is_verified": false} +# } +# ] + + +### +# Test 6: List Domains for Personal Organization +### + +GET {{apiUrl}}/organizations/{{testPersonalOrgId}}/domains + +# Expected: 200 OK +# [ +# { +# "id": "...", +# "slug": "personal-test.com", +# "flags": {"is_verified": false} +# } +# ] +# NOTE: Personal org can have domains, but cannot verify them + + +### +# Test 7: Unverify Domain (Transfer Ownership) +### + +# First, unverify the domain from the first org +DELETE {{apiUrl}}/organizations/{{testOrgId}}/domains//verification + +# Expected: 200 OK +# { +# "id": "...", +# "flags": {"is_verified": false} +# } + +# Now verify it with the second org (should succeed now) +POST {{apiUrl}}/organizations/{{secondOrgId}}/domains/verify +Content-Type: application/json + +{ + "domain_id": "{{conflictingDomainId}}" +} + +# Expected: 200 OK +# { +# "flags": {"is_verified": true} +# } + + +### +# Test 8: Check Organization Type (via flags) +### + +GET {{apiUrl}}/organizations/{{testOrgId}} + +# Expected: 200 OK +# { +# "id": "{{testOrgId}}", +# "name": "ACME Corporation", +# "slug": "acme", +# "flags": {"is_personal": false, "allow_email": true, ...} +# } + +GET {{apiUrl}}/organizations/{{testPersonalOrgId}} + +# Expected: 200 OK +# { +# "id": "{{testPersonalOrgId}}", +# "name": "Personal", +# "slug": null, +# "flags": {"is_personal": true} +# } + + +### +# Test 9: Verify Auto-Join Flag with Verified Domain +### + +# Get organization to check flags +GET {{apiUrl}}/organizations/{{testOrgId}} + +# Expected: 200 OK +# { +# "id": "{{testOrgId}}", +# "flags": { +# "is_personal": false, +# "allow_email": false, +# "allow_social": false, +# "allow_sso": true, +# "domains_only": true, +# "auto_join": true, +# "allow_root": true +# } +# } + +# When a user authenticates with email from verified domain (e.g., user@acme.com), +# and auto_join=true, they should automatically be added to the organization as a member +# This is tested in the SSO flow tests + + +### +# Test 10: Update Auto-Join Flag +### + +PATCH {{apiUrl}}/organizations/{{testOrgId}} +Content-Type: application/json + +{ + "flags": { + "auto_join": false + } +} + +# Expected: 200 OK +# { +# "flags": {"auto_join": false, ...} +# } + +# Now users with verified domain emails will NOT be auto-added +# They must be explicitly invited + + +### +# Test 11: Test domains_only Enforcement +### + +# When domains_only=true, only users with verified domain emails can access the org + +# Step 1: Set domains_only flag +PATCH {{apiUrl}}/organizations/{{testOrgId}} +Content-Type: application/json + +{ + "flags": { + "domains_only": true + } +} + +# Expected: 200 OK + +# Step 2: User with non-verified domain tries to access +# Expected: 403 AUTH_DOMAIN_DENIED +# { +# "error": "AUTH_DOMAIN_DENIED", +# "message": "Your email domain 'gmail.com' is not allowed for this organization" +# } + +# Step 3: User with verified domain (acme.com) accesses +# Expected: 200 OK (access granted) + + +### +# Test 12: Invitation Validation with domains_only +### + +# When domains_only=true, invitations to non-verified domains should be blocked + +POST {{apiUrl}}/organizations/{{testOrgId}}/invitations +Content-Type: application/json + +{ + "email": "user@gmail.com" +} + +# Expected: 400 Bad 
Request +# { +# "error": "Cannot invite user@gmail.com: domain 'gmail.com' is not a verified domain for this organization" +# } + +POST {{apiUrl}}/organizations/{{testOrgId}}/invitations +Content-Type: application/json + +{ + "email": "user@acme.com" +} + +# Expected: 201 Created (invitation sent) diff --git a/api/ee/tests/manual/auth/03-identity-tracking.http b/api/ee/tests/manual/auth/03-identity-tracking.http new file mode 100644 index 0000000000..286129838d --- /dev/null +++ b/api/ee/tests/manual/auth/03-identity-tracking.http @@ -0,0 +1,225 @@ +### +# Identity Tracking & Session Payload Tests +# Tests that user_identities are created and sessions contain identities array +# +# These tests verify the SuperTokens override functions are working correctly +# +# Prerequisites: +# 1. SuperTokens Core running +# 2. Migrations applied +# 3. Backend started with SuperTokens configured +### + +@baseUrl = http://localhost +@apiUrl = {{baseUrl}}/api +@contentType = application/json + +### +# Test Flow 1: Email OTP Login → Check Identity Created +### + +### Step 1: Request OTP Code +# This triggers SuperTokens passwordless flow +# You'll need to complete this via SuperTokens UI or SDK +# After successful login, check database: + +# SQL to verify email:otp identity created: +# SELECT * FROM user_identities +# WHERE method = 'email:otp' +# ORDER BY created_at DESC +# LIMIT 5; + +# Expected: +# - id: uuid +# - user_id: matches SuperTokens user +# - method: 'email:otp' +# - subject: user's email address +# - domain: extracted from email (e.g., 'example.com') + + +### Step 2: Check Session Payload Contains Identities +# After successful OTP login, use SuperTokens session verification +# to check the access token payload + +# The session should contain: +# { +# "userId": "...", +# "identities": ["email:otp"], +# ...other claims +# } + + +### +# Test Flow 2: Social Login (Google) → Check Identity Created +### + +### Step 1: Complete Google OAuth Flow +# Navigate to: http://localhost/api/auth +# Click "Continue with Google" +# Complete Google auth +# Check database: + +# SQL to verify social:google identity: +# SELECT * FROM user_identities +# WHERE method = 'social:google' +# ORDER BY created_at DESC +# LIMIT 5; + +# Expected: +# - method: 'social:google' +# - subject: Google user ID (stable identifier) +# - domain: extracted from Google email + + +### Step 2: Check Session After Social Login +# Session should now contain: +# { +# "identities": ["email:otp", "social:google"] +# } +# (assuming user previously logged in with OTP) + + +### +# Test Flow 3: SSO Login (OIDC) → Check Identity Created +### + +### Step 1: Complete OIDC Flow (EE Only) +# Prerequisites: +# - Organization provider configured +# - User initiates SSO via provider_id + +# After successful SSO login, check: + +# SQL to verify sso identity: +# SELECT * FROM user_identities +# WHERE method LIKE 'sso:%' +# ORDER BY created_at DESC +# LIMIT 5; + +# Expected: +# - method: 'sso:acme:okta' (or sso:{org_id}:okta if slug not set) +# - subject: OIDC subject claim from IdP +# - domain: extracted from OIDC email + + +### Step 3: Check Multi-Method Session +# After logging in via all three methods, session should contain: +# { +# "identities": [ +# "email:otp", +# "social:google", +# "sso:acme:okta" +# ] +# } + + +### +# Verification Queries +### + +# Query 1: Check all identities for a specific user +# SELECT +# ui.method, +# ui.subject, +# ui.domain, +# ui.created_at +# FROM user_identities ui +# WHERE ui.user_id = '' +# ORDER BY ui.created_at; + 
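+
+# Query 1b (optional): confirm the UNIQUE (method, subject) constraint blocks
+# duplicate identities. Constraint name taken from the migration in this PR
+# (uq_user_identities_method_subject); assumes at least one identity row exists.
+# BEGIN;
+# INSERT INTO user_identities (id, user_id, method, subject)
+# SELECT gen_random_uuid(), user_id, method, subject
+# FROM user_identities LIMIT 1;
+# -- Expected: ERROR: duplicate key value violates unique constraint
+# --           "uq_user_identities_method_subject"
+# ROLLBACK;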
+ +# Query 2: Count identities by method +# SELECT +# method, +# COUNT(*) as count +# FROM user_identities +# GROUP BY method +# ORDER BY count DESC; + + +# Query 3: Find users with multiple identities +# SELECT +# ui.user_id, +# u.email, +# COUNT(*) as identity_count, +# array_agg(ui.method) as methods +# FROM user_identities ui +# JOIN users u ON u.id = ui.user_id +# GROUP BY ui.user_id, u.email +# HAVING COUNT(*) > 1 +# ORDER BY identity_count DESC; + + +# Query 4: Check identity created after OTP login +# SELECT +# ui.*, +# u.email as user_email +# FROM user_identities ui +# JOIN users u ON u.id = ui.user_id +# WHERE ui.method = 'email:otp' +# AND ui.created_at > now() - interval '5 minutes' +# ORDER BY ui.created_at DESC; + + +### +# Testing Session Payload (via SuperTokens API) +### + +# You can verify session payload using SuperTokens' session verification: +# 1. Log in to get session cookie +# 2. Make authenticated request to any protected endpoint +# 3. Backend should verify session and have access to identities array + +# Example protected endpoint (if you have one): +# GET {{apiUrl}}/me +# Cookie: sAccessToken=...; sRefreshToken=... + +# The endpoint handler can access session like: +# session = await verify_session(request) +# payload = session.get_access_token_payload() +# existing_identities = payload.get("existing_identities", []) + + +### +# Edge Cases to Test +### + +# 1. User logs in with email:otp twice +# → Should NOT create duplicate identity +# → Check UNIQUE constraint works + +# 2. User logs in with social:google, then email:otp +# → Should have 2 identities +# → Both should appear in session + +# 3. User with SSO identity logs in with email:otp +# → Should accumulate both +# → Session should reflect all methods + +# 4. Database query errors (simulate by removing permissions) +# → Should log error but not block authentication +# → Session should still be created with fallback + + +### +# Common Issues & Debugging +### + +# Issue: Identity not created after login +# Check: +# - SuperTokens override functions are registered +# - Database has user_identities table +# - User exists in users table +# - No errors in backend logs + +# Issue: Session doesn't contain existing_identities array +# Check: +# - Session override function is registered +# - user_context["existing_identities"] is set in sign_in_up/consume_code +# - SuperTokens session.init includes override config + +# Issue: Wrong method format (e.g., 'sso:undefined:okta') +# Check: +# - Organization has slug set +# - db_manager.get_organization_by_id returns org +# - Fallback to org_id works if slug is null diff --git a/api/ee/tests/manual/auth/04-policy-enforcement.http b/api/ee/tests/manual/auth/04-policy-enforcement.http new file mode 100644 index 0000000000..6c187bf570 --- /dev/null +++ b/api/ee/tests/manual/auth/04-policy-enforcement.http @@ -0,0 +1,329 @@ +### +# Organization Flag Enforcement Tests (EE Only) +# Tests the auth flag enforcement and auth upgrade requirements +# +# Prerequisites: +# 1. AGENTA_LICENSE=ee +# 2. Organizations set up with different flag configurations +# 3. 
Users with various authentication methods +### + +@baseUrl = http://localhost +@apiUrl = {{baseUrl}}/api +@contentType = application/json + +# REPLACE THESE WITH ACTUAL VALUES +@organizationId = 00000000-0000-0000-0000-000000000000 +@projectId = 00000000-0000-0000-0000-000000000000 + +### +# Scenario 1: User with email:otp tries to access SSO-only organization +### + +### Step 1: Login with Email OTP +# Complete OTP login flow first to get session with identities=["email:otp"] + +### Step 2: Try to Access SSO-Required Organization +# Expected: 403 Forbidden with AUTH_UPGRADE_REQUIRED +GET {{apiUrl}}/projects?organization_id={{organizationId}} +Accept: application/json +# Cookie: sAccessToken=...; sRefreshToken=... + +# Expected Response: +# { +# "error": "AUTH_UPGRADE_REQUIRED", +# "message": "Additional authentication required", +# "required_methods": ["sso:*"], +# "current_identities": ["email:otp"] +# } + + +### +# Scenario 2: User completes SSO, then retries access +### + +### Step 1: Complete SSO Authentication +# User is redirected to SSO provider +# After successful SSO, session updated to: +# identities = ["email:otp", "sso:acme:okta"] + +### Step 2: Retry Organization Access +# Expected: 200 OK, access granted +GET {{apiUrl}}/projects?organization_id={{organizationId}} +Accept: application/json +# Cookie: sAccessToken=...; sRefreshToken=... + +# Expected: Successful response with project data + + +### +# Scenario 3: Organization allows multiple methods (via flags) +### + +### Setup: +# Organization flags: allow_email=true, allow_social=true, allow_sso=true + +### Test: Access with any valid method +# User with identities=["email:otp"] should be allowed +GET {{apiUrl}}/projects?organization_id={{organizationId}} +Accept: application/json + +# Expected: 200 OK + + +### +# Scenario 4: No organization_id in request +### + +### Test: Request without organization context +# Middleware should skip flag enforcement +GET {{apiUrl}}/health +Accept: application/json + +# Expected: 200 OK (no flag enforcement) + + +### +# Scenario 5: User not a member of organization +### + +### Setup: +# User exists but not in organization_members for this org + +### Test: Try to access organization +GET {{apiUrl}}/projects?organization_id={{organizationId}} +Accept: application/json + +# Expected: 403 Forbidden +# { +# "error": "NOT_A_MEMBER", +# "message": "You are not a member of this organization" +# } + + +### +# Scenario 6: Organization has default flags (all methods allowed) +### + +### Setup: +# Organization flags: allow_email=true (default), allow_social=true (default) + +### Test: Access should work with any auth method +GET {{apiUrl}}/projects?organization_id={{organizationId}} +Accept: application/json + +# Expected: 200 OK (no restrictions) + + +### +# Scenario 7: Owner Bypass (allow_root=true) +### + +### Setup: +# Organization flags: allow_email=false, allow_social=false, allow_sso=true, allow_root=true +# User is owner of organization + +### Test: Owner accesses with email:otp (normally blocked method) +GET {{apiUrl}}/projects?organization_id={{organizationId}} +Accept: application/json + +# Expected: 200 OK (owner bypasses all auth restrictions when allow_root=true) + + +### +# Scenario 8: domains_only Enforcement +### + +### Setup: +# Organization flags: domains_only=true +# Verified domain: acme.com +# User email: user@gmail.com + +### Test: User with non-verified domain tries to access +GET {{apiUrl}}/projects?organization_id={{organizationId}} +Accept: application/json + +# Expected: 403 
AUTH_DOMAIN_DENIED +# { +# "error": "AUTH_DOMAIN_DENIED", +# "message": "Your email domain 'gmail.com' is not allowed for this organization", +# "current_domain": "gmail.com", +# "allowed_domains": ["acme.com"] +# } + + +### +# Scenario 9: SSO Provider Disabled +### + +### Setup: +# User has session with sso:acme:okta identity +# Admin sets provider flags.is_active=false + +### Test: User tries to access SSO-only org +GET {{apiUrl}}/projects?organization_id={{organizationId}} +Accept: application/json + +# Expected: 403 AUTH_SSO_DENIED +# { +# "error": "AUTH_SSO_DENIED", +# "message": "SSO provider is disabled or no longer valid" +# } + + +### +# Testing Flag Configurations +### + +# SQL to set different flag configurations: + +# Config 1: SSO Only +# UPDATE organizations +# SET flags = jsonb_set(jsonb_set(jsonb_set(flags, +# '{allow_email}', 'false'), +# '{allow_social}', 'false'), +# '{allow_sso}', 'true') +# WHERE id = ''; + +# Config 2: Email or Social (no SSO) +# UPDATE organizations +# SET flags = jsonb_set(jsonb_set(jsonb_set(flags, +# '{allow_email}', 'true'), +# '{allow_social}', 'true'), +# '{allow_sso}', 'false') +# WHERE id = ''; + +# Config 3: Everything allowed (default) +# UPDATE organizations +# SET flags = jsonb_set(jsonb_set(jsonb_set(flags, +# '{allow_email}', 'true'), +# '{allow_social}', 'true'), +# '{allow_sso}', 'true') +# WHERE id = ''; + +# Config 4: SSO only with owner bypass +# UPDATE organizations +# SET flags = jsonb_set(jsonb_set(jsonb_set(jsonb_set(flags, +# '{allow_email}', 'false'), +# '{allow_social}', 'false'), +# '{allow_sso}', 'true'), +# '{allow_root}', 'true') +# WHERE id = ''; + + +### +# Verification Queries +### + +# Check user's current identities +# SELECT ui.method +# FROM user_identities ui +# WHERE ui.user_id = '' +# ORDER BY ui.created_at; + +# Check organization flags +# SELECT +# id, +# name, +# flags->'allow_email' as allow_email, +# flags->'allow_social' as allow_social, +# flags->'allow_sso' as allow_sso, +# flags->'allow_root' as allow_root, +# flags->'domains_only' as domains_only +# FROM organizations +# WHERE id = ''; + +# Check if user is organization member +# SELECT EXISTS ( +# SELECT 1 +# FROM organization_members +# WHERE user_id = '' +# AND organization_id = '' +# ) as is_member; + +# Check if user is owner +# SELECT owner_id = '' as is_owner +# FROM organizations +# WHERE id = ''; + + +### +# Middleware Bypass Routes (Should NOT Check Flags) +### + +### Auth routes (no flag check) +GET {{apiUrl}}/auth/discover +Accept: application/json + +### Health check (no flag check) +GET {{apiUrl}}/health +Accept: application/json + +### Public routes (no flag check) +GET {{apiUrl}}/public/status +Accept: application/json + + +### +# Edge Cases +### + +# 1. User session has no identities array +# → Should treat as empty, enforce flags + +# 2. All allow_* flags are false +# → System auto-enables allow_root to prevent lockout +# → Owner can still access + +# 3. User has multiple identities, only one matches +# → Should allow access (OR logic) + +# 4. Organization deleted but membership remains +# → Should handle gracefully + +# 5. 
Concurrent flag updates during request +# → Should use consistent flag snapshot + + +### +# Error Codes Summary +### + +# AUTH_UPGRADE_REQUIRED +# - Trigger: User's session identities don't match any allowed method +# - Response: 403 with required_methods list +# - User action: Complete additional authentication + +# AUTH_SSO_DENIED +# - Trigger: SSO provider is disabled (flags.is_active=false) +# - Response: 403 with message +# - User action: Contact admin or use different auth method + +# AUTH_DOMAIN_DENIED +# - Trigger: User's email domain not in verified domains list (when domains_only=true) +# - Response: 403 with allowed_domains list +# - User action: Use email from verified domain or contact admin + + +### +# Common Issues & Debugging +### + +# Issue: Flags not enforced +# Check: +# - Middleware is registered in FastAPI app +# - Request includes organization_id param +# - AGENTA_LICENSE=ee +# - organizations.flags contains auth flags + +# Issue: Wrong flags applied +# Check: +# - organization_id in request matches flags +# - No caching issues +# - Database query returns correct organization + +# Issue: Session identities not checked +# Check: +# - Session payload includes "identities" array +# - Middleware can verify session correctly +# - check_organization_access() function logic is correct diff --git a/api/ee/tests/manual/auth/05-slug-immutability.http b/api/ee/tests/manual/auth/05-slug-immutability.http new file mode 100644 index 0000000000..3fe4ca45b5 --- /dev/null +++ b/api/ee/tests/manual/auth/05-slug-immutability.http @@ -0,0 +1,192 @@ +### +# Organization Slug Immutability Tests +# Tests that organization slug cannot be changed once set +# +# This validates the business rule: +# - Slug can be null (for backward compatibility) +# - Slug can be set once (from null to a value) +# - Slug CANNOT be changed once set (immutable) +### + +@baseUrl = http://localhost +@apiUrl = {{baseUrl}}/api +@contentType = application/json + +# REPLACE WITH ACTUAL VALUES +@organizationId = 00000000-0000-0000-0000-000000000000 +@apikey = your_api_key_here +### +# Scenario 1: Set slug for organization without slug (Should Succeed) +### + +# Step 1: Verify organization has no slug +# SQL: SELECT id, name, slug FROM organizations WHERE id = ''; +# Expected: slug IS NULL + +# Step 2: Set slug for the first time +# This should SUCCEED +# Note: Replace with your actual organization update endpoint +PATCH {{apiUrl}}/organizations/{{organizationId}} +Content-Type: {{contentType}} +Authorization: ApiKey {{apikey}} + +{ + "slug": "some-slug" +} + +# Expected Response: 200 OK +# Slug should now be set to "some-slug" + + +### +# Scenario 2: Try to change existing slug (Should Fail) +### + +# Prerequisites: Organization already has slug="some-slug" + +# Step 1: Try to change slug to different value +# This should FAIL with ValueError +PATCH {{apiUrl}}/organizations/{{organizationId}} +Content-Type: {{contentType}} +Authorization: ApiKey {{apikey}} + +{ + "slug": "different-slug" +} + +# Expected Response: 400 Bad Request or 422 Validation Error +# { +# "detail": "Organization slug cannot be changed once set. 
Current slug: 'some-slug'"
+# }
+
+
+###
+# Scenario 3: Update slug to same value (Should Succeed)
+###
+
+# Prerequisites: Organization has slug="some-slug"
+
+# Step 1: Set slug to the same value
+# This should SUCCEED (idempotent operation)
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+  "slug": "some-slug"
+}
+
+# Expected Response: 200 OK
+# Slug remains "some-slug"
+
+###
+# Scenario 4: Update other fields without touching slug (Should Succeed)
+###
+
+# Prerequisites: Organization has slug="some-slug"
+
+# Step 1: Update organization name without changing slug
+# This should SUCCEED
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+  "name": "ACME Corporation Updated",
+  "description": "New description"
+}
+
+# Expected Response: 200 OK
+# Name and description updated, slug unchanged
+
+
+###
+# Scenario 5: Try to set slug to null/empty (Should Fail)
+###
+
+# Prerequisites: Organization has slug="some-slug"
+
+# Step 1: Try to clear the slug
+# This should FAIL (changing from value to null)
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+  "slug": null
+}
+
+# Expected Response: 400 Bad Request
+# Slug should remain "some-slug"
+
+
+###
+# Verification Queries
+###
+
+# Check slug value after operations
+# SQL: SELECT id, name, slug FROM organizations WHERE id = '';
+
+# Check slug is unique across organizations
+# SQL: SELECT slug, COUNT(*) FROM organizations WHERE slug IS NOT NULL GROUP BY slug HAVING COUNT(*) > 1;
+# Expected: No rows (all slugs unique)
+
+# Find organizations without slugs (legacy)
+# SQL: SELECT id, name, created_at FROM organizations WHERE slug IS NULL ORDER BY created_at;
+
+
+###
+# Edge Cases
+###
+
+# Edge Case 1: Slug with special characters
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+  "slug": "some-slug-2025!#$%"
+}
+
+###
+
+# Should fail if slug validation restricts slugs to hyphens and alphanumerics (depends on validation rules)
+
+# Edge Case 2: Very long slug
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+  "slug": "this-looooooooooooooooooooooooooooooooooooooooooooooooooooooong-slug"
+}
+
+###
+
+# Should fail if a maximum slug length is enforced (depends on the length constraint)
+
+# Edge Case 3: Duplicate slug (different organization)
+# Prerequisites: Organization A has slug="some-slug"
+# Try to set Organization B's slug to "some-slug"
+# Expected: Database constraint violation (unique constraint)
+
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+  "slug": "some-slug"
+}
+
+###
+# Test Summary
+###
+
+# ✅ Setting slug first time (null → value): ALLOWED
+# ❌ Changing slug (value → different value): BLOCKED
+# ✅ Updating slug (value → same value): ALLOWED
+# ❌ Clearing slug (value → null): BLOCKED
+# ✅ Updating other fields with slug set: ALLOWED
+# ❌ Special characters in slug: DEPENDS ON VALIDATION
+# ❌ Very long slug: DEPENDS ON LENGTH CONSTRAINT
+# ❌ Duplicate slugs across orgs: BLOCKED (DB constraint)
diff --git a/api/ee/tests/manual/auth/QUICK-START.md b/api/ee/tests/manual/auth/QUICK-START.md
new file mode 100644
index 0000000000..dd48a1ced6
--- /dev/null
+++ b/api/ee/tests/manual/auth/QUICK-START.md
@@ -0,0 +1,100 @@
+# 🚀 Quick Start - SSO/OIDC Testing
+
+Get started testing in 5 minutes!
+
+## 1️⃣ Start Services (1 minute)
+
+Start SuperTokens Core and the backend, and make sure migrations are applied (same setup as `01-discovery.http`):
+
+1. Start backend: `cd vibes/api && uvicorn main:app --reload`
+2. Run migrations: `alembic upgrade head`
+3. Verify SuperTokens Core is accessible
+
+## 2️⃣ Test Discovery (30 seconds)
+
+```bash
+curl -X POST http://localhost:8000/auth/discover \
+  -H "Content-Type: application/json" \
+  -d '{"email": "test@example.com"}'
+```
+
+**Expected Response:**
+```json
+{
+  "user_exists": false,
+  "methods": {
+    "email:otp": true,
+    "sso": {
+      "available": false,
+      "required_by_some_orgs": false,
+      "providers": []
+    }
+  }
+}
+```
+
+## 3️⃣ Test OTP Flow (2 minutes)
+
+1. Open browser: `http://localhost:8000/auth`
+2. Enter email and click "Send OTP"
+3. Check backend logs for the OTP code (dev mode)
+4. Enter the code and submit
+5. Verify in the database:
+
+```sql
+SELECT * FROM user_identities WHERE method = 'email:otp' ORDER BY created_at DESC LIMIT 1;
+```
+
+**Expected:** New row with your email as subject
+
+## 4️⃣ Verify Session (30 seconds)
+
+After login, check that the session cookie contains identities:
+
+```bash
+# Get session from browser dev tools
+# Cookie: sAccessToken=...
+
+# Make authenticated request
+curl http://localhost:8000/api/me \
+  -H "Cookie: sAccessToken="
+```
+
+The backend should verify the session and see `identities: ["email:otp"]`.
+
+---
+
+## ✅ Basic Test Complete!
+
+You've verified:
+- ✅ Migrations applied
+- ✅ Discovery endpoint works
+- ✅ Email OTP login functional
+- ✅ Identity tracking creates records
+- ✅ Session contains identities array
+
+## 🎯 Next Steps
+
+### For OSS Mode Testing:
+Continue with the `01-discovery.http` tests
+
+### For EE Mode Testing:
+1. Switch to EE mode: `export AGENTA_LICENSE=ee`
+2. Set up a test organization (use the SQL in `00-setup-verification.http`)
+3. Configure an SSO provider
+4. Test with `02-oidc-authorize.http`
+
+## 🐛 Something Not Working?
+
+### Discovery returns error
+- Check the backend is running on port 8000
+- Verify SuperTokens Core is accessible
+
+### OTP not sent
+- Check email provider configuration
+- Look for the OTP code in backend logs (dev mode)
+- Verify the SuperTokens Core connection
+
+### Identity not created
+- Check the `user_identities` table exists
+- Verify override functions are registered
+- Check backend logs for errors
+
+### Need Help?
+See `README.md` for the detailed troubleshooting guide.
diff --git a/api/ee/tests/manual/auth/README.md b/api/ee/tests/manual/auth/README.md
new file mode 100644
index 0000000000..c909b02c24
--- /dev/null
+++ b/api/ee/tests/manual/auth/README.md
@@ -0,0 +1,224 @@
+# SSO/OIDC Manual Testing Guide
+
+This directory contains `.http` files for manually testing the SSO/OIDC authentication implementation.
+
+## 🧪 Test Execution Order
+
+### Phase 1: Setup & Verification
+1. **`00-setup-verification.http`** - Run SQL setup commands first
+   - Create test organizations with flags
+   - Set up domains and providers (EE only)
+   - Create test users
+   - Verify schema
+
+### Phase 2: Discovery Testing
+2. **`01-discovery.http`** - Test auth method discovery
+   - Test new user discovery
+   - Test existing user discovery
+   - Test SSO-required scenarios
+   - Test multi-org users
+
+### Phase 3: Domain Verification Testing (EE Only)
+3. **`03-domain-verification.http`** - Test domain verification and governance
+   - Verify domains on collaborative organizations
+   - Prevent personal orgs from verifying domains
+   - Enforce domain exclusivity (one domain, one org)
+   - Test auto-join flag configuration
+   - Domain transfer scenarios
+
+### Phase 4: OIDC Flow Testing (EE Only)
+4.
**`02-oidc-authorize.http`** - Test OIDC initiation + - Valid provider authorization + - Invalid provider handling + - OSS mode blocking + +### Phase 5: Identity Tracking +5. **`03-identity-tracking.http`** - Verify identity creation + - Email OTP identity tracking + - Social login identity tracking + - SSO identity tracking + - Session payload verification + +### Phase 6: Flag Enforcement (EE Only) +6. **`04-policy-enforcement.http`** - Test access control + - SSO-only organization access (via flags) + - Multi-method flag combinations + - Auth upgrade requirements + - Membership validation + +### Phase 7: Slug Immutability +7. **`05-slug-immutability.http`** - Test slug constraints + - Setting slug first time (null → value) + - Preventing slug changes (immutability) + - Updating organization without changing slug + - Edge cases and validation + +## 🔧 Using the .http Files + +### Option 1: VS Code REST Client +1. Install "REST Client" extension by Huachao Mao +2. Open any `.http` file +3. Click "Send Request" above each test +4. View response in split pane + +### Option 2: IntelliJ HTTP Client +1. Open `.http` file in IntelliJ IDEA +2. Click ▶️ button next to each request +3. View response in tool window + +### Option 3: Manual with curl +```bash +# Discovery example +curl -X POST http://localhost:8000/auth/discover \ + -H "Content-Type: application/json" \ + -d '{"email": "test@example.com"}' +``` + +## 📊 Expected Test Results + +### OSS Mode Tests (Should Pass) +- ✅ Discovery returns `email:otp` and social methods +- ✅ Email OTP login creates `user_identity` record +- ✅ Social login creates `user_identity` record +- ✅ Session contains `identities` array +- ✅ Exactly 1 collaborative organization exists +- ✅ No personal organizations exist +- ✅ Organization has `flags.is_personal = false` +- ❌ SSO endpoints return 404 "EE only" +- ❌ Flag enforcement not active (EE only) +- ❌ Domain verification not available + +### EE Mode Tests (Should Pass) +- ✅ All OSS tests pass +- ✅ Organizations have `flags.is_personal` (true or false) +- ✅ Personal organizations cannot verify domains +- ✅ Domain exclusivity enforced (one domain per org) +- ✅ Auto-join flag (`flags.auto_join`) can be configured +- ✅ Discovery returns SSO providers for verified domains +- ✅ OIDC authorization redirects to IdP +- ✅ SSO login creates `user_identity` with `sso:*` method +- ✅ Flag-based access control blocks unauthorized methods +- ✅ Auth upgrade flow works + +## 🐛 Troubleshooting + +### Discovery Returns No Methods +**Check:** +- Backend environment variables configured +- SuperTokens Core is running +- Database migrations applied +- No errors in backend logs + +### Identities Not Created +**Check:** +- `user_identities` table exists +- SuperTokens overrides registered in config +- User exists in `users` table +- Database has write permissions + +### Flags Not Enforced +**Check:** +- `AGENTA_LICENSE=ee` +- Middleware registered in FastAPI app +- Request includes `organization_id` parameter +- `organizations.flags` JSONB contains policy flags + +### SSO Flow Fails +**Check:** +- OIDC provider configuration in `organization_providers` +- Provider `flags.is_active = true` +- Domain verified in `organization_domains` (`flags.is_verified = true`) +- Organization is collaborative (`flags.is_personal = false`) +- IdP credentials valid +- Callback URL configured at IdP + +### Domain Verification Fails +**Check:** +- Organization is collaborative (`flags.is_personal = false`) +- Domain not already verified by another organization 
+- `organization_domains` table exists and populated + +## 🔍 Debugging Tips + +### Inspect Database State +```sql +-- Check identities created +SELECT * FROM user_identities ORDER BY created_at DESC LIMIT 10; + +-- Check organization types (personal vs collaborative) +SELECT id, name, slug, flags->>'is_personal' as is_personal +FROM organizations +ORDER BY flags->>'is_personal'; + +-- Check organization flags (auth policy) +SELECT + id, + name, + flags->'is_personal' as is_personal, + flags->'allow_email' as allow_email, + flags->'allow_social' as allow_social, + flags->'allow_sso' as allow_sso, + flags->'domains_only' as domains_only, + flags->'auto_join' as auto_join, + flags->'allow_root' as allow_root +FROM organizations; + +-- Check domain verification +SELECT + od.slug as domain, + od.flags->>'is_verified' as verified, + o.name as org_name, + o.flags->>'is_personal' as is_personal +FROM organization_domains od +JOIN organizations o ON o.id = od.organization_id +ORDER BY od.flags->>'is_verified' DESC; + +-- Check SSO providers +SELECT + o.name, + o.flags->>'is_personal' as is_personal, + op.slug, + op.flags->>'is_active' as is_active, + op.settings->>'issuer' as issuer +FROM organizations o +JOIN organization_providers op ON op.organization_id = o.id; +``` + +### Check SuperTokens Core +```bash +# Verify SuperTokens is running +curl http://localhost:3567/hello + +# Check user list +curl http://localhost:3567/users?limit=10 +``` + +## ✅ Test Coverage + +These manual tests cover: +- ✅ Email OTP authentication (OSS + EE) +- ✅ Social authentication (OSS + EE) +- ✅ SSO/OIDC authentication (EE only) +- ✅ Identity tracking and accumulation +- ✅ Session payload with identities +- ✅ Organization membership queries +- ✅ Flag-based access control +- ✅ Auth method discovery +- ✅ Multi-organization support +- ✅ Auth upgrade requirements +- ✅ Organization slug immutability +- ✅ Slug validation and constraints +- ✅ Organization classification (personal vs collaborative via `flags.is_personal`) +- ✅ Domain verification on collaborative organizations +- ✅ Domain verification restrictions on personal organizations +- ✅ Domain exclusivity enforcement (one domain per org) +- ✅ Auto-join flag configuration +- ✅ Auto-join behavior with verified domains + +## Error Codes + +| Error Code | Trigger | HTTP Status | +|------------|---------|-------------| +| `AUTH_UPGRADE_REQUIRED` | Auth method not in allowed list | 403 | +| `AUTH_SSO_DENIED` | SSO provider disabled or inactive | 403 | +| `AUTH_DOMAIN_DENIED` | Email domain not in verified list | 403 | diff --git a/api/entrypoints/routers.py b/api/entrypoints/routers.py index e9c0ac03f6..9d12da4865 100644 --- a/api/entrypoints/routers.py +++ b/api/entrypoints/routers.py @@ -73,6 +73,7 @@ # Routers from oss.src.apis.fastapi.vault.router import VaultRouter +from oss.src.apis.fastapi.auth.router import auth_router from oss.src.apis.fastapi.otlp.router import OTLPRouter from oss.src.apis.fastapi.tracing.router import TracingRouter from oss.src.apis.fastapi.invocations.router import InvocationsRouter @@ -225,8 +226,8 @@ async def lifespan(*args, **kwargs): ) # Redis client and TracingWorker for publishing spans to Redis Streams -if env.REDIS_URI_DURABLE: - redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False) +if env.redis.uri_durable: + redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False) tracing_worker = TracingWorker( service=tracing_service, redis_client=redis_client, @@ -391,11 +392,20 @@ async def lifespan(*args, 
**kwargs): tags=["Observability"], ) +app.include_router( + router=auth_router, + prefix="/auth", + tags=["Auth"], +) + +## DEPRECATED app.include_router( router=tracing.router, prefix="/preview/tracing", tags=["Deprecated"], + include_in_schema=False, ) +## DEPRECATED app.include_router( router=tracing.router, diff --git a/api/entrypoints/worker_evaluations.py b/api/entrypoints/worker_evaluations.py index 6955bfc03e..12aec47860 100644 --- a/api/entrypoints/worker_evaluations.py +++ b/api/entrypoints/worker_evaluations.py @@ -103,8 +103,8 @@ ) # Redis client and TracingWorker for publishing spans to Redis Streams -if env.REDIS_URI_DURABLE: - redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False) +if env.redis.uri_durable: + redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False) tracing_worker = TracingWorker( service=tracing_service, redis_client=redis_client, diff --git a/api/oss/databases/postgres/migrations/core/data_migrations/evaluators.py b/api/oss/databases/postgres/migrations/core/data_migrations/evaluators.py index ac015ac6a2..a17c8e085d 100644 --- a/api/oss/databases/postgres/migrations/core/data_migrations/evaluators.py +++ b/api/oss/databases/postgres/migrations/core/data_migrations/evaluators.py @@ -39,8 +39,8 @@ ) # Redis client and TracingWorker for publishing spans to Redis Streams -if env.REDIS_URI_DURABLE: - redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False) +if env.redis.uri_durable: + redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False) tracing_worker = TracingWorker( service=tracing_service, redis_client=redis_client, diff --git a/api/oss/databases/postgres/migrations/core/data_migrations/projects.py b/api/oss/databases/postgres/migrations/core/data_migrations/projects.py index ad2f342fe6..0a97e71236 100644 --- a/api/oss/databases/postgres/migrations/core/data_migrations/projects.py +++ b/api/oss/databases/postgres/migrations/core/data_migrations/projects.py @@ -64,7 +64,7 @@ def check_for_multiple_default_projects(session: Session) -> Sequence[ProjectDB] def create_default_project(): - PROJECT_NAME = "Default Project" + PROJECT_NAME = "Default" engine = create_engine(env.postgres.uri_core) sync_session = sessionmaker(engine, expire_on_commit=False) diff --git a/api/oss/databases/postgres/migrations/core/data_migrations/secrets.py b/api/oss/databases/postgres/migrations/core/data_migrations/secrets.py index 4399873507..cf70e04fc9 100644 --- a/api/oss/databases/postgres/migrations/core/data_migrations/secrets.py +++ b/api/oss/databases/postgres/migrations/core/data_migrations/secrets.py @@ -2,11 +2,9 @@ import traceback import click -from sqlalchemy.future import select -from sqlalchemy import Connection, update, func +from sqlalchemy import Connection, MetaData, Table, func, select, update from oss.src.utils.env import env -from oss.src.dbs.postgres.secrets.dbes import SecretsDBE from oss.src.core.secrets.dtos import ( StandardProviderDTO, StandardProviderSettingsDTO, @@ -17,15 +15,22 @@ BATCH_SIZE = 500 +def _secrets_table(session: Connection) -> Table: + metadata = MetaData() + return Table("secrets", metadata, autoload_with=session) + + def rename_and_update_secrets_data_schema(session: Connection): try: TOTAL_MIGRATED = 0 + secrets_table = _secrets_table(session) + # Count total rows in secrets table - total_query = select(func.count()).select_from(SecretsDBE) + total_query = select(func.count()).select_from(secrets_table) result = session.execute(total_query).scalar() 
TOTAL_SECRETS = result or 0 - print(f"Total rows in {SecretsDBE.__tablename__}: {TOTAL_SECRETS}") + print(f"Total rows in secrets: {TOTAL_SECRETS}") encryption_key = env.agenta.crypt_key if not encryption_key: @@ -37,31 +42,36 @@ def rename_and_update_secrets_data_schema(session: Connection): while True: with set_data_encryption_key(data_encryption_key=encryption_key): - # Fetch a batch of records using keyset pagination (ID-based) - stmt = select(SecretsDBE).order_by(SecretsDBE.id).limit(BATCH_SIZE) + data_expr = func.pgp_sym_decrypt( + secrets_table.c.data, encryption_key + ).label("data") + stmt = ( + select(secrets_table.c.id, data_expr) + .order_by(secrets_table.c.id) + .limit(BATCH_SIZE) + ) if last_processed_id: - stmt = stmt.where(SecretsDBE.id > last_processed_id) + stmt = stmt.where(secrets_table.c.id > last_processed_id) - secrets_dbes = session.execute(stmt).fetchall() - if not secrets_dbes: - break # No more records to process + secrets_rows = session.execute(stmt).fetchall() + if not secrets_rows: + break - actual_batch_size = len(secrets_dbes) + actual_batch_size = len(secrets_rows) if actual_batch_size == 0: break - # Update the schema structure of data for each record in the batch - for secret_dbe in secrets_dbes: - last_processed_id = secret_dbe.id # Update checkpoint + for secret_row in secrets_rows: + secret_id = secret_row.id + last_processed_id = secret_id - # Load and validate JSON - secret_json_data = json.loads(secret_dbe.data) + secret_json_data = json.loads(secret_row.data) if ( "provider" not in secret_json_data and "key" not in secret_json_data ): raise ValueError( - f"Invalid secret data format for ID {secret_dbe.id}. Data format: {secret_json_data}" + f"Invalid secret data format for ID {secret_id}. Data format: {secret_json_data}" ) secret_data_dto = StandardProviderDTO( @@ -72,9 +82,14 @@ def rename_and_update_secrets_data_schema(session: Connection): ) update_statement = ( - update(SecretsDBE) - .where(SecretsDBE.id == secret_dbe.id) - .values(data=secret_data_dto.model_dump_json()) + update(secrets_table) + .where(secrets_table.c.id == secret_id) + .values( + data=func.pgp_sym_encrypt( + secret_data_dto.model_dump_json(), + encryption_key, + ) + ) ) session.execute(update_statement) @@ -83,7 +98,7 @@ def rename_and_update_secrets_data_schema(session: Connection): click.echo( click.style( - f"Processed {len(secrets_dbes)} records in this batch. " + f"Processed {len(secrets_rows)} records in this batch. " f"Total migrated: {TOTAL_MIGRATED}. 
Remaining: {remaining_secrets}", fg="yellow", ) @@ -108,10 +123,12 @@ def revert_rename_and_update_secrets_data_schema(session: Connection): try: TOTAL_MIGRATED = 0 + secrets_table = _secrets_table(session) + # Count total rows in secrets table - total_query = select(func.count()).select_from(SecretsDBE) + total_query = select(func.count()).select_from(secrets_table) TOTAL_SECRETS = session.execute(total_query).scalar() or 0 - print(f"Total rows in {SecretsDBE.__tablename__}: {TOTAL_SECRETS}") + print(f"Total rows in secrets: {TOTAL_SECRETS}") encryption_key = env.agenta.crypt_key if not encryption_key: @@ -123,47 +140,53 @@ def revert_rename_and_update_secrets_data_schema(session: Connection): while True: with set_data_encryption_key(data_encryption_key=encryption_key): - # Fetch a batch of records using keyset pagination - stmt = select(SecretsDBE).order_by(SecretsDBE.id).limit(BATCH_SIZE) + data_expr = func.pgp_sym_decrypt( + secrets_table.c.data, encryption_key + ).label("data") + stmt = ( + select(secrets_table.c.id, data_expr) + .order_by(secrets_table.c.id) + .limit(BATCH_SIZE) + ) if last_processed_id: - stmt = stmt.where(SecretsDBE.id > last_processed_id) + stmt = stmt.where(secrets_table.c.id > last_processed_id) - secrets_dbes = session.execute(stmt).fetchall() - if not secrets_dbes: - break # No more records to process + secrets_rows = session.execute(stmt).fetchall() + if not secrets_rows: + break - for secret_dbe in secrets_dbes: - last_processed_id = secret_dbe.id # Update checkpoint + for secret_row in secrets_rows: + secret_id = secret_row.id + last_processed_id = secret_id - # Load and validate JSON - secret_json_data = json.loads(secret_dbe.data) + secret_json_data = json.loads(secret_row.data) if ( "kind" not in secret_json_data and "provider" not in secret_json_data ): - raise ValueError( - f"Invalid secret format for ID {secret_dbe.id}" - ) + raise ValueError(f"Invalid secret format for ID {secret_id}") - # Convert back to old schema old_format_data = { "provider": secret_json_data["kind"], "key": secret_json_data["provider"]["key"], } - # Update record with encryption session.execute( - update(SecretsDBE) - .where(SecretsDBE.id == secret_dbe.id) - .values(data=json.dumps(old_format_data)) + update(secrets_table) + .where(secrets_table.c.id == secret_id) + .values( + data=func.pgp_sym_encrypt( + json.dumps(old_format_data), encryption_key + ) + ) ) - TOTAL_MIGRATED += len(secrets_dbes) + TOTAL_MIGRATED += len(secrets_rows) remaining_secrets = TOTAL_SECRETS - TOTAL_MIGRATED click.echo( click.style( - f"Processed {len(secrets_dbes)} records in this batch. " + f"Processed {len(secrets_rows)} records in this batch. " f"Total reverted: {TOTAL_MIGRATED}. Remaining: {remaining_secrets}", fg="yellow", ) diff --git a/api/oss/databases/postgres/migrations/core/utils.py b/api/oss/databases/postgres/migrations/core/utils.py index c5ce9cd838..d47ccf7f8d 100644 --- a/api/oss/databases/postgres/migrations/core/utils.py +++ b/api/oss/databases/postgres/migrations/core/utils.py @@ -117,7 +117,9 @@ async def get_pending_migration_head(): def run_alembic_migration(): """ - Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users. + Applies migration for first-time users and also checks the environment variable + "ALEMBIC_AUTO_MIGRATIONS" (legacy: "AGENTA_AUTO_MIGRATIONS") to determine whether + to apply migrations for returning users. 
""" try: diff --git a/api/oss/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py b/api/oss/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py new file mode 100644 index 0000000000..a22123f1e7 --- /dev/null +++ b/api/oss/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py @@ -0,0 +1,51 @@ +"""add slug to organizations + +Revision ID: 12d23a8f7dde +Revises: 59b85eb7516c +Create Date: 2025-12-25 00:00:00.000000+00:00 + +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision: str = "12d23a8f7dde" +down_revision: Union[str, None] = "59b85eb7516c" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Add slug column to organizations table + op.add_column( + "organizations", + sa.Column( + "slug", + sa.String(), + nullable=True, + ), + ) + + # Add unique constraint on slug + op.create_unique_constraint( + "uq_organizations_slug", + "organizations", + ["slug"], + ) + + # Add index for faster lookups + op.create_index( + "ix_organizations_slug", + "organizations", + ["slug"], + ) + + +def downgrade() -> None: + # Drop in reverse order + op.drop_index("ix_organizations_slug", table_name="organizations") + op.drop_constraint("uq_organizations_slug", "organizations", type_="unique") + op.drop_column("organizations", "slug") diff --git a/api/oss/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py b/api/oss/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py new file mode 100644 index 0000000000..f1282a85c0 --- /dev/null +++ b/api/oss/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py @@ -0,0 +1,132 @@ +"""add sso oidc tables + +Revision ID: 59b85eb7516c +Revises: 80910d2fa9a4 +Create Date: 2025-12-10 08:53:56.000000+00:00 + +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision: str = "59b85eb7516c" +down_revision: Union[str, None] = "80910d2fa9a4" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # 1. 
user_identities table + op.create_table( + "user_identities", + sa.Column( + "id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "user_id", + sa.UUID(), + nullable=False, + ), + sa.Column( + "method", + sa.String(), + nullable=False, + ), + sa.Column( + "subject", + sa.String(), + nullable=False, + ), + sa.Column( + "domain", + sa.String(), + nullable=True, + ), + sa.Column( + "created_at", + sa.TIMESTAMP(timezone=True), + server_default=sa.text("CURRENT_TIMESTAMP"), + nullable=False, + ), + sa.Column( + "updated_at", + sa.TIMESTAMP(timezone=True), + nullable=True, + ), + sa.Column( + "deleted_at", + sa.TIMESTAMP(timezone=True), + nullable=True, + ), + sa.Column( + "created_by_id", + sa.UUID(), + nullable=True, + ), + sa.Column( + "updated_by_id", + sa.UUID(), + nullable=True, + ), + sa.Column( + "deleted_by_id", + sa.UUID(), + nullable=True, + ), + sa.PrimaryKeyConstraint("id"), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.id"], + ondelete="CASCADE", + ), + sa.UniqueConstraint( + "method", + "subject", + name="uq_user_identities_method_subject", + ), + sa.Index( + "ix_user_identities_user_method", + "user_id", + "method", + ), + sa.Index( + "ix_user_identities_domain", + "domain", + ), + ) + + # EE-only tables (organization_policies, organization_domains, organization_providers, organization_invitations) + # are defined in the EE migration version of this file + + # 2. Add is_active to users table + op.add_column( + "users", + sa.Column( + "is_active", + sa.Boolean(), + nullable=False, + server_default="true", + ), + ) + + +def downgrade() -> None: + # Drop in reverse order + op.drop_column("users", "is_active") + + # EE-only table drops are in the EE migration version of this file + + op.drop_index( + "ix_user_identities_domain", + table_name="user_identities", + ) + op.drop_index( + "ix_user_identities_user_method", + table_name="user_identities", + ) + op.drop_table("user_identities") diff --git a/api/oss/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py b/api/oss/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py new file mode 100644 index 0000000000..1905a6a320 --- /dev/null +++ b/api/oss/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py @@ -0,0 +1,335 @@ +"""clean up organizations + +Revision ID: a9f3e8b7c5d1 +Revises: 12d23a8f7dde +Create Date: 2025-12-26 00:00:00.000000 + +""" + +from typing import Sequence, Union +from alembic import op +import sqlalchemy as sa +from sqlalchemy import text +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision: str = "a9f3e8b7c5d1" +down_revision: Union[str, None] = "12d23a8f7dde" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """ + Clean up organizations table and introduce new schema. 
+ + Changes: + - Add flags (JSONB, nullable) with is_personal and is_demo fields + - Migrate type='view-only' to flags.is_demo=true + - Set is_personal=false for the single organization + - Drop type column + - Convert owner (String) to owner_id (UUID, NOT NULL) + - Add created_by_id (UUID, NOT NULL) + - Ensure created_at is NOT NULL, remove default from updated_at + - Add updated_by_id (UUID, nullable) + - Add deleted_at (DateTime, nullable) + - Add deleted_by_id (UUID, nullable) + - Drop user_organizations table (replaced by organization_members) + - Drop invitations table (obsolete) + + OSS Mode: + - Must have exactly 1 organization (fail-fast if not) + - Set is_personal=false (no personal organizations in OSS) + """ + conn = op.get_bind() + + # OSS: Must have exactly 1 organization + org_count = conn.execute(text("SELECT COUNT(*) FROM organizations")).scalar() + + if org_count == 0: + raise ValueError( + "OSS mode: No organizations found. Cannot proceed with migration." + ) + elif org_count > 1: + raise ValueError( + f"OSS mode: Found {org_count} organizations. OSS supports exactly 1 collaborative organization. " + "Please consolidate organizations before migrating." + ) + + # Step 1: Add JSONB columns (flags, tags, meta - all nullable) + op.add_column( + "organizations", + sa.Column( + "flags", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + ) + op.add_column( + "organizations", + sa.Column( + "tags", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + ) + op.add_column( + "organizations", + sa.Column( + "meta", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + ) + + # Step 2: Add new UUID columns (all nullable initially for migration) + op.add_column( + "organizations", + sa.Column("owner_id", postgresql.UUID(as_uuid=True), nullable=True), + ) + op.add_column( + "organizations", + sa.Column("created_by_id", postgresql.UUID(as_uuid=True), nullable=True), + ) + op.add_column( + "organizations", + sa.Column("updated_by_id", postgresql.UUID(as_uuid=True), nullable=True), + ) + op.add_column( + "organizations", + sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True), + ) + op.add_column( + "organizations", + sa.Column("deleted_by_id", postgresql.UUID(as_uuid=True), nullable=True), + ) + + # Step 3: Migrate type='view-only' to is_demo=true, set is_personal=false + conn.execute( + text(""" + UPDATE organizations + SET flags = jsonb_build_object( + 'is_demo', CASE WHEN type = 'view-only' THEN true ELSE false END, + 'is_personal', false + ) + WHERE flags IS NULL OR flags = '{}'::jsonb + """) + ) + + # Step 4: Migrate owner (String) to owner_id (UUID) + # Set owner_id = owner::uuid for existing org + conn.execute( + text(""" + UPDATE organizations + SET owner_id = owner::uuid + WHERE owner IS NOT NULL + """) + ) + + # Step 5: Set created_by_id = owner_id for existing org + conn.execute( + text(""" + UPDATE organizations + SET created_by_id = owner_id + WHERE owner_id IS NOT NULL + """) + ) + + # Step 6: Set updated_by_id = owner_id for existing org + conn.execute( + text(""" + UPDATE organizations + SET updated_by_id = owner_id + WHERE owner_id IS NOT NULL + """) + ) + + # Step 7: Ensure created_at has a value for all existing records + conn.execute( + text(""" + UPDATE organizations + SET created_at = COALESCE(created_at, NOW()) + WHERE created_at IS NULL + """) + ) + + # Step 8: Make owner_id, created_by_id, and created_at NOT NULL; remove updated_at default + op.alter_column("organizations", "owner_id", nullable=False) + 
op.alter_column("organizations", "created_by_id", nullable=False) + op.alter_column("organizations", "created_at", nullable=False) + op.alter_column("organizations", "updated_at", server_default=None) + + # Step 9: Add foreign key constraints + op.create_foreign_key( + "fk_organizations_owner_id_users", + "organizations", + "users", + ["owner_id"], + ["id"], + ondelete="RESTRICT", + ) + op.create_foreign_key( + "fk_organizations_created_by_id_users", + "organizations", + "users", + ["created_by_id"], + ["id"], + ondelete="RESTRICT", + ) + op.create_foreign_key( + "fk_organizations_updated_by_id_users", + "organizations", + "users", + ["updated_by_id"], + ["id"], + ondelete="SET NULL", + ) + op.create_foreign_key( + "fk_organizations_deleted_by_id_users", + "organizations", + "users", + ["deleted_by_id"], + ["id"], + ondelete="SET NULL", + ) + + # Step 9b: Ensure workspaces cascade on organization delete + try: + op.drop_constraint( + "workspaces_organization_id_fkey", + "workspaces", + type_="foreignkey", + ) + except Exception: + pass # Constraint might not exist yet + op.create_foreign_key( + "workspaces_organization_id_fkey", + "workspaces", + "organizations", + ["organization_id"], + ["id"], + ondelete="CASCADE", + ) + + # Step 9c: Ensure workspace_members cascade on workspace delete + try: + op.drop_constraint( + "workspace_members_workspace_id_fkey", + "workspace_members", + type_="foreignkey", + ) + except Exception: + pass # Constraint might not exist yet + op.create_foreign_key( + "workspace_members_workspace_id_fkey", + "workspace_members", + "workspaces", + ["workspace_id"], + ["id"], + ondelete="CASCADE", + ) + + # Step 9d: Ensure projects cascade on organization delete + try: + op.drop_constraint( + "projects_organization_id_fkey", + "projects", + type_="foreignkey", + ) + except Exception: + pass # Constraint might not exist yet + op.create_foreign_key( + "projects_organization_id_fkey", + "projects", + "organizations", + ["organization_id"], + ["id"], + ondelete="CASCADE", + ) + + # Note: Other tables (testsets, evaluations, scenarios, etc.) are linked to + # organizations via projects, so they will cascade delete through projects. + # They should keep SET NULL on organization_id for direct references. 
+ + # Step 10: Drop type and owner columns + op.drop_column("organizations", "type") + op.drop_column("organizations", "owner") + + # Step 11: Drop obsolete tables + conn.execute(text("DROP TABLE IF EXISTS user_organizations CASCADE")) + conn.execute(text("DROP TABLE IF EXISTS invitations CASCADE")) + + +def downgrade() -> None: + """Restore organizations type and owner columns and revert schema changes.""" + conn = op.get_bind() + + # Drop foreign key constraints + op.drop_constraint( + "fk_organizations_deleted_by_id_users", "organizations", type_="foreignkey" + ) + op.drop_constraint( + "fk_organizations_updated_by_id_users", "organizations", type_="foreignkey" + ) + op.drop_constraint( + "fk_organizations_created_by_id_users", "organizations", type_="foreignkey" + ) + op.drop_constraint( + "fk_organizations_owner_id_users", "organizations", type_="foreignkey" + ) + + # Recreate type column + op.add_column("organizations", sa.Column("type", sa.String(), nullable=True)) + + # Migrate flags back to type + conn.execute( + text(""" + UPDATE organizations + SET type = CASE + WHEN flags->>'is_demo' = 'true' THEN 'view-only' + ELSE 'default' + END + """) + ) + + op.alter_column("organizations", "type", nullable=False) + + # Recreate owner column + op.add_column("organizations", sa.Column("owner", sa.String(), nullable=True)) + + # Migrate owner_id back to owner (UUID to String) + conn.execute( + text(""" + UPDATE organizations + SET owner = owner_id::text + WHERE owner_id IS NOT NULL + """) + ) + + # Restore updated_at default + conn.execute( + text(""" + UPDATE organizations + SET updated_at = COALESCE(updated_at, NOW()) + WHERE updated_at IS NULL + """) + ) + op.alter_column( + "organizations", + "updated_at", + server_default=sa.text("NOW()"), + nullable=False, + ) + + # Drop new columns + op.drop_column("organizations", "deleted_by_id") + op.drop_column("organizations", "deleted_at") + op.drop_column("organizations", "updated_by_id") + op.drop_column("organizations", "created_by_id") + op.drop_column("organizations", "owner_id") + op.drop_column("organizations", "meta") + op.drop_column("organizations", "tags") + op.drop_column("organizations", "flags") + + # Note: We don't recreate user_organizations and invitations tables + # as they contain no data at this point diff --git a/api/oss/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py b/api/oss/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py new file mode 100644 index 0000000000..d62826b2f5 --- /dev/null +++ b/api/oss/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py @@ -0,0 +1,48 @@ +"""add organization scope to secrets + +Revision ID: c3b2a1d4e5f6 +Revises: a9f3e8b7c5d1 +Create Date: 2025-01-10 00:00:00.000000 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + + +# revision identifiers, used by Alembic. 
+revision: str = "c3b2a1d4e5f6" +down_revision: Union[str, None] = "a9f3e8b7c5d1" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + connection = op.get_bind() + + op.execute("ALTER TYPE secretkind_enum ADD VALUE IF NOT EXISTS 'SSO_PROVIDER'") + + inspector = sa.inspect(connection) + columns = {col["name"] for col in inspector.get_columns("secrets")} + + if "organization_id" not in columns: + op.add_column("secrets", sa.Column("organization_id", sa.UUID(), nullable=True)) + + op.alter_column("secrets", "project_id", nullable=True) + + op.create_foreign_key( + "secrets_organization_id_fkey", + "secrets", + "organizations", + ["organization_id"], + ["id"], + ondelete="CASCADE", + ) + + +def downgrade() -> None: + op.drop_constraint("secrets_organization_id_fkey", "secrets", type_="foreignkey") + op.drop_column("secrets", "organization_id") + op.alter_column("secrets", "project_id", nullable=False) diff --git a/api/oss/databases/postgres/migrations/tracing/utils.py b/api/oss/databases/postgres/migrations/tracing/utils.py index b6065ac8a0..10815051c3 100644 --- a/api/oss/databases/postgres/migrations/tracing/utils.py +++ b/api/oss/databases/postgres/migrations/tracing/utils.py @@ -109,7 +109,9 @@ async def get_pending_migration_head(): def run_alembic_migration(): """ - Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users. + Applies migration for first-time users and also checks the environment variable + "ALEMBIC_AUTO_MIGRATIONS" (legacy: "AGENTA_AUTO_MIGRATIONS") to determine whether + to apply migrations for returning users. """ try: diff --git a/api/oss/src/__init__.py b/api/oss/src/__init__.py index f76b6b286a..9712dde450 100644 --- a/api/oss/src/__init__.py +++ b/api/oss/src/__init__.py @@ -6,9 +6,6 @@ from supertokens_python import init, InputAppInfo, SupertokensConfig from supertokens_python.asyncio import get_user as get_user_from_supertokens from supertokens_python.recipe.thirdparty import ( - ProviderInput, - ProviderConfig, - ProviderClientConfig, SignInAndUpFeature, ) from supertokens_python.recipe import ( @@ -193,6 +190,8 @@ async def _create_account(email: str, uid: str) -> None: - Organization assignment (OSS only) - Account creation + This function is idempotent - if user already exists, it returns early. 
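+    Calling it again for the same email (e.g. when a user links a second
+    auth method) is therefore a no-op.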
+ Args: email: The user's normalized email address uid: The SuperTokens user ID @@ -200,6 +199,11 @@ async def _create_account(email: str, uid: str) -> None: Raises: UnauthorizedException: If email is blocked or user not invited (OSS only) """ + # Check if user already exists (idempotent - skip if adding new auth method) + existing_user = await get_user_with_email(email=email) + if existing_user is not None: + return + # Check email blocking (EE only) if is_ee() and await _is_blocked(email): raise UnauthorizedException(detail="This email is not allowed.") @@ -207,6 +211,8 @@ async def _create_account(email: str, uid: str) -> None: payload = { "uid": uid, "email": email, + "name": "Personal", + "is_personal": True, } # For OSS: compute organization before calling create_accounts @@ -269,6 +275,8 @@ async def consume_code_post( if isinstance(response, ConsumeCodeOkResult): email = response.user.emails[0].lower() await _create_account(email, response.user.id) + # Note: Identity tracking is now handled by the recipe-level override (override_passwordless_functions) + # which runs before session creation and properly injects user_identities into the JWT payload return response @@ -304,6 +312,8 @@ async def thirdparty_sign_in_up_post( if isinstance(response, SignInUpPostOkResult): email = response.user.emails[0].lower() await _create_account(email, response.user.id) + # Note: Identity tracking is now handled by the recipe-level override (override_thirdparty_functions) + # which runs before session creation and properly injects user_identities into the JWT payload return response @@ -361,14 +371,27 @@ async def sign_up_post( api_options: EmailPasswordAPIOptions, user_context: Dict[str, Any], ): - # FLOW 1: Sign in (redirect existing users) + # FLOW 1: Sign in (redirect existing users with emailpassword credential) email = form_fields[0].value.lower() if is_ee() and await _is_blocked(email): raise UnauthorizedException(detail="This email is not allowed.") user_info_from_st = await list_users_by_account_info( tenant_id="public", account_info=AccountInfo(email=email) ) - if len(user_info_from_st) >= 1 or await get_user_with_email(email=email): + + # Check if user has an emailpassword login method + has_emailpassword_method = False + for user in user_info_from_st: + for lm in user.login_methods: + if lm.recipe_id == "emailpassword": + has_emailpassword_method = True + break + if has_emailpassword_method: + break + + # Only redirect to sign_in if user has emailpassword credential + # This allows users who signed up via OAuth to add email/password + if has_emailpassword_method: return await sign_in_post( form_fields, tenant_id, @@ -388,20 +411,14 @@ async def sign_up_post( user_context, ) - # FLOW 3: Create application user (organization assignment is handled in create_accounts) + # FLOW 3: Create application user (idempotent - skips if user exists) if isinstance(response, EmailPasswordSignUpPostOkResult): - # sign up successful actual_email = "" for field in form_fields: if field.id == "email": actual_email = field.value - if actual_email == "": - # User did not provide an email. 
- # This is possible since we set optional: true - # in the form field config - pass - else: + if actual_email != "": email = ( actual_email if "@" in actual_email @@ -439,7 +456,7 @@ def _init_supertokens(): # Validate auth configuration try: - env.auth.validate() + env.auth.validate_config() except ValueError as e: logger.error(f"[AUTH CONFIG ERROR] {e}") raise @@ -449,6 +466,10 @@ def _init_supertokens(): # Email Password Authentication if env.auth.email_method == "password": + from oss.src.core.auth.supertokens_overrides import ( + override_emailpassword_functions, + ) + logger.info("✓ Email/Password authentication enabled") recipe_list.append( emailpassword.init( @@ -466,87 +487,80 @@ def _init_supertokens(): ), override=InputOverrideConfig( apis=override_password_apis, + functions=override_emailpassword_functions, ), ) ) # Email OTP Authentication if env.auth.email_method == "otp": + from oss.src.core.auth.supertokens_overrides import ( + override_passwordless_functions, + ) + logger.info("✓ Email/OTP authentication enabled") recipe_list.append( passwordless.init( flow_type="USER_INPUT_CODE", contact_config=ContactEmailOnlyConfig(), override=passwordless.InputOverrideConfig( - functions=override_passwordless_apis + apis=override_passwordless_apis, + functions=override_passwordless_functions, ), ) ) # Third-Party OIDC Authentication - oidc_providers = [] - if env.auth.google_enabled: - logger.info("✓ Google OAuth enabled") - oidc_providers.append( - ProviderInput( - config=ProviderConfig( - third_party_id="google", - clients=[ - ProviderClientConfig( - client_id=env.auth.google_oauth_client_id, - client_secret=env.auth.google_oauth_client_secret, - ), - ], - ), - ) - ) - - if env.auth.github_enabled: - logger.info("✓ GitHub OAuth enabled") - oidc_providers.append( - ProviderInput( - config=ProviderConfig( - third_party_id="github", - clients=[ - ProviderClientConfig( - client_id=env.auth.github_oauth_client_id, - client_secret=env.auth.github_oauth_client_secret, - ) - ], - ), - ) - ) + # Always initialize thirdparty recipe for dynamic OIDC support (EE) + from oss.src.core.auth.supertokens_config import get_thirdparty_providers + from oss.src.core.auth.supertokens_overrides import override_thirdparty_functions + from oss.src.utils.common import is_ee + oidc_providers = get_thirdparty_providers() if oidc_providers: + enabled_providers = [ + provider.config.third_party_id for provider in oidc_providers + ] + logger.info("✓ OIDC providers enabled: %s", ", ".join(enabled_providers)) + + # Initialize thirdparty recipe if we have static providers OR if EE is enabled (for dynamic OIDC) + if oidc_providers or is_ee(): recipe_list.append( thirdparty.init( sign_in_and_up_feature=SignInAndUpFeature(providers=oidc_providers), - override=thirdparty.InputOverrideConfig(apis=override_thirdparty_apis), + override=thirdparty.InputOverrideConfig( + apis=override_thirdparty_apis, + functions=override_thirdparty_functions, + ), ) ) + if is_ee() and not oidc_providers: + logger.info("✓ Third-party recipe enabled for dynamic OIDC (EE)") # Sessions always required if auth is enabled + from oss.src.core.auth.supertokens_overrides import override_session_functions + recipe_list.append( - session.init(expose_access_token_to_frontend_in_cookie_based_auth=True) + session.init( + expose_access_token_to_frontend_in_cookie_based_auth=True, + override=session.InputOverrideConfig( + functions=override_session_functions, + ), + ) ) # Dashboard for admin management recipe_list.append(dashboard.init()) # Initialize 
SuperTokens with selected recipes + from oss.src.core.auth.supertokens_config import ( + get_app_info, + get_supertokens_config, + ) + init( - app_info=InputAppInfo( - app_name="agenta", - api_domain=api_domain, - website_domain=env.agenta.web_url, - api_gateway_path=api_gateway_path, - api_base_path="/auth/", - website_base_path="/auth", - ), - supertokens_config=SupertokensConfig( - uri_core=env.supertokens.uri_core, - api_key=env.supertokens.api_key, - ), + app_info=get_app_info(), + supertokens_config=SupertokensConfig(**get_supertokens_config()), framework="fastapi", recipe_list=recipe_list, mode="asgi", diff --git a/api/oss/src/apis/fastapi/auth/__init__.py b/api/oss/src/apis/fastapi/auth/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/oss/src/apis/fastapi/auth/models.py b/api/oss/src/apis/fastapi/auth/models.py new file mode 100644 index 0000000000..6013b2329e --- /dev/null +++ b/api/oss/src/apis/fastapi/auth/models.py @@ -0,0 +1,46 @@ +from pydantic import BaseModel, EmailStr +from typing import Optional, List, Dict + + +# ============================================================================ +# AUTH DISCOVER +# ============================================================================ + + +class DiscoverRequest(BaseModel): + email: EmailStr + + +class SSOProviderInfo(BaseModel): + id: str + slug: str + third_party_id: str + + +class SSOProviders(BaseModel): + providers: List[SSOProviderInfo] + + +class DiscoverResponse(BaseModel): + exists: bool + methods: Dict[str, bool | SSOProviders] + + +# ============================================================================ +# OIDC AUTHORIZE +# ============================================================================ + + +class OIDCAuthorizeRequest(BaseModel): + provider_id: str + redirect: Optional[str] = "/" + + +# ============================================================================ +# OIDC CALLBACK +# ============================================================================ + + +class OIDCCallbackRequest(BaseModel): + code: str + state: str diff --git a/api/oss/src/apis/fastapi/auth/router.py b/api/oss/src/apis/fastapi/auth/router.py new file mode 100644 index 0000000000..1697d8c582 --- /dev/null +++ b/api/oss/src/apis/fastapi/auth/router.py @@ -0,0 +1,339 @@ +from fastapi import APIRouter, HTTPException, Request +from fastapi.responses import RedirectResponse +from pydantic import BaseModel +from supertokens_python.recipe.session.asyncio import get_session + +from oss.src.apis.fastapi.auth.models import ( + DiscoverRequest, + DiscoverResponse, +) +from oss.src.core.auth.service import AuthService +from oss.src.utils.common import is_ee +from oss.src.utils.logging import get_module_logger + + +auth_router = APIRouter() +auth_service = AuthService() +log = get_module_logger(__name__) + + +class SessionIdentitiesUpdate(BaseModel): + session_identities: list[str] + + +@auth_router.post("/discover", response_model=DiscoverResponse) +async def discover(request: DiscoverRequest): + """ + Discover authentication methods available for a given email. + + This endpoint does NOT reveal: + - Organization names + - User existence (optionally - currently does for UX) + - Detailed policy information + + Returns minimal information needed for authentication flow. 
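+
+    Illustrative response shape (the concrete method keys here are
+    assumptions; the real keys come from AuthService.discover):
+
+        {"exists": true,
+         "methods": {"password": true,
+                     "sso": {"providers": [{"id": "<uuid>",
+                                            "slug": "okta",
+                                            "third_party_id": "sso:acme:okta"}]}}}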
+ """ + try: + result = await auth_service.discover(request.email) + return DiscoverResponse(**result) + except Exception as e: + import traceback + + print(f"❌ Discovery error: {e}") + print(traceback.format_exc()) + raise HTTPException(status_code=500, detail=str(e)) + + +@auth_router.get("/organization/access") +async def check_organization_access(request: Request, organization_id: str): + """ + Check if the current session satisfies the organization's auth policy. + + Returns 200 when access is allowed, 403 with AUTH_UPGRADE_REQUIRED when not. + """ + try: + session = await get_session(request) # type: ignore + except Exception: + raise HTTPException(status_code=401, detail="Unauthorized") + + payload = session.get_access_token_payload() if session else {} + session_identities = payload.get("session_identities") or [] + user_identities = payload.get("user_identities", []) + + try: + from uuid import UUID + from oss.src.services import db_manager + + user_uid = session.get_user_id() + user = await db_manager.get_user_with_uid(user_uid) + if not user: + raise HTTPException( + status_code=403, + detail={ + "error": "AUTH_DOMAIN_DENIED", + "message": "Organization available but access restricted to verified domain(s).", + "required_methods": [], + }, + ) + + user_id = UUID(str(user.id)) + org_id = UUID(organization_id) + except HTTPException: + raise + except Exception: + raise HTTPException(status_code=400, detail="Invalid organization_id") + + policy_error = await auth_service.check_organization_access( + user_id, org_id, session_identities + ) + + if policy_error and policy_error.get("error") in { + "AUTH_UPGRADE_REQUIRED", + "AUTH_SSO_DENIED", + "AUTH_DOMAIN_DENIED", + }: + detail = { + "error": policy_error.get("error"), + "message": policy_error.get("message"), + "required_methods": policy_error.get("required_methods", []), + "session_identities": session_identities, + "user_identities": user_identities, + "sso_providers": policy_error.get("sso_providers", []), + "current_domain": policy_error.get("current_domain"), + "allowed_domains": policy_error.get("allowed_domains", []), + } + raise HTTPException(status_code=403, detail=detail) + + return {"ok": True} + + +@auth_router.post("/session/identities") +async def update_session_identities(request: Request, payload: SessionIdentitiesUpdate): + try: + session = await get_session(request) # type: ignore + except Exception: + raise HTTPException(status_code=401, detail="Unauthorized") + + access_payload = session.get_access_token_payload() if session else {} + current = access_payload.get("session_identities") or [] + merged = list(dict.fromkeys(current + payload.session_identities)) + log.debug( + "[AUTH-IDENTITY] session_identities update", + { + "user_id": session.get_user_id() if session else None, + "current": current, + "incoming": payload.session_identities, + "merged": merged, + }, + ) + + if hasattr(session, "update_access_token_payload"): + access_payload["session_identities"] = merged + await session.update_access_token_payload(access_payload) + elif hasattr(session, "merge_into_access_token_payload"): + await session.merge_into_access_token_payload({"session_identities": merged}) + else: + raise HTTPException( + status_code=500, detail="Session payload update not supported" + ) + return {"session_identities": merged, "previous": current} + + +@auth_router.get("/authorize/oidc") +async def oidc_authorize(request: Request, provider_id: str, redirect: str = "/"): + """ + Initiate OIDC/SSO authorization flow using SuperTokens 
third-party recipe (EE only). + + Query params: + - provider_id: UUID of the organization_providers entry + - redirect: Where to redirect after successful authentication (stored in state) + + This endpoint redirects to SuperTokens third-party signinup with: + - third_party_id: "sso:{organization_slug}:{provider_slug}" + - redirect_uri: Frontend URL after authentication + + SuperTokens will handle: + 1. Building OIDC authorization URL (via our get_dynamic_oidc_provider) + 2. Redirecting user to IdP + 3. Handling callback at /auth/callback/sso:{organization_slug}:{provider_slug} + 4. Creating session with user_identities (via our overrides) + 5. Redirecting to frontend + """ + if not is_ee(): + raise HTTPException( + status_code=404, + detail="SSO/OIDC is only available in Enterprise Edition", + ) + + try: + # Get provider to build third_party_id + from uuid import UUID + from ee.src.dbs.postgres.organizations.dao import OrganizationProvidersDAO + import httpx + + from oss.src.utils.env import env + from oss.src.utils.helpers import parse_url + + providers_dao = OrganizationProvidersDAO() + provider = await providers_dao.get_by_id_any(str(provider_id)) + + if not provider or not (provider.flags and provider.flags.get("is_active")): + raise HTTPException( + status_code=404, detail="Provider not found or disabled" + ) + + from oss.src.services import db_manager + + organization = await db_manager.get_organization_by_id( + str(provider.organization_id) + ) + if not organization or not organization.slug: + raise HTTPException( + status_code=400, + detail="Organization slug is required for SSO providers", + ) + + # Build third_party_id for SuperTokens + # Format: "sso:{organization_slug}:{provider_slug}" + third_party_id = f"sso:{organization.slug}:{provider.slug}" + + callback_url = ( + f"{env.agenta.web_url.rstrip('/')}/auth/callback/{third_party_id}" + ) + print(f"[OIDC-AUTH] Expected redirect URI: {callback_url}") + api_url = parse_url(env.agenta.api_url) + request_base_url = str(request.base_url).rstrip("/") + + authorisation_urls = [ + f"{request_base_url}/auth/authorisationurl", + f"{api_url}/auth/authorisationurl", + ] + + print( + "[OIDC-AUTH] Request context: " + f"request_url={request.url} base_url={request_base_url} api_url={api_url} " + f"candidates={authorisation_urls}" + ) + + response = None + async with httpx.AsyncClient(timeout=10.0) as client: + for candidate in authorisation_urls: + print( + f"[OIDC-AUTH] Resolving auth URL. 
third_party_id={third_party_id} " + f"authorisation_url={candidate} callback_url={callback_url}" + ) + try: + response = await client.get( + candidate, + params={ + "thirdPartyId": third_party_id, + "redirectURIOnProviderDashboard": callback_url, + }, + ) + except Exception as exc: + print(f"[OIDC-AUTH] Request failed for {candidate}: {exc}") + continue + content_type = response.headers.get("content-type", "") + print( + f"[OIDC-AUTH] SuperTokens response status={response.status_code} " + f"content_type={content_type} body={response.text}" + ) + if response.status_code == 200 and "application/json" in content_type: + break + + if not response or response.status_code != 200: + raise HTTPException( + status_code=502, + detail="Failed to fetch authorization URL from auth provider.", + ) + + data = response.json() + redirect_url = data.get("urlWithQueryParams") or data.get("url") + if not redirect_url: + raise HTTPException( + status_code=502, + detail="Auth provider response missing authorization URL.", + ) + + return RedirectResponse(url=redirect_url, status_code=302) + + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@auth_router.get("/sso/callback/{organization_slug}/{provider_slug}") +async def sso_callback_redirect( + organization_slug: str, provider_slug: str, request: Request +): + """ + Custom SSO callback endpoint that redirects to SuperTokens. + + This endpoint: + 1. Accepts clean URL path: /auth/sso/callback/{organization_slug}/{provider_slug} + 2. Validates the organization and provider exist + 3. Builds SuperTokens thirdPartyId: sso:{organization_slug}:{provider_slug} + 4. Redirects to SuperTokens callback: /auth/callback/{thirdPartyId} + + SuperTokens then handles: + 1. Exchange code for tokens (using our dynamic provider config) + 2. Get user info + 3. Call our sign_in_up override (creates user_identity, adds user_identities to session) + 4. Redirect to frontend with session cookie + """ + if not is_ee(): + raise HTTPException( + status_code=404, + detail="SSO/OIDC is only available in Enterprise Edition", + ) + + try: + from ee.src.dbs.postgres.organizations.dao import OrganizationProvidersDAO + from oss.src.services import db_manager + + # Validate organization exists + organization = await db_manager.get_organization_by_slug(organization_slug) + if not organization: + raise HTTPException( + status_code=404, + detail=f"Organization '{organization_slug}' not found", + ) + + # Validate provider exists and is active + providers_dao = OrganizationProvidersDAO() + provider = await providers_dao.get_by_slug(provider_slug, str(organization.id)) + + if not provider: + raise HTTPException( + status_code=404, + detail=f"SSO provider '{provider_slug}' not found for organization '{organization_slug}'", + ) + + if not (provider.flags and provider.flags.get("is_active")): + raise HTTPException( + status_code=400, + detail=f"SSO provider '{provider_slug}' is not active", + ) + + # Build thirdPartyId and redirect to SuperTokens callback + third_party_id = f"sso:{organization.slug}:{provider.slug}" + + # Get the original query parameters from the IdP callback (code, state, etc.) + # SuperTokens expects them at /auth/callback/{thirdPartyId}?code=...&state=... 
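+        # Illustrative (made-up values): an IdP redirect to
+        #   /auth/sso/callback/acme/okta?code=abc123&state=xyz
+        # is forwarded below as
+        #   /auth/callback/sso:acme:okta?code=abc123&state=xyz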
+ query_params = request.query_params + + # Build SuperTokens callback URL with query params + supertokens_callback_url = f"/auth/callback/{third_party_id}" + if query_params: + query_string = "&".join(f"{k}={v}" for k, v in query_params.items()) + supertokens_callback_url = f"{supertokens_callback_url}?{query_string}" + + return RedirectResponse(url=supertokens_callback_url, status_code=302) + + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +# Note: Final SSO callback is handled by SuperTokens at /auth/callback/{thirdPartyId} +# After our custom endpoint redirects to it with the code and state parameters diff --git a/api/oss/src/apis/fastapi/shared/utils.py b/api/oss/src/apis/fastapi/shared/utils.py index b7e2c4300b..b231e2d440 100644 --- a/api/oss/src/apis/fastapi/shared/utils.py +++ b/api/oss/src/apis/fastapi/shared/utils.py @@ -44,6 +44,7 @@ def compute_next_windowing( entities: List[Any], attribute: str, windowing: Optional[Windowing], + order: str = "ascending", ) -> Optional[Windowing]: if not windowing or not windowing.limit or not entities: return None @@ -68,20 +69,46 @@ def compute_next_windowing( order_attribute_name = attribute.lower() - if not time_attribute_value or not id_attribute_value: + if not id_attribute_value: return None - if order_attribute_name in id_attributes: - next_value = id_attribute_value - elif order_attribute_name in time_attributes: - next_value = time_attribute_value - else: - return None + # Determine effective order (windowing.order overrides default) + effective_order = (windowing.order or order).lower() - return Windowing( - newest=windowing.newest, - oldest=windowing.oldest, - next=next_value, - limit=windowing.limit, - order=windowing.order, - ) + # For ID-based ordering (UUID7), just use the ID as cursor + if order_attribute_name in id_attributes: + return Windowing( + newest=windowing.newest, + oldest=windowing.oldest, + next=id_attribute_value, + limit=windowing.limit, + order=windowing.order, + ) + + # For time-based ordering (UUID5/content-hashed IDs), we need both: + # - next: the ID for tie-breaking when timestamps are equal + # - oldest/newest: the timestamp boundary for the cursor + if order_attribute_name in time_attributes: + if not time_attribute_value: + return None + + if effective_order == "ascending": + # Ascending: set oldest to last record's timestamp + return Windowing( + newest=windowing.newest, + oldest=time_attribute_value, + next=id_attribute_value, + limit=windowing.limit, + order=windowing.order, + ) + else: + # Descending: set newest to last record's timestamp + return Windowing( + newest=time_attribute_value, + oldest=windowing.oldest, + next=id_attribute_value, + limit=windowing.limit, + order=windowing.order, + ) + + return None diff --git a/api/oss/src/apis/fastapi/testcases/router.py b/api/oss/src/apis/fastapi/testcases/router.py index f56a5cadb4..67fd20159c 100644 --- a/api/oss/src/apis/fastapi/testcases/router.py +++ b/api/oss/src/apis/fastapi/testcases/router.py @@ -150,7 +150,7 @@ async def query_testcases( # Revision not found or has no testcases return TestcasesResponse() - testcases = await self.testcases_service.fetch_testcases( + testcases = await self.testcases_service.query_testcases( project_id=UUID(request.state.project_id), # testcase_ids=testcase_ids, @@ -162,8 +162,9 @@ async def query_testcases( next_windowing = compute_next_windowing( entities=testcases, - attribute="id", # UUID7 - use id for cursor-based pagination + 
attribute="created_at", # Testcase IDs are content-hashed (UUID5), use timestamp windowing=testcases_query_request.windowing, + order="ascending", # Must match order used in BlobsDAO.query_blobs ) testcase_response = TestcasesResponse( diff --git a/api/oss/src/apis/fastapi/testsets/models.py b/api/oss/src/apis/fastapi/testsets/models.py index 515eab1f14..3f7273236e 100644 --- a/api/oss/src/apis/fastapi/testsets/models.py +++ b/api/oss/src/apis/fastapi/testsets/models.py @@ -23,7 +23,6 @@ TestsetRevisionCreate, TestsetRevisionEdit, TestsetRevisionCommit, - TestsetRevisionPatch, # SimpleTestset, SimpleTestsetCreate, @@ -61,6 +60,7 @@ class TestsetResponse(BaseModel): class TestsetsResponse(BaseModel): count: int = 0 testsets: List[Testset] = [] + windowing: Optional[Windowing] = None # TESTSET VARIANTS ------------------------------------------------------------- @@ -126,10 +126,6 @@ class TestsetRevisionCommitRequest(BaseModel): include_testcases: Optional[bool] = None -class TestsetRevisionPatchRequest(BaseModel): - testset_revision_patch: TestsetRevisionPatch - - class TestsetRevisionRetrieveRequest(BaseModel): testset_ref: Optional[Reference] = None testset_variant_ref: Optional[Reference] = None diff --git a/api/oss/src/apis/fastapi/testsets/router.py b/api/oss/src/apis/fastapi/testsets/router.py index e0e3bf8f2d..ca84ac23b1 100644 --- a/api/oss/src/apis/fastapi/testsets/router.py +++ b/api/oss/src/apis/fastapi/testsets/router.py @@ -25,6 +25,8 @@ from oss.src.utils.exceptions import intercept_exceptions, suppress_exceptions from oss.src.utils.caching import get_cache, set_cache, invalidate_cache +from oss.src.apis.fastapi.shared.utils import compute_next_windowing + from oss.src.core.shared.dtos import ( Reference, ) @@ -79,7 +81,6 @@ TestsetRevisionRetrieveRequest, TestsetRevisionCommitRequest, TestsetRevisionsLogRequest, - TestsetRevisionPatchRequest, TestsetRevisionResponse, TestsetRevisionsResponse, # @@ -128,6 +129,143 @@ } +def _to_plain_dict(value: Any) -> Dict[str, Any]: + """Convert a value to a plain Python dict, handling Pydantic models.""" + if value is None: + return {} + if hasattr(value, "model_dump"): + return value.model_dump() + if hasattr(value, "dict"): + return value.dict() + if isinstance(value, dict): + return dict(value) # Make a copy to be safe + return {} + + +def _serialize_value(value: Any) -> Any: + """Serialize a value to a JSON-safe type. + + Handles Pydantic models, dicts, lists, and primitives. + Returns the serialized value (not a JSON string). + """ + if value is None: + return None + if isinstance(value, (str, int, float, bool)): + return value + if hasattr(value, "model_dump"): + return value.model_dump() + if hasattr(value, "dict"): + return value.dict() + if isinstance(value, dict): + return {k: _serialize_value(v) for k, v in value.items()} + if isinstance(value, (list, tuple)): + return [_serialize_value(v) for v in value] + # Fallback: convert to string + return str(value) + + +def _serialize_value_for_csv(value: Any) -> Any: + """Serialize complex values to JSON strings for CSV export. + + Polars cannot serialize dicts, lists, or other complex objects to CSV, + so we convert them to JSON strings. This includes Pydantic models. 
+ """ + if value is None: + return "" + # Handle primitive types directly + if isinstance(value, (str, int, float, bool)): + return value + # Handle Pydantic models by converting to dict first + if hasattr(value, "model_dump"): + return orjson.dumps(value.model_dump()).decode("utf-8") + if hasattr(value, "dict"): + return orjson.dumps(value.dict()).decode("utf-8") + # Handle dicts and lists + if isinstance(value, (dict, list)): + return orjson.dumps(value).decode("utf-8") + # Fallback: convert to string + return str(value) + + +def _prepare_testcases_for_csv( + testcases_data: List[Dict[str, Any]], +) -> List[Dict[str, Any]]: + """Prepare testcases data for CSV export by serializing complex values.""" + return [ + {key: _serialize_value_for_csv(val) for key, val in row.items()} + for row in testcases_data + ] + + +def _drop_empty_export_columns(testcases_data: List[Dict[str, Any]]) -> None: + """Drop metadata columns that are None for every row (CSV export only).""" + if not testcases_data: + return + for column in ("__flags__", "__tags__", "__meta__"): + if all(row.get(column) is None for row in testcases_data): + for row in testcases_data: + row.pop(column, None) + + +def _normalize_testcase_dedup_ids(testcases_data: List[Dict[str, Any]]) -> None: + """Normalize legacy dedup keys to the canonical testcase_dedup_id field.""" + for testcase_data in testcases_data: + if not isinstance(testcase_data, dict): + continue + legacy_dedup_id = testcase_data.pop("__dedup_id__", None) + existing_dedup_id = testcase_data.get("testcase_dedup_id") + if legacy_dedup_id not in (None, "") and existing_dedup_id in (None, ""): + testcase_data["testcase_dedup_id"] = legacy_dedup_id + + +def _normalize_testcase_dedup_ids_in_request( + testcases: Optional[List[Testcase]], +) -> None: + """Normalize CSV-style dedup keys in JSON body requests.""" + for testcase in testcases or []: + testcase_data = testcase.data + if not isinstance(testcase_data, dict): + continue + legacy_dedup_id = testcase_data.pop("__dedup_id__", None) + existing_dedup_id = testcase_data.get("testcase_dedup_id") + if legacy_dedup_id not in (None, "") and existing_dedup_id in (None, ""): + testcase_data["testcase_dedup_id"] = legacy_dedup_id + + +def _build_testcase_export_row(testcase: Any) -> Dict[str, Any]: + """Build a dict for exporting a testcase, properly handling Pydantic models. + + Extracts and serializes all testcase fields into a flat dict suitable for export. 
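+
+    Illustrative output shape: {**data, "__id__": "<uuid>", "__flags__": ...,
+    "__tags__": ..., "__meta__": ...}, where the dunder metadata keys are
+    included only when the corresponding field is not None.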
+ """ + # Extract the data field - handle both Pydantic models and plain dicts + data_dict = _to_plain_dict(testcase.data) + + # Serialize all values in the data dict to ensure they're JSON-safe + serialized_data = {key: _serialize_value(val) for key, val in data_dict.items()} + if "__dedup_id__" not in serialized_data and "testcase_dedup_id" in serialized_data: + serialized_data["__dedup_id__"] = serialized_data["testcase_dedup_id"] + if "__dedup_id__" in serialized_data and "testcase_dedup_id" in serialized_data: + serialized_data.pop("testcase_dedup_id", None) + + export_row = { + **serialized_data, + "__id__": str(testcase.id) if testcase.id else None, + } + + flags = _serialize_value(testcase.flags) + tags = _serialize_value(testcase.tags) + meta = _serialize_value(testcase.meta) + + if flags is not None: + export_row["__flags__"] = flags + if tags is not None: + export_row["__tags__"] = tags + if meta is not None: + export_row["__meta__"] = meta + + return export_row + + class TestsetsRouter: TESTCASES_FLAGS = TestsetFlags( has_testcases=True, @@ -331,6 +469,15 @@ def __init__(self, *, testsets_service: TestsetsService): response_model_exclude=TESTSET_REVISION_RESPONSE_EXCLUDE, ) + # POST /api/preview/testsets/revisions/{testset_revision_id}/download + self.router.add_api_route( + "/revisions/{testset_revision_id}/download", + self.fetch_testset_revision_to_file, + methods=["POST"], + operation_id="fetch_testset_revision_to_file", + status_code=status.HTTP_200_OK, + ) + self.router.add_api_route( "/revisions/query", self.query_testset_revisions, @@ -353,16 +500,6 @@ def __init__(self, *, testsets_service: TestsetsService): response_model_exclude=TESTSET_REVISION_RESPONSE_EXCLUDE, ) - self.router.add_api_route( - "/revisions/patch", - self.patch_testset_revision, - methods=["POST"], - operation_id="patch_testset_revision", - status_code=status.HTTP_200_OK, - response_model=TestsetRevisionResponse, - response_model_exclude_none=True, - ) - self.router.add_api_route( "/revisions/log", self.log_testset_revisions, @@ -550,9 +687,16 @@ async def query_testsets( windowing=testset_query_request.windowing, ) + next_windowing = compute_next_windowing( + entities=testsets, + attribute="id", # UUID7 - use id for cursor-based pagination + windowing=testset_query_request.windowing, + ) + testsets_response = TestsetsResponse( count=len(testsets), testsets=testsets, + windowing=next_windowing, ) return testsets_response @@ -943,6 +1087,92 @@ async def unarchive_testset_revision( return testset_revision_response + @intercept_exceptions() + async def fetch_testset_revision_to_file( + self, + request: Request, + *, + testset_revision_id: UUID, + # + file_type: Optional[Literal["csv", "json"]] = Query( + "csv", + description="File type to download. Supported: 'csv' or 'json'. Default: 'csv'.", + ), + file_name: Optional[str] = Query( + None, + description="Optional custom filename for the download.", + ), + ) -> StreamingResponse: # type: ignore + if is_ee(): + if not await check_action_access( # type: ignore + user_uid=request.state.user_id, + project_id=request.state.project_id, + permission=Permission.VIEW_TESTSETS, # type: ignore + ): + raise FORBIDDEN_EXCEPTION # type: ignore + + if file_type is None or file_type not in ["csv", "json"]: + raise HTTPException( + status_code=400, + detail="Invalid file type. 
Supported types are 'csv' and 'json'.", + ) + + # Fetch the revision with testcases + testset_revision_response = await self.fetch_testset_revision( + request=request, + testset_revision_id=testset_revision_id, + include_testcases=True, + ) + + if ( + not testset_revision_response.count + or not testset_revision_response.testset_revision + ): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Testset revision not found. Please check the revision_id and try again.", + ) + + revision = testset_revision_response.testset_revision + + filename = ( + file_name or f"revision_{testset_revision_id}" + ) + f".{file_type.lower()}" + testcases = revision.data.testcases if revision.data else [] + + # Build export data using helper that properly handles Pydantic models + testcases_data = [ + _build_testcase_export_row(testcase) for testcase in testcases or [] + ] + + if file_type.lower() == "json": + buffer = BytesIO(orjson.dumps(testcases_data)) + + return StreamingResponse( + buffer, + media_type="application/json", + headers={"Content-Disposition": f"attachment; filename={filename}"}, + ) + + elif file_type.lower() == "csv": + buffer = BytesIO() + _drop_empty_export_columns(testcases_data) + csv_data = _prepare_testcases_for_csv(testcases_data) + pl.DataFrame(csv_data).write_csv(buffer) + buffer.seek(0) + + return StreamingResponse( + buffer, + media_type="text/csv", + headers={"Content-Disposition": f"attachment; filename={filename}"}, + ) + + else: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Invalid file type. Supported types are 'csv' and 'json'.", + ) + async def query_testset_revisions( self, request: Request, @@ -993,11 +1223,23 @@ async def commit_testset_revision( ): raise FORBIDDEN_EXCEPTION # type: ignore + commit = testset_revision_commit_request.testset_revision_commit + if commit.data and commit.delta: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Provide either data or delta for a commit, not both.", + ) + if not commit.data and not commit.delta: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Provide either data or delta for a commit.", + ) + testset_revision = await self.testsets_service.commit_testset_revision( project_id=UUID(request.state.project_id), user_id=UUID(request.state.user_id), # - testset_revision_commit=testset_revision_commit_request.testset_revision_commit, + testset_revision_commit=commit, include_testcases=testset_revision_commit_request.include_testcases, ) @@ -1008,34 +1250,6 @@ async def commit_testset_revision( return testset_revision_response - async def patch_testset_revision( - self, - request: Request, - *, - testset_revision_patch_request: TestsetRevisionPatchRequest, - ) -> TestsetRevisionResponse: - if is_ee(): - if not await check_action_access( # type: ignore - user_uid=request.state.user_id, - project_id=request.state.project_id, - permission=Permission.EDIT_EVALUATORS, # type: ignore - ): - raise FORBIDDEN_EXCEPTION # type: ignore - - testset_revision = await self.testsets_service.patch_testset_revision( - project_id=UUID(request.state.project_id), - user_id=UUID(request.state.user_id), - # - testset_revision_patch=testset_revision_patch_request.testset_revision_patch, - ) - - testset_revision_response = TestsetRevisionResponse( - count=1 if testset_revision else 0, - testset_revision=testset_revision, - ) - - return testset_revision_response - async def log_testset_revisions( self, request: Request, @@ -1211,6 +1425,10 @@ async def 
create_simple_testset( ): raise FORBIDDEN_EXCEPTION # type: ignore + _normalize_testcase_dedup_ids_in_request( + simple_testset_create_request.testset.data.testcases + ) + simple_testset = await self.simple_testsets_service.create( project_id=UUID(request.state.project_id), user_id=UUID(request.state.user_id), @@ -1322,6 +1540,10 @@ async def edit_simple_testset( if str(testset_id) != str(simple_testset_edit_request.testset.id): return SimpleTestsetResponse() + _normalize_testcase_dedup_ids_in_request( + simple_testset_edit_request.testset.data.testcases + ) + simple_testset: Optional[ SimpleTestset ] = await self.simple_testsets_service.edit( @@ -1605,6 +1827,7 @@ async def create_simple_testset_from_file( ) try: + _normalize_testcase_dedup_ids(testcases_data) testcases_data = json_array_to_json_object( data=testcases_data, testcase_id_key="__id__", @@ -1737,6 +1960,7 @@ async def edit_simple_testset_from_file( ) try: + _normalize_testcase_dedup_ids(testcases_data) testcases_data = json_array_to_json_object( data=testcases_data, testcase_id_key="__id__", @@ -1852,15 +2076,9 @@ async def fetch_simple_testset_to_file( filename = (file_name or f"testset_{testset_id}") + f".{file_type.lower()}" testcases = testset.data.testcases + # Build export data using helper that properly handles Pydantic models testcases_data = [ - { - **testcase.data, - "__id__": testcase.id, - "__flags__": testcase.flags, - "__tags__": testcase.tags, - "__meta__": testcase.meta, - } - for testcase in testcases or [] + _build_testcase_export_row(testcase) for testcase in testcases or [] ] if file_type.lower() == "json": @@ -1874,7 +2092,10 @@ async def fetch_simple_testset_to_file( elif file_type.lower() == "csv": buffer = BytesIO() - pl.DataFrame(testcases_data).write_csv(buffer) + _drop_empty_export_columns(testcases_data) + csv_data = _prepare_testcases_for_csv(testcases_data) + df = pl.DataFrame(csv_data) + df.write_csv(buffer) buffer.seek(0) return StreamingResponse( diff --git a/api/oss/src/apis/fastapi/tracing/router.py b/api/oss/src/apis/fastapi/tracing/router.py index 2349eef759..45831fdc93 100644 --- a/api/oss/src/apis/fastapi/tracing/router.py +++ b/api/oss/src/apis/fastapi/tracing/router.py @@ -9,6 +9,8 @@ from oss.src.utils.exceptions import intercept_exceptions, suppress_exceptions from oss.src.utils.caching import get_cache, set_cache, invalidate_cache +from oss.src.core.tracing.dtos import ListOperator, ComparisonOperator, Condition + from oss.src.apis.fastapi.tracing.utils import ( merge_queries, parse_query_from_params_request, @@ -225,17 +227,26 @@ async def query_spans( # QUERY merged_query = merge_queries(query, query_from_body) - try: - span_dtos = await self.service.query( + # Optimize: detect simple trace_id queries and use fetch() instead + trace_ids = self._extract_trace_ids_from_query(merged_query) + + if trace_ids is not None: + span_dtos = await self.service.fetch( project_id=UUID(request.state.project_id), - # - query=merged_query, + trace_ids=trace_ids, ) - except FilteringException as e: - raise HTTPException( - status_code=400, - detail=str(e), - ) from e + else: + try: + span_dtos = await self.service.query( + project_id=UUID(request.state.project_id), + # + query=merged_query, + ) + except FilteringException as e: + raise HTTPException( + status_code=400, + detail=str(e), + ) from e spans_or_traces = parse_spans_into_response( span_dtos, @@ -269,6 +280,42 @@ async def query_spans( # QUERY return spans_response + def _extract_trace_ids_from_query( + self, query: TracingQuery + ) -> 
Optional[List[UUID]]: + """ + Detect if query is a simple trace_id filter and extract trace IDs. + Returns trace_ids if query can be optimized to use fetch(), else None. + """ + if not query.filtering or not query.filtering.conditions: + return None + + if len(query.filtering.conditions) != 1: + return None + + condition = query.filtering.conditions[0] + + if not isinstance(condition, Condition): + return None + + if condition.field != "trace_id": + return None + + if condition.operator not in [ComparisonOperator.IS, ListOperator.IN]: + return None + + # Extract trace IDs from value + try: + if isinstance(condition.value, list): + # IN operator with list of trace_ids + return [UUID(str(tid)) for tid in condition.value] + else: + # IS operator with single trace_id + return [UUID(str(condition.value))] + except (ValueError, TypeError): + # Invalid UUID format + return None + @intercept_exceptions() @suppress_exceptions(default=AnalyticsResponse()) async def fetch_analytics( diff --git a/api/oss/src/core/auth/__init__.py b/api/oss/src/core/auth/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/oss/src/core/auth/middleware.py b/api/oss/src/core/auth/middleware.py new file mode 100644 index 0000000000..4839219490 --- /dev/null +++ b/api/oss/src/core/auth/middleware.py @@ -0,0 +1,139 @@ +"""Organization policy enforcement middleware (EE).""" + +from typing import Optional, Callable, List +from uuid import UUID +from fastapi import Request, Response, HTTPException +from fastapi.responses import JSONResponse +from starlette.middleware.base import BaseHTTPMiddleware + +from supertokens_python.recipe.session import SessionContainer +from supertokens_python.recipe.session.framework.fastapi import verify_session + +from oss.src.core.auth.types import MethodKind +from oss.src.utils.common import is_ee + +# Note: This middleware requires EE organization tables +# Organization policy enforcement is only available in EE +# TODO: Policy enforcement needs to be reimplemented to read from organizations.flags +# Previously used organization_policies table (now removed) +policies_dao = None + + +def matches_policy(identities: List[str], allowed_methods: List[str]) -> bool: + """ + Check if user's identities satisfy the organization's allowed_methods policy. + + Supports wildcards defined in MethodKind: + - "email:*" matches "email:otp", "email:password" + - "social:*" matches "social:google", "social:github" + - "sso:*" matches any SSO provider + - "sso:acme:*" matches any provider for organization 'acme' + + Args: + identities: List of authentication methods used by user (from session) + allowed_methods: List of MethodKind patterns allowed by organization policy + + Returns: + True if any identity matches any allowed method pattern + + Examples: + identities = ["email:otp", "social:google"] + allowed_methods = ["email:*", "social:*"] + → True + + identities = ["email:otp"] + allowed_methods = ["sso:*"] + → False + + identities = ["sso:acme:okta"] + allowed_methods = ["sso:acme:*"] + → True + """ + for identity in identities: + for allowed in allowed_methods: + if MethodKind.matches_pattern(identity, allowed): + return True + + return False + + +async def check_organization_policy( + session: SessionContainer, + organization_id: UUID, +) -> Optional[dict]: + """ + Check if user's session satisfies organization policy. 
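+    Reads session_identities from the access-token payload and delegates to
+    AuthService.check_organization_access for the actual policy evaluation.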
+ + Returns: + None if policy satisfied + Dict with error details if upgrade required + """ + # Get session identities + payload = session.get_access_token_payload() + identities = payload.get("session_identities") or [] + + # Get user ID + user_id = UUID(session.get_user_id()) + + # Use AuthService for policy enforcement + from oss.src.core.auth.service import AuthService + + auth_service = AuthService() + return await auth_service.check_organization_access( + user_id, organization_id, identities + ) + + +class OrganizationPolicyMiddleware(BaseHTTPMiddleware): + """ + Middleware to enforce organization authentication policies (EE). + + Applies to routes that specify an organization_id (via query param or path). + Only active when EE features are enabled. + """ + + async def dispatch(self, request: Request, call_next: Callable) -> Response: + # Skip if EE not enabled + if not is_ee(): + return await call_next(request) + + # Skip auth routes + if request.url.path.startswith("/auth"): + return await call_next(request) + + # Skip non-org routes + # Check if organization_id is in query params + organization_id_str = request.query_params.get("organization_id") + + if not organization_id_str: + # No organization context, skip policy check + return await call_next(request) + + try: + organization_id = UUID(organization_id_str) + except ValueError: + return JSONResponse( + status_code=400, + content={"error": "Invalid organization_id"}, + ) + + # Verify session + try: + session = await verify_session(request) + except Exception: + return JSONResponse( + status_code=401, + content={"error": "Unauthorized", "message": "No valid session"}, + ) + + # Check organization policy + policy_error = await check_organization_policy(session, organization_id) + + if policy_error: + return JSONResponse( + status_code=403, + content=policy_error, + ) + + # Policy satisfied, continue + return await call_next(request) diff --git a/api/oss/src/core/auth/oidc.py b/api/oss/src/core/auth/oidc.py new file mode 100644 index 0000000000..de44dfd78b --- /dev/null +++ b/api/oss/src/core/auth/oidc.py @@ -0,0 +1,94 @@ +"""OIDC helper utilities for authorization and token exchange.""" + +import secrets +import httpx +from typing import Dict, Any, Optional +from urllib.parse import urlencode + + +class OIDCState: + """Manages OIDC state for CSRF protection.""" + + def __init__(self, provider_id: str, redirect_uri: str): + self.state_id = secrets.token_urlsafe(32) + self.nonce = secrets.token_urlsafe(32) + self.provider_id = provider_id + self.redirect_uri = redirect_uri + + def to_dict(self) -> Dict[str, str]: + return { + "state_id": self.state_id, + "nonce": self.nonce, + "provider_id": self.provider_id, + "redirect_uri": self.redirect_uri, + } + + +class OIDCClient: + """OIDC client for building authorization URLs and exchanging tokens.""" + + def __init__(self, config: Dict[str, Any], callback_url: str): + self.issuer = config["issuer"] + self.client_id = config["client_id"] + self.client_secret = config["client_secret"] + self.scopes = config.get("scopes", ["openid", "profile", "email"]) + self.callback_url = callback_url + + # Endpoints can be explicit or discovered + self.authorization_endpoint = config.get("authorization_endpoint") + self.token_endpoint = config.get("token_endpoint") + self.userinfo_endpoint = config.get("userinfo_endpoint") + + async def discover_endpoints(self): + """Discover OIDC endpoints from .well-known/openid-configuration.""" + if not self.authorization_endpoint or not self.token_endpoint: + 
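+            # Endpoints were not configured explicitly; resolve them once from
+            # the issuer's standard OIDC discovery document.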
discovery_url = f"{self.issuer}/.well-known/openid-configuration" + async with httpx.AsyncClient() as client: + response = await client.get(discovery_url) + response.raise_for_status() + config = response.json() + + self.authorization_endpoint = config["authorization_endpoint"] + self.token_endpoint = config["token_endpoint"] + self.userinfo_endpoint = config.get("userinfo_endpoint") + + def build_authorization_url(self, state: OIDCState) -> str: + """Build the OIDC authorization URL.""" + params = { + "client_id": self.client_id, + "redirect_uri": self.callback_url, + "response_type": "code", + "scope": " ".join(self.scopes), + "state": state.state_id, + "nonce": state.nonce, + } + return f"{self.authorization_endpoint}?{urlencode(params)}" + + async def exchange_code_for_tokens(self, code: str) -> Dict[str, Any]: + """Exchange authorization code for tokens.""" + data = { + "grant_type": "authorization_code", + "code": code, + "client_id": self.client_id, + "client_secret": self.client_secret, + "redirect_uri": self.callback_url, + } + + async with httpx.AsyncClient() as client: + response = await client.post( + self.token_endpoint, + data=data, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + response.raise_for_status() + return response.json() + + async def get_userinfo(self, access_token: str) -> Dict[str, Any]: + """Fetch user info from the userinfo endpoint.""" + async with httpx.AsyncClient() as client: + response = await client.get( + self.userinfo_endpoint, + headers={"Authorization": f"Bearer {access_token}"}, + ) + response.raise_for_status() + return response.json() diff --git a/api/oss/src/core/auth/service.py b/api/oss/src/core/auth/service.py new file mode 100644 index 0000000000..689da967ea --- /dev/null +++ b/api/oss/src/core/auth/service.py @@ -0,0 +1,887 @@ +"""Authentication and authorization service. + +This service provides three main capabilities: +1. Discovery: Determine available authentication methods for a user +2. Authentication: Support authentication flows (via SuperTokens + helpers) +3. Authorization: Validate user access based on organization policies +""" + +from typing import Optional, Dict, List, Any +from uuid import UUID + +from oss.src.utils.common import is_ee +from oss.src.dbs.postgres.users.dao import IdentitiesDAO +from oss.src.services import db_manager +from oss.src.utils.env import env +from oss.src.utils.logging import get_module_logger +from oss.src.models.db_models import InvitationDB, ProjectDB +from oss.src.dbs.postgres.shared.engine import engine +from sqlalchemy import select + +# Organization DAOs and models (EE only) +if is_ee(): + from ee.src.dbs.postgres.organizations.dao import ( + OrganizationDomainsDAO, + OrganizationProvidersDAO, + ) + from oss.src.models.db_models import OrganizationDB + from ee.src.models.db_models import OrganizationMemberDB + +log = get_module_logger(__name__) + + +class AuthService: + """ + Centralized authentication and authorization service. + + Note: Actual authentication flows are handled by SuperTokens recipes. + This service provides supporting logic for discovery, validation, and policy enforcement. 
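+
+    Illustrative call pattern (a sketch; both methods are defined below):
+
+        service = AuthService()
+        discovery = await service.discover("user@acme.com")
+        error = await service.check_organization_access(
+            user_id, organization_id, session_identities
+        )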
+ """ + + def __init__(self): + self.identities_dao = IdentitiesDAO() + + # Initialize EE DAOs if available + if is_ee(): + self.domains_dao = OrganizationDomainsDAO() + self.providers_dao = OrganizationProvidersDAO() + else: + self.domains_dao = None + self.providers_dao = None + + # ============================================================================ + # DISCOVERY: Determine available authentication methods + # ============================================================================ + + async def discover(self, email: str) -> Dict[str, Any]: + """ + Discover authentication methods available for a given email. + + This is the pre-authentication discovery endpoint that helps the frontend + determine which auth flows to present to the user. + + Logic: + 1. Check user's organization memberships and pending invitations + 2. Check for organizations with verified domains matching user's email + 3. For each relevant organization: + - If org has verified domain + active SSO: enforce SSO-only + - Otherwise: aggregate allowed methods from org policy flags + 4. SSO providers are shown if user has access to orgs with active SSO + + SSO Enforcement Rules: + - SSO can ONLY be the sole auth method if org has BOTH: + a) Verified domain matching user's email domain + b) Active SSO provider configured + - When SSO is enforced, email and social auth are not available + + Auto-join and Domain Restrictions (enforced at login, not discovery): + - auto_join: User is auto-added to org on login if domain matches + - domains_only: Only users with matching domain can access org + + Response format: + { + "exists": bool, # Whether user account exists + "methods": { + "email:password": true, # Only present if available + "email:otp": true, # Only present if available + "social:google": true, # Only present if available + "social:github": true, # Only present if available + "sso": [ # Only present if SSO available + {"slug": "okta", "name": "ACME SSO"} + ] + } + } + + Note: Only methods that are available (true) are included in the response. + Missing methods should be assumed false on the client side. + """ + print(f"[DISCOVERY] Starting discovery for email: {email}") + + # Extract domain from email (if provided) + domain = email.split("@")[1] if email and "@" in email else None + print(f"[DISCOVERY] Extracted domain: {domain}") + + # Check if user exists only when email looks valid + user = None + user_exists = False + user_id = None + if email and "@" in email: + user = await db_manager.get_user_with_email(email) + user_exists = user is not None + user_id = UUID(str(user.id)) if user else None + print(f"[DISCOVERY] User exists: {user_exists}, user_id: {user_id}") + + # Get relevant organization IDs (EE only) + # Include: memberships, pending invitations, and domain-based access + org_ids: List[UUID] = [] + domain_org_ids: List[ + UUID + ] = [] # Orgs with verified domain matching user's email + + print(f"[DISCOVERY] Is EE: {is_ee()}") + + if is_ee(): + # 1. User's existing memberships + if user_exists and user_id: + try: + orgs = await db_manager.get_user_organizations(str(user_id)) + org_ids = [org.id for org in orgs] + print(f"[DISCOVERY] User organizations: {org_ids}") + except Exception as e: + print(f"[DISCOVERY] Error fetching user organizations: {e}") + org_ids = [] + + # 2. 
Organizations with pending project invitations + if email: + try: + async with engine.core_session() as session: + # Query project_invitations for this email, join with projects to get organization_id + stmt = ( + select(ProjectDB.organization_id) + .join(InvitationDB, InvitationDB.project_id == ProjectDB.id) + .where(InvitationDB.email == email) + .where(~InvitationDB.used) + .distinct() + ) + result = await session.execute(stmt) + invitation_org_ids = [row[0] for row in result.fetchall()] + + print( + f"[DISCOVERY] Pending invitation orgs: {invitation_org_ids}" + ) + + # Add to org_ids if not already present + for invitation_org_id in invitation_org_ids: + if invitation_org_id not in org_ids: + org_ids.append(invitation_org_id) + except Exception as e: + print(f"[DISCOVERY] Error fetching pending invitations: {e}") + + # 3. Organizations with verified domain matching user's email + if domain and self.domains_dao: + domain_dto = await self.domains_dao.get_verified_by_slug(domain) + print(f"[DISCOVERY] Domain lookup for {domain}: {domain_dto}") + if domain_dto: + domain_org_ids.append(domain_dto.organization_id) + print(f"[DISCOVERY] Domain org: {domain_dto.organization_id}") + # Include in org_ids for policy aggregation + if domain_dto.organization_id not in org_ids: + org_ids.append(domain_dto.organization_id) + + print(f"[DISCOVERY] Final org_ids: {org_ids}") + print(f"[DISCOVERY] Domain org_ids: {domain_org_ids}") + + # Aggregate allowed methods across all organizations (EE only) + all_allowed_methods: set[str] = set() + has_sso_enforcement = False # Track if any org has SSO + verified domain + + print( + f"[DISCOVERY] Starting policy aggregation. EE={is_ee()}, org_ids={len(org_ids) if org_ids else 0}" + ) + + if is_ee() and org_ids: + # Check policy flags for each organization + for org_id in org_ids: + print(f"[DISCOVERY] Checking org {org_id}") + org_flags = await self._get_organization_flags(org_id) + print(f"[DISCOVERY] Org {org_id} flags: {org_flags}") + + if org_flags: + # Check if this org has verified domain (enables SSO enforcement) + has_verified_domain = org_id in domain_org_ids + print( + f"[DISCOVERY] Org {org_id} has verified domain: {has_verified_domain}" + ) + + # Check if this org has active SSO providers + has_active_sso = False + if self.providers_dao: + providers = await self.providers_dao.list_by_organization( + str(org_id) + ) + print( + f"[DISCOVERY] Org {org_id} SSO providers: {[(p.slug, p.flags) for p in providers]}" + ) + has_active_sso = any( + p.flags and p.flags.get("is_active", False) + for p in providers + ) + print( + f"[DISCOVERY] Org {org_id} has active SSO: {has_active_sso}" + ) + + # SSO enforcement: only SSO allowed if org has both verified domain + active SSO + if has_verified_domain and has_active_sso: + print(f"[DISCOVERY] Org {org_id} enforcing SSO-only") + has_sso_enforcement = True + all_allowed_methods.add("sso:*") + # Skip adding email/social methods for this org + continue + + # Otherwise, check normal policy flags + # Default to True if not explicitly set + if org_flags.get("allow_email", env.auth.email_enabled): + print(f"[DISCOVERY] Org {org_id} allows email") + all_allowed_methods.add("email:*") + if org_flags.get("allow_social", env.auth.oidc_enabled): + print(f"[DISCOVERY] Org {org_id} allows social") + all_allowed_methods.add("social:*") + if org_flags.get("allow_sso", False): + print(f"[DISCOVERY] Org {org_id} allows SSO") + all_allowed_methods.add("sso:*") + + print(f"[DISCOVERY] Aggregated methods: {all_allowed_methods}") + 
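+        # Worked example (illustrative): one org with flags {"allow_email": True}
+        # and another with {"allow_sso": True} aggregate to {"email:*", "sso:*"};
+        # has_sso_enforcement only flips for an org that pairs a verified domain
+        # with an active SSO provider.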
print(f"[DISCOVERY] SSO enforcement: {has_sso_enforcement}") + + # If user has no organizations, show globally configured auth methods + if not all_allowed_methods: + # Check what's actually enabled in the SuperTokens configuration + if env.auth.email_method == "password": + all_allowed_methods.add("email:password") + elif env.auth.email_method == "otp": + all_allowed_methods.add("email:otp") + + if env.auth.google_enabled: + all_allowed_methods.add("social:google") + + if env.auth.google_workspaces_enabled: + all_allowed_methods.add("social:google-workspaces") + + if env.auth.github_enabled: + all_allowed_methods.add("social:github") + + if env.auth.facebook_enabled: + all_allowed_methods.add("social:facebook") + + if env.auth.apple_enabled: + all_allowed_methods.add("social:apple") + + if env.auth.discord_enabled: + all_allowed_methods.add("social:discord") + + if env.auth.twitter_enabled: + all_allowed_methods.add("social:twitter") + + if env.auth.gitlab_enabled: + all_allowed_methods.add("social:gitlab") + + if env.auth.bitbucket_enabled: + all_allowed_methods.add("social:bitbucket") + + if env.auth.linkedin_enabled: + all_allowed_methods.add("social:linkedin") + + if env.auth.okta_enabled: + all_allowed_methods.add("social:okta") + + if env.auth.azure_ad_enabled: + all_allowed_methods.add("social:azure-ad") + + if env.auth.boxy_saml_enabled: + all_allowed_methods.add("social:boxy-saml") + + # Get SSO providers (EE only) + # Show SSO providers from user's organizations (if user exists and is a member) + sso_providers = [] + print( + f"[DISCOVERY] Collecting SSO providers. EE={is_ee()}, has_providers_dao={self.providers_dao is not None}, org_ids={org_ids}" + ) + + if is_ee() and self.providers_dao and org_ids: + provider_map = {} # Use dict to deduplicate by slug + + # Get SSO providers from all user's organizations + for org_id in org_ids: + organization = await db_manager.get_organization_by_id(str(org_id)) + if not organization or not organization.slug: + print( + f"[DISCOVERY] Org {org_id} missing slug; skipping SSO providers" + ) + continue + + providers = await self.providers_dao.list_by_organization(str(org_id)) + print( + f"[DISCOVERY] Org {org_id} SSO providers (raw): {[(p.slug, p.name, p.flags) for p in providers]}" + ) + for p in providers: + is_active = p.flags and p.flags.get("is_active", False) + print(f"[DISCOVERY] Provider {p.slug}: is_active={is_active}") + if is_active: + provider_map[p.slug] = { + "id": str(p.id), + "slug": p.slug, + "third_party_id": f"sso:{organization.slug}:{p.slug}", + } + print(f"[DISCOVERY] Added provider {p.slug} to map") + + sso_providers = list(provider_map.values()) + print(f"[DISCOVERY] Final SSO providers: {sso_providers}") + + # Build methods dict - only include methods that are true + methods = {} + + print( + f"[DISCOVERY] Building response. 
has_sso_enforcement={has_sso_enforcement}" + ) + + # If SSO enforcement is active, ONLY return SSO methods + if has_sso_enforcement: + print("[DISCOVERY] SSO enforcement active, returning SSO-only") + # SSO enforcement: only SSO providers, no email or social + if sso_providers: + methods["sso"] = {"providers": sso_providers} + response = { + "exists": user_exists, + "methods": methods, + } + print(f"[DISCOVERY] Final response (SSO enforcement): {response}") + return response + + # Otherwise, include all allowed methods based on policy + # Email methods - check both specific method and wildcard + # But respect the configured email_method (only one can be active) + if "email:*" in all_allowed_methods: + # Organization allows email, use the globally configured method + if env.auth.email_method == "password": + methods["email:password"] = True + elif env.auth.email_method == "otp": + methods["email:otp"] = True + else: + # Use specific methods from all_allowed_methods + if "email:password" in all_allowed_methods: + methods["email:password"] = True + if "email:otp" in all_allowed_methods: + methods["email:otp"] = True + + # Social methods - respect environment configuration + has_social_wildcard = "social:*" in all_allowed_methods + + if "social:google" in all_allowed_methods or ( + has_social_wildcard and env.auth.google_enabled + ): + methods["social:google"] = True + if "social:google-workspaces" in all_allowed_methods or ( + has_social_wildcard and env.auth.google_workspaces_enabled + ): + methods["social:google-workspaces"] = True + if "social:github" in all_allowed_methods or ( + has_social_wildcard and env.auth.github_enabled + ): + methods["social:github"] = True + if "social:facebook" in all_allowed_methods or ( + has_social_wildcard and env.auth.facebook_enabled + ): + methods["social:facebook"] = True + if "social:apple" in all_allowed_methods or ( + has_social_wildcard and env.auth.apple_enabled + ): + methods["social:apple"] = True + if "social:discord" in all_allowed_methods or ( + has_social_wildcard and env.auth.discord_enabled + ): + methods["social:discord"] = True + if "social:twitter" in all_allowed_methods or ( + has_social_wildcard and env.auth.twitter_enabled + ): + methods["social:twitter"] = True + if "social:gitlab" in all_allowed_methods or ( + has_social_wildcard and env.auth.gitlab_enabled + ): + methods["social:gitlab"] = True + if "social:bitbucket" in all_allowed_methods or ( + has_social_wildcard and env.auth.bitbucket_enabled + ): + methods["social:bitbucket"] = True + if "social:linkedin" in all_allowed_methods or ( + has_social_wildcard and env.auth.linkedin_enabled + ): + methods["social:linkedin"] = True + if "social:okta" in all_allowed_methods or ( + has_social_wildcard and env.auth.okta_enabled + ): + methods["social:okta"] = True + if "social:azure-ad" in all_allowed_methods or ( + has_social_wildcard and env.auth.azure_ad_enabled + ): + methods["social:azure-ad"] = True + if "social:boxy-saml" in all_allowed_methods or ( + has_social_wildcard and env.auth.boxy_saml_enabled + ): + methods["social:boxy-saml"] = True + + # SSO - only include if providers are available + if sso_providers: + methods["sso"] = {"providers": sso_providers} + print(f"[DISCOVERY] Including SSO providers in response: {sso_providers}") + + response = { + "exists": user_exists, + "methods": methods, + } + + print(f"[DISCOVERY] Final response: {response}") + return response + + # ============================================================================ + # AUTHENTICATION: Support 
authentication flows + # ============================================================================ + # Note: Actual authentication is handled by SuperTokens recipes. + # See supertokens_overrides.py for: + # - Dynamic OIDC provider configuration (get_dynamic_oidc_provider) + # - Post-authentication hooks (sign_in_up override) + # - Session creation with identities (create_new_session override) + + async def get_user_identities(self, user_id: UUID) -> List[str]: + """ + Get all authentication methods (identities) for a user. + + Returns list of method strings like: + - ["email:otp", "social:google", "sso:acme:okta"] + + Used to populate session payload and for policy validation. + """ + identities = await self.identities_dao.list_by_user(user_id) + return [identity.method for identity in identities] + + async def validate_provider_access( + self, provider_id: UUID, email: Optional[str] = None + ) -> bool: + """ + Validate if a user can access a given SSO provider (EE only). + + Checks: + 1. Provider exists and is enabled + 2. If provider has domain restriction, user's email domain matches + + Args: + provider_id: UUID of the organization_providers entry + email: User's email (optional, for domain validation) + + Returns: + True if user can access this provider + """ + if not is_ee() or not self.providers_dao: + return False + + provider = await self.providers_dao.get_by_id(provider_id) + + if not provider or not (provider.flags and provider.flags.get("is_active")): + return False + + # Note: domain_id FK removed - SSO providers can handle multiple domains + # Domain validation is now handled at discovery time, not provider validation time + + return True + + async def enforce_domain_policies(self, email: str, user_id: UUID) -> None: + """ + Enforce domain-based policies after successful authentication: + 1. Auto-join: Automatically add user to organizations with verified domain + auto_join flag + 2. Domains-only validation: Block if user's domain doesn't match org's verified domains + + This should be called during login/callback after user is authenticated. + + Args: + email: User's email address + user_id: Internal user UUID + + Raises: + Exception: If domains-only enforcement blocks access + """ + if not is_ee() or not self.domains_dao: + return + + # Extract domain from email + domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None + if not domain: + return + + # Check for verified domain matching user's email + domain_dto = await self.domains_dao.get_verified_by_slug(domain) + if not domain_dto: + return + + # Get organization and check flags + org_id = domain_dto.organization_id + org_flags = await self._get_organization_flags(org_id) + if not org_flags: + return + + # 1. 
Auto-join: Add user to organization if auto_join flag is enabled + auto_join = org_flags.get("auto_join", False) + if auto_join: + try: + # Check if user is already a member of this organization + user_orgs = await db_manager.get_user_organizations(str(user_id)) + is_member = any(org.id == org_id for org in user_orgs) + + if not is_member: + from ee.src.services import db_manager_ee + from ee.src.models.db_models import ( + OrganizationMemberDB, + WorkspaceMemberDB, + ProjectMemberDB, + ) + from oss.src.dbs.postgres.shared.engine import engine as db_engine + from sqlalchemy import select + + organization = await db_manager.get_organization_by_id(str(org_id)) + user = await db_manager.get_user_with_id(user_id=str(user_id)) + workspaces = await db_manager_ee.get_organization_workspaces( + str(org_id) + ) + + if not organization or not user or not workspaces: + raise ValueError( + "Auto-join requires organization, user, and at least one workspace" + ) + + async with db_engine.core_session() as session: + existing_org_member = await session.execute( + select(OrganizationMemberDB).filter_by( + user_id=user.id, organization_id=organization.id + ) + ) + if not existing_org_member.scalars().first(): + session.add( + OrganizationMemberDB( + user_id=user.id, + organization_id=organization.id, + role="member", + ) + ) + + for workspace in workspaces: + existing_workspace_member = await session.execute( + select(WorkspaceMemberDB).filter_by( + user_id=user.id, workspace_id=workspace.id + ) + ) + if not existing_workspace_member.scalars().first(): + session.add( + WorkspaceMemberDB( + user_id=user.id, + workspace_id=workspace.id, + role="editor", + ) + ) + + projects = await db_manager.fetch_projects_by_workspace( + str(workspace.id) + ) + if not projects: + continue + + existing_project_members = await session.execute( + select(ProjectMemberDB).filter( + ProjectMemberDB.project_id.in_( + [project.id for project in projects] + ), + ProjectMemberDB.user_id == user.id, + ) + ) + existing_project_ids = { + member.project_id + for member in existing_project_members.scalars().all() + } + + for project in projects: + if project.id in existing_project_ids: + continue + session.add( + ProjectMemberDB( + user_id=user.id, + project_id=project.id, + role="editor", + ) + ) + + await session.commit() + + log.info( + "Auto-join: added user to organization with editor access", + user_id=str(user_id), + organization_id=str(org_id), + ) + except Exception as e: + log.error("Error during auto-join: %s", e) + + # 2. Domains-only enforcement: Check if user has access + # This is enforced at the organization level via check_organization_access() + # when the user tries to access organization resources through the middleware. + # No action needed here during login - enforcement happens at access time. + + # ============================================================================ + # AUTHORIZATION: Validate access based on policies + # ============================================================================ + + async def check_organization_access( + self, user_id: UUID, organization_id: UUID, session_identities: List[str] + ) -> Optional[Dict[str, Any]]: + """ + Check if user's identities satisfy organization policy (EE only). + + This is the core authorization logic used by the middleware. 
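+
+        Example outcome (illustrative): identities ["email:otp"] checked against
+        an organization whose flags only allow SSO produce an
+        AUTH_UPGRADE_REQUIRED error dict.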
+ + Args: + user_id: User's UUID + organization_id: Organization's UUID + session_identities: List of authentication methods verified in session + + Returns: + None if access allowed + Dict with error details if access denied + + Possible error responses: + - NOT_A_MEMBER: User is not a member of the organization + - AUTH_UPGRADE_REQUIRED: User must authenticate with additional method + """ + # If EE not enabled, allow access (no policy enforcement in OSS) + if not is_ee(): + return None + + # Note: We don't check membership here - that's the responsibility of route handlers + # This function only validates authentication method policies + + # Get organization flags + org_flags = await self._get_organization_flags(organization_id) + + if not org_flags: + # No flags means no restrictions (default allow all) + return None + + # Check for root bypass: if user is owner and allow_root is True, bypass policy + is_owner = await self._is_organization_owner(user_id, organization_id) + + if is_owner and org_flags.get("allow_root", False): + # Owner with root access bypasses policy + return None + + # Build allowed methods from flags + # Default to True if not explicitly set + allowed_methods = [] + + allow_email = org_flags.get("allow_email", env.auth.email_enabled) + allow_social = org_flags.get("allow_social", env.auth.oidc_enabled) + allow_sso = org_flags.get("allow_sso", False) + + if allow_email: + allowed_methods.append("email:*") + if allow_social: + allowed_methods.append("social:*") + if allow_sso: + allowed_methods.append("sso:*") + + # If no methods are allowed, deny access + if not allowed_methods: + return { + "error": "AUTH_UPGRADE_REQUIRED", + "message": "No authentication methods are allowed for this organization", + "required_methods": [], + "current_identities": session_identities, + } + + # Check if identities satisfy allowed_methods + matches = self._matches_policy(session_identities, allowed_methods) + + if not matches: + # If the session used SSO but the org doesn't allow it (or provider inactive), + # block and instruct user to re-auth with allowed methods. 
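+            # Example (illustrative): session_identities == ["sso:acme:okta"]
+            # while this org's slug is "globex" makes sso_matches_org False, so
+            # AUTH_SSO_DENIED is returned with the org's email/social methods.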
+ sso_identity = next( + ( + identity + for identity in session_identities + if identity.startswith("sso:") + ), + None, + ) + if sso_identity and self.providers_dao: + org_slug = await self._get_organization_slug(organization_id) + provider_slug = ( + sso_identity.split(":")[2] + if len(sso_identity.split(":")) > 2 + else None + ) + providers = await self.providers_dao.list_by_organization( + str(organization_id) + ) + active_provider_slugs = { + p.slug + for p in providers + if p.flags and p.flags.get("is_active", False) + } + sso_matches_org = bool( + org_slug and sso_identity.startswith(f"sso:{org_slug}:") + ) + sso_provider_active = bool( + provider_slug and provider_slug in active_provider_slugs + ) + + if not allow_sso or not sso_matches_org or not sso_provider_active: + required_methods = [] + if allow_email: + required_methods.append("email:*") + if allow_social: + required_methods.append("social:*") + return { + "error": "AUTH_SSO_DENIED", + "message": "SSO is denied for this organization", + "required_methods": required_methods, + "current_identities": session_identities, + } + sso_providers = [] + if "sso:*" in allowed_methods: + sso_providers = await self._get_active_sso_providers(organization_id) + return { + "error": "AUTH_UPGRADE_REQUIRED", + "message": "Additional authentication required", + "required_methods": allowed_methods, + "current_identities": session_identities, + "sso_providers": sso_providers, + } + + # Check domains_only enforcement + domains_only = org_flags.get("domains_only", False) + if domains_only and self.domains_dao: + # Get user's email to check domain + user = await db_manager.get_user(str(user_id)) + if user and user.email: + email_domain = user.email.split("@")[-1].lower() + + # Get verified domains for this organization + org_domains = await self.domains_dao.list_by_organization( + str(organization_id) + ) + verified_domain_slugs = { + d.slug.lower() + for d in org_domains + if d.flags and d.flags.get("is_verified", False) + } + + # If user's domain is not in the verified domains, deny access + if email_domain not in verified_domain_slugs: + return { + "error": "AUTH_DOMAIN_DENIED", + "message": f"Your email domain '{email_domain}' is not allowed for this organization", + "current_domain": email_domain, + "allowed_domains": list(verified_domain_slugs), + } + + return None + + def _matches_policy( + self, identities: List[str], allowed_methods: List[str] + ) -> bool: + """ + Check if user's identities satisfy the allowed_methods policy. + + Supports wildcards: + - "email:*" matches "email:otp", "email:password" + - "social:*" matches "social:google", "social:github" + - "sso:*" matches any SSO provider + - "sso:acme:*" matches any provider for organization 'acme' + + This is the same logic as middleware.matches_policy(). 
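+
+        Examples (illustrative):
+            _matches_policy(["email:otp"], ["email:*"]) → True
+            _matches_policy(["social:google"], ["sso:*"]) → False
+            _matches_policy(["sso:acme:okta"], ["sso:acme:*"]) → True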
+ """ + for identity in identities: + for allowed in allowed_methods: + # Exact match + if identity == allowed: + return True + + # Wildcard match + if allowed.endswith(":*"): + prefix = allowed[:-2] # Remove ":*" + if identity.startswith(f"{prefix}:"): + return True + + return False + + async def _get_active_sso_providers( + self, organization_id: UUID + ) -> List[Dict[str, str]]: + if not is_ee() or not self.providers_dao: + return [] + + organization = await db_manager.get_organization_by_id(str(organization_id)) + if not organization or not organization.slug: + return [] + + providers = await self.providers_dao.list_by_organization(str(organization_id)) + results = [] + for provider in providers: + if provider.flags and provider.flags.get("is_active", False): + results.append( + { + "id": str(provider.id), + "slug": provider.slug, + "third_party_id": f"sso:{organization.slug}:{provider.slug}", + } + ) + return results + + async def _get_organization_flags( + self, organization_id: UUID + ) -> Optional[Dict[str, Any]]: + """ + Get organization flags from organizations table (EE only). + + Returns flags JSONB field or None if organization not found. + """ + if not is_ee(): + return None + + async with db_manager.engine.core_session() as session: + stmt = select(OrganizationDB.flags).where( + OrganizationDB.id == organization_id + ) + result = await session.execute(stmt) + flags = result.scalar() + return flags or {} + + async def _get_organization_slug(self, organization_id: UUID) -> Optional[str]: + if not is_ee(): + return None + + async with db_manager.engine.core_session() as session: + stmt = select(OrganizationDB.slug).where( + OrganizationDB.id == organization_id + ) + result = await session.execute(stmt) + slug = result.scalar() + return slug or None + + async def _is_organization_member( + self, user_id: UUID, organization_id: UUID + ) -> bool: + """ + Check if user is a member of the organization (EE only). + """ + if not is_ee(): + return False + + async with db_manager.engine.core_session() as session: + stmt = select(OrganizationMemberDB).where( + OrganizationMemberDB.user_id == user_id, + OrganizationMemberDB.organization_id == organization_id, + ) + result = await session.execute(stmt) + return result.scalar() is not None + + async def _is_organization_owner( + self, user_id: UUID, organization_id: UUID + ) -> bool: + """ + Check if user is the owner of the organization (EE only). + """ + if not is_ee(): + return False + + async with db_manager.engine.core_session() as session: + stmt = select(OrganizationMemberDB.role).where( + OrganizationMemberDB.user_id == user_id, + OrganizationMemberDB.organization_id == organization_id, + ) + result = await session.execute(stmt) + role = result.scalar() + return role == "owner" diff --git a/api/oss/src/core/auth/state_store.py b/api/oss/src/core/auth/state_store.py new file mode 100644 index 0000000000..8d9569be27 --- /dev/null +++ b/api/oss/src/core/auth/state_store.py @@ -0,0 +1,50 @@ +"""In-memory state store for OIDC flows. 
TODO: Move to Redis for production.""" + +from typing import Dict, Optional +from datetime import datetime, timedelta +import asyncio + + +class StateStore: + """Simple in-memory state store with expiration.""" + + def __init__(self): + self._store: Dict[str, Dict] = {} + self._expiry: Dict[str, datetime] = {} + + async def set(self, key: str, value: Dict, ttl_seconds: int = 600) -> None: + """Store a value with TTL (default 10 minutes).""" + self._store[key] = value + self._expiry[key] = datetime.utcnow() + timedelta(seconds=ttl_seconds) + await self._cleanup_expired() + + async def get(self, key: str) -> Optional[Dict]: + """Get a value, return None if expired or not found.""" + await self._cleanup_expired() + + if key not in self._store: + return None + + if key in self._expiry and datetime.utcnow() > self._expiry[key]: + del self._store[key] + del self._expiry[key] + return None + + return self._store[key] + + async def delete(self, key: str) -> None: + """Delete a value.""" + self._store.pop(key, None) + self._expiry.pop(key, None) + + async def _cleanup_expired(self) -> None: + """Remove expired entries.""" + now = datetime.utcnow() + expired_keys = [k for k, exp in self._expiry.items() if now > exp] + for key in expired_keys: + self._store.pop(key, None) + self._expiry.pop(key, None) + + +# Singleton instance +state_store = StateStore() diff --git a/api/oss/src/core/auth/supertokens_config.py b/api/oss/src/core/auth/supertokens_config.py new file mode 100644 index 0000000000..256e049f89 --- /dev/null +++ b/api/oss/src/core/auth/supertokens_config.py @@ -0,0 +1,306 @@ +"""SuperTokens configuration and initialization.""" + +from typing import Dict, List, Any, Optional +from supertokens_python import init, InputAppInfo, SupertokensConfig +from supertokens_python.recipe import ( + passwordless, + session, + dashboard, + thirdparty, +) +from supertokens_python.recipe.passwordless import ( + ContactEmailOnlyConfig, + InputOverrideConfig as PasswordlessInputOverrideConfig, +) +from supertokens_python.recipe.thirdparty import ( + ProviderInput, + ProviderConfig, + ProviderClientConfig, + InputOverrideConfig as ThirdPartyInputOverrideConfig, +) +from supertokens_python.recipe.session import ( + InputOverrideConfig as SessionInputOverrideConfig, +) + +from oss.src.utils.env import env +from oss.src.core.auth.supertokens_overrides import ( + override_thirdparty_functions, + override_thirdparty_apis, + override_passwordless_functions, + override_session_functions, +) + + +def get_supertokens_config() -> Dict[str, Any]: + """Get SuperTokens configuration from environment.""" + return { + "connection_uri": env.supertokens.uri_core, + "api_key": env.supertokens.api_key, + } + + +def get_app_info() -> InputAppInfo: + """Get SuperTokens app info.""" + # Extract domain from full URL (e.g., "http://localhost/api" -> "http://localhost") + from urllib.parse import urlparse + + api_parsed = urlparse(env.agenta.api_url) + api_domain = f"{api_parsed.scheme}://{api_parsed.netloc}" + api_gateway_path = api_parsed.path or "/" + # Avoid double /api when app is already mounted under root_path="/api". 
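+    # e.g. env.agenta.api_url == "http://localhost/api" yields
+    # api_domain "http://localhost" and api_gateway_path "/" here.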
+ if api_gateway_path == "/api": + api_gateway_path = "/" + + app_info = InputAppInfo( + app_name="Agenta", + api_domain=api_domain, + website_domain=env.agenta.web_url, + api_gateway_path=api_gateway_path, + api_base_path="/auth", + website_base_path="/auth", + ) + print( + "[SUPERTOKENS] AppInfo: " + f"api_domain={api_domain} api_gateway_path={api_gateway_path} " + f"api_base_path=/auth website_domain={env.agenta.web_url}" + ) + return app_info + + +def get_thirdparty_providers() -> List[ProviderInput]: + """ + Get third-party OAuth providers configuration. + + This includes: + - Social providers (Google, GitHub, etc.) + - Dynamic OIDC providers will be added at runtime via override callbacks + """ + providers = [] + + def add_provider( + *, + provider_id: str, + client_id: str, + client_secret: str | None, + additional_config: Dict[str, Any] | None = None, + ) -> None: + providers.append( + ProviderInput( + config=ProviderConfig( + third_party_id=provider_id, + clients=[ + ProviderClientConfig( + client_id=client_id, + client_secret=client_secret, + additional_config=additional_config, + ), + ], + ) + ) + ) + + # Google OAuth + if env.auth.google_enabled: + assert env.auth.google_oauth_client_id is not None + assert env.auth.google_oauth_client_secret is not None + add_provider( + provider_id="google", + client_id=env.auth.google_oauth_client_id, + client_secret=env.auth.google_oauth_client_secret, + ) + + # Google Workspaces OAuth + if env.auth.google_workspaces_enabled: + assert env.auth.google_workspaces_oauth_client_id is not None + assert env.auth.google_workspaces_oauth_client_secret is not None + add_provider( + provider_id="google-workspaces", + client_id=env.auth.google_workspaces_oauth_client_id, + client_secret=env.auth.google_workspaces_oauth_client_secret, + additional_config={ + "hd": env.auth.google_workspaces_hd, + } + if env.auth.google_workspaces_hd + else None, + ) + + # GitHub OAuth + if env.auth.github_enabled: + assert env.auth.github_oauth_client_id is not None + assert env.auth.github_oauth_client_secret is not None + add_provider( + provider_id="github", + client_id=env.auth.github_oauth_client_id, + client_secret=env.auth.github_oauth_client_secret, + additional_config={"scope": ["user:email"]}, + ) + + # Facebook OAuth + if env.auth.facebook_enabled: + assert env.auth.facebook_oauth_client_id is not None + assert env.auth.facebook_oauth_client_secret is not None + add_provider( + provider_id="facebook", + client_id=env.auth.facebook_oauth_client_id, + client_secret=env.auth.facebook_oauth_client_secret, + ) + + # Apple OAuth + if env.auth.apple_enabled: + assert env.auth.apple_oauth_client_id is not None + additional_config = None + if ( + env.auth.apple_key_id + and env.auth.apple_team_id + and env.auth.apple_private_key + ): + additional_config = { + "keyId": env.auth.apple_key_id, + "teamId": env.auth.apple_team_id, + "privateKey": env.auth.apple_private_key, + } + add_provider( + provider_id="apple", + client_id=env.auth.apple_oauth_client_id, + client_secret=env.auth.apple_oauth_client_secret, + additional_config=additional_config, + ) + + # Discord OAuth + if env.auth.discord_enabled: + assert env.auth.discord_oauth_client_id is not None + assert env.auth.discord_oauth_client_secret is not None + add_provider( + provider_id="discord", + client_id=env.auth.discord_oauth_client_id, + client_secret=env.auth.discord_oauth_client_secret, + ) + + # Twitter OAuth + if env.auth.twitter_enabled: + assert env.auth.twitter_oauth_client_id is not None + assert 
env.auth.twitter_oauth_client_secret is not None + add_provider( + provider_id="twitter", + client_id=env.auth.twitter_oauth_client_id, + client_secret=env.auth.twitter_oauth_client_secret, + ) + + # GitLab OAuth + if env.auth.gitlab_enabled: + assert env.auth.gitlab_oauth_client_id is not None + assert env.auth.gitlab_oauth_client_secret is not None + add_provider( + provider_id="gitlab", + client_id=env.auth.gitlab_oauth_client_id, + client_secret=env.auth.gitlab_oauth_client_secret, + additional_config={ + "gitlabBaseUrl": env.auth.gitlab_base_url, + } + if env.auth.gitlab_base_url + else None, + ) + + # Bitbucket OAuth + if env.auth.bitbucket_enabled: + assert env.auth.bitbucket_oauth_client_id is not None + assert env.auth.bitbucket_oauth_client_secret is not None + add_provider( + provider_id="bitbucket", + client_id=env.auth.bitbucket_oauth_client_id, + client_secret=env.auth.bitbucket_oauth_client_secret, + ) + + # LinkedIn OAuth + if env.auth.linkedin_enabled: + assert env.auth.linkedin_oauth_client_id is not None + assert env.auth.linkedin_oauth_client_secret is not None + add_provider( + provider_id="linkedin", + client_id=env.auth.linkedin_oauth_client_id, + client_secret=env.auth.linkedin_oauth_client_secret, + ) + + # Okta OAuth + if env.auth.okta_enabled: + assert env.auth.okta_oauth_client_id is not None + assert env.auth.okta_oauth_client_secret is not None + assert env.auth.okta_domain is not None + add_provider( + provider_id="okta", + client_id=env.auth.okta_oauth_client_id, + client_secret=env.auth.okta_oauth_client_secret, + additional_config={ + "oktaDomain": env.auth.okta_domain, + }, + ) + + # Azure AD OAuth + if env.auth.azure_ad_enabled: + assert env.auth.azure_ad_oauth_client_id is not None + assert env.auth.azure_ad_oauth_client_secret is not None + assert env.auth.azure_ad_directory_id is not None + add_provider( + provider_id="azure-ad", + client_id=env.auth.azure_ad_oauth_client_id, + client_secret=env.auth.azure_ad_oauth_client_secret, + additional_config={ + "directoryId": env.auth.azure_ad_directory_id, + }, + ) + + # BoxySAML OAuth + if env.auth.boxy_saml_enabled: + assert env.auth.boxy_saml_oauth_client_id is not None + assert env.auth.boxy_saml_oauth_client_secret is not None + assert env.auth.boxy_saml_url is not None + add_provider( + provider_id="boxy-saml", + client_id=env.auth.boxy_saml_oauth_client_id, + client_secret=env.auth.boxy_saml_oauth_client_secret, + additional_config={ + "boxyURL": env.auth.boxy_saml_url, + }, + ) + + return providers + + +def init_supertokens(): + """Initialize SuperTokens with recipes and dynamic provider support.""" + + init( + supertokens_config=SupertokensConfig(**get_supertokens_config()), + app_info=get_app_info(), + framework="fastapi", + recipe_list=[ + # Email OTP (passwordless) + passwordless.init( + contact_config=ContactEmailOnlyConfig(), + flow_type="USER_INPUT_CODE_AND_MAGIC_LINK", + override=PasswordlessInputOverrideConfig( + functions=override_passwordless_functions, + ), + ), + # Third-party OAuth (social + dynamic OIDC) + thirdparty.init( + sign_in_and_up_feature=thirdparty.SignInAndUpFeature( + providers=get_thirdparty_providers() + ), + override=ThirdPartyInputOverrideConfig( + functions=override_thirdparty_functions, + apis=override_thirdparty_apis, + ), + ), + # Session management with custom identities payload + session.init( + get_token_transfer_method=lambda _, __, ___: "cookie", + override=SessionInputOverrideConfig( + functions=override_session_functions, + ), + ), + # SuperTokens dashboard + 
dashboard.init(), + ], + mode="asgi", + ) diff --git a/api/oss/src/core/auth/supertokens_overrides.py b/api/oss/src/core/auth/supertokens_overrides.py new file mode 100644 index 0000000000..f373756f8c --- /dev/null +++ b/api/oss/src/core/auth/supertokens_overrides.py @@ -0,0 +1,762 @@ +"""SuperTokens override functions for dynamic OIDC providers and custom session handling.""" + +from typing import Dict, Any, List, Optional, Union +from uuid import UUID + +from oss.src.utils.logging import get_module_logger + +from supertokens_python.recipe.thirdparty.provider import ( + Provider, + ProviderInput, + ProviderConfig, + ProviderClientConfig, +) +from supertokens_python.recipe.thirdparty.interfaces import ( + RecipeInterface as ThirdPartyRecipeInterface, + APIInterface as ThirdPartyAPIInterface, + SignInUpOkResult, +) +from supertokens_python.recipe.passwordless.interfaces import ( + RecipeInterface as PasswordlessRecipeInterface, + ConsumeCodeOkResult, +) +from supertokens_python.recipe.emailpassword.interfaces import ( + RecipeInterface as EmailPasswordRecipeInterface, + SignInOkResult as EmailPasswordSignInOkResult, + SignUpOkResult as EmailPasswordSignUpOkResult, +) +from supertokens_python.recipe.session.interfaces import ( + RecipeInterface as SessionRecipeInterface, +) +from supertokens_python.types import User, RecipeUserId + +from oss.src.utils.common import is_ee +from oss.src.dbs.postgres.users.dao import IdentitiesDAO +from oss.src.core.users.types import UserIdentityCreate +from oss.src.services import db_manager +from oss.src.core.auth.service import AuthService + +log = get_module_logger(__name__) + +# DAOs for accessing user identities (always available) +identities_dao = IdentitiesDAO() + +# Organization providers DAO (EE only) +if is_ee(): + from ee.src.dbs.postgres.organizations.dao import OrganizationProvidersDAO + from oss.src.core.secrets.services import VaultService + from oss.src.dbs.postgres.secrets.dao import SecretsDAO + + providers_dao = OrganizationProvidersDAO() +else: + providers_dao = None + +# Auth service for domain policy enforcement +auth_service = AuthService() + + +def _merge_session_identities( + session: Optional[Any], method: Optional[str] +) -> List[str]: + session_identities: List[str] = [] + if session is not None: + try: + payload = session.get_access_token_payload() + session_identities = payload.get("session_identities") or [] + except Exception: + session_identities = [] + if method: + if method not in session_identities: + session_identities = list(session_identities) + [method] + return session_identities or ([method] if method else []) + + +async def get_dynamic_oidc_provider(third_party_id: str) -> Optional[ProviderInput]: + """ + Fetch dynamic OIDC provider configuration from database (EE only). 
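+
+    Example (illustrative): third_party_id "sso:acme:okta" resolves the org with
+    slug "acme", then its active provider with slug "okta", then the provider's
+    vault secret for the OIDC client settings.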
+ + third_party_id format: "sso:{organization_slug}:{provider_slug}" + """ + # OIDC providers require EE + if not is_ee() or providers_dao is None: + log.debug(f"SSO provider {third_party_id} requested but EE not enabled") + return None + + try: + # Parse third_party_id: "sso:{organization_slug}:{provider_slug}" + if not third_party_id.startswith("sso:"): + return None + + parts = third_party_id.split(":", 2) + if len(parts) != 3: + return None + + _, organization_slug, provider_slug = parts + + from oss.src.services import db_manager + + organization = await db_manager.get_organization_by_slug(organization_slug) + if not organization: + return None + + # Fetch provider from database by organization_id and provider_slug + provider = await providers_dao.get_by_slug(provider_slug, str(organization.id)) + if not provider or not (provider.flags and provider.flags.get("is_active")): + return None + + # Extract OIDC config + vault_service = VaultService(SecretsDAO()) + secret = await vault_service.get_secret( + secret_id=provider.secret_id, + organization_id=organization.id, + ) + if not secret: + log.debug(f"Secret not found for provider id={provider.id}") + return None + + data = secret.data + provider_settings = None + if hasattr(data, "provider"): + provider_settings = data.provider.model_dump() + elif isinstance(data, dict): + provider_settings = data.get("provider") + + if not isinstance(provider_settings, dict): + log.debug(f"Invalid provider secret format for provider id={provider.id}") + return None + + issuer_url = provider_settings.get("issuer_url") + client_id = provider_settings.get("client_id") + client_secret = provider_settings.get("client_secret") + scopes = provider_settings.get("scopes", ["openid", "profile", "email"]) + + if not issuer_url or not client_id or not client_secret: + return None + + # Build ProviderInput for SuperTokens + return ProviderInput( + config=ProviderConfig( + third_party_id=third_party_id, + clients=[ + ProviderClientConfig( + client_id=client_id, + client_secret=client_secret, + scope=scopes, + ) + ], + oidc_discovery_endpoint=f"{issuer_url}/.well-known/openid-configuration", + ) + ) + except Exception as e: + # Log error but don't crash + log.debug(f"Error fetching dynamic OIDC provider {third_party_id}: {e}") + return None + + +def override_thirdparty_functions( + original_implementation: ThirdPartyRecipeInterface, +) -> ThirdPartyRecipeInterface: + """Override third-party recipe functions to support dynamic providers.""" + + original_sign_in_up = original_implementation.sign_in_up + original_get_provider = original_implementation.get_provider + + async def sign_in_up( + third_party_id: str, + third_party_user_id: str, + email: str, + is_verified: bool, + oauth_tokens: Dict[str, Any], + raw_user_info_from_provider: Dict[str, Any], + session: Optional[Any], + should_try_linking_with_session_user: Optional[bool], + tenant_id: str, + user_context: Dict[str, Any], + ) -> SignInUpOkResult: + """ + Override sign_in_up to: + 1. Create user_identity record after successful authentication + 2. 
Populate session with user_identities array + """ + internal_user = None + # Call original implementation + result = await original_sign_in_up( + third_party_id=third_party_id, + third_party_user_id=third_party_user_id, + email=email, + is_verified=is_verified, + oauth_tokens=oauth_tokens, + raw_user_info_from_provider=raw_user_info_from_provider, + session=session, + should_try_linking_with_session_user=should_try_linking_with_session_user, + tenant_id=tenant_id, + user_context=user_context, + ) + + # Determine method string based on third_party_id + if third_party_id.startswith("sso:"): + # Format: sso:{organization_slug}:{provider_slug} + method = third_party_id + elif third_party_id == "google": + method = "social:google" + elif third_party_id == "github": + method = "social:github" + else: + method = f"social:{third_party_id}" + + log.debug( + f"[AUTH-IDENTITY] third_party_id={third_party_id} method={method} email={email}" + ) + + # Extract domain from email + domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None + + # Create or update user_identity + try: + from oss.src.services.db_manager import get_user_with_email + + # Get internal user ID from database (not SuperTokens ID) + internal_user = await get_user_with_email(email) + if not internal_user: + raise Exception(f"User not found for email {email}") + + internal_user_id = internal_user.id + + # Check if identity already exists + existing = await identities_dao.get_by_method_subject( + method=method, + subject=third_party_user_id, + ) + + if not existing: + # Create new identity + await identities_dao.create( + UserIdentityCreate( + user_id=internal_user_id, + method=method, + subject=third_party_user_id, + domain=domain, + ) + ) + log.debug( + "[AUTH-IDENTITY] created", + { + "user_id": str(internal_user_id), + "method": method, + "subject": third_party_user_id, + }, + ) + else: + log.debug( + "[AUTH-IDENTITY] existing", + { + "user_id": str(internal_user_id), + "method": method, + "subject": third_party_user_id, + }, + ) + except Exception: + # Log error but don't block authentication + log.debug("[AUTH-IDENTITY] create failed", exc_info=True) + + # Fetch all user identities for session payload + try: + from oss.src.services.db_manager import get_user_with_email + + internal_user = await get_user_with_email(email) + if internal_user: + all_identities = await identities_dao.list_by_user(internal_user.id) + identities_array = [identity.method for identity in all_identities] + else: + identities_array = [method] + except Exception: + identities_array = [method] # Fallback to current method only + + # Store identity context for session creation + # user_identities = all known methods for user + # session_identities = methods verified in this session (accumulated) + user_context["user_identities"] = identities_array + session_identities = _merge_session_identities(session, method) + user_context["session_identities"] = session_identities + log.debug( + "[AUTH-IDENTITY] session_identities merge", + { + "method": method, + "session_identities": session_identities, + "user_identities": identities_array, + }, + ) + log.debug( + "[AUTH-IDENTITY] session user_identities", + { + "user_id": str(internal_user.id) if internal_user else None, + "user_identities": identities_array, + }, + ) + + # Enforce domain-based policies (auto-join, domains-only) + if internal_user: + try: + await auth_service.enforce_domain_policies( + email=email, + user_id=internal_user.id, + ) + except Exception as e: + log.debug(f"Error 
enforcing domain policies: {e}") + + return result + + original_implementation.sign_in_up = sign_in_up + + async def get_provider( + third_party_id: str, + client_type: Optional[str], + tenant_id: str, + user_context: Dict[str, Any], + ): + provider = await original_get_provider( + third_party_id=third_party_id, + client_type=client_type, + tenant_id=tenant_id, + user_context=user_context, + ) + if provider is not None: + return provider + + if not third_party_id.startswith("sso:"): + return None + + provider_input = await get_dynamic_oidc_provider(third_party_id) + if provider_input is None: + return None + + from supertokens_python.recipe.thirdparty.recipe_implementation import ( + find_and_create_provider_instance, + ) + + return await find_and_create_provider_instance( + [provider_input], + third_party_id, + client_type, + user_context, + ) + + original_implementation.get_provider = get_provider + return original_implementation + + +def override_thirdparty_apis( + original_implementation: ThirdPartyAPIInterface, +) -> ThirdPartyAPIInterface: + """Override third-party API interface if needed.""" + # For now, no API overrides needed + return original_implementation + + +def override_session_functions( + original_implementation: SessionRecipeInterface, +) -> SessionRecipeInterface: + """Override session functions to include identities in payload.""" + + original_create_new_session = original_implementation.create_new_session + + async def create_new_session( + user_id: str, + recipe_user_id: RecipeUserId, + access_token_payload: Optional[Dict[str, Any]], + session_data_in_database: Optional[Dict[str, Any]], + disable_anti_csrf: Optional[bool], + tenant_id: str, + user_context: Dict[str, Any], + ): + """ + Override create_new_session to inject user_identities array into access token payload. + """ + # Get identity context from user_context (populated by auth overrides) + user_identities = user_context.get("user_identities", []) + session_identities = user_context.get("session_identities", user_identities) + + # Merge with existing payload + if access_token_payload is None: + access_token_payload = {} + + access_token_payload["user_identities"] = user_identities + access_token_payload["session_identities"] = session_identities + + # Call original implementation + result = await original_create_new_session( + user_id=user_id, + recipe_user_id=recipe_user_id, + access_token_payload=access_token_payload, + session_data_in_database=session_data_in_database, + disable_anti_csrf=disable_anti_csrf, + tenant_id=tenant_id, + user_context=user_context, + ) + + return result + + original_implementation.create_new_session = create_new_session + return original_implementation + + +def override_passwordless_functions( + original_implementation: PasswordlessRecipeInterface, +) -> PasswordlessRecipeInterface: + """Override passwordless recipe functions to track email:otp identity.""" + + original_consume_code = original_implementation.consume_code + + async def consume_code( + pre_auth_session_id: str, + user_input_code: Optional[str], + device_id: Optional[str], + link_code: Optional[str], + session: Optional[Any], + should_try_linking_with_session_user: Optional[bool], + tenant_id: str, + user_context: Dict[str, Any], + ) -> Union[ConsumeCodeOkResult, Any]: + """ + Override consume_code to: + 1. Create user_identity record for email:otp after successful login + 2. 
Populate session with user_identities array
+        """
+        # Call original implementation
+        result = await original_consume_code(
+            pre_auth_session_id=pre_auth_session_id,
+            user_input_code=user_input_code,
+            device_id=device_id,
+            link_code=link_code,
+            session=session,
+            should_try_linking_with_session_user=should_try_linking_with_session_user,
+            tenant_id=tenant_id,
+            user_context=user_context,
+        )
+
+        # Only process if successful
+        if not isinstance(result, ConsumeCodeOkResult):
+            return result
+
+        # Determine method and subject
+        method = "email:otp"
+        email = result.user.emails[0] if result.user.emails else None
+        internal_user = None
+
+        if not email:
+            # Can't create identity without email
+            user_context["user_identities"] = [method]
+            session_identities = _merge_session_identities(session, method)
+            user_context["session_identities"] = session_identities
+            log.debug(
+                "[AUTH-IDENTITY] session_identities merge",
+                {
+                    "method": method,
+                    "session_identities": session_identities,
+                    "user_identities": [method],
+                },
+            )
+            return result
+
+        # Extract domain from email
+        domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None
+
+        # Create or update user_identity
+        try:
+            from oss.src.services.db_manager import get_user_with_email
+
+            # Get internal user ID from database (not SuperTokens ID)
+            internal_user = await get_user_with_email(email)
+            if not internal_user:
+                raise Exception(f"User not found for email {email}")
+
+            internal_user_id = internal_user.id
+
+            # Check if identity already exists
+            existing = await identities_dao.get_by_method_subject(
+                method=method,
+                subject=email,  # For email:otp, subject is the email
+            )
+
+            if not existing:
+                # Create new identity
+                await identities_dao.create(
+                    UserIdentityCreate(
+                        user_id=internal_user_id,
+                        method=method,
+                        subject=email,
+                        domain=domain,
+                        # created_by_id is optional, leaving it as None
+                    )
+                )
+        except Exception:
+            # Log error but don't block authentication
+            log.debug("[AUTH-IDENTITY] create failed", exc_info=True)
+
+        # Fetch all user identities for session payload
+        try:
+            from oss.src.services.db_manager import get_user_with_email
+
+            internal_user = await get_user_with_email(email)
+            if internal_user:
+                all_identities = await identities_dao.list_by_user(internal_user.id)
+                identities_array = [identity.method for identity in all_identities]
+            else:
+                identities_array = [method]
+        except Exception:
+            identities_array = [method]  # Fallback to current method only
+
+        # Store identity context for session creation
+        # user_identities = all known methods for user
+        # session_identities = methods verified in this session (accumulated)
+        user_context["user_identities"] = identities_array
+        session_identities = _merge_session_identities(session, method)
+        user_context["session_identities"] = session_identities
+        log.debug(
+            "[AUTH-IDENTITY] session_identities merge",
+            {
+                "method": method,
+                "session_identities": session_identities,
+                "user_identities": identities_array,
+            },
+        )
+
+        # Enforce domain-based policies (auto-join, domains-only)
+        if internal_user:
+            try:
+                await auth_service.enforce_domain_policies(
+                    email=email,
+                    user_id=internal_user.id,
+                )
+            except Exception as e:
+                log.debug(f"Error enforcing domain policies: {e}")
+
+        return result
+
+    original_implementation.consume_code = consume_code
+    return original_implementation
+
+
+def override_emailpassword_functions(
+    original_implementation: EmailPasswordRecipeInterface,
+) -> EmailPasswordRecipeInterface:
+    """Override email/password recipe functions to track email:password 
identity.""" + + original_sign_in = original_implementation.sign_in + original_sign_up = original_implementation.sign_up + + async def sign_in( + email: str, + password: str, + tenant_id: str, + session: Optional[Any], + should_try_linking_with_session_user: Optional[bool], + user_context: Dict[str, Any], + ) -> Union[EmailPasswordSignInOkResult, Any]: + """ + Override sign_in to: + 1. Create user_identity record for email:password after successful login + 2. Populate session with user_identities array + """ + + # Call original implementation + result = await original_sign_in( + email=email, + password=password, + tenant_id=tenant_id, + session=session, + should_try_linking_with_session_user=should_try_linking_with_session_user, + user_context=user_context, + ) + + # Only process if successful + if not isinstance(result, EmailPasswordSignInOkResult): + return result + + # Method for email/password + method = "email:password" + + # Extract domain from email + domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None + + # Create or update user_identity + try: + from oss.src.services.db_manager import get_user_with_email + + # Get internal user ID from database (not SuperTokens ID) + internal_user = await get_user_with_email(email) + if not internal_user: + raise Exception(f"User not found for email {email}") + + internal_user_id = internal_user.id + + # Check if identity already exists + existing = await identities_dao.get_by_method_subject( + method=method, + subject=email, # For email:password, subject is the email + ) + + if not existing: + # Create new identity + await identities_dao.create( + UserIdentityCreate( + user_id=internal_user_id, + method=method, + subject=email, + domain=domain, + # created_by_id is optional, leaving it as None + ) + ) + except Exception: + # Log error but don't block authentication + pass + + # Fetch all user identities for session payload + try: + from oss.src.services.db_manager import get_user_with_email + + internal_user = await get_user_with_email(email) + if internal_user: + all_identities = await identities_dao.list_by_user(internal_user.id) + identities_array = [identity.method for identity in all_identities] + else: + identities_array = [method] + except Exception: + identities_array = [method] # Fallback to current method only + + # Store identity context for session creation + # user_identities = all known methods for user + # session_identities = methods verified in this session (accumulated) + user_context["user_identities"] = identities_array + session_identities = _merge_session_identities(session, method) + user_context["session_identities"] = session_identities + log.debug( + "[AUTH-IDENTITY] session_identities merge", + { + "method": method, + "session_identities": session_identities, + "user_identities": identities_array, + }, + ) + + # Enforce domain-based policies (auto-join, domains-only) + if internal_user: + try: + await auth_service.enforce_domain_policies( + email=email, + user_id=internal_user.id, + ) + except Exception as e: + log.debug(f"Error enforcing domain policies: {e}") + + return result + + async def sign_up( + email: str, + password: str, + tenant_id: str, + session: Optional[Any], + should_try_linking_with_session_user: Optional[bool], + user_context: Dict[str, Any], + ) -> Union[EmailPasswordSignUpOkResult, Any]: + """ + Override sign_up to: + 1. Create user_identity record for email:password after successful signup + 2. 
Populate session with user_identities array
+        """
+
+        # Call original implementation
+        result = await original_sign_up(
+            email=email,
+            password=password,
+            tenant_id=tenant_id,
+            session=session,
+            should_try_linking_with_session_user=should_try_linking_with_session_user,
+            user_context=user_context,
+        )
+
+        # Only process if successful
+        if not isinstance(result, EmailPasswordSignUpOkResult):
+            return result
+
+        # Method for email/password
+        method = "email:password"
+        internal_user = None
+
+        # Extract domain from email
+        domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None
+
+        # Create or update user_identity
+        try:
+            from oss.src.services.db_manager import get_user_with_email
+
+            # Get internal user ID from database (not SuperTokens ID)
+            internal_user = await get_user_with_email(email)
+            if not internal_user:
+                raise Exception(f"User not found for email {email}")
+
+            internal_user_id = internal_user.id
+
+            # Check if identity already exists
+            existing = await identities_dao.get_by_method_subject(
+                method=method,
+                subject=email,  # For email:password, subject is the email
+            )
+
+            if not existing:
+                # Create new identity
+                await identities_dao.create(
+                    UserIdentityCreate(
+                        user_id=internal_user_id,
+                        method=method,
+                        subject=email,
+                        domain=domain,
+                        # created_by_id is optional, leaving it as None
+                    )
+                )
+        except Exception:
+            # Log error but don't block authentication
+            log.debug("[AUTH-IDENTITY] create failed", exc_info=True)
+
+        # Fetch all user identities for session payload
+        try:
+            from oss.src.services.db_manager import get_user_with_email
+
+            internal_user = await get_user_with_email(email)
+            if internal_user:
+                all_identities = await identities_dao.list_by_user(internal_user.id)
+                identities_array = [identity.method for identity in all_identities]
+            else:
+                identities_array = [method]
+        except Exception:
+            identities_array = [method]  # Fallback to current method only
+
+        # Store identity context for session creation
+        # user_identities = all known methods for user
+        # session_identities = methods verified in this session (accumulated)
+        user_context["user_identities"] = identities_array
+        session_identities = _merge_session_identities(session, method)
+        user_context["session_identities"] = session_identities
+        log.debug(
+            "[AUTH-IDENTITY] session_identities merge",
+            {
+                "method": method,
+                "session_identities": session_identities,
+                "user_identities": identities_array,
+            },
+        )
+
+        # Enforce domain-based policies (auto-join, domains-only)
+        if internal_user:
+            try:
+                await auth_service.enforce_domain_policies(
+                    email=email,
+                    user_id=internal_user.id,
+                )
+            except Exception as e:
+                log.debug(f"Error enforcing domain policies: {e}")
+
+        return result
+
+    original_implementation.sign_in = sign_in
+    original_implementation.sign_up = sign_up
+    return original_implementation
diff --git a/api/oss/src/core/auth/types.py b/api/oss/src/core/auth/types.py
new file mode 100644
index 0000000000..68ce3f6381
--- /dev/null
+++ b/api/oss/src/core/auth/types.py
@@ -0,0 +1,82 @@
+"""Core authentication types (OSS)."""
+
+from enum import Enum
+
+
+class MethodKind(str, Enum):
+    """
+    Valid authentication method patterns. 
+ + Supports exact matches and wildcards: + - email:otp - Email OTP authentication + - email:password - Email/password authentication (future) + - email:* - Any email-based authentication + - social:google - Google OAuth + - social:github - GitHub OAuth + - social:* - Any social provider + - sso:{organization_slug}:{provider_slug} - Specific SSO provider for organization + - sso:{organization_slug}:* - Any SSO provider for organization + - sso:* - Any SSO provider (any organization) + """ + + EMAIL_OTP = "email:otp" + EMAIL_PASSWORD = "email:password" + EMAIL_WILDCARD = "email:*" + SOCIAL_GOOGLE = "social:google" + SOCIAL_GITHUB = "social:github" + SOCIAL_WILDCARD = "social:*" + SSO_WILDCARD = "sso:*" + + @classmethod + def is_valid_pattern(cls, pattern: str) -> bool: + """ + Check if a pattern is a valid method kind. + + Allows: + - Exact enum values + - SSO patterns: sso:{organization_slug}:{provider_slug} or sso:{organization_slug}:* + """ + # Check if it's a known enum value + if pattern in cls._value2member_map_: + return True + + # Check SSO patterns + if pattern.startswith("sso:"): + parts = pattern.split(":") + if len(parts) == 3: + organization_slug, provider = parts[1], parts[2] + # Validate organization_slug is not empty + if organization_slug and (provider == "*" or provider): + return True + + return False + + @classmethod + def matches_pattern(cls, identity: str, allowed_pattern: str) -> bool: + """ + Check if an identity matches an allowed pattern. + + Args: + identity: Authentication method (e.g., "email:otp", "social:google") + allowed_pattern: Pattern to match against (supports wildcards) + + Returns: + True if identity matches the pattern + + Examples: + matches_pattern("email:otp", "email:*") → True + matches_pattern("social:google", "social:*") → True + matches_pattern("sso:acme:okta", "sso:acme:*") → True + matches_pattern("email:otp", "sso:*") → False + """ + # Exact match + if identity == allowed_pattern: + return True + + # Wildcard match + if allowed_pattern.endswith(":*"): + prefix = allowed_pattern[:-2] # Remove ":*" + if identity.startswith(f"{prefix}:"): + return True + + return False diff --git a/api/oss/src/core/evaluations/tasks/batch.py b/api/oss/src/core/evaluations/tasks/batch.py index 99bff2e6fd..1556bce18e 100644 --- a/api/oss/src/core/evaluations/tasks/batch.py +++ b/api/oss/src/core/evaluations/tasks/batch.py @@ -157,8 +157,8 @@ ) # Redis client and TracingWorker for publishing spans to Redis Streams -if env.REDIS_URI_DURABLE: - redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False) +if env.redis.uri_durable: + redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False) tracing_worker = TracingWorker( service=tracing_service, redis_client=redis_client, diff --git a/api/oss/src/core/evaluations/tasks/legacy.py b/api/oss/src/core/evaluations/tasks/legacy.py index 9d7e86ac25..ca1e937623 100644 --- a/api/oss/src/core/evaluations/tasks/legacy.py +++ b/api/oss/src/core/evaluations/tasks/legacy.py @@ -466,7 +466,7 @@ async def setup_evaluation( references={ "testset": testset_references["artifact"], # "testset_variant": - # "testset_revision": + "testset_revision": testset_references["revision"], }, ) if testset and testset.id diff --git a/api/oss/src/core/evaluations/tasks/live.py b/api/oss/src/core/evaluations/tasks/live.py index b22e963824..9006f393ad 100644 --- a/api/oss/src/core/evaluations/tasks/live.py +++ b/api/oss/src/core/evaluations/tasks/live.py @@ -139,8 +139,8 @@ ) # Redis client and TracingWorker for 
publishing spans to Redis Streams -if env.REDIS_URI_DURABLE: - redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False) +if env.redis.uri_durable: + redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False) tracing_worker = TracingWorker( service=tracing_service, redis_client=redis_client, diff --git a/api/oss/src/core/evaluators/service.py b/api/oss/src/core/evaluators/service.py index b0a76728f9..c8ef569393 100644 --- a/api/oss/src/core/evaluators/service.py +++ b/api/oss/src/core/evaluators/service.py @@ -1,13 +1,9 @@ from typing import Optional, List from uuid import UUID, uuid4 -from json import loads from oss.src.utils.helpers import get_slug_from_name_and_id from oss.src.services.db_manager import fetch_evaluator_config from oss.src.core.workflows.dtos import ( - WorkflowFlags, - WorkflowQueryFlags, - # WorkflowCreate, WorkflowEdit, WorkflowQuery, @@ -17,8 +13,6 @@ WorkflowVariantEdit, WorkflowVariantQuery, # - WorkflowRevisionData, - # WorkflowRevisionCreate, WorkflowRevisionEdit, WorkflowRevisionCommit, @@ -35,11 +29,7 @@ SimpleEvaluatorEdit, SimpleEvaluatorQuery, SimpleEvaluatorFlags, - SimpleEvaluatorQueryFlags, - # EvaluatorFlags, - EvaluatorQueryFlags, - # Evaluator, EvaluatorQuery, EvaluatorRevisionsLog, @@ -1435,11 +1425,33 @@ def _transfer_evaluator_revision_data( else None ) headers = None + # TODO: This function reconstructs output schemas from old evaluator settings. + # When fully migrating to the new workflow-based evaluator system, the output + # schema should be stored directly in the evaluator revision (workflow revision) + # at configuration time, rather than being inferred from settings here. + # For evaluators with dynamic outputs (auto_ai_critique, json_multi_field_match), + # the frontend/API should build and save the complete output schema when the + # user configures the evaluator. 
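+    # Illustrative sketch of the reconstruction below (hypothetical settings,
+    # not taken from a real evaluator): a json_multi_field_match evaluator
+    # configured with fields=["name", "age"] would be rebuilt as:
+    #   {
+    #       "$schema": "https://json-schema.org/draft/2020-12/schema",
+    #       "type": "object",
+    #       "properties": {
+    #           "aggregate_score": {"type": "number"},
+    #           "name": {"type": "number"},
+    #           "age": {"type": "number"},
+    #       },
+    #       "required": ["aggregate_score"],
+    #       "additionalProperties": False,
+    #   }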
outputs_schema = None if str(old_evaluator.evaluator_key) == "auto_ai_critique": json_schema = old_evaluator.settings_values.get("json_schema", None) if json_schema and isinstance(json_schema, dict): outputs_schema = json_schema.get("schema", None) + # Handle json_multi_field_match with dynamic field-based properties + if str(old_evaluator.evaluator_key) == "json_multi_field_match": + # Build dynamic properties based on configured fields + fields = old_evaluator.settings_values.get("fields", []) + properties = {"aggregate_score": {"type": "number"}} + for field in fields: + # Each field becomes a numeric score (0 or 1) + properties[field] = {"type": "number"} + outputs_schema = { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "properties": properties, + "required": ["aggregate_score"], + "additionalProperties": False, + } if not outputs_schema: properties = ( {"score": {"type": "number"}, "success": {"type": "boolean"}} diff --git a/api/oss/src/core/git/dtos.py b/api/oss/src/core/git/dtos.py index ff49f5ba89..9ebd98863c 100644 --- a/api/oss/src/core/git/dtos.py +++ b/api/oss/src/core/git/dtos.py @@ -90,6 +90,7 @@ class RevisionCommit(Slug, Header, Metadata): artifact_id: Optional[UUID] = None variant_id: Optional[UUID] = None + revision_id: Optional[UUID] = None class RevisionsLog(BaseModel): diff --git a/api/oss/src/core/organizations/__init__.py b/api/oss/src/core/organizations/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/oss/src/core/organizations/types.py b/api/oss/src/core/organizations/types.py new file mode 100644 index 0000000000..a7b1683051 --- /dev/null +++ b/api/oss/src/core/organizations/types.py @@ -0,0 +1,57 @@ +"""Core authentication method types (OSS).""" + +from enum import Enum + + +# ============================================================================ +# AUTHENTICATION METHOD KINDS +# ============================================================================ + + +class MethodKind(str, Enum): + """ + Valid authentication method patterns for organization policies. + + Supports exact matches and wildcards: + - email:otp - Email OTP authentication + - email:password - Email/password authentication (future) + - email:* - Any email-based authentication + - social:google - Google OAuth + - social:github - GitHub OAuth + - social:* - Any social provider + - sso:{organization_slug}:{provider_slug} - Specific SSO provider for organization + - sso:{organization_slug}:* - Any SSO provider for organization + - sso:* - Any SSO provider (any organization) + """ + + EMAIL_OTP = "email:otp" + EMAIL_PASSWORD = "email:password" + EMAIL_WILDCARD = "email:*" + SOCIAL_GOOGLE = "social:google" + SOCIAL_GITHUB = "social:github" + SOCIAL_WILDCARD = "social:*" + SSO_WILDCARD = "sso:*" + + @classmethod + def is_valid_pattern(cls, pattern: str) -> bool: + """ + Check if a pattern is a valid method kind. 
+ + Allows: + - Exact enum values + - SSO patterns: sso:{organization_slug}:{provider_slug} or sso:{organization_slug}:* + """ + # Check if it's a known enum value + if pattern in cls._value2member_map_: + return True + + # Check SSO patterns + if pattern.startswith("sso:"): + parts = pattern.split(":") + if len(parts) == 3: + organization_slug, provider = parts[1], parts[2] + # Validate organization_slug is not empty + if organization_slug and (provider == "*" or provider): + return True + + return False diff --git a/api/oss/src/core/secrets/dtos.py b/api/oss/src/core/secrets/dtos.py index 7286701488..86825cb1fd 100644 --- a/api/oss/src/core/secrets/dtos.py +++ b/api/oss/src/core/secrets/dtos.py @@ -1,6 +1,6 @@ from typing import Optional, Union, List, Dict, Any -from pydantic import BaseModel, field_validator, model_validator +from pydantic import BaseModel, Field, field_validator, model_validator from oss.src.core.secrets.enums import ( SecretKind, @@ -45,14 +45,31 @@ class CustomProviderDTO(BaseModel): model_keys: Optional[List[str]] = None +class SSOProviderSettingsDTO(BaseModel): + client_id: str + client_secret: str + issuer_url: str + scopes: List[str] + extra: Dict[str, Any] = Field(default_factory=dict) + + +class SSOProviderDTO(BaseModel): + provider: SSOProviderSettingsDTO + + class SecretDTO(BaseModel): kind: SecretKind - data: Union[StandardProviderDTO, CustomProviderDTO] + data: Union[StandardProviderDTO, CustomProviderDTO, SSOProviderDTO] @model_validator(mode="before") def validate_secret_data_based_on_kind(cls, values: Dict[str, Any]): kind = values.get("kind") + if isinstance(kind, SecretKind): + kind = kind.value data = values.get("data", {}) + if isinstance(data, BaseModel): + data = data.model_dump() + values["data"] = data if kind == SecretKind.PROVIDER_KEY.value: if not isinstance(data, dict): @@ -82,6 +99,21 @@ def validate_secret_data_based_on_kind(cls, values: Dict[str, Any]): raise ValueError( "The provided kind in data is not a valid CustomProviderKind enum" ) + elif kind == SecretKind.SSO_PROVIDER.value: + if not isinstance(data, dict): + raise ValueError( + "The provided request secret dto is not a valid type for SSOProviderDTO" + ) + provider = data.get("provider") + if not isinstance(provider, dict): + raise ValueError( + "The provided request secret dto is missing required fields for SSOProviderSettingsDTO" + ) + required_fields = {"client_id", "client_secret", "issuer_url", "scopes"} + if not required_fields.issubset(provider.keys()): + raise ValueError( + "The provided request secret dto is missing required fields for SSOProviderSettingsDTO" + ) else: raise ValueError("The provided kind is not a valid SecretKind enum") diff --git a/api/oss/src/core/secrets/enums.py b/api/oss/src/core/secrets/enums.py index ee7b24751a..695260ffa2 100644 --- a/api/oss/src/core/secrets/enums.py +++ b/api/oss/src/core/secrets/enums.py @@ -4,6 +4,7 @@ class SecretKind(str, Enum): PROVIDER_KEY = "provider_key" CUSTOM_PROVIDER = "custom_provider" + SSO_PROVIDER = "sso_provider" class StandardProviderKind(str, Enum): diff --git a/api/oss/src/core/secrets/interfaces.py b/api/oss/src/core/secrets/interfaces.py index 6bc27078c9..6c2eda45a7 100644 --- a/api/oss/src/core/secrets/interfaces.py +++ b/api/oss/src/core/secrets/interfaces.py @@ -15,32 +15,38 @@ def __init__(self): async def create( self, *, - project_id: UUID, + project_id: Optional[UUID] = None, + organization_id: Optional[UUID] = None, create_secret_dto: CreateSecretDTO, ) -> SecretResponseDTO: raise NotImplementedError 
async def get( self, - project_id: UUID, secret_id: UUID, + project_id: Optional[UUID] = None, + organization_id: Optional[UUID] = None, ) -> Optional[SecretResponseDTO]: raise NotImplementedError - async def list(self, project_id: UUID) -> List[SecretResponseDTO]: + async def list( + self, project_id: Optional[UUID] = None, organization_id: Optional[UUID] = None + ) -> List[SecretResponseDTO]: raise NotImplementedError async def update( self, - project_id: UUID, secret_id: UUID, update_secret_dto: UpdateSecretDTO, + project_id: Optional[UUID] = None, + organization_id: Optional[UUID] = None, ) -> Optional[SecretResponseDTO]: raise NotImplementedError async def delete( self, - project_id: UUID, secret_id: UUID, + project_id: Optional[UUID] = None, + organization_id: Optional[UUID] = None, ) -> None: raise NotImplementedError diff --git a/api/oss/src/core/secrets/services.py b/api/oss/src/core/secrets/services.py index 540adfb3aa..f986ed911a 100644 --- a/api/oss/src/core/secrets/services.py +++ b/api/oss/src/core/secrets/services.py @@ -14,7 +14,8 @@ def __init__(self, secrets_dao: SecretsDAOInterface): async def create_secret( self, *, - project_id: UUID, + project_id: UUID | None = None, + organization_id: UUID | None = None, create_secret_dto: CreateSecretDTO, ): with set_data_encryption_key( @@ -22,57 +23,71 @@ async def create_secret( ): secret_dto = await self.secrets_dao.create( project_id=project_id, + organization_id=organization_id, create_secret_dto=create_secret_dto, ) return secret_dto async def get_secret( self, - project_id: UUID, secret_id: UUID, + project_id: UUID | None = None, + organization_id: UUID | None = None, ): with set_data_encryption_key( data_encryption_key=self._data_encryption_key, ): secret_dto = await self.secrets_dao.get( - project_id=project_id, secret_id=secret_id, + project_id=project_id, + organization_id=organization_id, ) return secret_dto - async def list_secrets(self, project_id: UUID): + async def list_secrets( + self, + project_id: UUID | None = None, + organization_id: UUID | None = None, + ): with set_data_encryption_key( data_encryption_key=self._data_encryption_key, ): - secrets_dtos = await self.secrets_dao.list(project_id=project_id) + secrets_dtos = await self.secrets_dao.list( + project_id=project_id, + organization_id=organization_id, + ) return secrets_dtos async def update_secret( self, - project_id: UUID, secret_id: UUID, update_secret_dto: UpdateSecretDTO, + project_id: UUID | None = None, + organization_id: UUID | None = None, ): with set_data_encryption_key( data_encryption_key=self._data_encryption_key, ): secret_dto = await self.secrets_dao.update( - project_id=project_id, secret_id=secret_id, update_secret_dto=update_secret_dto, + project_id=project_id, + organization_id=organization_id, ) return secret_dto async def delete_secret( self, - project_id: UUID, secret_id: UUID, + project_id: UUID | None = None, + organization_id: UUID | None = None, ) -> None: with set_data_encryption_key( data_encryption_key=self._data_encryption_key, ): await self.secrets_dao.delete( - project_id=project_id, secret_id=secret_id, + project_id=project_id, + organization_id=organization_id, ) return diff --git a/api/oss/src/core/secrets/utils.py b/api/oss/src/core/secrets/utils.py index 9edc02035f..9d7e1e838b 100644 --- a/api/oss/src/core/secrets/utils.py +++ b/api/oss/src/core/secrets/utils.py @@ -33,12 +33,12 @@ async def get_user_llm_providers_secrets(project_id: str) -> Dict[str, Any]: if not secrets: return {} - # 2: exclude custom_provider 
secrets + # 2: include only standard provider keys # value of secrets: [{data: {kind: ..., provider: {key: ...}}}] secrets = [ secret.model_dump(include={"data"}) for secret in secrets - if secret.kind != "custom_provider" + if secret.kind == "provider_key" ] # 3: convert secrets to readable format diff --git a/api/oss/src/core/testcases/service.py b/api/oss/src/core/testcases/service.py index 3afd298dd4..1b1db82f20 100644 --- a/api/oss/src/core/testcases/service.py +++ b/api/oss/src/core/testcases/service.py @@ -58,6 +58,31 @@ async def fetch_testcases( project_id: UUID, # testcase_ids: Optional[List[UUID]] = None, + ) -> List[Testcase]: + blobs = await self.testcases_dao.fetch_blobs( + project_id=project_id, + # + blob_ids=testcase_ids or [], + ) + + if not blobs: + return [] + + _testcases = [ + Testcase( + **blob.model_dump(mode="json"), + ) + for blob in blobs + ] + + return _testcases + + async def query_testcases( + self, + *, + project_id: UUID, + # + testcase_ids: Optional[List[UUID]] = None, # testset_id: Optional[UUID] = None, # diff --git a/api/oss/src/core/testsets/dtos.py b/api/oss/src/core/testsets/dtos.py index a4f88381e1..5c372dd81b 100644 --- a/api/oss/src/core/testsets/dtos.py +++ b/api/oss/src/core/testsets/dtos.py @@ -1,4 +1,4 @@ -from typing import Optional, List, Dict +from typing import Optional, List, Tuple from uuid import UUID from pydantic import BaseModel, Field @@ -171,74 +171,52 @@ class TestsetRevisionQuery(RevisionQuery): flags: Optional[TestsetFlags] = None -class TestsetRevisionCommit( - RevisionCommit, - TestsetIdAlias, - TestsetVariantIdAlias, -): - flags: Optional[TestsetFlags] = None - - data: Optional[TestsetRevisionData] = None - - def model_post_init(self, __context) -> None: - sync_alias("testset_id", "artifact_id", self) - sync_alias("testset_variant_id", "variant_id", self) - +class TestsetRevisionDeltaColumns(BaseModel): + """Column-level operations applied to ALL testcases in the revision.""" -class TestsetColumnRename(BaseModel): - """Column rename operation""" - - old_name: str - new_name: str + # Add columns: array of column names to add + add: Optional[List[str]] = None + # Remove columns: array of column names to remove + remove: Optional[List[str]] = None + # Replace columns: array of (old column name, new column name) to replace + replace: Optional[List[Tuple[str, str]]] = None -class TestsetColumnOperations(BaseModel): - """Column-level operations applied to ALL testcases in the revision""" +class TestsetRevisionDeltaRows(BaseModel): + """Row-level operations applied to testcases in the revision.""" - # Rename columns: array of {old_name, new_name} - rename: Optional[List[TestsetColumnRename]] = None - # Add columns: array of column names to add (initialized to empty string) - add: Optional[List[str]] = None - # Delete columns: array of column names to remove - delete: Optional[List[str]] = None + # Add rows: array of testcases to add + add: Optional[List[Testcase]] = None + # Remove rows: array of testcase IDs to remove + remove: Optional[List[UUID]] = None + # Replace rows: array of testcases to replace + replace: Optional[List[Testcase]] = None -class TestsetRevisionPatchOperations(BaseModel): - """Operations to apply to a testset revision""" +class TestsetRevisionDelta(BaseModel): + """Operations to apply to a testset revision.""" - # Testcases to update (existing testcases with modified data) - update: Optional[List[Testcase]] = None - # New testcases to create - create: Optional[List[Testcase]] = None - # Testcase IDs to delete - 
delete: Optional[List[UUID]] = None - # Column-level operations (applied to ALL testcases) - columns: Optional[TestsetColumnOperations] = None + # Row-level operations + rows: Optional[TestsetRevisionDeltaRows] = None + # Column-level operations + columns: Optional[TestsetRevisionDeltaColumns] = None -class TestsetRevisionPatch( +class TestsetRevisionCommit( + RevisionCommit, TestsetIdAlias, TestsetVariantIdAlias, + TestsetRevisionIdAlias, ): - """Patch request for updating a testset revision with delta changes""" - flags: Optional[TestsetFlags] = None - # Base revision to apply patch to (defaults to latest if not specified) - base_revision_id: Optional[UUID] = None - - # Commit message - message: Optional[str] = None - - # Revision description (for the new revision) - description: Optional[str] = None - - # Patch operations - operations: Optional[TestsetRevisionPatchOperations] = None + data: Optional[TestsetRevisionData] = None + delta: Optional[TestsetRevisionDelta] = None def model_post_init(self, __context) -> None: sync_alias("testset_id", "artifact_id", self) sync_alias("testset_variant_id", "variant_id", self) + sync_alias("testset_revision_id", "revision_id", self) class SimpleTestset(Identifier, Slug, Lifecycle, Header, Metadata): diff --git a/api/oss/src/core/testsets/service.py b/api/oss/src/core/testsets/service.py index 5c3ba2274f..847ae594c5 100644 --- a/api/oss/src/core/testsets/service.py +++ b/api/oss/src/core/testsets/service.py @@ -1,4 +1,4 @@ -from typing import Dict, Optional, List +from typing import Dict, Optional, List, Any from uuid import UUID, uuid4 from oss.src.utils.logging import get_module_logger @@ -50,7 +50,6 @@ TestsetRevisionEdit, TestsetRevisionQuery, TestsetRevisionCommit, - TestsetRevisionPatch, ) from oss.src.apis.fastapi.testsets.utils import ( csv_file_to_json_array, @@ -779,6 +778,14 @@ async def commit_testset_revision( # include_testcases: Optional[bool] = None, ) -> Optional[TestsetRevision]: + if testset_revision_commit.delta and not testset_revision_commit.data: + return await self._commit_testset_revision_delta( + project_id=project_id, + user_id=user_id, + testset_revision_commit=testset_revision_commit, + include_testcases=include_testcases, + ) + if testset_revision_commit.data and testset_revision_commit.data.testcases: if testset_revision_commit.data.testcases: for testcase in testset_revision_commit.data.testcases: @@ -862,55 +869,40 @@ async def log_testset_revisions( return testset_revisions - async def patch_testset_revision( + async def _commit_testset_revision_delta( self, *, project_id: UUID, user_id: UUID, # - testset_revision_patch: TestsetRevisionPatch, + testset_revision_commit: TestsetRevisionCommit, + # + include_testcases: Optional[bool] = None, ) -> Optional[TestsetRevision]: - """ - Apply a patch to a testset revision. - - This method: - 1. Fetches the base revision (latest if not specified) with all testcases - 2. Loads all current testcase data - 3. Applies the patch operations to build a complete testcases list: - - update: Replace testcase data for matching IDs - - create: Add new testcases - - delete: Remove testcases by ID - 4. Calls the regular commit flow with the full testcases data - - This approach ensures consistency with the regular commit flow and - avoids any deduplication issues. 
- """ + """Apply delta operations to a base revision and commit as a new revision.""" # Get the base revision to patch base_revision = await self.fetch_testset_revision( project_id=project_id, - testset_ref=Reference(id=testset_revision_patch.testset_id), + testset_ref=Reference(id=testset_revision_commit.testset_id), testset_revision_ref=( - Reference(id=testset_revision_patch.base_revision_id) - if testset_revision_patch.base_revision_id + Reference(id=testset_revision_commit.revision_id) + if testset_revision_commit.revision_id else None ), ) if not base_revision: log.error( - f"Base revision not found for testset {testset_revision_patch.testset_id}" + f"Base revision not found for testset {testset_revision_commit.testset_id}" ) return None - # Load all current testcases from the base revision + # Load all current testcases from the base revision, preserving order. current_testcases: List[Testcase] = [] - if base_revision.data and base_revision.data.testcase_ids: - current_testcases = await self.testcases_service.fetch_testcases( - project_id=project_id, - testcase_ids=base_revision.data.testcase_ids, - ) + if base_revision.data and base_revision.data.testcases: + current_testcases = list(base_revision.data.testcases) - operations = testset_revision_patch.operations + operations = testset_revision_commit.delta if not operations: # No operations, just return the base revision return base_revision @@ -918,20 +910,20 @@ async def patch_testset_revision( # Apply column operations to ALL testcases first # This ensures column changes are applied even to testcases not in update list if operations.columns: + replace_map = {} + if operations.columns.replace: + replace_map = {old: new for old, new in operations.columns.replace} + remove_set = set(operations.columns.remove or []) for tc in current_testcases: if tc.data: - # Apply column renames - if operations.columns.rename: - for rename_op in operations.columns.rename: - if rename_op.old_name in tc.data: - tc.data[rename_op.new_name] = tc.data.pop( - rename_op.old_name - ) - - # Apply column deletions - if operations.columns.delete: - for col_name in operations.columns.delete: - tc.data.pop(col_name, None) + # Preserve column order for replace/remove. + updated_data: Dict[str, Any] = {} + for key, value in tc.data.items(): + if key in remove_set: + continue + new_key = replace_map.get(key, key) + updated_data[new_key] = value + tc.data = updated_data # Apply column additions (initialize to empty string) if operations.columns.add: @@ -939,88 +931,69 @@ async def patch_testset_revision( if col_name not in tc.data: tc.data[col_name] = "" - # Build a map of current testcases by ID for easy lookup - testcases_by_id: Dict[UUID, Testcase] = { - tc.id: tc for tc in current_testcases if tc.id - } - - # Track IDs to delete - ids_to_delete: set[UUID] = set() - if operations.delete: - ids_to_delete.update(operations.delete) - - # Apply update operations - replace data for matching IDs - if operations.update: - for updated_tc in operations.update: - if updated_tc.id and updated_tc.id in testcases_by_id: - # Create a new Testcase with updated data - testcases_by_id[updated_tc.id] = Testcase( - id=None, # Will be assigned by create_testcases - set_id=testset_revision_patch.testset_id, - data=updated_tc.data, - ) - # Mark old ID for removal (we'll create a new testcase) - ids_to_delete.add(updated_tc.id) + # Build final testcases list, preserving base order. 
+ remove_set: set[UUID] = ( + set(operations.rows.remove or []) if operations.rows else set() + ) + replace_map: Dict[UUID, Testcase] = {} + if operations.rows and operations.rows.replace: + replace_map = { + tc.id: tc for tc in operations.rows.replace if tc.id is not None + } - # Build final testcases list: - # 1. Keep existing testcases that weren't deleted or updated - # 2. Add updated testcases (with new data) - # 3. Add new testcases from create operations + # 1) Replace in place, 2) remove wherever it appears, 3) add at the end. final_testcases: List[Testcase] = [] - - # Add existing testcases that weren't deleted - for tc_id, tc in testcases_by_id.items(): - if tc_id not in ids_to_delete: - # Keep existing testcase data - final_testcases.append( - Testcase( - id=None, # Will be assigned by create_testcases - set_id=testset_revision_patch.testset_id, - data=tc.data, - ) + for tc in current_testcases: + if not tc.id: + continue + updated_tc = replace_map.get(tc.id) + if updated_tc is not None: + candidate = Testcase( + id=None, + set_id=testset_revision_commit.testset_id, + data=updated_tc.data, ) + else: + candidate = Testcase( + id=None, + set_id=testset_revision_commit.testset_id, + data=tc.data, + ) + if tc.id in remove_set: + continue + final_testcases.append(candidate) - # Add updated testcases - if operations.update: - for updated_tc in operations.update: - if updated_tc.id: - final_testcases.append( - Testcase( - id=None, - set_id=testset_revision_patch.testset_id, - data=updated_tc.data, - ) - ) - - # Add new testcases from create operations - if operations.create: - for new_tc in operations.create: + # 3) Add at the end. + if operations.rows and operations.rows.add: + for new_tc in operations.rows.add: final_testcases.append( Testcase( id=None, - set_id=testset_revision_patch.testset_id, + set_id=testset_revision_commit.testset_id, data=new_tc.data, ) ) # Get variant_id from base revision (required for commit) variant_id = ( - testset_revision_patch.testset_variant_id + testset_revision_commit.testset_variant_id or base_revision.testset_variant_id ) - # Generate a unique slug for the new revision - revision_slug = uuid4().hex[-12:] + # Generate a unique slug for the new revision if missing + revision_slug = testset_revision_commit.slug or uuid4().hex[-12:] # Create commit request with full testcases data # This will go through the regular commit flow testset_revision_commit = TestsetRevisionCommit( slug=revision_slug, - testset_id=testset_revision_patch.testset_id, + testset_id=testset_revision_commit.testset_id, testset_variant_id=variant_id, - message=testset_revision_patch.message or "Patched testset revision", - description=testset_revision_patch.description or base_revision.description, - flags=testset_revision_patch.flags, + message=testset_revision_commit.message or "Patched testset revision", + description=( + testset_revision_commit.description or base_revision.description + ), + flags=testset_revision_commit.flags, data=TestsetRevisionData( testcases=final_testcases, ), @@ -1031,6 +1004,7 @@ async def patch_testset_revision( project_id=project_id, user_id=user_id, testset_revision_commit=testset_revision_commit, + include_testcases=include_testcases, ) ## ------------------------------------------------------------------------- diff --git a/api/oss/src/core/users/__init__.py b/api/oss/src/core/users/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/oss/src/core/users/types.py b/api/oss/src/core/users/types.py new file mode 100644 index 
0000000000..ecebaa0b60 --- /dev/null +++ b/api/oss/src/core/users/types.py @@ -0,0 +1,45 @@ +from datetime import datetime +from uuid import UUID +from pydantic import BaseModel, field_validator +from typing import Optional + +from oss.src.core.auth.types import MethodKind + + +class UserIdentity(BaseModel): + id: UUID + user_id: UUID + method: str + subject: str + domain: Optional[str] = None + created_at: datetime + updated_at: Optional[datetime] = None + deleted_at: Optional[datetime] = None + created_by_id: Optional[UUID] = None + updated_by_id: Optional[UUID] = None + deleted_by_id: Optional[UUID] = None + + class Config: + from_attributes = True + + @field_validator("method") + @classmethod + def validate_method(cls, value: str) -> str: + if not MethodKind.is_valid_pattern(value): + raise ValueError(f"Invalid auth method: {value}") + return value + + +class UserIdentityCreate(BaseModel): + user_id: UUID + method: str + subject: str + domain: Optional[str] = None + created_by_id: Optional[UUID] = None + + @field_validator("method") + @classmethod + def validate_method(cls, value: str) -> str: + if not MethodKind.is_valid_pattern(value): + raise ValueError(f"Invalid auth method: {value}") + return value diff --git a/api/oss/src/crons/queries.sh b/api/oss/src/crons/queries.sh index b9e8c7a6e1..e742275b03 100644 --- a/api/oss/src/crons/queries.sh +++ b/api/oss/src/crons/queries.sh @@ -1,7 +1,8 @@ #!/bin/sh set -eu -AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2-) +AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2- || true) +AGENTA_AUTH_KEY="${AGENTA_AUTH_KEY:-replace-me}" TRIGGER_INTERVAL=$(awk 'NR==2 {split($1, a, "/"); print (a[2] ? a[2] : 1)}' /etc/cron.d/queries-cron) NOW_UTC=$(date -u "+%Y-%m-%dT%H:%M:00Z") MINUTE=$(date -u "+%M" | sed 's/^0*//') @@ -21,4 +22,4 @@ curl \ -H "Authorization: Access ${AGENTA_AUTH_KEY}" \ "http://api:8000/admin/evaluations/runs/refresh?trigger_interval=${TRIGGER_INTERVAL}&trigger_datetime=${TRIGGER_DATETIME}" || echo "❌ CURL failed" -echo "[$(date)] queries.sh done" >> /proc/1/fd/1 \ No newline at end of file +echo "[$(date)] queries.sh done" >> /proc/1/fd/1 diff --git a/api/oss/src/dbs/postgres/blobs/dao.py b/api/oss/src/dbs/postgres/blobs/dao.py index 85a8616e87..0929ae517c 100644 --- a/api/oss/src/dbs/postgres/blobs/dao.py +++ b/api/oss/src/dbs/postgres/blobs/dao.py @@ -452,7 +452,7 @@ async def query_blobs( stmt = apply_windowing( stmt=stmt, DBE=self.BlobDBE, - attribute="id", # UUID7 - use id for cursor-based pagination + attribute="created_at", # Blob IDs are content-hashed (UUID5), use timestamp for ordering order="ascending", # data-style windowing=windowing, ) @@ -464,6 +464,24 @@ async def query_blobs( if not blob_dbes: return [] + # If blob_ids were provided, preserve their order in the result + if blob_query.blob_ids: + _blobs = { + blob_dbe.id: map_dbe_to_dto( # type: ignore + DTO=Blob, + dbe=blob_dbe, # type: ignore + ) + for blob_dbe in blob_dbes + } + + blobs = [ + _blobs[blob_id] + for blob_id in blob_query.blob_ids + if blob_id in _blobs + ] + + return blobs + blobs = [ map_dbe_to_dto( DTO=Blob, diff --git a/api/oss/src/dbs/postgres/organizations/__init__.py b/api/oss/src/dbs/postgres/organizations/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/oss/src/dbs/postgres/secrets/dao.py b/api/oss/src/dbs/postgres/secrets/dao.py index 51811500eb..62981cebb0 100644 --- a/api/oss/src/dbs/postgres/secrets/dao.py +++ 
b/api/oss/src/dbs/postgres/secrets/dao.py @@ -21,13 +21,32 @@ class SecretsDAO(SecretsDAOInterface): def __init__(self): pass + @staticmethod + def _validate_scope(project_id: UUID | None, organization_id: UUID | None) -> None: + if bool(project_id) == bool(organization_id): + raise ValueError( + "Exactly one of project_id or organization_id must be provided." + ) + + @staticmethod + def _scope_filter(project_id: UUID | None, organization_id: UUID | None) -> dict: + SecretsDAO._validate_scope(project_id, organization_id) + return ( + {"project_id": project_id} + if project_id + else {"organization_id": organization_id} + ) + async def create( self, - project_id: UUID, + project_id: UUID | None, + organization_id: UUID | None, create_secret_dto: CreateSecretDTO, ): + self._validate_scope(project_id, organization_id) secrets_dbe = map_secrets_dto_to_dbe( project_id=project_id, + organization_id=organization_id, secret_dto=create_secret_dto, ) async with engine.core_session() as session: @@ -39,13 +58,15 @@ async def create( async def get( self, - project_id: UUID, secret_id: UUID, + project_id: UUID | None, + organization_id: UUID | None, ): async with engine.core_session() as session: + scope_filter = self._scope_filter(project_id, organization_id) stmt = select(SecretsDBE).filter_by( id=secret_id, - project_id=project_id, + **scope_filter, ) result = await session.execute(stmt) # type: ignore secrets_dbe = result.scalar() @@ -56,9 +77,10 @@ async def get( secrets_dto = map_secrets_dbe_to_dto(secrets_dbe=secrets_dbe) return secrets_dto - async def list(self, project_id: UUID): + async def list(self, project_id: UUID | None, organization_id: UUID | None): async with engine.core_session() as session: - stmt = select(SecretsDBE).filter_by(project_id=project_id) + scope_filter = self._scope_filter(project_id, organization_id) + stmt = select(SecretsDBE).filter_by(**scope_filter) results = await session.execute(stmt) # type: ignore secrets_dbes = results.scalars().all() @@ -70,14 +92,16 @@ async def list(self, project_id: UUID): async def update( self, - project_id: UUID, secret_id: UUID, update_secret_dto: UpdateSecretDTO, + project_id: UUID | None, + organization_id: UUID | None, ): async with engine.core_session() as session: + scope_filter = self._scope_filter(project_id, organization_id) stmt = select(SecretsDBE).filter_by( id=secret_id, - project_id=project_id, + **scope_filter, ) result = await session.execute(stmt) secrets_dbe = result.scalar() @@ -97,13 +121,15 @@ async def update( async def delete( self, - project_id: UUID, secret_id: UUID, + project_id: UUID | None, + organization_id: UUID | None, ): async with engine.core_session() as session: + scope_filter = self._scope_filter(project_id, organization_id) stmt = select(SecretsDBE).filter_by( id=secret_id, - project_id=project_id, + **scope_filter, ) result = await session.execute(stmt) # type: ignore vault_secret_dbe = result.scalar() diff --git a/api/oss/src/dbs/postgres/secrets/dbas.py b/api/oss/src/dbs/postgres/secrets/dbas.py index ede7cac05f..6f01fff7c5 100644 --- a/api/oss/src/dbs/postgres/secrets/dbas.py +++ b/api/oss/src/dbs/postgres/secrets/dbas.py @@ -3,14 +3,13 @@ from oss.src.core.secrets.enums import SecretKind from oss.src.dbs.postgres.shared.dbas import ( - ProjectScopeDBA, LegacyLifecycleDBA, HeaderDBA, ) from oss.src.dbs.postgres.secrets.custom_fields import PGPString -class SecretsDBA(ProjectScopeDBA, LegacyLifecycleDBA, HeaderDBA): +class SecretsDBA(LegacyLifecycleDBA, HeaderDBA): __abstract__ = True id = Column( 
@@ -22,3 +21,11 @@ class SecretsDBA(ProjectScopeDBA, LegacyLifecycleDBA, HeaderDBA): ) kind = Column(SQLEnum(SecretKind, name="secretkind_enum")) # type: ignore data = Column(PGPString()) # type: ignore + project_id = Column( + UUID(as_uuid=True), + nullable=True, + ) + organization_id = Column( + UUID(as_uuid=True), + nullable=True, + ) diff --git a/api/oss/src/dbs/postgres/secrets/mappings.py b/api/oss/src/dbs/postgres/secrets/mappings.py index e79aeadd87..14397c3194 100644 --- a/api/oss/src/dbs/postgres/secrets/mappings.py +++ b/api/oss/src/dbs/postgres/secrets/mappings.py @@ -13,12 +13,16 @@ def map_secrets_dto_to_dbe( - *, project_id: uuid.UUID, secret_dto: CreateSecretDTO + *, + project_id: uuid.UUID | None, + organization_id: uuid.UUID | None, + secret_dto: CreateSecretDTO, ) -> SecretsDBE: vault_secret_dbe = SecretsDBE( name=secret_dto.header.name if secret_dto.header else None, description=(secret_dto.header.description if secret_dto.header else None), project_id=project_id, + organization_id=organization_id, kind=secret_dto.secret.kind.value, data=json.dumps(secret_dto.secret.data.model_dump(exclude_none=True)), ) diff --git a/api/oss/src/dbs/postgres/shared/dbas.py b/api/oss/src/dbs/postgres/shared/dbas.py index c6ada32dff..5b86d86c66 100644 --- a/api/oss/src/dbs/postgres/shared/dbas.py +++ b/api/oss/src/dbs/postgres/shared/dbas.py @@ -37,7 +37,7 @@ class LegacyLifecycleDBA: created_at = Column( TIMESTAMP(timezone=True), server_default=func.current_timestamp(), - nullable=False, + nullable=True, ) updated_at = Column( TIMESTAMP(timezone=True), diff --git a/api/oss/src/dbs/postgres/users/__init__.py b/api/oss/src/dbs/postgres/users/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/oss/src/dbs/postgres/users/dao.py b/api/oss/src/dbs/postgres/users/dao.py new file mode 100644 index 0000000000..2fbe1c71b9 --- /dev/null +++ b/api/oss/src/dbs/postgres/users/dao.py @@ -0,0 +1,67 @@ +from uuid import UUID +from typing import Optional, List +from sqlalchemy import select +from sqlalchemy.exc import IntegrityError + +from oss.src.dbs.postgres.shared.engine import engine +from oss.src.dbs.postgres.users.dbes import UserIdentityDBE +from oss.src.dbs.postgres.users.mappings import ( + map_identity_dbe_to_dto, + map_create_dto_to_dbe, +) +from oss.src.core.users.types import UserIdentity, UserIdentityCreate + + +class IdentitiesDAO: + async def create(self, dto: UserIdentityCreate) -> UserIdentity: + identity_dbe = map_create_dto_to_dbe(dto) + + async with engine.core_session() as session: + try: + session.add(identity_dbe) + await session.commit() + await session.refresh(identity_dbe) + except IntegrityError: + await session.rollback() + stmt = select(UserIdentityDBE).filter_by( + method=dto.method, + subject=dto.subject, + ) + result = await session.execute(stmt) + identity_dbe = result.scalar() + if identity_dbe is None: + raise + + return map_identity_dbe_to_dto(identity_dbe) + + async def get_by_method_subject( + self, method: str, subject: str + ) -> Optional[UserIdentity]: + async with engine.core_session() as session: + stmt = select(UserIdentityDBE).filter_by( + method=method, + subject=subject, + ) + result = await session.execute(stmt) + identity_dbe = result.scalar() + + if identity_dbe is None: + return None + + return map_identity_dbe_to_dto(identity_dbe) + + async def list_by_user(self, user_id: UUID) -> List[UserIdentity]: + async with engine.core_session() as session: + stmt = select(UserIdentityDBE).filter_by(user_id=user_id) + result = await 
session.execute(stmt) + identity_dbes = result.scalars().all() + + return [map_identity_dbe_to_dto(dbe) for dbe in identity_dbes] + + async def list_by_domain(self, domain: str) -> List[UserIdentity]: + async with engine.core_session() as session: + stmt = select(UserIdentityDBE).filter_by(domain=domain) + result = await session.execute(stmt) + identity_dbes = result.scalars().all() + + return [map_identity_dbe_to_dto(dbe) for dbe in identity_dbes] diff --git a/api/oss/src/dbs/postgres/users/dbas.py b/api/oss/src/dbs/postgres/users/dbas.py new file mode 100644 index 0000000000..9726fd6e40 --- /dev/null +++ b/api/oss/src/dbs/postgres/users/dbas.py @@ -0,0 +1,32 @@ +import uuid_utils.compat as uuid +from sqlalchemy import Column, String, UUID + +from oss.src.dbs.postgres.shared.dbas import LifecycleDBA + + +class UserIdentityDBA(LifecycleDBA): + __abstract__ = True + + id = Column( + UUID(as_uuid=True), + primary_key=True, + default=uuid.uuid7, + unique=True, + nullable=False, + ) + user_id = Column( + UUID(as_uuid=True), + nullable=False, + ) + method = Column( + String, + nullable=False, + ) + subject = Column( + String, + nullable=False, + ) + domain = Column( + String, + nullable=True, + ) diff --git a/api/oss/src/dbs/postgres/users/dbes.py b/api/oss/src/dbs/postgres/users/dbes.py new file mode 100644 index 0000000000..40f2b7c4cb --- /dev/null +++ b/api/oss/src/dbs/postgres/users/dbes.py @@ -0,0 +1,34 @@ +from sqlalchemy import ( + ForeignKeyConstraint, + UniqueConstraint, + Index, +) + +from oss.src.dbs.postgres.shared.base import Base +from oss.src.dbs.postgres.users.dbas import UserIdentityDBA + + +class UserIdentityDBE(Base, UserIdentityDBA): + __tablename__ = "user_identities" + + __table_args__ = ( + ForeignKeyConstraint( + ["user_id"], + ["users.id"], + ondelete="CASCADE", + ), + UniqueConstraint( + "method", + "subject", + name="uq_user_identities_method_subject", + ), + Index( + "ix_user_identities_user_method", + "user_id", + "method", + ), + Index( + "ix_user_identities_domain", + "domain", + ), + ) diff --git a/api/oss/src/dbs/postgres/users/mappings.py b/api/oss/src/dbs/postgres/users/mappings.py new file mode 100644 index 0000000000..f11d6aaf1e --- /dev/null +++ b/api/oss/src/dbs/postgres/users/mappings.py @@ -0,0 +1,23 @@ +from oss.src.core.users.types import UserIdentity, UserIdentityCreate +from oss.src.dbs.postgres.users.dbes import UserIdentityDBE + + +def map_identity_dbe_to_dto(identity_dbe: UserIdentityDBE) -> UserIdentity: + return UserIdentity( + id=identity_dbe.id, + user_id=identity_dbe.user_id, + method=identity_dbe.method, + subject=identity_dbe.subject, + domain=identity_dbe.domain, + created_at=identity_dbe.created_at, + updated_at=identity_dbe.updated_at, + ) + + +def map_create_dto_to_dbe(dto: UserIdentityCreate) -> UserIdentityDBE: + return UserIdentityDBE( + user_id=dto.user_id, + method=dto.method, + subject=dto.subject, + domain=dto.domain, + ) diff --git a/api/oss/src/models/api/evaluation_model.py b/api/oss/src/models/api/evaluation_model.py index 82e9f35cd1..dc006e11d5 100644 --- a/api/oss/src/models/api/evaluation_model.py +++ b/api/oss/src/models/api/evaluation_model.py @@ -20,6 +20,7 @@ class LegacyEvaluator(BaseModel): oss: Optional[bool] = False requires_llm_api_keys: Optional[bool] = False tags: List[str] + archived: Optional[bool] = False class EvaluatorConfig(BaseModel): diff --git a/api/oss/src/models/api/organization_models.py b/api/oss/src/models/api/organization_models.py index 8809330d5f..02e8232e6a 100644 --- 
a/api/oss/src/models/api/organization_models.py +++ b/api/oss/src/models/api/organization_models.py @@ -1,4 +1,5 @@ from typing import Optional, List, Dict, Any +from uuid import UUID from pydantic import BaseModel, Field @@ -7,10 +8,19 @@ class Organization(BaseModel): id: str - name: str - owner: str - description: str - type: Optional[str] = None + slug: Optional[str] = None + # + name: Optional[str] = None + description: Optional[str] = None + # + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + # + owner_id: UUID + # + members: List[str] = Field(default_factory=list) + invitations: List = Field(default_factory=list) workspaces: List[str] = Field(default_factory=list) diff --git a/api/oss/src/models/db_models.py b/api/oss/src/models/db_models.py index aafb1f9ccb..02fe011341 100644 --- a/api/oss/src/models/db_models.py +++ b/api/oss/src/models/db_models.py @@ -32,18 +32,54 @@ class OrganizationDB(Base): unique=True, nullable=False, ) - name = Column(String, default="agenta") - description = Column( + slug = Column( String, - default="The open-source LLM developer platform for cross-functional teams.", + unique=True, + nullable=True, ) - type = Column(String, nullable=True) - owner = Column(String, nullable=True) # TODO: deprecate and remove + # + name = Column(String, nullable=True) + description = Column(String, nullable=True) + # + flags = Column(JSONB, nullable=True) + tags = Column(JSONB, nullable=True) + meta = Column(JSONB, nullable=True) + # + owner_id = Column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="RESTRICT"), + nullable=False, + ) + # created_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + DateTime(timezone=True), + default=lambda: datetime.now(timezone.utc), + nullable=False, ) + # updated_at = Column( - DateTime(timezone=True), default=lambda: datetime.now(timezone.utc) + DateTime(timezone=True), + default=lambda: datetime.now(timezone.utc), + nullable=True, + ) + deleted_at = Column( + DateTime(timezone=True), + nullable=True, + ) + created_by_id = Column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="RESTRICT"), + nullable=False, + ) + updated_by_id = Column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="SET NULL"), + nullable=True, + ) + deleted_by_id = Column( + UUID(as_uuid=True), + ForeignKey("users.id", ondelete="SET NULL"), + nullable=True, ) diff --git a/api/oss/src/models/shared_models.py b/api/oss/src/models/shared_models.py index c2849bc6f5..149a69509e 100644 --- a/api/oss/src/models/shared_models.py +++ b/api/oss/src/models/shared_models.py @@ -3,6 +3,16 @@ from typing import Any, Dict, Optional +class OrganizationFlags(BaseModel): + is_demo: bool = False + is_personal: bool = False + + +class OrganizationQueryFlags(BaseModel): + is_demo: Optional[bool] = None + is_personal: Optional[bool] = None + + class ConfigDB(BaseModel): config_name: str parameters: Dict[str, Any] = Field(default_factory=dict) diff --git a/api/oss/src/resources/evaluators/evaluators.py b/api/oss/src/resources/evaluators/evaluators.py index aaac1a9ef7..392b23be45 100644 --- a/api/oss/src/resources/evaluators/evaluators.py +++ b/api/oss/src/resources/evaluators/evaluators.py @@ -375,6 +375,7 @@ "name": "JSON Field Match", "key": "field_match_test", "direct_use": False, + "archived": True, # Deprecated - use json_multi_field_match instead "settings_template": { "json_field": { "label": "JSON Field", @@ -398,6 +399,33 @@ "oss": True, "tags": 
["classifiers"], }, + { + "name": "JSON Multi-Field Match", + "key": "json_multi_field_match", + "direct_use": False, + "settings_template": { + "fields": { + "label": "Fields to Compare", + "type": "fields_tags_editor", # Custom type - tag-based add/remove editor + "required": True, + "description": "Add fields to compare using dot notation for nested paths (e.g., user.name)", + }, + "correct_answer_key": { + "label": "Expected Answer Column", + "default": "correct_answer", + "type": "string", + "required": True, + "description": "Column name containing the expected JSON object", + "ground_truth_key": True, + "advanced": True, # Hidden in advanced section + }, + }, + "description": "Compares configured fields in expected JSON against LLM output. Each field becomes a separate metric (0 or 1), with an aggregate_score showing the percentage of matching fields. Useful for entity extraction validation.", + "requires_testcase": "always", + "requires_trace": "always", + "oss": True, + "tags": ["classifiers"], + }, { "name": "JSON Diff Match", "key": "auto_json_diff", diff --git a/api/oss/src/routers/admin_router.py b/api/oss/src/routers/admin_router.py index 42c2d8ef36..8a2134b79b 100644 --- a/api/oss/src/routers/admin_router.py +++ b/api/oss/src/routers/admin_router.py @@ -435,9 +435,12 @@ async def create_account( user = LegacyUserResponse(id=str(user_db.id)) create_org_payload = CreateOrganization( - name=account.scope.name, - owner=str(user.id), - type="default", + name="Organization", + # + is_demo=False, + is_personal=False, + # + owner_id=UUID(str(user_db.id)), ) organization_db, workspace_db, project_db = await legacy_create_organization( diff --git a/api/oss/src/routers/evaluation_router.py b/api/oss/src/routers/evaluation_router.py index 65cdc87c0a..27b1af4e31 100644 --- a/api/oss/src/routers/evaluation_router.py +++ b/api/oss/src/routers/evaluation_router.py @@ -106,8 +106,8 @@ ) # Redis client and TracingWorker for publishing spans to Redis Streams -if env.REDIS_URI_DURABLE: - redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False) +if env.redis.uri_durable: + redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False) tracing_worker = TracingWorker( service=tracing_service, redis_client=redis_client, diff --git a/api/oss/src/routers/organization_router.py b/api/oss/src/routers/organization_router.py index b9df822d28..1cf27d9343 100644 --- a/api/oss/src/routers/organization_router.py +++ b/api/oss/src/routers/organization_router.py @@ -72,10 +72,17 @@ async def list_organizations( response = [ Organization( id=str(organization_db.id), + slug=str(organization_db.slug), + # name=str(organization_db.name), - owner=organization_db.owner, description=str(organization_db.description), - type=organization_db.type, # type: ignore + # + flags=organization_db.flags, + tags=organization_db.tags, + meta=organization_db.meta, + # + owner_id=organization_db.owner_id, + # workspaces=[str(active_workspace.id)] if not is_ee() else [], ).model_dump(exclude_unset=True) for organization_db in organizations_db @@ -151,10 +158,17 @@ async def fetch_organization_details( return OrganizationDetails( id=str(organization_db.id), + slug=str(organization_db.slug), + # name=str(organization_db.name), - owner=organization_db.owner, description=str(organization_db.description), - type=organization_db.type, # type: ignore + # + flags=organization_db.flags, + tags=organization_db.tags, + meta=organization_db.meta, + # + owner_id=organization_db.owner_id, + # default_workspace={ 
"id": str(active_workspace.id), "name": str(active_workspace.name), @@ -192,60 +206,71 @@ async def invite_user_to_organization( HTTPException: If there is an error assigning the role to the user. """ - if len(payload) != 1: - return JSONResponse( - status_code=400, - content={"detail": "Only one user can be invited at a time."}, - ) - - if is_ee(): - user_org_workspace_data = await get_user_org_and_workspace_id( - request.state.user_id - ) - project = await db_manager_ee.get_project_by_workspace(workspace_id) - has_permission = await check_rbac_permission( - user_org_workspace_data=user_org_workspace_data, - project_id=str(project.id), - role=WorkspaceRole.WORKSPACE_ADMIN, - ) - if not has_permission: + try: + if len(payload) != 1: return JSONResponse( - status_code=403, - content={ - "detail": "You do not have permission to perform this action. Please contact your Organization Owner" - }, + status_code=400, + content={"detail": "Only one user can be invited at a time."}, ) - owner = await db_manager.get_organization_owner(organization_id) - owner_domain = owner.email.split("@")[-1].lower() if owner else "" - user_domain = payload[0].email.split("@")[-1].lower() - skip_meter = owner_domain != "agenta.ai" and user_domain == "agenta.ai" - - if not skip_meter: - check, _, _ = await check_entitlements( - organization_id=request.state.organization_id, - key=Gauge.USERS, - delta=1, + if is_ee(): + user_org_workspace_data = await get_user_org_and_workspace_id( + request.state.user_id ) + project = await db_manager_ee.get_project_by_workspace(workspace_id) + has_permission = await check_rbac_permission( + user_org_workspace_data=user_org_workspace_data, + project_id=str(project.id), + role=WorkspaceRole.WORKSPACE_ADMIN, + ) + if not has_permission: + return JSONResponse( + status_code=403, + content={ + "detail": "You do not have permission to perform this action. 
Please contact your Organization Owner" + }, + ) + + owner = await db_manager.get_organization_owner(organization_id) + owner_domain = owner.email.split("@")[-1].lower() if owner else "" + user_domain = payload[0].email.split("@")[-1].lower() + skip_meter = owner_domain != "agenta.ai" and user_domain == "agenta.ai" + + if not skip_meter: + check, _, _ = await check_entitlements( + organization_id=request.state.organization_id, + key=Gauge.USERS, + delta=1, + ) + + if not check: + return NOT_ENTITLED_RESPONSE(Tracker.GAUGES) + + invite_user = await workspace_manager.invite_user_to_workspace( + payload=payload, + organization_id=organization_id, + project_id=str(project.id), + workspace_id=workspace_id, + user_uid=request.state.user_id, + ) + return invite_user - if not check: - return NOT_ENTITLED_RESPONSE(Tracker.GAUGES) - - invite_user = await workspace_manager.invite_user_to_workspace( - payload=payload, + invitation_response = await organization_service.invite_user_to_organization( + payload=payload[0], + project_id=request.state.project_id, + user_id=request.state.user_id, + ) + return invitation_response + except Exception: + log.error( + "Invite user failed", organization_id=organization_id, - project_id=str(project.id), workspace_id=workspace_id, - user_uid=request.state.user_id, + project_id=getattr(request.state, "project_id", None), + user_id=getattr(request.state, "user_id", None), + exc_info=True, ) - return invite_user - - invitation_response = await organization_service.invite_user_to_organization( - payload=payload[0], - project_id=request.state.project_id, - user_id=request.state.user_id, - ) - return invitation_response + raise @router.post( diff --git a/api/oss/src/routers/projects_router.py b/api/oss/src/routers/projects_router.py index 8a9c0b29de..4bf76680be 100644 --- a/api/oss/src/routers/projects_router.py +++ b/api/oss/src/routers/projects_router.py @@ -58,7 +58,7 @@ async def _assert_org_owner(request: Request): if not organization: raise HTTPException(status_code=404, detail="Organization not found") - if str(organization.owner) != str(user_id): + if str(organization.owner_id) != str(user_id): raise HTTPException( status_code=403, detail="Only the organization owner can perform this action", @@ -69,7 +69,7 @@ async def _assert_org_owner(request: Request): def _get_oss_user_role(organization, user_id: str) -> str: """Owner vs editor logic used across OSS endpoints.""" - return "owner" if str(organization.owner) == str(user_id) else "editor" + return "owner" if str(organization.owner_id) == str(user_id) else "editor" async def _get_ee_membership_for_project(user_id, project_id): diff --git a/api/oss/src/routers/user_profile.py b/api/oss/src/routers/user_profile.py index 0b082467b6..2d8ceda662 100644 --- a/api/oss/src/routers/user_profile.py +++ b/api/oss/src/routers/user_profile.py @@ -1,12 +1,13 @@ -from fastapi import Request +from fastapi import Request, HTTPException from fastapi.responses import JSONResponse from oss.src.utils.logging import get_module_logger -from oss.src.utils.caching import get_cache, set_cache +from oss.src.utils.caching import get_cache, set_cache, invalidate_cache from oss.src.utils.common import is_ee from oss.src.utils.common import APIRouter from oss.src.models.api.user_models import User +from oss.src.models.api.user_models import UserUpdate from oss.src.services import db_manager, user_service @@ -63,6 +64,33 @@ async def user_profile(request: Request): return user +@router.put("/username", operation_id="update_user_username") +async 
def update_user_username(request: Request, payload: UserUpdate): + username = (payload.username or "").strip() + if not username: + raise HTTPException(status_code=400, detail="Username is required.") + + user = await db_manager.update_user_username( + user_id=request.state.user_id, + username=username, + ) + + await invalidate_cache( + project_id=request.state.project_id, + user_id=request.state.user_id, + namespace="user_profile", + ) + + return User( + id=str(user.id), + uid=str(user.uid), + email=str(user.email), + username=str(user.username), + created_at=str(user.created_at), + updated_at=str(user.updated_at), + ) + + @router.post("/reset-password", operation_id="reset_user_password") async def reset_user_password(request: Request, user_id: str): if is_ee(): diff --git a/api/oss/src/services/admin_manager.py b/api/oss/src/services/admin_manager.py index 0541bc2c1a..750e47fd0b 100644 --- a/api/oss/src/services/admin_manager.py +++ b/api/oss/src/services/admin_manager.py @@ -30,9 +30,12 @@ class CreateOrganization(BaseModel): name: str - owner: str description: Optional[str] = None - type: Optional[str] = None + # + is_demo: bool = False + is_personal: bool = False + # + owner_id: UUID class CreateWorkspace(BaseModel): @@ -73,21 +76,24 @@ class UserRequest(BaseModel): class OrganizationRequest(BaseModel): - name: str - description: str + name: Optional[str] = None + description: Optional[str] = None + # + is_personal: bool class WorkspaceRequest(BaseModel): - name: str - description: str + name: Optional[str] = None + description: Optional[str] = None + # is_default: bool # organization_ref: Reference class ProjectRequest(BaseModel): - name: str - description: str + name: Optional[str] = None + description: Optional[str] = None is_default: bool # workspace_ref: Reference @@ -159,8 +165,15 @@ async def legacy_create_organization( ) -> Union[OrganizationDB, WorkspaceDB]: async with engine.core_session() as session: create_org_data = payload.model_dump(exclude_unset=True) - if "owner" not in create_org_data: - create_org_data["owner"] = str(user.id) + + create_org_data["flags"] = { + "is_demo": payload.is_demo, + "is_personal": payload.is_personal, + } + + # Drop flag inputs that are not OrganizationDB columns (already captured in flags) + create_org_data.pop("is_demo", None) + create_org_data.pop("is_personal", None) + + # Set required audit fields + create_org_data["owner_id"] = user.id + create_org_data["created_by_id"] = user.id # create organization organization_db = OrganizationDB(**create_org_data) @@ -172,14 +185,7 @@ async def legacy_create_organization( # construct workspace payload workspace_payload = CreateWorkspace( name=payload.name, - type=payload.type if payload.type else "", - description=( - "Default Workspace" - if payload.type == "default" - else payload.description - if payload.description - else "" - ), + type="default", ) # create workspace @@ -210,7 +216,7 @@ async def legacy_create_workspace( await session.refresh(workspace, attribute_names=["organization"]) project_db = await legacy_create_project( - project_name="Default Project", + project_name="Default", organization_id=str(organization.id), workspace_id=str(workspace.id), session=session, @@ -275,13 +281,13 @@ async def create_user( session.add(user_db) + await session.commit() + log.info( "[scopes] user created", user_id=user_db.id, ) - await session.commit() - response = Reference(id=user_db.id) return response @@ -289,27 +295,26 @@ async def create_user( async def create_organization( request: OrganizationRequest, + created_by_id: uuid.UUID, ) -> Reference: async with engine.core_session() as session: organization_db = OrganizationDB( - # id=uuid7() # use default - #
name=request.name, description=request.description, - # - owner="", # move 'owner' from here to membership 'role' - # type=... # remove 'type' + flags={"is_demo": False, "is_personal": False}, + owner_id=created_by_id, + created_by_id=created_by_id, ) session.add(organization_db) + await session.commit() + log.info( "[scopes] organization created", organization_id=organization_db.id, ) - await session.commit() - response = Reference(id=organization_db.id) return response @@ -331,14 +336,14 @@ async def create_workspace( session.add(workspace_db) + await session.commit() + log.info( "[scopes] workspace created", organization_id=workspace_db.organization_id, workspace_id=workspace_db.id, ) - await session.commit() - response = Reference(id=workspace_db.id) return response @@ -361,6 +366,8 @@ async def create_project( session.add(project_db) + await session.commit() + log.info( "[scopes] project created", organization_id=project_db.organization_id, @@ -368,8 +375,6 @@ async def create_project( project_id=project_db.id, ) - await session.commit() - response = Reference(id=project_db.id) return response diff --git a/api/oss/src/services/auth_service.py b/api/oss/src/services/auth_service.py index fc44984ae0..88ffa87e02 100644 --- a/api/oss/src/services/auth_service.py +++ b/api/oss/src/services/auth_service.py @@ -93,7 +93,17 @@ async def authentication_middleware(request: Request, call_next): """ try: - await _authenticate(request) + if "authorisationurl" in request.url.path: + log.info( + "[AUTH-ROUTE] authorisationurl path=%s root_path=%s raw_path=%s", + request.scope.get("path"), + request.scope.get("root_path"), + request.scope.get("raw_path"), + ) + + await _check_authentication_token(request) + + await _check_organization_policy(request) response = await call_next(request) @@ -134,7 +144,7 @@ async def authentication_middleware(request: Request, call_next): ) -async def _authenticate(request: Request): +async def _check_authentication_token(request: Request): try: if request.url.path.startswith(_PUBLIC_ENDPOINTS): return @@ -155,6 +165,7 @@ async def _authenticate(request: Request): access_token = auth_header[len(_ACCESS_TOKEN_PREFIX) :] return await verify_access_token( + request=request, access_token=access_token, ) @@ -233,6 +244,7 @@ async def _authenticate(request: Request): async def verify_access_token( + request: Request, access_token: str, ): try: @@ -242,6 +254,8 @@ async def verify_access_token( if access_token != _SECRET_KEY: raise UnauthorizedException() + request.state.admin = True + return except UnauthorizedException as exc: @@ -450,8 +464,6 @@ async def verify_bearer_token( organization_id = project.organization_id elif not query_project_id and query_workspace_id: - log.warning("[AUTH] Missing project_id in query params!") - workspace = await db_manager.get_workspace( workspace_id=query_workspace_id, ) @@ -474,8 +486,6 @@ async def verify_bearer_token( organization_id = workspace.organization_id else: - log.warning("[AUTH] Missing project_id in query params!") - if is_ee(): workspace_id = await db_manager_ee.get_default_workspace_id( user_id=user_id, @@ -753,3 +763,98 @@ async def sign_secret_token( except Exception as exc: # pylint: disable=bare-except raise InternalServerErrorException() from exc + + +async def _check_organization_policy(request: Request): + """ + Check organization authentication policy for EE mode. + + This is called after authentication to ensure the user's authentication method + is allowed by the organization's policy flags. 
+ + Skips policy checks for: + - Admin endpoints (using ACCESS_TOKEN) + - Invitation-related routes to allow users to accept invitations + """ + if not is_ee(): + return + + if hasattr(request.state, "admin") and request.state.admin: + return + + # Skip policy check for invitation routes + # Users must be able to accept invitations regardless of org auth policies + invitation_paths = [ + "/invite/accept", + "/invite/resend", + "/invite", + ] + + if any(path in request.url.path for path in invitation_paths): + return + + # Skip policy checks for org-agnostic endpoints (no explicit org context). + # This prevents SSO logins from being blocked by the default org policy + # before the frontend can redirect to the intended SSO org. + if ( + request.url.path in {"/api/profile", "/api/organizations"} + or request.url.path.startswith("/api/projects") + or request.url.path.startswith("/api/organizations/") + ): + # NOTE: These endpoints are hit during initial login bootstrap before the FE + # redirects to the intended org (e.g., SSO org). Enforcing org policy here + # can incorrectly fail against the default org and log the user out. + return + + organization_id = ( + request.state.organization_id + if hasattr(request.state, "organization_id") + else None + ) + user_id = request.state.user_id if hasattr(request.state, "user_id") else None + + if not organization_id or not user_id: + return + + from uuid import UUID + from oss.src.core.auth.service import AuthService + + # Get identities from session + try: + session = await get_session(request) # type: ignore + payload = session.get_access_token_payload() if session else {} # type: ignore + session_identities = payload.get("session_identities") or [] + user_identities = payload.get("user_identities", []) + except Exception: + session_identities = [] + user_identities = [] + return # Skip policy check on session errors + + auth_service = AuthService() + policy_error = await auth_service.check_organization_access( + UUID(user_id), UUID(organization_id), session_identities + ) + + if policy_error: + # Only enforce auth policy errors; skip membership errors (route handlers handle those) + error_code = policy_error.get("error") + if error_code in { + "AUTH_UPGRADE_REQUIRED", + "AUTH_SSO_DENIED", + "AUTH_DOMAIN_DENIED", + }: + detail = { + "error": policy_error.get("error"), + "message": policy_error.get( + "message", + "Authentication method not allowed for this organization", + ), + "required_methods": policy_error.get("required_methods", []), + "session_identities": session_identities, + "user_identities": user_identities, + "sso_providers": policy_error.get("sso_providers", []), + "current_domain": policy_error.get("current_domain"), + "allowed_domains": policy_error.get("allowed_domains", []), + } + raise HTTPException(status_code=403, detail=detail) + # If NOT_A_MEMBER, skip - let route handlers deal with it diff --git a/api/oss/src/services/db_manager.py b/api/oss/src/services/db_manager.py index 9c48adf279..48be7d88d0 100644 --- a/api/oss/src/services/db_manager.py +++ b/api/oss/src/services/db_manager.py @@ -19,6 +19,7 @@ from oss.src.models import converters from oss.src.services import user_service from oss.src.utils.common import is_ee +from oss.src.utils.env import env from oss.src.dbs.postgres.shared.engine import engine from oss.src.services.json_importer_helper import get_json from oss.src.utils.helpers import get_slug_from_name_and_id @@ -1071,10 +1072,12 @@ async def check_if_user_exists_and_create_organization(user_email: str): ) if 
user is None and (total_users == 0): - organization_name = user_email.split("@")[0] - organization_db = await create_organization(name=organization_name) + organization_db = await create_organization( + name="Organization", + ) workspace_db = await create_workspace( - name=organization_name, organization_id=str(organization_db.id) + name="Default", + organization_id=str(organization_db.id), ) # update default project with organization and workspace ids @@ -1082,7 +1085,7 @@ async def check_if_user_exists_and_create_organization(user_email: str): values_to_update={ "organization_id": organization_db.id, "workspace_id": workspace_db.id, - "project_name": organization_name, + "project_name": "Default", } ) return organization_db @@ -1302,7 +1305,7 @@ async def _assign_user_to_organization_oss( await get_organization_owner(organization_id=organization_id) except (NoResultFound, ValueError): await update_organization( - organization_id=organization_id, values_to_update={"owner": str(user_db.id)} + organization_id=organization_id, values_to_update={"owner_id": user_db.id} ) # Get project belonging to organization @@ -1339,28 +1342,52 @@ async def get_default_workspace_id_oss() -> str: return str(workspaces[0].id) -async def create_organization(name: str): +async def create_organization( + name: str, + owner_id: Optional[uuid.UUID] = None, + created_by_id: Optional[uuid.UUID] = None, +): """Create a new organization in the database. Args: name (str): The name of the organization + owner_id (Optional[uuid.UUID]): The UUID of the organization owner + created_by_id (Optional[uuid.UUID]): The UUID of the user who created the organization Returns: OrganizationDB: instance of organization """ async with engine.core_session() as session: - organization_db = OrganizationDB(name=name) + # For bootstrap scenario, use a placeholder UUID if not provided + _owner_id = owner_id or uuid.uuid4() + _created_by_id = created_by_id or _owner_id + + organization_db = OrganizationDB( + name=name, + flags={ + "is_demo": False, + "is_personal": False, + "allow_email": env.auth.email_enabled, + "allow_social": env.auth.oidc_enabled, + "allow_sso": False, + "allow_root": False, + "domains_only": False, + "auto_join": False, + }, + owner_id=_owner_id, + created_by_id=_created_by_id, + ) session.add(organization_db) + await session.commit() + log.info( "[scopes] organization created", organization_id=organization_db.id, ) - await session.commit() - return organization_db @@ -1385,14 +1412,14 @@ async def create_workspace(name: str, organization_id: str): session.add(workspace_db) + await session.commit() + log.info( "[scopes] workspace created", organization_id=organization_id, workspace_id=workspace_db.id, ) - await session.commit() - return workspace_db @@ -1413,6 +1440,15 @@ async def update_organization(organization_id: str, values_to_update: Dict[str, if organization is None: raise Exception(f"Organization with ID {organization_id} not found") + # Validate slug immutability: once set, cannot be changed + if "slug" in values_to_update: + new_slug = values_to_update["slug"] + if organization.slug is not None and new_slug != organization.slug: + raise ValueError( + f"Organization slug cannot be changed once set. 
" + f"Current slug: '{organization.slug}'" + ) + for key, value in values_to_update.items(): if hasattr(organization, key): setattr(organization, key, value) @@ -1433,7 +1469,7 @@ async def create_or_update_default_project(values_to_update: Dict[str, Any]): project = result.scalar() if project is None: - project = ProjectDB(project_name="Default Project", is_default=True) + project = ProjectDB(project_name="Default", is_default=True) session.add(project) @@ -1478,6 +1514,25 @@ async def get_organization_by_id(organization_id: str) -> OrganizationDB: return organization +async def get_organization_by_slug(organization_slug: str) -> OrganizationDB: + """ + Retrieve an organization from the database by its slug. + + Args: + organization_slug (str): The slug of the organization + + Returns: + OrganizationDB: The organization object if found, None otherwise. + """ + + async with engine.core_session() as session: + result = await session.execute( + select(OrganizationDB).filter_by(slug=organization_slug) + ) + organization = result.scalar() + return organization + + async def get_organization_owner(organization_id: str): """ Retrieve the owner of an organization from the database by its ID. @@ -1497,7 +1552,39 @@ async def get_organization_owner(organization_id: str): if organization is None: raise NoResultFound(f"Organization with ID {organization_id} not found") - return await get_user_with_id(user_id=str(organization.owner)) + return await get_user_with_id(user_id=str(organization.owner_id)) + + +async def get_user_organizations(user_id: str) -> List[OrganizationDB]: + """ + Retrieve all organizations that a user is a member of. + + Args: + user_id (str): The ID of the user + + Returns: + List[OrganizationDB]: List of organizations the user belongs to + """ + # Import OrganizationMemberDB conditionally (EE only) + if is_ee(): + from ee.src.models.db_models import OrganizationMemberDB + + async with engine.core_session() as session: + # Query organizations through organization_members table + result = await session.execute( + select(OrganizationDB) + .join( + OrganizationMemberDB, + OrganizationDB.id == OrganizationMemberDB.organization_id, + ) + .filter(OrganizationMemberDB.user_id == uuid.UUID(user_id)) + ) + organizations = result.scalars().all() + return list(organizations) + else: + # OSS mode: return empty list or implement simplified logic + # In OSS, users might only have one default organization + return [] async def get_workspace(workspace_id: str) -> WorkspaceDB: @@ -1623,6 +1710,23 @@ async def get_user_with_id(user_id: str) -> UserDB: return user +async def update_user_username(user_id: str, username: str) -> UserDB: + """Update a user's username.""" + + async with engine.core_session() as session: + result = await session.execute(select(UserDB).filter_by(id=uuid.UUID(user_id))) + user = result.scalars().first() + if user is None: + log.error("Failed to get user with id for username update") + raise NoResultFound(f"User with id {user_id} not found") + + user.username = username + user.updated_at = datetime.now(timezone.utc) + await session.commit() + await session.refresh(user) + return user + + async def get_user_with_email(email: str): """ Retrieves a user from the database based on their email address. 
diff --git a/api/oss/src/services/email_service.py b/api/oss/src/services/email_service.py index 650de559d9..1fa88996e2 100644 --- a/api/oss/src/services/email_service.py +++ b/api/oss/src/services/email_service.py @@ -16,7 +16,10 @@ log.info("✓ SendGrid enabled") else: sg = None - log.warn("✗ SendGrid disabled") + if env.sendgrid.api_key and not env.sendgrid.from_address: + log.warn("✗ SendGrid disabled: missing sender email address") + else: + log.warn("✗ SendGrid disabled") def read_email_template(template_file_path): diff --git a/api/oss/src/services/evaluators_service.py b/api/oss/src/services/evaluators_service.py index fc676b2b48..f3bddaf66c 100644 --- a/api/oss/src/services/evaluators_service.py +++ b/api/oss/src/services/evaluators_service.py @@ -1,33 +1,30 @@ -import re import json +import re import traceback -from typing import Any, Dict, Union, List, Optional +from typing import Any, Dict, List, Optional, Union -import litellm import httpx +import litellm +from agenta.sdk.managers.secrets import SecretsManager from fastapi import HTTPException from openai import AsyncOpenAI - -# COMMENTED OUT: autoevals dependency removed -# from autoevals.ragas import Faithfulness, ContextRelevancy - -from oss.src.utils.logging import get_module_logger -from oss.src.services.security import sandbox -from oss.src.models.shared_models import Error, Result from oss.src.models.api.evaluation_model import ( EvaluatorInputInterface, - EvaluatorOutputInterface, EvaluatorMappingInputInterface, EvaluatorMappingOutputInterface, + EvaluatorOutputInterface, ) +from oss.src.models.shared_models import Error, Result + +# COMMENTED OUT: autoevals dependency removed +# from autoevals.ragas import Faithfulness, ContextRelevancy +from oss.src.utils.logging import get_module_logger from oss.src.utils.traces import ( - remove_trace_prefix, process_distributed_trace_into_trace_tree, get_field_value_from_trace_tree, ) from agenta.sdk.contexts.running import RunningContext -from agenta.sdk.managers.secrets import SecretsManager from agenta.sdk.models.workflows import ( WorkflowServiceRequest, WorkflowServiceRequestData, @@ -261,7 +258,7 @@ async def auto_exact_match( message=str(e), ), ) - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -360,6 +357,139 @@ async def field_match_test(input: EvaluatorInputInterface) -> EvaluatorOutputInt return {"outputs": {"success": result}} +def get_nested_value(obj: Any, path: str) -> Any: + """ + Get value from nested object using resolve_any() with graceful None on failure. 
+ + Supports multiple path formats: + - Dot notation: "user.address.city", "items.0.name" + - JSON Path: "$.user.address.city", "$.items[0].name" + - JSON Pointer: "/user/address/city", "/items/0/name" + + Args: + obj: The object to traverse (dict or nested structure) + path: Path expression in any supported format + + Returns: + The value at the specified path, or None if path doesn't exist or resolution fails + """ + if obj is None: + return None + + try: + return resolve_any(path, obj) + except (KeyError, IndexError, ValueError, TypeError, ImportError): + return None + + +async def auto_json_multi_field_match( + inputs: Dict[str, Any], # pylint: disable=unused-argument + output: Union[str, Dict[str, Any]], + data_point: Dict[str, Any], + app_params: Dict[str, Any], # pylint: disable=unused-argument + settings_values: Dict[str, Any], + lm_providers_keys: Dict[str, Any], # pylint: disable=unused-argument +) -> Result: + """ + Evaluator that compares multiple configured fields in expected JSON against LLM output JSON. + Each configured field becomes a separate score in the output. + + Returns a Result with: + - type="object" containing one score per configured field plus overall score + - Each field score is 1.0 (match) or 0.0 (no match) + - Overall 'aggregate_score' is the average of all field scores + """ + try: + output = validate_string_output("json_multi_field_match", output) + correct_answer = get_correct_answer(data_point, settings_values) + eval_inputs = {"ground_truth": correct_answer, "prediction": output} + response = await json_multi_field_match( + input=EvaluatorInputInterface( + **{"inputs": eval_inputs, "settings": settings_values} + ) + ) + return Result(type="object", value=response["outputs"]) + except ValueError as e: + return Result( + type="error", + value=None, + error=Error( + message=str(e), + ), + ) + except Exception: + return Result( + type="error", + value=None, + error=Error( + message="Error during JSON Multi-Field Match evaluation", + stacktrace=str(traceback.format_exc()), + ), + ) + + +async def json_multi_field_match( + input: EvaluatorInputInterface, +) -> EvaluatorOutputInterface: + """ + Compare configured fields in expected JSON against LLM output JSON. + Each configured field becomes a separate score in the output.
+ + Args: + input: EvaluatorInputInterface with: + - inputs.prediction: JSON string from LLM output + - inputs.ground_truth: JSON string from test data column + - settings.fields: List of field paths (strings) e.g., ["name", "email", "user.address.city"] + + Returns: + EvaluatorOutputInterface with one score per configured field plus overall score + """ + fields = input.settings.get("fields", []) + + if not fields: + raise ValueError("No fields configured for comparison") + + # Parse both JSON objects + prediction = input.inputs.get("prediction", "") + ground_truth = input.inputs.get("ground_truth", "") + + try: + if isinstance(ground_truth, str): + expected = json.loads(ground_truth) + else: + expected = ground_truth + except json.JSONDecodeError as e: + raise ValueError(f"Invalid JSON in ground truth: {str(e)}") + + try: + if isinstance(prediction, str): + actual = json.loads(prediction) + else: + actual = prediction + except json.JSONDecodeError as e: + raise ValueError(f"Invalid JSON in prediction: {str(e)}") + + results: Dict[str, Any] = {} + matches = 0 + + for field_path in fields: + # Support nested fields with dot notation + expected_val = get_nested_value(expected, field_path) + actual_val = get_nested_value(actual, field_path) + + # Exact match comparison (v1 - always exact) + match = expected_val == actual_val + + results[field_path] = 1.0 if match else 0.0 + if match: + matches += 1 + + # Aggregate score is the percentage of matching fields + results["aggregate_score"] = matches / len(fields) if fields else 0.0 + + return {"outputs": results} + + async def auto_webhook_test( inputs: Dict[str, Any], output: Union[str, Dict[str, Any]], @@ -383,7 +513,7 @@ async def auto_webhook_test( type="error", value=None, error=Error( - message=f"[webhook evaluation] HTTP - {repr(e)}", + message=f"[webhook evaluator] HTTP - {repr(e)}", stacktrace=traceback.format_exc(), ), ) @@ -392,7 +522,7 @@ async def auto_webhook_test( type="error", value=None, error=Error( - message=f"[webhook evaluation] JSON - {repr(e)}", + message=f"[webhook evaluator] JSON - {repr(e)}", stacktrace=traceback.format_exc(), ), ) @@ -401,7 +531,7 @@ async def auto_webhook_test( type="error", value=None, error=Error( - message=f"[webhook evaluation] Exception - {repr(e)} ", + message=f"[webhook evaluator] Exception - {repr(e)} ", stacktrace=traceback.format_exc(), ), ) @@ -437,13 +567,13 @@ async def auto_custom_code_run( "prediction": output, "ground_truth": correct_answer, } - response = await custom_code_run( + response = await sdk_custom_code_run( input=EvaluatorInputInterface( **{"inputs": inputs, "settings": settings_values} ) ) return Result(type="number", value=response["outputs"]["score"]) - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -454,18 +584,6 @@ async def auto_custom_code_run( ) -async def custom_code_run(input: EvaluatorInputInterface) -> EvaluatorOutputInterface: - result = sandbox.execute_code_safely( - app_params=input.inputs["app_config"], - inputs=input.inputs, - output=input.inputs["prediction"], - correct_answer=input.inputs["ground_truth"], - code=input.settings["code"], - datapoint=input.inputs["ground_truth"], - ) - return {"outputs": {"score": result}} - - async def sdk_custom_code_run( input: EvaluatorInputInterface, ) -> EvaluatorOutputInterface: @@ -483,7 +601,7 @@ async def sdk_custom_code_run( ) threshold = settings.get("threshold", 0.5) - runtime = settings.get("runtime") + runtime = 
settings.get("runtime", "python") workflow = sdk_auto_custom_code_run( code=str(code), @@ -504,6 +622,12 @@ async def sdk_custom_code_run( ) response = await workflow.invoke(request=request) + + # Check for error status and propagate it + if response.status and response.status.code and response.status.code >= 400: + error_message = response.status.message or "Custom code execution failed" + raise RuntimeError(error_message) + result = response.data.outputs if response.data else None if isinstance(result, dict) and "score" in result: @@ -560,7 +684,7 @@ async def auto_ai_critique( ) ) return Result(type="number", value=response["outputs"]["score"]) - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -571,9 +695,7 @@ async def auto_ai_critique( ) -import json -import re -from typing import Any, Dict, Iterable, Tuple, Optional +from typing import Any, Dict, Iterable, Tuple try: import jsonpath # ✅ use module API @@ -841,7 +963,7 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac if inputs and isinstance(inputs, dict) and correct_answer_key: correct_answer = inputs[correct_answer_key] - secrets = await SecretsManager.retrieve_secrets() + secrets, _, _ = await SecretsManager.retrieve_secrets() openai_api_key = None # secrets.get("OPENAI_API_KEY") anthropic_api_key = None # secrets.get("ANTHROPIC_API_KEY") @@ -1025,7 +1147,7 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac if inputs and isinstance(inputs, dict) and correct_answer_key: correct_answer = inputs[correct_answer_key] - secrets = await SecretsManager.retrieve_secrets() + secrets, _, _ = await SecretsManager.retrieve_secrets() openai_api_key = None # secrets.get("OPENAI_API_KEY") anthropic_api_key = None # secrets.get("ANTHROPIC_API_KEY") @@ -1210,7 +1332,7 @@ async def auto_starts_with( ) ) return Result(type="bool", value=response["outputs"]["success"]) - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -1252,7 +1374,7 @@ async def auto_ends_with( ) result = Result(type="bool", value=response["outputs"]["success"]) return result - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -1294,7 +1416,7 @@ async def auto_contains( ) result = Result(type="bool", value=response["outputs"]["success"]) return result - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -1336,7 +1458,7 @@ async def auto_contains_any( ) result = Result(type="bool", value=response["outputs"]["success"]) return result - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -1379,7 +1501,7 @@ async def auto_contains_all( ) result = Result(type="bool", value=response["outputs"]["success"]) return result - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -1427,7 +1549,7 @@ async def auto_contains_json( input=EvaluatorInputInterface(**{"inputs": {"prediction": output}}) ) return Result(type="bool", value=response["outputs"]["success"]) - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: 
disable=broad-except return Result( type="error", value=None, @@ -1445,7 +1567,7 @@ async def contains_json(input: EvaluatorInputInterface) -> EvaluatorOutputInterf potential_json = str(input.inputs["prediction"])[start_index:end_index] json.loads(potential_json) contains_json = True - except (ValueError, json.JSONDecodeError) as e: + except (ValueError, json.JSONDecodeError): contains_json = False return {"outputs": {"success": contains_json}} @@ -1908,7 +2030,7 @@ async def auto_levenshtein_distance( message=str(e), ), ) - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -1948,7 +2070,7 @@ async def auto_similarity_match( message=str(e), ), ) - except Exception as e: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except return Result( type="error", value=None, @@ -2058,6 +2180,7 @@ async def auto_semantic_similarity( "auto_exact_match": auto_exact_match, "auto_regex_test": auto_regex_test, "field_match_test": auto_field_match_test, + "json_multi_field_match": auto_json_multi_field_match, "auto_webhook_test": auto_webhook_test, "auto_custom_code_run": auto_custom_code_run, "auto_ai_critique": auto_ai_critique, @@ -2080,6 +2203,7 @@ async def auto_semantic_similarity( "auto_exact_match": exact_match, "auto_regex_test": regex_test, "field_match_test": field_match_test, + "json_multi_field_match": json_multi_field_match, "auto_webhook_test": webhook_test, "auto_custom_code_run": sdk_custom_code_run, "auto_ai_critique": ai_critique, diff --git a/api/oss/src/services/organization_service.py b/api/oss/src/services/organization_service.py index 53d0d35852..401e5e8072 100644 --- a/api/oss/src/services/organization_service.py +++ b/api/oss/src/services/organization_service.py @@ -119,7 +119,10 @@ async def send_invitation_email( username_placeholder=user.username, action_placeholder="invited you to join", workspace_placeholder="their organization", - call_to_action=f"""Click the link below to accept the invitation:
<br/><br/><a href="...">Accept Invitation</a>""", + call_to_action=( + "Click the link below to accept the invitation:<br/><br/>" + f'<a href="...">Accept Invitation</a>' + ), ) if not env.sendgrid.from_address: @@ -249,8 +252,13 @@ async def resend_user_organization_invite( if existing_invitation: invitation = existing_invitation elif existing_role: - # Create a new invitation - invitation = await create_invitation("editor", project_id, payload.email) + # Create a new invitation with the previous role + invitation = await create_invitation(existing_role, project_id, payload.email) + else: + raise HTTPException( + status_code=404, + detail="No existing invitation found for the user", + ) # Get project by id project_db = await db_manager.get_project_by_id(project_id=project_id) diff --git a/api/oss/src/services/security/sandbox.py deleted file mode 100644 index 1102dac308..0000000000 --- a/api/oss/src/services/security/sandbox.py +++ /dev/null @@ -1,119 +0,0 @@ -from typing import Union, Text, Dict, Any - -from RestrictedPython import safe_builtins, compile_restricted, utility_builtins -from RestrictedPython.Eval import ( - default_guarded_getiter, - default_guarded_getitem, -) -from RestrictedPython.Guards import ( - guarded_iter_unpack_sequence, - full_write_guard, -) - - -def is_import_safe(python_code: Text) -> bool: - """Checks if the imports in the python code contains a system-level import. - - Args: - python_code (str): The Python code to be executed - - Returns: - bool - module is secured or not - """ - - disallowed_imports = ["os", "subprocess", "threading", "multiprocessing"] - for import_ in disallowed_imports: - if import_ in python_code: - return False - return True - - -def execute_code_safely( - app_params: Dict[str, str], - inputs: Dict[str, str], - output: Union[str, Dict[str, Any]], - correct_answer: str, # for backward compatibility reasons - code: Text, - datapoint: Dict[str, str], -) -> Union[float, None]: - """ - Execute the provided Python code safely using RestrictedPython. - - Args: - - app_params (Dict[str, str]): The parameters of the app variant. - - inputs (dict): Inputs to be used during code execution. - - output (str): The output of the app variant after being called. - - correct_answer (str): The correct answer (or target) of the app variant. - - code (Text): The Python code to be executed. - - datapoint (Dict[str, str]): The test datapoint. - - Returns: - - (float): Result of the execution if successful. Should be between 0 and 1. - - None if execution fails or result is not a float between 0 and 1.
- """ - # Define the available built-ins - local_builtins = safe_builtins.copy() - - # Add the __import__ built-in function to the local builtins - local_builtins["__import__"] = __import__ - - # Define supported packages - allowed_imports = [ - "math", - "random", - "datetime", - "json", - "httpx", - "typing", - ] - - # Create a dictionary to simulate allowed imports - allowed_modules = {} - for package_name in allowed_imports: - allowed_modules[package_name] = __import__(package_name) - - # Add the allowed modules to the local built-ins - local_builtins.update(allowed_modules) - local_builtins.update(utility_builtins) - - # Define the environment for the code execution - environment = { - "_getiter_": default_guarded_getiter, - "_getitem_": default_guarded_getitem, - "_iter_unpack_sequence_": guarded_iter_unpack_sequence, - "_write_": full_write_guard, - "__builtins__": local_builtins, - } - - # Compile the code in a restricted environment - byte_code = compile_restricted(code, filename="", mode="exec") - - # Call the evaluation function, extract the result if it exists - # and is a float between 0 and 1 - try: - # Execute the code - exec(byte_code, environment) - - # Call the evaluation function, extract the result - result = environment["evaluate"](app_params, inputs, output, correct_answer) - - # Attempt to convert result to float - if isinstance(result, (float, int, str)): - try: - result = float(result) - except ValueError as e: - raise ValueError(f"Result cannot be converted to float: {e}") - - if not isinstance(result, float): - raise TypeError(f"Result is not a float after conversion: {type(result)}") - - return result - - except KeyError as e: - raise KeyError(f"Missing expected key in environment: {e}") - - except SyntaxError as e: - raise SyntaxError(f"Syntax error in provided code: {e}") - - except Exception as e: - raise RuntimeError(f"Error during code execution: {e}") diff --git a/api/oss/src/services/user_service.py b/api/oss/src/services/user_service.py index e05662da68..d254510e72 100644 --- a/api/oss/src/services/user_service.py +++ b/api/oss/src/services/user_service.py @@ -1,5 +1,5 @@ from sqlalchemy.future import select -from sqlalchemy.exc import NoResultFound +from sqlalchemy.exc import NoResultFound, IntegrityError from supertokens_python.recipe.emailpassword.asyncio import create_reset_password_link from oss.src.utils.env import env @@ -12,32 +12,87 @@ log = get_module_logger(__name__) -async def create_new_user(payload: dict) -> UserDB: +async def check_user_exists(email: str) -> bool: """ - This function creates a new user. + Check if a user with the given email already exists. Args: - payload (dict): The payload data to create the user. + email (str): The email to check. Returns: - UserDB: The created user object. + bool: True if user exists, False otherwise. """ + user = await db_manager.get_user_with_email(email) + return user is not None - async with engine.core_session() as session: - user = UserDB(**payload) - session.add(user) +async def delete_user(user_id: str) -> None: + """ + Delete a user by their ID. - log.info( - "[scopes] user created", - user_id=user.id, - ) + Args: + user_id (str): The ID of the user to delete. + Raises: + NoResultFound: If user with the given ID is not found. 
+ """ + async with engine.core_session() as session: + result = await session.execute(select(UserDB).filter_by(id=user_id)) + user = result.scalars().first() + + if not user: + raise NoResultFound(f"User with id {user_id} not found.") + + await session.delete(user) await session.commit() - await session.refresh(user) - return user +async def create_new_user(payload: dict) -> UserDB: + """ + Create a new user or return existing user if already exists (idempotent). + + This function is safe to call multiple times in parallel with the same email. + It implements check-before-create with error fallback to handle race conditions. + + Args: + payload (dict): The payload data to create the user (must include 'email'). + + Returns: + UserDB: The created or existing user object. + """ + + # Check if user already exists (happy path optimization) + existing_user = await db_manager.get_user_with_email(payload["email"]) + if existing_user: + return existing_user + + # Attempt to create new user + try: + async with engine.core_session() as session: + user = UserDB(**payload) + + session.add(user) + + await session.commit() + + await session.refresh(user) + + log.info( + "[scopes] user created", + user_id=user.id, + ) + + return user + + except IntegrityError: + # Race condition: another request created user between check and create + # Fetch and return the existing user + existing_user = await db_manager.get_user_with_email(payload["email"]) + if existing_user: + return existing_user + else: + # Should never happen, but re-raise if user still doesn't exist + raise async def update_user(user_uid: str, payload: UserUpdate) -> UserDB: diff --git a/api/oss/src/utils/caching.py b/api/oss/src/utils/caching.py index b87b4a0ffe..543888e683 100644 --- a/api/oss/src/utils/caching.py +++ b/api/oss/src/utils/caching.py @@ -12,6 +12,7 @@ log = get_module_logger(__name__) +AGENTA_LOCK_TTL = 15 # 5 seconds AGENTA_CACHE_TTL = 5 * 60 # 5 minutes AGENTA_CACHE_LOCAL_TTL = 60 # 60 seconds for local in-memory cache (Layer 1) @@ -315,6 +316,10 @@ async def set_cache( value: Optional[Any] = None, ttl: Optional[int] = AGENTA_CACHE_TTL, ) -> Optional[bool]: + # Noop if caching is disabled + if not env.redis.cache_enabled: + return None + try: cache_name = _pack( namespace=namespace, @@ -386,6 +391,10 @@ async def get_cache( jitter: Optional[float] = AGENTA_CACHE_JITTER_SPREAD, leakage: Optional[float] = AGENTA_CACHE_LEAKAGE_PROBABILITY, ) -> Optional[Any]: + # Noop if caching is disabled - always return cache miss + if not env.redis.cache_enabled: + return None + try: cache_name = _pack( namespace=namespace, @@ -441,6 +450,10 @@ async def invalidate_cache( project_id: Optional[str] = None, user_id: Optional[str] = None, ) -> Optional[bool]: + # Noop if caching is disabled + if not env.redis.cache_enabled: + return None + try: cache_name = None @@ -535,3 +548,131 @@ async def invalidate_cache( log.warn(e) return None + + +async def acquire_lock( + namespace: str, + key: Optional[Union[str, dict]] = None, + project_id: Optional[str] = None, + user_id: Optional[str] = None, + ttl: int = AGENTA_LOCK_TTL, +) -> Optional[str]: + """Acquire a distributed lock using Redis SET NX (atomic check-and-set). + + This prevents race conditions in distributed systems by ensuring only one + process can acquire the lock at a time. 
+ + Args: + namespace: Lock namespace (e.g., "account-creation", "task-processing") + key: Unique identifier for the lock (e.g., email, user_id, task_id) + project_id: Optional project scope + user_id: Optional user scope + ttl: Lock expiration time in seconds (default: 15). Auto-releases after TTL. + + Returns: + Lock key string if lock was acquired, None if lock is already held by another process. + + Example: + lock_key = await acquire_lock(namespace="account-creation", key=email, ttl=10) + if not lock_key: + # Another process has the lock + return + + try: + # Do work while holding the lock + await create_account(email) + finally: + # Always release the lock + await release_lock(namespace="account-creation", key=email) + """ + try: + lock_key = _pack( + namespace=f"lock:{namespace}", + key=key, + project_id=project_id, + user_id=user_id, + ) + + # Atomic SET NX: Returns True if lock acquired, False if already held + acquired = await r.set(lock_key, "1", nx=True, ex=ttl) + + if acquired: + if CACHE_DEBUG: + log.debug( + "[lock] ACQUIRED", + key=lock_key, + ttl=ttl, + ) + return lock_key + else: + if CACHE_DEBUG: + log.debug( + "[lock] BLOCKED", + key=lock_key, + ) + return None + + except Exception as e: + log.error( + f"[lock] ACQUIRE ERROR: namespace={namespace} key={key} error={e}", + exc_info=True, + ) + return None + + +async def release_lock( + namespace: str, + key: Optional[Union[str, dict]] = None, + project_id: Optional[str] = None, + user_id: Optional[str] = None, +) -> bool: + """Release a distributed lock acquired with acquire_lock(). + + Args: + namespace: Lock namespace (same as used in acquire_lock) + key: Lock key (same as used in acquire_lock) + project_id: Optional project ID (same as used in acquire_lock) + user_id: Optional user ID (same as used in acquire_lock) + + Returns: + True if lock was released, False if already expired or on error + + Example: + lock_acquired = await acquire_lock(namespace="account-creation", key=email) + if lock_acquired: + try: + # ... critical section ...
+ finally: + await release_lock(namespace="account-creation", key=email) + """ + try: + lock_key = _pack( + namespace=f"lock:{namespace}", + key=key, + project_id=project_id, + user_id=user_id, + ) + + deleted = await r.delete(lock_key) + + if deleted: + if CACHE_DEBUG: + log.debug( + "[lock] RELEASED", + key=lock_key, + ) + return True + else: + if CACHE_DEBUG: + log.debug( + "[lock] ALREADY EXPIRED", + key=lock_key, + ) + return False + + except Exception as e: + log.error( + f"[lock] RELEASE ERROR: namespace={namespace} key={key} error={e}", + exc_info=True, + ) + return False diff --git a/api/oss/src/utils/env.py b/api/oss/src/utils/env.py index d7e6fba7b7..7298f5a000 100644 --- a/api/oss/src/utils/env.py +++ b/api/oss/src/utils/env.py @@ -43,27 +43,92 @@ def validate_config(self) -> None: class AuthConfig(BaseModel): """Authentication configuration - auto-detects enabled methods from env vars""" - authn_email: str | None = os.getenv("AGENTA_AUTHN_EMAIL") + supertokens_email_disabled: bool = ( + os.getenv("SUPERTOKENS_EMAIL_DISABLED") or "false" + ).lower() in _TRUTHY google_oauth_client_id: str | None = os.getenv("GOOGLE_OAUTH_CLIENT_ID") google_oauth_client_secret: str | None = os.getenv("GOOGLE_OAUTH_CLIENT_SECRET") + google_workspaces_oauth_client_id: str | None = os.getenv( + "GOOGLE_WORKSPACES_OAUTH_CLIENT_ID" + ) + google_workspaces_oauth_client_secret: str | None = os.getenv( + "GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET" + ) + google_workspaces_hd: str | None = os.getenv("GOOGLE_WORKSPACES_HD") + github_oauth_client_id: str | None = os.getenv("GITHUB_OAUTH_CLIENT_ID") github_oauth_client_secret: str | None = os.getenv("GITHUB_OAUTH_CLIENT_SECRET") + facebook_oauth_client_id: str | None = os.getenv("FACEBOOK_OAUTH_CLIENT_ID") + facebook_oauth_client_secret: str | None = os.getenv("FACEBOOK_OAUTH_CLIENT_SECRET") + + apple_oauth_client_id: str | None = os.getenv("APPLE_OAUTH_CLIENT_ID") + apple_oauth_client_secret: str | None = os.getenv("APPLE_OAUTH_CLIENT_SECRET") + apple_key_id: str | None = os.getenv("APPLE_KEY_ID") + apple_team_id: str | None = os.getenv("APPLE_TEAM_ID") + apple_private_key: str | None = os.getenv("APPLE_PRIVATE_KEY") + + discord_oauth_client_id: str | None = os.getenv("DISCORD_OAUTH_CLIENT_ID") + discord_oauth_client_secret: str | None = os.getenv("DISCORD_OAUTH_CLIENT_SECRET") + + twitter_oauth_client_id: str | None = os.getenv("TWITTER_OAUTH_CLIENT_ID") + twitter_oauth_client_secret: str | None = os.getenv("TWITTER_OAUTH_CLIENT_SECRET") + + gitlab_oauth_client_id: str | None = os.getenv("GITLAB_OAUTH_CLIENT_ID") + gitlab_oauth_client_secret: str | None = os.getenv("GITLAB_OAUTH_CLIENT_SECRET") + gitlab_base_url: str | None = os.getenv("GITLAB_BASE_URL") + + bitbucket_oauth_client_id: str | None = os.getenv("BITBUCKET_OAUTH_CLIENT_ID") + bitbucket_oauth_client_secret: str | None = os.getenv( + "BITBUCKET_OAUTH_CLIENT_SECRET" + ) + + linkedin_oauth_client_id: str | None = os.getenv("LINKEDIN_OAUTH_CLIENT_ID") + linkedin_oauth_client_secret: str | None = os.getenv("LINKEDIN_OAUTH_CLIENT_SECRET") + + okta_oauth_client_id: str | None = os.getenv("OKTA_OAUTH_CLIENT_ID") + okta_oauth_client_secret: str | None = os.getenv("OKTA_OAUTH_CLIENT_SECRET") + okta_domain: str | None = os.getenv("OKTA_DOMAIN") + + azure_ad_oauth_client_id: str | None = os.getenv( + "AZURE_AD_OAUTH_CLIENT_ID" + ) or os.getenv("ACTIVE_DIRECTORY_OAUTH_CLIENT_ID") + azure_ad_oauth_client_secret: str | None = os.getenv( + "AZURE_AD_OAUTH_CLIENT_SECRET" + ) or 
os.getenv("ACTIVE_DIRECTORY_OAUTH_CLIENT_SECRET") + azure_ad_directory_id: str | None = os.getenv("AZURE_AD_DIRECTORY_ID") or os.getenv( + "ACTIVE_DIRECTORY_DIRECTORY_ID" + ) + + boxy_saml_oauth_client_id: str | None = os.getenv("BOXY_SAML_OAUTH_CLIENT_ID") + boxy_saml_oauth_client_secret: str | None = os.getenv( + "BOXY_SAML_OAUTH_CLIENT_SECRET" + ) + boxy_saml_url: str | None = os.getenv("BOXY_SAML_URL") + model_config = ConfigDict(extra="ignore") def model_post_init(self, _): - """Ensure at least one auth method is enabled; fallback to password email.""" - if not self.authn_email and not self.oidc_enabled: - self.authn_email = "password" + """Keep config normalized without relying on deprecated AGENTA_AUTHN_EMAIL.""" + return @property def email_method(self) -> str: """Returns email auth method: 'password', 'otp', or '' (disabled)""" - if self.authn_email in ("password", "otp"): - return self.authn_email - return "" + if self.supertokens_email_disabled: + return "" + + sendgrid_enabled = bool( + os.getenv("SENDGRID_API_KEY") + and ( + os.getenv("SENDGRID_FROM_ADDRESS") + or os.getenv("AGENTA_AUTHN_EMAIL_FROM") + or os.getenv("AGENTA_SEND_EMAIL_FROM_ADDRESS") + ) + ) + return "otp" if sendgrid_enabled else "password" @property def email_enabled(self) -> bool: @@ -75,15 +140,107 @@ def google_enabled(self) -> bool: """Google OAuth enabled if both credentials present""" return bool(self.google_oauth_client_id and self.google_oauth_client_secret) + @property + def google_workspaces_enabled(self) -> bool: + """Google Workspaces OAuth enabled if both credentials present""" + return bool( + self.google_workspaces_oauth_client_id + and self.google_workspaces_oauth_client_secret + ) + @property def github_enabled(self) -> bool: """GitHub OAuth enabled if both credentials present""" return bool(self.github_oauth_client_id and self.github_oauth_client_secret) + @property + def facebook_enabled(self) -> bool: + """Facebook OAuth enabled if both credentials present""" + return bool(self.facebook_oauth_client_id and self.facebook_oauth_client_secret) + + @property + def apple_enabled(self) -> bool: + """Apple OAuth enabled if client ID present and secret or key data provided""" + return bool( + self.apple_oauth_client_id + and ( + self.apple_oauth_client_secret + or (self.apple_key_id and self.apple_team_id and self.apple_private_key) + ) + ) + + @property + def discord_enabled(self) -> bool: + """Discord OAuth enabled if both credentials present""" + return bool(self.discord_oauth_client_id and self.discord_oauth_client_secret) + + @property + def twitter_enabled(self) -> bool: + """Twitter OAuth enabled if both credentials present""" + return bool(self.twitter_oauth_client_id and self.twitter_oauth_client_secret) + + @property + def gitlab_enabled(self) -> bool: + """GitLab OAuth enabled if both credentials present""" + return bool(self.gitlab_oauth_client_id and self.gitlab_oauth_client_secret) + + @property + def bitbucket_enabled(self) -> bool: + """Bitbucket OAuth enabled if both credentials present""" + return bool( + self.bitbucket_oauth_client_id and self.bitbucket_oauth_client_secret + ) + + @property + def linkedin_enabled(self) -> bool: + """LinkedIn OAuth enabled if both credentials present""" + return bool(self.linkedin_oauth_client_id and self.linkedin_oauth_client_secret) + + @property + def okta_enabled(self) -> bool: + """Okta OAuth enabled if credentials and domain are present""" + return bool( + self.okta_oauth_client_id + and self.okta_oauth_client_secret + and self.okta_domain + 
@@ -75,15 +140,107 @@ def google_enabled(self) -> bool:
         """Google OAuth enabled if both credentials present"""
         return bool(self.google_oauth_client_id and self.google_oauth_client_secret)
 
+    @property
+    def google_workspaces_enabled(self) -> bool:
+        """Google Workspaces OAuth enabled if both credentials present"""
+        return bool(
+            self.google_workspaces_oauth_client_id
+            and self.google_workspaces_oauth_client_secret
+        )
+
     @property
     def github_enabled(self) -> bool:
         """GitHub OAuth enabled if both credentials present"""
         return bool(self.github_oauth_client_id and self.github_oauth_client_secret)
 
+    @property
+    def facebook_enabled(self) -> bool:
+        """Facebook OAuth enabled if both credentials present"""
+        return bool(self.facebook_oauth_client_id and self.facebook_oauth_client_secret)
+
+    @property
+    def apple_enabled(self) -> bool:
+        """Apple OAuth enabled if client ID present and secret or key data provided"""
+        return bool(
+            self.apple_oauth_client_id
+            and (
+                self.apple_oauth_client_secret
+                or (self.apple_key_id and self.apple_team_id and self.apple_private_key)
+            )
+        )
+
+    @property
+    def discord_enabled(self) -> bool:
+        """Discord OAuth enabled if both credentials present"""
+        return bool(self.discord_oauth_client_id and self.discord_oauth_client_secret)
+
+    @property
+    def twitter_enabled(self) -> bool:
+        """Twitter OAuth enabled if both credentials present"""
+        return bool(self.twitter_oauth_client_id and self.twitter_oauth_client_secret)
+
+    @property
+    def gitlab_enabled(self) -> bool:
+        """GitLab OAuth enabled if both credentials present"""
+        return bool(self.gitlab_oauth_client_id and self.gitlab_oauth_client_secret)
+
+    @property
+    def bitbucket_enabled(self) -> bool:
+        """Bitbucket OAuth enabled if both credentials present"""
+        return bool(
+            self.bitbucket_oauth_client_id and self.bitbucket_oauth_client_secret
+        )
+
+    @property
+    def linkedin_enabled(self) -> bool:
+        """LinkedIn OAuth enabled if both credentials present"""
+        return bool(self.linkedin_oauth_client_id and self.linkedin_oauth_client_secret)
+
+    @property
+    def okta_enabled(self) -> bool:
+        """Okta OAuth enabled if credentials and domain are present"""
+        return bool(
+            self.okta_oauth_client_id
+            and self.okta_oauth_client_secret
+            and self.okta_domain
+        )
+
+    @property
+    def azure_ad_enabled(self) -> bool:
+        """Azure AD OAuth enabled if credentials and directory ID are present"""
+        return bool(
+            self.azure_ad_oauth_client_id
+            and self.azure_ad_oauth_client_secret
+            and self.azure_ad_directory_id
+        )
+
+    @property
+    def boxy_saml_enabled(self) -> bool:
+        """BoxySAML OAuth enabled if credentials and Boxy URL are present"""
+        return bool(
+            self.boxy_saml_oauth_client_id
+            and self.boxy_saml_oauth_client_secret
+            and self.boxy_saml_url
+        )
+
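All of the `*_enabled` checks above follow one pattern: a provider is enabled only when every required part of its configuration is present, and the credential fields themselves capture their environment variables when the class body executes, i.e. at import time. A self-contained sketch of that pattern, using Okta and a hypothetical `OktaAuthSketch` stand-in rather than the real config class:

```python
import os

from pydantic import BaseModel, ConfigDict

# Set *before* the class is defined: the defaults below are evaluated
# once, when the class body runs, not when the model is instantiated.
os.environ["OKTA_OAUTH_CLIENT_ID"] = "demo-client-id"
os.environ["OKTA_OAUTH_CLIENT_SECRET"] = "demo-client-secret"
os.environ["OKTA_DOMAIN"] = "example.okta.com"


class OktaAuthSketch(BaseModel):
    okta_oauth_client_id: str | None = os.getenv("OKTA_OAUTH_CLIENT_ID")
    okta_oauth_client_secret: str | None = os.getenv("OKTA_OAUTH_CLIENT_SECRET")
    okta_domain: str | None = os.getenv("OKTA_DOMAIN")

    model_config = ConfigDict(extra="ignore")

    @property
    def okta_enabled(self) -> bool:
        # Enabled only when every required part is present, as above.
        return bool(
            self.okta_oauth_client_id
            and self.okta_oauth_client_secret
            and self.okta_domain
        )


assert OktaAuthSketch().okta_enabled

# Dropping any one part disables the provider.
assert not OktaAuthSketch(okta_domain=None).okta_enabled
```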
     @property
     def oidc_enabled(self) -> bool:
         """Any OIDC provider enabled"""
-        return self.google_enabled or self.github_enabled
+        return (
+            self.google_enabled
+            or self.google_workspaces_enabled
+            or self.github_enabled
+            or self.facebook_enabled
+            or self.apple_enabled
+            or self.discord_enabled
+            or self.twitter_enabled
+            or self.gitlab_enabled
+            or self.bitbucket_enabled
+            or self.linkedin_enabled
+            or self.okta_enabled
+            or self.azure_ad_enabled
+            or self.boxy_saml_enabled
+        )
 
     @property
     def any_enabled(self) -> bool:
@@ -96,17 +253,24 @@ def validate_config(self) -> None:
         if not self.any_enabled:
             raise ValueError(
                 "At least one authentication method must be configured:\n"
-                "  - AGENTA_AUTHN_EMAIL=password or AGENTA_AUTHN_EMAIL=otp\n"
-                "  - GOOGLE_OAUTH_CLIENT_ID + GOOGLE_OAUTH_CLIENT_SECRET\n"
-                "  - GITHUB_OAUTH_CLIENT_ID + GITHUB_OAUTH_CLIENT_SECRET\n"
+                "  - SUPERTOKENS_EMAIL_DISABLED must be false (or unset) for email auth\n"
+                "  - Any supported OAuth provider credentials, e.g.\n"
+                "      GOOGLE_OAUTH_CLIENT_ID + GOOGLE_OAUTH_CLIENT_SECRET\n"
+                "      GITHUB_OAUTH_CLIENT_ID + GITHUB_OAUTH_CLIENT_SECRET\n"
+                "      FACEBOOK_OAUTH_CLIENT_ID + FACEBOOK_OAUTH_CLIENT_SECRET\n"
+                "      APPLE_OAUTH_CLIENT_ID + APPLE_OAUTH_CLIENT_SECRET (or APPLE_KEY_ID/APPLE_TEAM_ID/APPLE_PRIVATE_KEY)\n"
+                "      DISCORD_OAUTH_CLIENT_ID + DISCORD_OAUTH_CLIENT_SECRET\n"
+                "      TWITTER_OAUTH_CLIENT_ID + TWITTER_OAUTH_CLIENT_SECRET\n"
+                "      GITLAB_OAUTH_CLIENT_ID + GITLAB_OAUTH_CLIENT_SECRET\n"
+                "      BITBUCKET_OAUTH_CLIENT_ID + BITBUCKET_OAUTH_CLIENT_SECRET\n"
+                "      LINKEDIN_OAUTH_CLIENT_ID + LINKEDIN_OAUTH_CLIENT_SECRET\n"
+                "      OKTA_OAUTH_CLIENT_ID + OKTA_OAUTH_CLIENT_SECRET + OKTA_DOMAIN\n"
+                "      AZURE_AD_OAUTH_CLIENT_ID + AZURE_AD_OAUTH_CLIENT_SECRET + AZURE_AD_DIRECTORY_ID\n"
+                "      BOXY_SAML_OAUTH_CLIENT_ID + BOXY_SAML_OAUTH_CLIENT_SECRET + BOXY_SAML_URL\n"
+                "      GOOGLE_WORKSPACES_OAUTH_CLIENT_ID + GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET\n"
             )
 
-        # Email auth value must be valid
-        if self.authn_email and self.authn_email not in ("password", "otp"):
-            raise ValueError(
-                f"Invalid AGENTA_AUTHN_EMAIL value: '{self.authn_email}'. "
-                "Must be 'password', 'otp', or empty (disabled)."
-            )
+        return
 
 
 class PostHogConfig(BaseModel):
@@ -117,7 +281,10 @@ class PostHogConfig(BaseModel):
         or os.getenv("POSTHOG_HOST")
         or "https://alef.agenta.ai"
     )
-    api_key: str | None = os.getenv("POSTHOG_API_KEY")
+    api_key: str | None = (
+        os.getenv("POSTHOG_API_KEY")
+        or "phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7"
+    )
 
     model_config = ConfigDict(extra="ignore")
 
@@ -165,6 +332,8 @@ class SendgridConfig(BaseModel):
     from_address: str | None = (
         os.getenv("SENDGRID_FROM_ADDRESS")
         #
+        or os.getenv("AGENTA_AUTHN_EMAIL_FROM")
+        #
         or os.getenv("AGENTA_SEND_EMAIL_FROM_ADDRESS")
     )
 
@@ -172,8 +341,8 @@
     @property
     def enabled(self) -> bool:
-        """SendGrid enabled if API key present"""
-        return bool(self.api_key)
+        """SendGrid enabled only if API key and from address are present"""
+        return bool(self.api_key and self.from_address)
 
 
 class CrispConfig(BaseModel):
@@ -315,6 +484,12 @@ class RedisConfig(BaseModel):
         or "redis://redis-durable:6381/0"
     )
 
+    # Cache control flag - defaults to true
+    cache_enabled: bool = os.getenv("AGENTA_CACHE_ENABLED", "true").lower() in (
+        "true",
+        "1",
+    )
+
     model_config = ConfigDict(extra="ignore")
 
     @property
@@ -328,17 +503,19 @@ class AgentaConfig(BaseModel):
     license: str = _LICENSE
 
-    api_url: str = os.getenv("AGENTA_API_URL") or "http://localhost/api"
     web_url: str = os.getenv("AGENTA_WEB_URL") or "http://localhost"
     services_url: str = os.getenv("AGENTA_SERVICES_URL") or "http://localhost/services"
+    api_url: str = os.getenv("AGENTA_API_URL") or "http://localhost/api"
 
-    auth_key: str = os.getenv("AGENTA_AUTH_KEY") or ""
-    crypt_key: str = os.getenv("AGENTA_CRYPT_KEY") or ""
+    auth_key: str = os.getenv("AGENTA_AUTH_KEY") or "replace-me"
+    crypt_key: str = os.getenv("AGENTA_CRYPT_KEY") or "replace-me"
 
     runtime_prefix: str = os.getenv("AGENTA_RUNTIME_PREFIX") or ""
 
     auto_migrations: bool = (
-        os.getenv("AGENTA_AUTO_MIGRATIONS") or "true"
+        os.getenv("ALEMBIC_AUTO_MIGRATIONS")
+        or os.getenv("AGENTA_AUTO_MIGRATIONS")
+        or "true"
     ).lower() in _TRUTHY
 
     demos: str = os.getenv("AGENTA_DEMOS") or ""
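`AGENTA_CACHE_ENABLED` above and `auto_migrations` share the same truthiness convention: lower-case the raw value and accept only "true" or "1" (RedisConfig spells the set inline, AgentaConfig uses the module-level `_TRUTHY` constant). A hypothetical `env_flag` helper mirroring the `or`-chained variant used for `auto_migrations`:

```python
import os


def env_flag(name: str, default: str = "true") -> bool:
    # Lower-case the raw value; only "true" and "1" count as enabled.
    return (os.getenv(name) or default).lower() in ("true", "1")


os.environ.pop("AGENTA_CACHE_ENABLED", None)
assert env_flag("AGENTA_CACHE_ENABLED") is True   # unset falls back to "true"

os.environ["AGENTA_CACHE_ENABLED"] = "TRUE"
assert env_flag("AGENTA_CACHE_ENABLED") is True   # case-insensitive

os.environ["AGENTA_CACHE_ENABLED"] = "yes"
assert env_flag("AGENTA_CACHE_ENABLED") is False  # "yes" is not recognized
```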
@@ -375,22 +552,9 @@ class PostgresConfig(BaseModel):
         f"postgresql://username:password@postgres:5432/agenta_{_LICENSE}_supertokens"
     )
 
-    username: str = (
-        os.getenv("POSTGRES_USERNAME")
-        #
-        or os.getenv("POSTGRES_USER")
-        or "username"
-    )
+    username: str = os.getenv("POSTGRES_USER") or "username"
     password: str = os.getenv("POSTGRES_PASSWORD") or "password"
 
-    username_admin: str = (
-        os.getenv("POSTGRES_USERNAME_ADMIN")
-        #
-        or os.getenv("POSTGRES_USER_ADMIN")
-        or "username"
-    )
-    password_admin: str = os.getenv("POSTGRES_PASSWORD_ADMIN") or "password"
-
     model_config = ConfigDict(extra="ignore")
diff --git a/api/oss/tests/legacy/old_tests/variants_main_router/conftest.py b/api/oss/tests/legacy/old_tests/variants_main_router/conftest.py
index 313ba8bd6a..ff2bb51216 100644
--- a/api/oss/tests/legacy/old_tests/variants_main_router/conftest.py
+++ b/api/oss/tests/legacy/old_tests/variants_main_router/conftest.py
@@ -65,11 +65,11 @@ async def get_second_user_object():
 async def get_or_create_project_from_db():
     async with engine.core_session() as session:
         result = await session.execute(
-            select(ProjectDB).filter_by(project_name="Default Project", is_default=True)
+            select(ProjectDB).filter_by(project_name="Default", is_default=True)
         )
         project = result.scalars().first()
         if project is None:
-            create_project = ProjectDB(project_name="Default Project", is_default=True)
+            create_project = ProjectDB(project_name="Default", is_default=True)
             session.add(create_project)
             await session.commit()
             await session.refresh(create_project)
diff --git a/api/oss/tests/manual/annotations/crud.http b/api/oss/tests/manual/annotations/crud.http
index daf6295322..20fa119c6f 100644
--- a/api/oss/tests/manual/annotations/crud.http
+++ b/api/oss/tests/manual/annotations/crud.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/annotations
 
 ###
diff --git a/api/oss/tests/manual/auth/admin.http b/api/oss/tests/manual/auth/admin.http
index e31140d3b5..bac6786f20 100644
--- a/api/oss/tests/manual/auth/admin.http
+++ b/api/oss/tests/manual/auth/admin.http
@@ -1,6 +1,6 @@
 @host = http://localhost
 @base_url = {{host}}/api
-@access_token = change-me
+@access_token = replace-me
 
 ###
 
diff --git a/api/oss/tests/manual/evaluations/crud.http b/api/oss/tests/manual/evaluations/crud.http
index aa0ef43d36..a2687e9fd2 100644
--- a/api/oss/tests/manual/evaluations/crud.http
+++ b/api/oss/tests/manual/evaluations/crud.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/evaluations
 
 ###
diff --git a/api/oss/tests/manual/evaluations/live.http b/api/oss/tests/manual/evaluations/live.http
index 6a43280046..7d28c9d76a 100644
--- a/api/oss/tests/manual/evaluations/live.http
+++ b/api/oss/tests/manual/evaluations/live.http
@@ -1,4 +1,4 @@
-@auth_key = {{$dotenv.AGENTA_AUTH_KEY}} || change-me
+@auth_key = {{$dotenv.AGENTA_AUTH_KEY}} || replace-me
 @api_url = {{$dotenv AGENTA_API_URL}}
 @api_key = {{$dotenv AGENTA_API_KEY}}
 
diff --git a/api/oss/tests/manual/evaluators/crud.http b/api/oss/tests/manual/evaluators/crud.http
index 21b60c97b5..9efda040c4 100644
--- a/api/oss/tests/manual/evaluators/crud.http
+++ b/api/oss/tests/manual/evaluators/crud.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/evaluators
 
 ###
diff --git a/api/oss/tests/manual/folders/crud.http b/api/oss/tests/manual/folders/crud.http
index e443744f66..4c3d6486d3 100644
--- a/api/oss/tests/manual/folders/crud.http
+++ b/api/oss/tests/manual/folders/crud.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me-auth
+@token = replace-me
 @api_url = {{host}}/api
 @folders_url = {{api_url}}/folders
 @apps_url = {{api_url}}/apps
diff --git a/api/oss/tests/manual/testsets/crud.http b/api/oss/tests/manual/testsets/crud.http
index 72f3149fcc..1b2d197c91 100644
--- a/api/oss/tests/manual/testsets/crud.http
+++ b/api/oss/tests/manual/testsets/crud.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/testsets
 
 ###
diff --git a/api/oss/tests/manual/testsets/testcase-inclusion.http b/api/oss/tests/manual/testsets/testcase-inclusion.http
index a8db7ac89f..9cba2e66c4 100644
--- a/api/oss/tests/manual/testsets/testcase-inclusion.http
+++ b/api/oss/tests/manual/testsets/testcase-inclusion.http
@@ -26,7 +26,7 @@
 # ============================================================================
 
 @host = http://localhost
-@token = change-me-auth
+@token = replace-me
 @base_url = {{host}}/api/preview/testsets
 @simple_base_url = {{host}}/api/preview/simple/testsets
diff --git a/api/oss/tests/manual/tracing/crud.http b/api/oss/tests/manual/tracing/crud.http
index e5580efa47..8d532b9541 100644
--- a/api/oss/tests/manual/tracing/crud.http
+++ b/api/oss/tests/manual/tracing/crud.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/00_user_id.http b/api/oss/tests/manual/tracing/filtering/00_user_id.http
index 6a39dbd828..c3dc743a6c 100644
--- a/api/oss/tests/manual/tracing/filtering/00_user_id.http
+++ b/api/oss/tests/manual/tracing/filtering/00_user_id.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/01_trace_id.http b/api/oss/tests/manual/tracing/filtering/01_trace_id.http
index 3f86d6575c..906cd2d59f 100644
--- a/api/oss/tests/manual/tracing/filtering/01_trace_id.http
+++ b/api/oss/tests/manual/tracing/filtering/01_trace_id.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/02_span_id.http b/api/oss/tests/manual/tracing/filtering/02_span_id.http
index 33973dba57..26ce14bc5c 100644
--- a/api/oss/tests/manual/tracing/filtering/02_span_id.http
+++ b/api/oss/tests/manual/tracing/filtering/02_span_id.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/03_parent_id.http b/api/oss/tests/manual/tracing/filtering/03_parent_id.http
index 5fa8782df0..29d8907115 100644
--- a/api/oss/tests/manual/tracing/filtering/03_parent_id.http
+++ b/api/oss/tests/manual/tracing/filtering/03_parent_id.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/04_span_kind.http b/api/oss/tests/manual/tracing/filtering/04_span_kind.http
index c4a757d7b5..6e406201d7 100644
--- a/api/oss/tests/manual/tracing/filtering/04_span_kind.http
+++ b/api/oss/tests/manual/tracing/filtering/04_span_kind.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/05_span_name.http b/api/oss/tests/manual/tracing/filtering/05_span_name.http
index 894c42e445..27c71516a3 100644
--- a/api/oss/tests/manual/tracing/filtering/05_span_name.http
+++ b/api/oss/tests/manual/tracing/filtering/05_span_name.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/06_start_time.http b/api/oss/tests/manual/tracing/filtering/06_start_time.http
index 1f20b6bc36..a399834fd4 100644
--- a/api/oss/tests/manual/tracing/filtering/06_start_time.http
+++ b/api/oss/tests/manual/tracing/filtering/06_start_time.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/07_end_time.http b/api/oss/tests/manual/tracing/filtering/07_end_time.http
index f59f4baeb1..c061382cd9 100644
--- a/api/oss/tests/manual/tracing/filtering/07_end_time.http
+++ b/api/oss/tests/manual/tracing/filtering/07_end_time.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/08_status_code.http b/api/oss/tests/manual/tracing/filtering/08_status_code.http
index 7a9860c7d7..2133400e0a 100644
--- a/api/oss/tests/manual/tracing/filtering/08_status_code.http
+++ b/api/oss/tests/manual/tracing/filtering/08_status_code.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/09_status_message.http b/api/oss/tests/manual/tracing/filtering/09_status_message.http
index 6f8d4f59c4..d603b37536 100644
--- a/api/oss/tests/manual/tracing/filtering/09_status_message.http
+++ b/api/oss/tests/manual/tracing/filtering/09_status_message.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/10_attributes.http b/api/oss/tests/manual/tracing/filtering/10_attributes.http
index fe4006a974..5f416b4e85 100644
--- a/api/oss/tests/manual/tracing/filtering/10_attributes.http
+++ b/api/oss/tests/manual/tracing/filtering/10_attributes.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/11_links.http b/api/oss/tests/manual/tracing/filtering/11_links.http
index 68fd7bec53..46ba66f4f1 100644
--- a/api/oss/tests/manual/tracing/filtering/11_links.http
+++ b/api/oss/tests/manual/tracing/filtering/11_links.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/tracing/filtering/12_references.http b/api/oss/tests/manual/tracing/filtering/12_references.http
index 271a374f44..37cb9451e8 100644
--- a/api/oss/tests/manual/tracing/filtering/12_references.http
+++ b/api/oss/tests/manual/tracing/filtering/12_references.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/tracing
 
 ###
diff --git a/api/oss/tests/manual/workflows/artifacts.http b/api/oss/tests/manual/workflows/artifacts.http
index d5e3f24693..bafd14e017 100644
--- a/api/oss/tests/manual/workflows/artifacts.http
+++ b/api/oss/tests/manual/workflows/artifacts.http
@@ -1,5 +1,5 @@
 @host = http://localhost
-@token = change-me
+@token = replace-me
 @base_url = {{host}}/api/preview/workflows
 
 ###
diff --git a/api/poetry.lock b/api/poetry.lock
index ce0474f070..ff8191c37a 100644
--- a/api/poetry.lock
+++ b/api/poetry.lock
@@ -2,36 +2,33 @@
 [[package]]
 name = "agenta"
-version = "0.72.1"
+version = "0.74.0"
 description = "The SDK for agenta is an open-source LLMOps platform."
optional = false python-versions = "<4.0,>=3.11" groups = ["main"] files = [ - {file = "agenta-0.72.1-py3-none-any.whl", hash = "sha256:d1b1dd36cc18e25f7b5ab20bb19c24bce0ca3eaeca32edabf539adf16f6c4c57"}, - {file = "agenta-0.72.1.tar.gz", hash = "sha256:c1e32fff97131f2466b7861f4a3ca1dcceea752a01ead9b063cbb0ba8bded4f8"}, + {file = "agenta-0.74.0-py3-none-any.whl", hash = "sha256:a91385e9ab856f7bbaa36f4787a4b29ee0d50f957300b8512b5482095b6eff4f"}, + {file = "agenta-0.74.0.tar.gz", hash = "sha256:3c109d26cb590b96ca92ea0d8a0406974620e4eb1defa64a90b3a2ea2d247b06"}, ] [package.dependencies] -daytona = ">=0.121.0,<0.122.0" -fastapi = ">=0.125" +daytona = ">=0.128,<0.129" +fastapi = ">=0.127" httpx = ">=0.28,<0.29" -importlib-metadata = ">=8,<9" jinja2 = ">=3,<4" litellm = ">=1,<2" openai = ">=2,<3" opentelemetry-api = ">=1,<2" opentelemetry-exporter-otlp-proto-http = ">=1,<2" -opentelemetry-instrumentation = ">=0.59b0,<0.60" +opentelemetry-instrumentation = ">=0.60b1,<0.61" opentelemetry-sdk = ">=1,<2" orjson = ">=3,<4" pydantic = ">=2,<3" -python-dotenv = ">=1,<2" python-jsonpath = ">=2,<3" pyyaml = ">=6,<7" restrictedpython = {version = ">=8,<9", markers = "python_version >= \"3.11\" and python_version < \"3.14\""} structlog = ">=25,<26" -toml = ">=0.10,<0.11" [[package]] name = "aiofiles" @@ -59,132 +56,132 @@ files = [ [[package]] name = "aiohttp" -version = "3.13.2" +version = "3.13.3" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2372b15a5f62ed37789a6b383ff7344fc5b9f243999b0cd9b629d8bc5f5b4155"}, - {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7f8659a48995edee7229522984bd1009c1213929c769c2daa80b40fe49a180c"}, - {file = "aiohttp-3.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:939ced4a7add92296b0ad38892ce62b98c619288a081170695c6babe4f50e636"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6315fb6977f1d0dd41a107c527fee2ed5ab0550b7d885bc15fee20ccb17891da"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e7352512f763f760baaed2637055c49134fd1d35b37c2dedfac35bfe5cf8725"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e09a0a06348a2dd73e7213353c90d709502d9786219f69b731f6caa0efeb46f5"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a09a6d073fb5789456545bdee2474d14395792faa0527887f2f4ec1a486a59d3"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b59d13c443f8e049d9e94099c7e412e34610f1f49be0f230ec656a10692a5802"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:20db2d67985d71ca033443a1ba2001c4b5693fe09b0e29f6d9358a99d4d62a8a"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:960c2fc686ba27b535f9fd2b52d87ecd7e4fd1cf877f6a5cba8afb5b4a8bd204"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6c00dbcf5f0d88796151e264a8eab23de2997c9303dd7c0bf622e23b24d3ce22"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:fed38a5edb7945f4d1bcabe2fcd05db4f6ec7e0e82560088b754f7e08d93772d"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:b395bbca716c38bef3c764f187860e88c724b342c26275bc03e906142fc5964f"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:204ffff2426c25dfda401ba08da85f9c59525cdc42bda26660463dd1cbcfec6f"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:05c4dd3c48fb5f15db31f57eb35374cb0c09afdde532e7fb70a75aede0ed30f6"}, - {file = "aiohttp-3.13.2-cp310-cp310-win32.whl", hash = "sha256:e574a7d61cf10351d734bcddabbe15ede0eaa8a02070d85446875dc11189a251"}, - {file = "aiohttp-3.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:364f55663085d658b8462a1c3f17b2b84a5c2e1ba858e1b79bff7b2e24ad1514"}, - {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0"}, - {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb"}, - {file = "aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8"}, - {file = "aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec"}, - {file = "aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c"}, - {file = 
"aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b"}, - {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc"}, - {file = "aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248"}, - {file = "aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e"}, - {file = "aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45"}, - {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be"}, - {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742"}, - {file = "aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23"}, - {file = "aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254"}, - {file = "aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a"}, - {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b"}, - {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61"}, - {file = "aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a"}, - {file = "aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940"}, - {file = "aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4"}, - {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673"}, - {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd"}, - {file = "aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = 
"sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c"}, - {file = "aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734"}, - {file = "aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f"}, - {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7fbdf5ad6084f1940ce88933de34b62358d0f4a0b6ec097362dcd3e5a65a4989"}, - {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7c3a50345635a02db61792c85bb86daffac05330f6473d524f1a4e3ef9d0046d"}, - {file = "aiohttp-3.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e87dff73f46e969af38ab3f7cb75316a7c944e2e574ff7c933bc01b10def7f5"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2adebd4577724dcae085665f294cc57c8701ddd4d26140504db622b8d566d7aa"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e036a3a645fe92309ec34b918394bb377950cbb43039a97edae6c08db64b23e2"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:23ad365e30108c422d0b4428cf271156dd56790f6dd50d770b8e360e6c5ab2e6"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1f9b2c2d4b9d958b1f9ae0c984ec1dd6b6689e15c75045be8ccb4011426268ca"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a92cf4b9bea33e15ecbaa5c59921be0f23222608143d025c989924f7e3e0c07"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:070599407f4954021509193404c4ac53153525a19531051661440644728ba9a7"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:29562998ec66f988d49fb83c9b01694fa927186b781463f376c5845c121e4e0b"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4dd3db9d0f4ebca1d887d76f7cdbcd1116ac0d05a9221b9dad82c64a62578c4d"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d7bc4b7f9c4921eba72677cd9fedd2308f4a4ca3e12fab58935295ad9ea98700"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dacd50501cd017f8cccb328da0c90823511d70d24a323196826d923aad865901"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8b2f1414f6a1e0683f212ec80e813f4abef94c739fd090b66c9adf9d2a05feac"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04c3971421576ed24c191f610052bcb2f059e395bc2489dd99e397f9bc466329"}, - {file = "aiohttp-3.13.2-cp39-cp39-win32.whl", hash = "sha256:9f377d0a924e5cc94dc620bc6366fc3e889586a7f18b748901cf016c916e2084"}, - {file = "aiohttp-3.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:9c705601e16c03466cb72011bd1af55d68fa65b045356d8f96c216e5f6db0fa5"}, - {file = "aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca"}, + {file = "aiohttp-3.13.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5a372fd5afd301b3a89582817fdcdb6c34124787c70dbcc616f259013e7eef7"}, + {file = "aiohttp-3.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:147e422fd1223005c22b4fe080f5d93ced44460f5f9c105406b753612b587821"}, + {file = "aiohttp-3.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:859bd3f2156e81dd01432f5849fc73e2243d4a487c4fd26609b1299534ee1845"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dca68018bf48c251ba17c72ed479f4dafe9dbd5a73707ad8d28a38d11f3d42af"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fee0c6bc7db1de362252affec009707a17478a00ec69f797d23ca256e36d5940"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c048058117fd649334d81b4b526e94bde3ccaddb20463a815ced6ecbb7d11160"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:215a685b6fbbfcf71dfe96e3eba7a6f58f10da1dfdf4889c7dd856abe430dca7"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2c184bb1fe2cbd2cefba613e9db29a5ab559323f994b6737e370d3da0ac455"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:75ca857eba4e20ce9f546cd59c7007b33906a4cd48f2ff6ccf1ccfc3b646f279"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81e97251d9298386c2b7dbeb490d3d1badbdc69107fb8c9299dd04eb39bddc0e"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c0e2d366af265797506f0283487223146af57815b388623f0357ef7eac9b209d"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4e239d501f73d6db1522599e14b9b321a7e3b1de66ce33d53a765d975e9f4808"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0db318f7a6f065d84cb1e02662c526294450b314a02bd9e2a8e67f0d8564ce40"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:bfc1cc2fe31a6026a8a88e4ecfb98d7f6b1fec150cfd708adbfd1d2f42257c29"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af71fff7bac6bb7508956696dce8f6eec2bbb045eceb40343944b1ae62b5ef11"}, + {file = "aiohttp-3.13.3-cp310-cp310-win32.whl", hash = "sha256:37da61e244d1749798c151421602884db5270faf479cf0ef03af0ff68954c9dd"}, + {file = "aiohttp-3.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:7e63f210bc1b57ef699035f2b4b6d9ce096b5914414a49b0997c839b2bd2223c"}, + {file = "aiohttp-3.13.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b6073099fb654e0a068ae678b10feff95c5cae95bbfcbfa7af669d361a8aa6b"}, + {file = "aiohttp-3.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cb93e166e6c28716c8c6aeb5f99dfb6d5ccf482d29fe9bf9a794110e6d0ab64"}, + {file = "aiohttp-3.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e027cf2f6b641693a09f631759b4d9ce9165099d2b5d92af9bd4e197690eea"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b61b7169ababd7802f9568ed96142616a9118dd2be0d1866e920e77ec8fa92a"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:80dd4c21b0f6237676449c6baaa1039abae86b91636b6c91a7f8e61c87f89540"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65d2ccb7eabee90ce0503c17716fc77226be026dcc3e65cce859a30db715025b"}, + {file = 
"aiohttp-3.13.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5b179331a481cb5529fca8b432d8d3c7001cb217513c94cd72d668d1248688a3"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d4c940f02f49483b18b079d1c27ab948721852b281f8b015c058100e9421dd1"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f9444f105664c4ce47a2a7171a2418bce5b7bae45fb610f4e2c36045d85911d3"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:694976222c711d1d00ba131904beb60534f93966562f64440d0c9d41b8cdb440"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f33ed1a2bf1997a36661874b017f5c4b760f41266341af36febaf271d179f6d7"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e636b3c5f61da31a92bf0d91da83e58fdfa96f178ba682f11d24f31944cdd28c"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5d2d94f1f5fcbe40838ac51a6ab5704a6f9ea42e72ceda48de5e6b898521da51"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2be0e9ccf23e8a94f6f0650ce06042cefc6ac703d0d7ab6c7a917289f2539ad4"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9af5e68ee47d6534d36791bbe9b646d2a7c7deb6fc24d7943628edfbb3581f29"}, + {file = "aiohttp-3.13.3-cp311-cp311-win32.whl", hash = "sha256:a2212ad43c0833a873d0fb3c63fa1bacedd4cf6af2fee62bf4b739ceec3ab239"}, + {file = "aiohttp-3.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:642f752c3eb117b105acbd87e2c143de710987e09860d674e068c4c2c441034f"}, + {file = "aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c"}, + {file = "aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168"}, + {file = "aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = 
"sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a"}, + {file = "aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046"}, + {file = "aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57"}, + {file = "aiohttp-3.13.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5dff64413671b0d3e7d5918ea490bdccb97a4ad29b3f311ed423200b2203e01c"}, + {file = "aiohttp-3.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:87b9aab6d6ed88235aa2970294f496ff1a1f9adcd724d800e9b952395a80ffd9"}, + {file = "aiohttp-3.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:425c126c0dc43861e22cb1c14ba4c8e45d09516d0a3ae0a3f7494b79f5f233a3"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f9120f7093c2a32d9647abcaf21e6ad275b4fbec5b55969f978b1a97c7c86bf"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:697753042d57f4bf7122cab985bf15d0cef23c770864580f5af4f52023a56bd6"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6de499a1a44e7de70735d0b39f67c8f25eb3d91eb3103be99ca0fa882cdd987d"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:37239e9f9a7ea9ac5bf6b92b0260b01f8a22281996da609206a84df860bc1261"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f76c1e3fe7d7c8afad7ed193f89a292e1999608170dcc9751a7462a87dfd5bc0"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fc290605db2a917f6e81b0e1e0796469871f5af381ce15c604a3c5c7e51cb730"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4021b51936308aeea0367b8f006dc999ca02bc118a0cc78c303f50a2ff6afb91"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:49a03727c1bba9a97d3e93c9f93ca03a57300f484b6e935463099841261195d3"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3d9908a48eb7416dc1f4524e69f1d32e5d90e3981e4e37eb0aa1cd18f9cfa2a4"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2712039939ec963c237286113c68dbad80a82a4281543f3abf766d9d73228998"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7bfdc049127717581866fa4708791220970ce291c23e28ccf3922c700740fdc0"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8057c98e0c8472d8846b9c79f56766bcc57e3e8ac7bfd510482332366c56c591"}, + {file = "aiohttp-3.13.3-cp313-cp313-win32.whl", hash = "sha256:1449ceddcdbcf2e0446957863af03ebaaa03f94c090f945411b61269e2cb5daf"}, + {file = 
"aiohttp-3.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:693781c45a4033d31d4187d2436f5ac701e7bbfe5df40d917736108c1cc7436e"}, + {file = "aiohttp-3.13.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:ea37047c6b367fd4bd632bff8077449b8fa034b69e812a18e0132a00fae6e808"}, + {file = "aiohttp-3.13.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6fc0e2337d1a4c3e6acafda6a78a39d4c14caea625124817420abceed36e2415"}, + {file = "aiohttp-3.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c685f2d80bb67ca8c3837823ad76196b3694b0159d232206d1e461d3d434666f"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e377758516d262bde50c2584fc6c578af272559c409eecbdd2bae1601184d6"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:34749271508078b261c4abb1767d42b8d0c0cc9449c73a4df494777dc55f0687"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:82611aeec80eb144416956ec85b6ca45a64d76429c1ed46ae1b5f86c6e0c9a26"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fff83cfc93f18f215896e3a190e8e5cb413ce01553901aca925176e7568963a"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bbe7d4cecacb439e2e2a8a1a7b935c25b812af7a5fd26503a66dadf428e79ec1"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b928f30fe49574253644b1ca44b1b8adbd903aa0da4b9054a6c20fc7f4092a25"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7b5e8fe4de30df199155baaf64f2fcd604f4c678ed20910db8e2c66dc4b11603"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:8542f41a62bcc58fc7f11cf7c90e0ec324ce44950003feb70640fc2a9092c32a"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5e1d8c8b8f1d91cd08d8f4a3c2b067bfca6ec043d3ff36de0f3a715feeedf926"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:90455115e5da1c3c51ab619ac57f877da8fd6d73c05aacd125c5ae9819582aba"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:042e9e0bcb5fba81886c8b4fbb9a09d6b8a00245fd8d88e4d989c1f96c74164c"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2eb752b102b12a76ca02dff751a801f028b4ffbbc478840b473597fc91a9ed43"}, + {file = "aiohttp-3.13.3-cp314-cp314-win32.whl", hash = "sha256:b556c85915d8efaed322bf1bdae9486aa0f3f764195a0fb6ee962e5c71ef5ce1"}, + {file = "aiohttp-3.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9bf9f7a65e7aa20dd764151fb3d616c81088f91f8df39c3893a536e279b4b984"}, + {file = "aiohttp-3.13.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:05861afbbec40650d8a07ea324367cb93e9e8cc7762e04dd4405df99fa65159c"}, + {file = "aiohttp-3.13.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2fc82186fadc4a8316768d61f3722c230e2c1dcab4200d52d2ebdf2482e47592"}, + {file = "aiohttp-3.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0add0900ff220d1d5c5ebbf99ed88b0c1bbf87aa7e4262300ed1376a6b13414f"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:568f416a4072fbfae453dcf9a99194bbb8bdeab718e08ee13dfa2ba0e4bebf29"}, + {file = 
"aiohttp-3.13.3-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:add1da70de90a2569c5e15249ff76a631ccacfe198375eead4aadf3b8dc849dc"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b47b7ba335d2e9b1239fa571131a87e2d8ec96b333e68b2a305e7a98b0bae2"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3dd4dce1c718e38081c8f35f323209d4c1df7d4db4bab1b5c88a6b4d12b74587"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34bac00a67a812570d4a460447e1e9e06fae622946955f939051e7cc895cfab8"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a19884d2ee70b06d9204b2727a7b9f983d0c684c650254679e716b0b77920632"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ca7f2bb6ba8348a3614c7918cc4bb73268c5ac2a207576b7afea19d3d9f64"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:b0d95340658b9d2f11d9697f59b3814a9d3bb4b7a7c20b131df4bcef464037c0"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1e53262fd202e4b40b70c3aff944a8155059beedc8a89bba9dc1f9ef06a1b56"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:d60ac9663f44168038586cab2157e122e46bdef09e9368b37f2d82d354c23f72"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:90751b8eed69435bac9ff4e3d2f6b3af1f57e37ecb0fbeee59c0174c9e2d41df"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa"}, + {file = "aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767"}, + {file = "aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344"}, + {file = "aiohttp-3.13.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31a83ea4aead760dfcb6962efb1d861db48c34379f2ff72db9ddddd4cda9ea2e"}, + {file = "aiohttp-3.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:988a8c5e317544fdf0d39871559e67b6341065b87fceac641108c2096d5506b7"}, + {file = "aiohttp-3.13.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b174f267b5cfb9a7dba9ee6859cecd234e9a681841eb85068059bc867fb8f02"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:947c26539750deeaee933b000fb6517cc770bbd064bad6033f1cff4803881e43"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9ebf57d09e131f5323464bd347135a88622d1c0976e88ce15b670e7ad57e4bd6"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4ae5b5a0e1926e504c81c5b84353e7a5516d8778fbbff00429fe7b05bb25cbce"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2ba0eea45eb5cc3172dbfc497c066f19c41bac70963ea1a67d51fc92e4cf9a80"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bae5c2ed2eae26cc382020edad80d01f36cb8e746da40b292e68fec40421dc6a"}, + {file = 
"aiohttp-3.13.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a60e60746623925eab7d25823329941aee7242d559baa119ca2b253c88a7bd6"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e50a2e1404f063427c9d027378472316201a2290959a295169bcf25992d04558"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:9a9dc347e5a3dc7dfdbc1f82da0ef29e388ddb2ed281bfce9dd8248a313e62b7"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b46020d11d23fe16551466c77823df9cc2f2c1e63cc965daf67fa5eec6ca1877"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:69c56fbc1993fa17043e24a546959c0178fe2b5782405ad4559e6c13975c15e3"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b99281b0704c103d4e11e72a76f1b543d4946fea7dd10767e7e1b5f00d4e5704"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:40c5e40ecc29ba010656c18052b877a1c28f84344825efa106705e835c28530f"}, + {file = "aiohttp-3.13.3-cp39-cp39-win32.whl", hash = "sha256:56339a36b9f1fc708260c76c87e593e2afb30d26de9ae1eb445b5e051b98a7a1"}, + {file = "aiohttp-3.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:c6b8568a3bb5819a0ad087f16d40e5a3fb6099f39ea1d5625a3edc1e923fc538"}, + {file = "aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88"}, ] [package.dependencies] @@ -197,7 +194,7 @@ propcache = ">=0.2.0" yarl = ">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi"] +speedups = ["Brotli (>=1.2)", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi (>=1.2)"] [[package]] name = "aiohttp-retry" @@ -292,14 +289,14 @@ files = [ [[package]] name = "anyio" -version = "4.12.0" +version = "4.12.1" description = "High-level concurrency and networking framework on top of asyncio or Trio" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb"}, - {file = "anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0"}, + {file = "anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c"}, + {file = "anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703"}, ] [package.dependencies] @@ -460,14 +457,14 @@ files = [ [[package]] name = "certifi" -version = "2025.11.12" +version = "2026.1.4" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"}, - {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"}, + {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, + {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, ] [[package]] @@ -798,41 +795,41 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "daytona" -version = "0.121.0" +version = "0.128.1" description = "Python SDK for Daytona" optional = false python-versions = "<4.0,>=3.9" groups = ["main"] files = [ - {file = "daytona-0.121.0-py3-none-any.whl", hash = "sha256:33b92a33729d8866606571cb6924c05d7d471dd281ca1440a5c2342e3f65a737"}, - {file = "daytona-0.121.0.tar.gz", hash = "sha256:7ef327645ea64c99319217a3491ea26878da2e3b9891f53e15878eaaf25f20ec"}, + {file = "daytona-0.128.1-py3-none-any.whl", hash = "sha256:d00536a899d5bab299837d0ad7d59a8c9cb1e76d5618f01fd0106fc59fbeac69"}, + {file = "daytona-0.128.1.tar.gz", hash = "sha256:4d9ab0cc2504b48223dbd2f4429b304b484292cdff3d942af8080bb9f359d6b1"}, ] [package.dependencies] aiofiles = ">=24.1.0,<24.2.0" -daytona-api-client = "0.121.0" -daytona-api-client-async = "0.121.0" -daytona-toolbox-api-client = "0.121.0" -daytona-toolbox-api-client-async = "0.121.0" +daytona-api-client = "0.128.1" +daytona-api-client-async = "0.128.1" +daytona-toolbox-api-client = "0.128.1" +daytona-toolbox-api-client-async = "0.128.1" Deprecated = ">=1.2.18,<2.0.0" environs = ">=10.0.0,<15.0.0" httpx = ">=0.28.0,<0.29.0" multipart = ">=1.0.0,<2.0.0" -obstore = ">=0.7.0,<0.8.0" +obstore = ">=0.8.0,<0.9.0" pydantic = ">=2.4.2,<3.0.0" toml = ">=0.10.0,<0.11.0" websockets = ">=15.0.0,<16.0.0" [[package]] name = "daytona-api-client" -version = "0.121.0" +version = "0.128.1" description = "Daytona" optional = false python-versions = "<4.0,>=3.8" groups = ["main"] files = [ - {file = "daytona_api_client-0.121.0-py3-none-any.whl", hash = "sha256:01fa21e8366436a55ef41c1830a14e3587dbaaf707100403207df29930859693"}, - {file = "daytona_api_client-0.121.0.tar.gz", hash = "sha256:9af863df12ad561b4609363fde11d96bd1e3e867ec1d7e1858cb78b9783d1df8"}, + {file = "daytona_api_client-0.128.1-py3-none-any.whl", hash = "sha256:eede1d93ec8995d7280e077127018508b94a488c6288363f50794128af56d339"}, + {file = "daytona_api_client-0.128.1.tar.gz", hash = "sha256:e9db105bf5ea7ad4b55431e3bb7db1e3a8937557ffbca7dba6167bc5a6a63c96"}, ] [package.dependencies] @@ -843,14 +840,14 @@ urllib3 = ">=2.1.0,<3.0.0" [[package]] name = "daytona-api-client-async" -version = "0.121.0" +version = "0.128.1" description = "Daytona" optional = false python-versions = "<4.0,>=3.8" groups = ["main"] files = [ - {file = "daytona_api_client_async-0.121.0-py3-none-any.whl", hash = "sha256:de7c90046c2edb38f9010cd3379d703f6473ef706ea9c22e6300c110fb50f391"}, - {file = "daytona_api_client_async-0.121.0.tar.gz", hash = "sha256:81f93bd4b26d1c4cb76844a516ce2eeb2610342142360ffd7b658c7115fba4cf"}, + {file = "daytona_api_client_async-0.128.1-py3-none-any.whl", hash = "sha256:c0fb378e0df95fe0ae125d9f60cebfd4badc08e089d18584bfa6158a0002893b"}, + {file = "daytona_api_client_async-0.128.1.tar.gz", hash = "sha256:2fb7507cb4122ae2011aa1f52a38556c1ce9c137173648aa96ca227ef072eadd"}, ] [package.dependencies] @@ -863,14 +860,14 @@ 
urllib3 = ">=2.1.0,<3.0.0" [[package]] name = "daytona-toolbox-api-client" -version = "0.121.0" +version = "0.128.1" description = "Daytona Daemon API" optional = false python-versions = "<4.0,>=3.8" groups = ["main"] files = [ - {file = "daytona_toolbox_api_client-0.121.0-py3-none-any.whl", hash = "sha256:9d121959b446b85e5cdf00eaa15a2bdc2af8d61cae121bef50b940ebc73f5cad"}, - {file = "daytona_toolbox_api_client-0.121.0.tar.gz", hash = "sha256:b248652d2279562bc199943d240f6feb18c9f116040e778376961d5d9fb5e043"}, + {file = "daytona_toolbox_api_client-0.128.1-py3-none-any.whl", hash = "sha256:dd22da7e7fc823802c657a35996f1f4824deeda1b248192504c67e6936a1a120"}, + {file = "daytona_toolbox_api_client-0.128.1.tar.gz", hash = "sha256:869ee431f485ed535868a93154e29c10e46fb2c36a0a7af79020385830e23c8f"}, ] [package.dependencies] @@ -881,14 +878,14 @@ urllib3 = ">=2.1.0,<3.0.0" [[package]] name = "daytona-toolbox-api-client-async" -version = "0.121.0" +version = "0.128.1" description = "Daytona Daemon API" optional = false python-versions = "<4.0,>=3.8" groups = ["main"] files = [ - {file = "daytona_toolbox_api_client_async-0.121.0-py3-none-any.whl", hash = "sha256:b7e5a09ce06f4e1a918ad852e3b191bb9d6fdfb142d9c32d39d0a253b4c096fe"}, - {file = "daytona_toolbox_api_client_async-0.121.0.tar.gz", hash = "sha256:9829d79be0c78689f6164e5386d15f2bc11c357330d6223df80448b51e987696"}, + {file = "daytona_toolbox_api_client_async-0.128.1-py3-none-any.whl", hash = "sha256:2ae4afb86f2d2568bd1df031764df0d41837e8ccab2200f2ed70fd61d18231d3"}, + {file = "daytona_toolbox_api_client_async-0.128.1.tar.gz", hash = "sha256:d9ef0ec4d17fcc611e5c8d17ae300afb825b32bf8346fa6a2a8576d760ef0304"}, ] [package.dependencies] @@ -929,6 +926,43 @@ files = [ {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, ] +[[package]] +name = "dnspython" +version = "2.8.0" +description = "DNS toolkit" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "dnspython-2.8.0-py3-none-any.whl", hash = "sha256:01d9bbc4a2d76bf0db7c1f729812ded6d912bd318d3b1cf81d30c0f845dbf3af"}, + {file = "dnspython-2.8.0.tar.gz", hash = "sha256:181d3c6996452cb1189c4046c61599b84a5a86e099562ffde77d26984ff26d0f"}, +] + +[package.extras] +dev = ["black (>=25.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "hypercorn (>=0.17.0)", "mypy (>=1.17)", "pylint (>=3)", "pytest (>=8.4)", "pytest-cov (>=6.2.0)", "quart-trio (>=0.12.0)", "sphinx (>=8.2.0)", "sphinx-rtd-theme (>=3.0.0)", "twine (>=6.1.0)", "wheel (>=0.45.0)"] +dnssec = ["cryptography (>=45)"] +doh = ["h2 (>=4.2.0)", "httpcore (>=1.0.0)", "httpx (>=0.28.0)"] +doq = ["aioquic (>=1.2.0)"] +idna = ["idna (>=3.10)"] +trio = ["trio (>=0.30)"] +wmi = ["wmi (>=1.5.1)"] + +[[package]] +name = "email-validator" +version = "2.3.0" +description = "A robust email address syntax and deliverability validation library." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "email_validator-2.3.0-py3-none-any.whl", hash = "sha256:80f13f623413e6b197ae73bb10bf4eb0908faf509ad8362c5edeb0be7fd450b4"}, + {file = "email_validator-2.3.0.tar.gz", hash = "sha256:9fc05c37f2f6cf439ff414f8fc46d917929974a82244c20eb10231ba60c54426"}, +] + +[package.dependencies] +dnspython = ">=2.0.0" +idna = ">=2.0.0" + [[package]] name = "environs" version = "14.5.0" @@ -982,14 +1016,14 @@ python-dateutil = ">=2.4" [[package]] name = "fastapi" -version = "0.127.0" +version = "0.128.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "fastapi-0.127.0-py3-none-any.whl", hash = "sha256:725aa2bb904e2eff8031557cf4b9b77459bfedd63cae8427634744fd199f6a49"}, - {file = "fastapi-0.127.0.tar.gz", hash = "sha256:5a9246e03dcd1fdb19f1396db30894867c1d630f5107dc167dcbc5ed1ea7d259"}, + {file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"}, + {file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"}, ] [package.dependencies] @@ -1093,14 +1127,14 @@ files = [ [[package]] name = "filelock" -version = "3.20.1" +version = "3.20.2" description = "A platform independent file lock." optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a"}, - {file = "filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c"}, + {file = "filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8"}, + {file = "filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64"}, ] [[package]] @@ -1642,14 +1676,14 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "huggingface-hub" -version = "1.2.3" +version = "1.2.4" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.9.0" groups = ["main"] files = [ - {file = "huggingface_hub-1.2.3-py3-none-any.whl", hash = "sha256:c9b7a91a9eedaa2149cdc12bdd8f5a11780e10de1f1024718becf9e41e5a4642"}, - {file = "huggingface_hub-1.2.3.tar.gz", hash = "sha256:4ba57f17004fd27bb176a6b7107df579865d4cde015112db59184c51f5602ba7"}, + {file = "huggingface_hub-1.2.4-py3-none-any.whl", hash = "sha256:2db69b91877d9d34825f5cd2a63b94f259011a77dcf761b437bf510fbe9522e9"}, + {file = "huggingface_hub-1.2.4.tar.gz", hash = "sha256:7a1d9ec4802e64372d1d152d69fb8e26d943f15a2289096fbc8e09e7b90c21a5"}, ] [package.dependencies] @@ -1662,13 +1696,13 @@ pyyaml = ">=5.1" shellingham = "*" tqdm = ">=4.42.1" typer-slim = "*" -typing-extensions = ">=3.7.4.3" +typing-extensions = ">=4.1.0" [package.extras] all = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] dev = ["Jinja2", "Pillow", "authlib (>=1.3.2)", 
"fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -hf-xet = ["hf-xet (>=1.1.3,<2.0.0)"] +hf-xet = ["hf-xet (>=1.2.0,<2.0.0)"] mcp = ["mcp (>=1.8.0)"] oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"] quality = ["libcst (>=1.4.0)", "mypy (==1.15.0)", "ruff (>=0.9.0)", "ty"] @@ -2068,19 +2102,19 @@ files = [ [[package]] name = "marshmallow" -version = "4.1.2" +version = "4.2.0" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "marshmallow-4.1.2-py3-none-any.whl", hash = "sha256:a8cfa18bd8d0e5f7339e734edf84815fe8db1bdb57358c7ccc05472b746eeadc"}, - {file = "marshmallow-4.1.2.tar.gz", hash = "sha256:083f250643d2e75fd363f256aeb6b1af369a7513ad37647ce4a601f6966e3ba5"}, + {file = "marshmallow-4.2.0-py3-none-any.whl", hash = "sha256:1dc369bd13a8708a9566d6f73d1db07d50142a7580f04fd81e1c29a4d2e10af4"}, + {file = "marshmallow-4.2.0.tar.gz", hash = "sha256:908acabd5aa14741419d3678d3296bda6abe28a167b7dcd05969ceb8256943ac"}, ] [package.extras] dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] -docs = ["autodocsumm (==0.2.14)", "furo (==2025.9.25)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.13.0)"] +docs = ["autodocsumm (==0.2.14)", "furo (==2025.12.19)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.13.0)"] tests = ["pytest", "simplejson"] [[package]] @@ -2299,89 +2333,102 @@ infinite-tracing = ["grpcio", "protobuf"] [[package]] name = "obstore" -version = "0.7.3" +version = "0.8.2" description = "" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "obstore-0.7.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:8c89b6205672490fb99e16159bb290a12d4d8e6f9b27904720faafd4fd8ae436"}, - {file = "obstore-0.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:26357df7b3824f431ced44e26fe334f686410cb5e8c218569759d6aa32ab7242"}, - {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca3380121cc5ce6d040698fcf126c1acab4a00282db5a6bc8e5026bba22fc43d"}, - {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1eca930fa0229f7fd5d881bc03deffca51e96ad754cbf256e4aa27ac7c50db6"}, - {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4b91fec58a65350303b643ce1da7a890fb2cc411c2a9d86672ad30febb196df"}, - {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4eba1c87af7002d95cce8c2c67fac814056938f16500880e1fb908a0e8c7a7f5"}, - {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5e8ad65c5b481f168080db1c5290cf55ad7ab77b45fd467c4d25367db2a3ae"}, - {file = "obstore-0.7.3-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:b680dd856d238a892a14ef3115daee33e267502229cee248266a20e03dbe98d0"}, - {file = "obstore-0.7.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:c3dccb74ebfec1f5517c2160503f30629b62685c78bbe15ad03492969fadd858"}, - {file = "obstore-0.7.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:cd614e53a00d22b2facfd1fb9b516fa210cd788ecce513dd532a8e65fa07d55d"}, - {file = "obstore-0.7.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:32841a2b4bef838412302e9a8612fc3ba1c51bd808b77b4854efe6b1f7a65f0d"}, - {file = "obstore-0.7.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a58f3952b43fb5f7b0f0f9f08272983e4dd50f83b16a05943f89581b0e6bff20"}, - {file = "obstore-0.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:8745e2437e79e073c3cf839454f803909540fa4f6cd9180c9ab4ce742c716c8b"}, - {file = "obstore-0.7.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:65ffe43fd63c9968172bed649fcaf6345b41a124be5d34f46adb94604e9ccef8"}, - {file = "obstore-0.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2947609a1fab1f9b808235a8088e7e99814fbaf3b6000833d760fd90f68fa7cd"}, - {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15409f75acc4e10f924fe118f7018607d6d96a72330ac4cc1663d36b7c6847b1"}, - {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5224d834bbe7a9f2592b130e4ddd86340fa172e5a3a51284e706f6515d95c036"}, - {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b1af6c1a33d98db9954f7ceab8eb5e543aea683a79a0ffd72b6c8d176834a9b"}, - {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:708c27c4e5e85799fe7a2d2ae443fbd96c2ad36b561c815a9b01b5333ab536ad"}, - {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7da327920bef8bbd02445f33947487fe4e94fcb9e084c810108e88be57d0877b"}, - {file = "obstore-0.7.3-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:8f3b23a40ad374fe7a65fab4678a9978978ec83a597156a2a9d1dbeab433a469"}, - {file = "obstore-0.7.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9b3e7d0c7e85e4f67e479f7efab5dea26ceaace10897d639d38f77831ef0cdaf"}, - {file = "obstore-0.7.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:dfee24c5e9d5b7e0f43e4bbf8cc15069e5c60bfdb86873ce97c0eb487afa5da8"}, - {file = "obstore-0.7.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:99e187cee4a6e13605886b906b34fec7ae9902dd25b1e9aafae863a9d55c6e47"}, - {file = "obstore-0.7.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a5de3b0859512b9ddbf57ac34db96ad41fb85fc9597e422916044d1bf550427d"}, - {file = "obstore-0.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:35fdd1cd8856984de1b5a11fced83f6fd6623eb459736e57b9975400ff5baf5a"}, - {file = "obstore-0.7.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6cbe5dde68bf6ab5a88f3bb467ca8f123bcce3efc03e22fd8339688559d36199"}, - {file = "obstore-0.7.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6db23cbcb3aec10e09a31fd0883950cb9b7f77f4fcf1fb0e8a276e1d1961bf3"}, - {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00fde287770bdbdbb06379670d30c257b20e77a4a11b36f1e232b5bc6ef07b7a"}, - {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c420036356269666197f0704392c9495f255bb3ff9b667c69fb49bc65bd50dcd"}, - {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c28482626ca9481569ad16ba0c0c36947ce96e8147c64011dc0af6d58be8ff9c"}, - {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:9cead20055221337ddf218098afe8138f8624395b0cf2a730da72a4523c11b2f"}, - {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c71017142a593022848f4af0ac1e39af1a56927981cc2c89542888edb206eb33"}, - {file = "obstore-0.7.3-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:8aebc2bf796a0d1525318a9ac69608a96d03abc621ca1e6d810e08a70bd695c1"}, - {file = "obstore-0.7.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c0ebf03969b81ee559c377c5ebca9dcdffbef0e6650d43659676aeaeb302a272"}, - {file = "obstore-0.7.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e0f5d97064ec35fdef3079f867afe6fa5e76ab2bb3e809855ab34a1aa34c9dcd"}, - {file = "obstore-0.7.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3a80541671646c5e49493de61361a1851c8c172cf28981b76aa4248a9f02f5b1"}, - {file = "obstore-0.7.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a5ce6385ad89afad106d05d37296f724ba10f8f4e57ab8ad7f4ecce0aa226d3d"}, - {file = "obstore-0.7.3-cp312-cp312-win_amd64.whl", hash = "sha256:632522ba63a44768977defc0a93fc5dd59ea0455bfd6926cd3121971306da4e5"}, - {file = "obstore-0.7.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:dcb71412dc8d2bd464b340d1f36d8c0ceb7894c01c2ceaaa5f2ac45376503fa2"}, - {file = "obstore-0.7.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6d486bb01438039d686401ce4207d82c02b8b639227baa5bdd578efdab388dea"}, - {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaaf0c9223b5592658c131ff32a0574be995c7e237f406266f9a68ea2266769"}, - {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8ae6cde734df3cc542c14152029170d9ae70ce50b957831ed71073113bd3d60"}, - {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30da82ae3bfdf24fa80af38967e323ae8da0bb7c36cce01f0dda7689faaf1272"}, - {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5daa9f912eac8cdf218161d34e13f38cbb594e934eaaf8a7c09dca5a394b231"}, - {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef06cad4e8978d672357b328b4f61c48827b2b79d7eaf58b68ee31ac0e652b8"}, - {file = "obstore-0.7.3-cp313-cp313-manylinux_2_24_aarch64.whl", hash = "sha256:d34920539a94da2b87195787b80004960638dfd0aa2f4369fc9239e0a41470a8"}, - {file = "obstore-0.7.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcdaa779f376745ff493cce7f19cbbe8d75f68304bf1062e757ab60bd62de1"}, - {file = "obstore-0.7.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ae095f679e4796b8f6ef80ed3813ddd14a477ae219a0c059c23cf294f9288ded"}, - {file = "obstore-0.7.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6def59e79c19b8804743fec6407f542b387dc1630c2254412ae8bd3a0b98e7e4"}, - {file = "obstore-0.7.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f97797c42476ab19853ef4a161b903eaf96c2363a23b9e0187d66b0daee350cb"}, - {file = "obstore-0.7.3-cp313-cp313-win_amd64.whl", hash = "sha256:8f0ecc01b1444bc08ff98e368b80ea2c085a7783621075298e86d3aba96f8e27"}, - {file = "obstore-0.7.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b0a337b6d2b430040e752effdf9584b0d6adddef2ead2bbbc3c204957a2f69d2"}, - {file = "obstore-0.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:439874c31a78198211c45ebde0b3535650dc3585353be51b361bd017bc492090"}, - {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:360034e4b1fe84da59bc3b090798acdd1b4a8b75cc1e56d2656591c7cc8776f2"}, - {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44989c9be1156c8ad02522bcb0358e813fd71fa061e51c3331cc11f4b6d36525"}, - {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bf0b9c28b3149138ff3db0c2cfb3acb329d3a3bef02a3146edec6d2419b27ad"}, - {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98fd91e90442ff3bf8832c713189c81cd892299a8423fc5d8c4534e84db62643"}, - {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eccae18d75d753129d58c080716cd91738fd1f913b7182eb5695f483d6cbd94"}, - {file = "obstore-0.7.3-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:bbe0488ca1573020af14ca585ddc5e5aa7593f8fc42ec5d1f53b83393ccaefa5"}, - {file = "obstore-0.7.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6765cef76ca62b13d4cfec4648fbf6048410d34c2e11455323d011d208977b89"}, - {file = "obstore-0.7.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:00f8d1211d247fc24c9f5d5614f2ed25872fe2c4af2e283f3e6cc85544a3dee5"}, - {file = "obstore-0.7.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ebc387320a00918c8afb5f2d76c07157003a661d60ff03763103278670bc75e3"}, - {file = "obstore-0.7.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8b526bdc5b5392ac55b3a45bf04f2eba3a33c132dfa04418e7ffba38763d7b5d"}, - {file = "obstore-0.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:1af6dfef86b37e74ff812bd70d8643619e16485559fcaee01b3f2442b70d4918"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:848eb12ed713f447a7b1f7de3f0bff570de99546f76c37e6315102f5bbdaf71c"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:091998d57331aa0e648a9dca0adebf6dc09eb53a4e6935c9c06625998120acc1"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed7c957d19a6a994e8c9198b1e58b31e0fc3748ca056e27f738a4ead789eb80b"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af8daa0568c89ce863986ccf14570c30d1dc817b51ed2146eecb76fddc82704e"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe42053413a35a964e88ea156af3253defac30bedd973797b55b8e230cc50fe4"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2faa2ac90672334cdaabbf930c82e91efa184928dc55b55bcbf84b152bc4df1"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49f20fdabd295a5a001569957c19a51615d288cd255fb80dcf966e2307ca0cec"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_24_aarch64.whl", hash = "sha256:aa131d089565fb7a5225220fcdfe260e3b1fc6821c0a2eef2e3a23c5ba9c79bd"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:73df8b270b89a97ef9e87fc8e552d97d426bbfcb61c55097f5d452a7457ee9d5"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-musllinux_1_2_armv7l.whl", hash = "sha256:25cea5cf5a727800b14cf4d09fd2b799c28fb755cc04e5635e7fb36d413bf772"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:aae7fea048d7e73e5c206efef1627bff677455f6eed5c94a596906c4fcedc744"}, - {file = "obstore-0.7.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:b4ee1ee4f8846ae891f1715a19a8f89d16a00c9e8913bf60c9f3acf24d905de2"}, + 
{file = "obstore-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:49104c0d72688c180af015b02c691fbb6cf6a45b03a9d71b84059ed92dbec704"}, + {file = "obstore-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c49776abd416e4d80d003213522d82ad48ed3517bee27a6cf8ce0f0cf4e6337e"}, + {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1636372b5e171a98369612d122ea20b955661daafa6519ed8322f4f0cb43ff74"}, + {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2efed0d86ad4ebffcbe3d0c4d84f26c2c6b20287484a0a748499c169a8e1f2c4"}, + {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00c5542616dc5608de82ab6f6820633c9dbab6ff048e770fb8a5fcd1d30cd656"}, + {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d9df46aaf25ce80fff48c53382572adc67b6410611660b798024450281a3129"}, + {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ccf0f03a7fe453fb8640611c922bce19f021c6aaeee6ee44d6d8fb57db6be48"}, + {file = "obstore-0.8.2-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:ddfbfadc88c5e9740b687ef0833384329a56cea07b34f44e1c4b00a0e97d94a9"}, + {file = "obstore-0.8.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:53ad53bb16e64102f39559ec470efd78a5272b5e3b84c53aa0423993ac5575c1"}, + {file = "obstore-0.8.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:b0b905b46354db0961ab818cad762b9c1ac154333ae5d341934c90635a6bd7ab"}, + {file = "obstore-0.8.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fee235694406ebb2dc4178752cf5587f471d6662659b082e9786c716a0a9465c"}, + {file = "obstore-0.8.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6c36faf7ace17dd0832aa454118a63ea21862e3d34f71b9297d0c788d00f4985"}, + {file = "obstore-0.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:948a1db1d34f88cfc7ab7e0cccdcfd84cf3977365634599c95ba03b4ef80d1c4"}, + {file = "obstore-0.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2edaa97687c191c5324bb939d72f6fe86a7aa8191c410f1648c14e8296d05c1c"}, + {file = "obstore-0.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c4fb7ef8108f08d14edc8bec9e9a6a2e5c4d14eddb8819f5d0da498aff6e8888"}, + {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fda8f658c0edf799ab1e264f9b12c7c184cd09a5272dc645d42e987810ff2772"}, + {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87fe2bc15ce4051ecb56abd484feca323c2416628beb62c1c7b6712114564d6e"}, + {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2482aa2562ab6a4ca40250b26bea33f8375b59898a9b5615fd412cab81098123"}, + {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4153b928f5d2e9c6cb645e83668a53e0b42253d1e8bcb4e16571fc0a1434599a"}, + {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbfa9c38620cc191be98c8b5558c62071e495dc6b1cc724f38293ee439aa9f92"}, + {file = "obstore-0.8.2-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:0822836eae8d52499f10daef17f26855b4c123119c6eb984aa4f2d525ec2678d"}, + {file = "obstore-0.8.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8ef6435dfd586d83b4f778e7927a5d5b0d8b771e9ba914bc809a13d7805410e6"}, + {file = "obstore-0.8.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = 
"sha256:0f2cba91f4271ca95a932a51aa8dda1537160342b33f7836c75e1eb9d40621a2"}, + {file = "obstore-0.8.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:23c876d603af0627627808d19a58d43eb5d8bfd02eecd29460bc9a58030fed55"}, + {file = "obstore-0.8.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ff3c4b5d07629b70b9dee494cd6b94fff8465c3864752181a1cb81a77190fe42"}, + {file = "obstore-0.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:aadb2cb72de7227d07f4570f82729625ffc77522fadca5cf13c3a37fbe8c8de9"}, + {file = "obstore-0.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:bb70ce297a47392b1d9a3e310f18d59cd5ebbb9453428210fef02ed60e4d75d1"}, + {file = "obstore-0.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1619bf618428abf1f607e0b219b2e230a966dcf697b717deccfa0983dd91f646"}, + {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a4605c3ed7c9515aeb4c619b5f7f2c9986ed4a79fe6045e536b5e59b804b1476"}, + {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce42670417876dd8668cbb8659e860e9725e5f26bbc86449fd259970e2dd9d18"}, + {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4a3e893b2a06585f651c541c1972fe1e3bf999ae2a5fda052ee55eb7e6516f5"}, + {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08462b32f95a9948ed56ed63e88406e2e5a4cae1fde198f9682e0fb8487100ed"}, + {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a0bf7763292a8fc47d01cd66e6f19002c5c6ad4b3ed4e6b2729f5e190fa8a0d"}, + {file = "obstore-0.8.2-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:bcd47f8126cb192cbe86942b8f73b1c45a651ce7e14c9a82c5641dfbf8be7603"}, + {file = "obstore-0.8.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:57eda9fd8c757c3b4fe36cf3918d7e589cc1286591295cc10b34122fa36dd3fd"}, + {file = "obstore-0.8.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ea44442aad8992166baa69f5069750979e4c5d9ffce772e61565945eea5774b9"}, + {file = "obstore-0.8.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:41496a3ab8527402db4142aaaf0d42df9d7d354b13ba10d9c33e0e48dd49dd96"}, + {file = "obstore-0.8.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:43da209803f052df96c7c3cbec512d310982efd2407e4a435632841a51143170"}, + {file = "obstore-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:1836f5dcd49f9f2950c75889ab5c51fb290d3ea93cdc39a514541e0be3af016e"}, + {file = "obstore-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:212f033e53fe6e53d64957923c5c88949a400e9027f7038c705ec2e9038be563"}, + {file = "obstore-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bee21fa4ba148d08fa90e47a96df11161661ed31e09c056a373cb2154b0f2852"}, + {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4c66594b59832ff1ced4c72575d9beb8b5f9b4e404ac1150a42bfb226617fd50"}, + {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:089f33af5c2fe132d00214a0c1f40601b28f23a38e24ef9f79fb0576f2730b74"}, + {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d87f658dfd340d5d9ea2d86a7c90d44da77a0db9e00c034367dca335735110cf"}, + {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e2e4fa92828c4fbc2d487f3da2d3588701a1b67d9f6ca3c97cc2afc912e9c63"}, + {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:ab440e89c5c37a8ec230857dd65147d4b923e0cada33297135d05e0f937d696a"}, + {file = "obstore-0.8.2-cp313-cp313-manylinux_2_24_aarch64.whl", hash = "sha256:b9beed107c5c9cd995d4a73263861fcfbc414d58773ed65c14f80eb18258a932"}, + {file = "obstore-0.8.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b75b4e7746292c785e31edcd5aadc8b758238372a19d4c5e394db5c305d7d175"}, + {file = "obstore-0.8.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:f33e6c366869d05ab0b7f12efe63269e631c5450d95d6b4ba4c5faf63f69de70"}, + {file = "obstore-0.8.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:12c885a9ce5ceb09d13cc186586c0c10b62597eff21b985f6ce8ff9dab963ad3"}, + {file = "obstore-0.8.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4accc883b93349a81c9931e15dd318cc703b02bbef2805d964724c73d006d00e"}, + {file = "obstore-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ec850adf9980e5788a826ccfd5819989724e2a2f712bfa3258e85966c8d9981e"}, + {file = "obstore-0.8.2-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:1431e40e9bb4773a261e51b192ea6489d0799b9d4d7dbdf175cdf813eb8c0503"}, + {file = "obstore-0.8.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ddb39d4da303f50b959da000aa42734f6da7ac0cc0be2d5a7838b62c97055bb9"}, + {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e01f4e13783db453e17e005a4a3ceff09c41c262e44649ba169d253098c775e8"}, + {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df0fc2d0bc17caff9b538564ddc26d7616f7e8b7c65b1a3c90b5048a8ad2e797"}, + {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e439d06c99a140348f046c9f598ee349cc2dcd9105c15540a4b231f9cc48bbae"}, + {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e37d9046669fcc59522d0faf1d105fcbfd09c84cccaaa1e809227d8e030f32c"}, + {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2646fdcc4bbe92dc2bb5bcdff15574da1211f5806c002b66d514cee2a23c7cb8"}, + {file = "obstore-0.8.2-cp314-cp314-manylinux_2_24_aarch64.whl", hash = "sha256:e31a7d37675056d93dfc244605089dee67f5bba30f37c88436623c8c5ad9ba9d"}, + {file = "obstore-0.8.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:656313dd8170dde0f0cd471433283337a63912e8e790a121f7cc7639c83e3816"}, + {file = "obstore-0.8.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:329038c9645d6d1741e77fe1a53e28a14b1a5c1461cfe4086082ad39ebabf981"}, + {file = "obstore-0.8.2-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:1e4df99b369790c97c752d126b286dc86484ea49bff5782843a265221406566f"}, + {file = "obstore-0.8.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9e1c65c65e20cc990414a8a9af88209b1bbc0dd9521b5f6b0293c60e19439bb7"}, + {file = "obstore-0.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2ca19d5310ba2736a3052d756e682cc1aafbcc4069e62c05b7222b7d8434b543"}, + {file = "obstore-0.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e5f3df9b64c683e288fa1e47fac237c6a1e1021e7c8cadcc75f1bcb3098e824d"}, + {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0cd293ace46ee175b50e21c0d8c94f606de6cd68f2f199877c55fe8837c585a5"}, + {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5a39750feedf5b95b4f62bacaded0b95a53be047d9462d6b24dc8f8b6fc6ec8"}, + {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:cb76517cca57f6ee9d74be18074a1c0f5ff0e62b4c6e1e0f893993dda93ebbfc"}, + {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7cd653932bbb7afe611786388cdb403a4b19b13205e0e43d8b0e4890e0accfd0"}, + {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4952d69843bb78c73c9a81258f448003f74ff7b298a60899f015788db98a1cd1"}, + {file = "obstore-0.8.2-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:2e3cd6d0822888b7e79c92c1258997289ebf0224598aad8f46ada17405666852"}, + {file = "obstore-0.8.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:feb4a6e5a3f2d323b3f61356d4ef99dd3f430aaacdaf5607ced5f857d992d2d4"}, + {file = "obstore-0.8.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:61e29fd6a27df284027c23dc49851dbeeacb2d40cb3d945bd3d6ec6cb0650450"}, + {file = "obstore-0.8.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8f9e18ff6c32997bd9a9fd636a98439bcbd3f44f13bae350243eacfb75803161"}, + {file = "obstore-0.8.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6ebc814302485d453b61df956c09662ebb33471684add5bbc321de7ba265b723"}, + {file = "obstore-0.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:36478c16fd7c7f880f28ece352251eec1fc6f6b69dbf2b78cec9754eb80a4b41"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6ea04118980a9c22fc8581225ff4507b6a161baf8949d728d96e68326ebaab59"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5f33a7570b6001b54252260fbec18c3f6d21e25d3ec57e9b6c5e7330e8290eb2"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11fa78dfb749edcf5a041cd6db20eae95b3e8b09dfdd9b38d14939da40e7c115"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:872bc0921ff88305884546ba05e258ccd95672a03d77db123f0d0563fd3c000b"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72556a2fbf018edd921286283e5c7eec9f69a21c6d12516d8a44108eceaa526a"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75fa1abf21499dfcfb0328941a175f89a9aa58245bf00e3318fe928e4b10d297"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f54f72f30cd608c4399679781c884bf8a0e816c1977a2fac993bf5e1fb30609f"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_24_aarch64.whl", hash = "sha256:b044ebf1bf7b8f7b0ca309375c1cd9e140be79e072ae8c70bbd5d9b2ad1f7678"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:b1326cd2288b64d6fe8857cc22d3a8003b802585fc0741eff2640a8dc35e8449"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-musllinux_1_2_armv7l.whl", hash = "sha256:ba6863230648a9b0e11502d2745d881cf74262720238bc0093c3eabd22a3b24c"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:887615da9eeefeb2df849d87c380e04877487aa29dbeb367efc3f17f667470d3"}, + {file = "obstore-0.8.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4eec1fb32ffa4fb9fe9ad584611ff031927a5c22732b56075ee7204f0e35ebdf"}, + {file = "obstore-0.8.2.tar.gz", hash = "sha256:a467bc4e97169e2ba749981b4fd0936015428d9b8f3fb83a5528536b1b6f377f"}, ] [package.dependencies] @@ -2417,14 +2464,14 @@ voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"] [[package]] name = "opentelemetry-api" -version = "1.38.0" +version = "1.39.1" description = "OpenTelemetry 
Python API" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582"}, - {file = "opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12"}, + {file = "opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950"}, + {file = "opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c"}, ] [package.dependencies] @@ -2433,68 +2480,71 @@ typing-extensions = ">=4.5.0" [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.38.0" +version = "1.39.1" description = "OpenTelemetry Protobuf encoding" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464"}, ] [package.dependencies] -opentelemetry-proto = "1.38.0" +opentelemetry-proto = "1.39.1" [[package]] name = "opentelemetry-exporter-otlp-proto-http" -version = "1.38.0" +version = "1.39.1" description = "OpenTelemetry Collector Protobuf over HTTP Exporter" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b"}, - {file = "opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb"}, ] [package.dependencies] googleapis-common-protos = ">=1.52,<2.0" opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.38.0" -opentelemetry-proto = "1.38.0" -opentelemetry-sdk = ">=1.38.0,<1.39.0" +opentelemetry-exporter-otlp-proto-common = "1.39.1" +opentelemetry-proto = "1.39.1" +opentelemetry-sdk = ">=1.39.1,<1.40.0" requests = ">=2.7,<3.0" typing-extensions = ">=4.5.0" +[package.extras] +gcp-auth = ["opentelemetry-exporter-credential-provider-gcp (>=0.59b0)"] + [[package]] name = "opentelemetry-instrumentation" -version = "0.59b0" +version = "0.60b1" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "opentelemetry_instrumentation-0.59b0-py3-none-any.whl", hash = "sha256:44082cc8fe56b0186e87ee8f7c17c327c4c2ce93bdbe86496e600985d74368ee"}, - {file = "opentelemetry_instrumentation-0.59b0.tar.gz", hash = "sha256:6010f0faaacdaf7c4dff8aac84e226d23437b331dcda7e70367f6d73a7db1adc"}, + {file = 
"opentelemetry_instrumentation-0.60b1-py3-none-any.whl", hash = "sha256:04480db952b48fb1ed0073f822f0ee26012b7be7c3eac1a3793122737c78632d"}, + {file = "opentelemetry_instrumentation-0.60b1.tar.gz", hash = "sha256:57ddc7974c6eb35865af0426d1a17132b88b2ed8586897fee187fd5b8944bd6a"}, ] [package.dependencies] opentelemetry-api = ">=1.4,<2.0" -opentelemetry-semantic-conventions = "0.59b0" +opentelemetry-semantic-conventions = "0.60b1" packaging = ">=18.0" wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-proto" -version = "1.38.0" +version = "1.39.1" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18"}, - {file = "opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468"}, + {file = "opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007"}, + {file = "opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8"}, ] [package.dependencies] @@ -2502,35 +2552,35 @@ protobuf = ">=5.0,<7.0" [[package]] name = "opentelemetry-sdk" -version = "1.38.0" +version = "1.39.1" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b"}, - {file = "opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe"}, + {file = "opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c"}, + {file = "opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6"}, ] [package.dependencies] -opentelemetry-api = "1.38.0" -opentelemetry-semantic-conventions = "0.59b0" +opentelemetry-api = "1.39.1" +opentelemetry-semantic-conventions = "0.60b1" typing-extensions = ">=4.5.0" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.59b0" +version = "0.60b1" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed"}, - {file = "opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0"}, + {file = "opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb"}, + {file = "opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953"}, ] [package.dependencies] -opentelemetry-api = "1.38.0" +opentelemetry-api = "1.39.1" typing-extensions = ">=4.5.0" [[package]] @@ -2760,14 +2810,14 @@ files = [ [[package]] name = "posthog" -version = "7.4.2" +version = "7.4.3" description = "Integrate PostHog into any python application." 
optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "posthog-7.4.2-py3-none-any.whl", hash = "sha256:36954f06f4adede905d97faeb24926a705a4d86f4a308506b15b41b661ef064c"}, - {file = "posthog-7.4.2.tar.gz", hash = "sha256:5953f31a21c5e2485ac57eb5d600a231a70118f884f438c0e8b493c30373c409"}, + {file = "posthog-7.4.3-py3-none-any.whl", hash = "sha256:ae068f8954ee7a56d10ce35261580f1b8d99c6a2b6e878964eeacea1ec906b4a"}, + {file = "posthog-7.4.3.tar.gz", hash = "sha256:02484a32c8bf44ab489dcef270ada46e5ce324021258c322f0d1b567c2d6f174"}, ] [package.dependencies] @@ -3028,6 +3078,7 @@ files = [ [package.dependencies] annotated-types = ">=0.6.0" +email-validator = {version = ">=2.0.0", optional = true, markers = "extra == \"email\""} pydantic-core = "2.41.5" typing-extensions = ">=4.14.1" typing-inspection = ">=0.4.2" @@ -3345,14 +3396,14 @@ files = [ [[package]] name = "python-jsonpath" -version = "2.0.1" +version = "2.0.2" description = "JSONPath, JSON Pointer and JSON Patch for Python." optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "python_jsonpath-2.0.1-py3-none-any.whl", hash = "sha256:ebd518b7c883acc5b976518d76b6c96288405edec7d9ef838641869c1e1a5eb7"}, - {file = "python_jsonpath-2.0.1.tar.gz", hash = "sha256:32a84ebb2dc0ec1b42a6e165b0f9174aef8310bad29154ad9aee31ac37cca18f"}, + {file = "python_jsonpath-2.0.2-py3-none-any.whl", hash = "sha256:3f8ab612f815ce10c03bf0deaede87235f3381b109a60b4a22744069953627e3"}, + {file = "python_jsonpath-2.0.2.tar.gz", hash = "sha256:41abb6660b3ee54d5ae77e4b0e901049fb1662ad90de241f038df47edc75ee60"}, ] [package.extras] @@ -4220,14 +4271,14 @@ blobfile = ["blobfile (>=2)"] [[package]] name = "tldextract" -version = "5.3.0" +version = "5.3.1" description = "Accurately separates a URL's subdomain, domain, and public suffix, using the Public Suffix List (PSL). By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well." 
optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "tldextract-5.3.0-py3-none-any.whl", hash = "sha256:f70f31d10b55c83993f55e91ecb7c5d84532a8972f22ec578ecfbe5ea2292db2"}, - {file = "tldextract-5.3.0.tar.gz", hash = "sha256:b3d2b70a1594a0ecfa6967d57251527d58e00bb5a91a74387baa0d87a0678609"}, + {file = "tldextract-5.3.1-py3-none-any.whl", hash = "sha256:6bfe36d518de569c572062b788e16a659ccaceffc486d243af0484e8ecf432d9"}, + {file = "tldextract-5.3.1.tar.gz", hash = "sha256:a72756ca170b2510315076383ea2993478f7da6f897eef1f4a5400735d5057fb"}, ] [package.dependencies] @@ -4242,27 +4293,36 @@ testing = ["mypy", "pytest", "pytest-gitignore", "pytest-mock", "responses", "ru [[package]] name = "tokenizers" -version = "0.22.1" +version = "0.22.2" description = "" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73"}, - {file = "tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4"}, - {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879"}, - {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446"}, - {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a"}, - {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390"}, - {file = "tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82"}, - {file = "tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138"}, - {file = "tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9"}, + {file = "tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c"}, + {file = "tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001"}, + {file = 
"tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5"}, + {file = "tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92"}, + {file = "tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48"}, + {file = "tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:753d47ebd4542742ef9261d9da92cd545b2cacbb48349a1225466745bb866ec4"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e10bf9113d209be7cd046d40fbabbaf3278ff6d18eb4da4c500443185dc1896c"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64d94e84f6660764e64e7e0b22baa72f6cd942279fdbb21d46abd70d179f0195"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f01a9c019878532f98927d2bacb79bbb404b43d3437455522a00a30718cdedb5"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319f659ee992222f04e58f84cbf407cfa66a65fe3a8de44e8ad2bc53e7d99012"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e50f8554d504f617d9e9d6e4c2c2884a12b388a97c5c77f0bc6cf4cd032feee"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a62ba2c5faa2dd175aaeed7b15abf18d20266189fb3406c5d0550dd34dd5f37"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143b999bdc46d10febb15cbffb4207ddd1f410e2c755857b5a0797961bbdc113"}, + {file = "tokenizers-0.22.2.tar.gz", hash = 
"sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917"}, ] [package.dependencies] @@ -4271,7 +4331,7 @@ huggingface-hub = ">=0.16.4,<2.0" [package.extras] dev = ["tokenizers[testing]"] docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] -testing = ["black (==22.3)", "datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff"] +testing = ["datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff", "ty"] [[package]] name = "toml" @@ -4327,14 +4387,14 @@ requests = ">=2.0.0" [[package]] name = "typer-slim" -version = "0.21.0" +version = "0.21.1" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "typer_slim-0.21.0-py3-none-any.whl", hash = "sha256:92aee2188ac6fc2b2924bd75bb61a340b78bd8cd51fd9735533ce5a856812c8e"}, - {file = "typer_slim-0.21.0.tar.gz", hash = "sha256:f2dbd150cfa0fead2242e21fa9f654dfc64773763ddf07c6be9a49ad34f79557"}, + {file = "typer_slim-0.21.1-py3-none-any.whl", hash = "sha256:6e6c31047f171ac93cc5a973c9e617dbc5ab2bddc4d0a3135dc161b4e2020e0d"}, + {file = "typer_slim-0.21.1.tar.gz", hash = "sha256:73495dd08c2d0940d611c5a8c04e91c2a0a98600cbd4ee19192255a233b6dbfd"}, ] [package.dependencies] @@ -4839,4 +4899,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "ef7e131d700fdd953b5104884661b7cfa251aff86e8f9e52ab3f4f34a8447af3" +content-hash = "b92d54d17a02394a5732d27c113605af042bd47a4500511a0d639eedb74ee9c1" diff --git a/api/pyproject.toml b/api/pyproject.toml index a9bc733424..bd75182a82 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "api" -version = "0.72.1" +version = "0.76.0" description = "Agenta API" authors = [ { name = "Mahmoud Mabrouk", email = "mahmoud@agenta.ai" }, @@ -22,7 +22,7 @@ agenta = ">=0.72.1" # Core framework dependencies fastapi = ">=0.127" -pydantic = "^2" +pydantic = { version = "^2", extras = ["email"] } uvicorn = "^0.40" gunicorn = "^23" httpx = "^0.28" @@ -52,9 +52,6 @@ taskiq-redis = "^1" redis = "^7" cachetools = "^6" -# Sandbox -restrictedpython = { version = "^8", python = ">=3.11,<3.14" } - # Integrations & external services supertokens-python = "^0.29" openai = "^2" @@ -62,6 +59,7 @@ sendgrid = "^6" stripe = "^14" posthog = "^7" newrelic = "^11" +dnspython = "^2" # Observability (limited - see comment below) opentelemetry-proto = "^1" diff --git a/api/test-auth.http b/api/test-auth.http new file mode 100644 index 0000000000..145d262fd6 --- /dev/null +++ b/api/test-auth.http @@ -0,0 +1,409 @@ +### Auth Endpoints Testing +### Base URL +@baseUrl = http://localhost +@apiBaseUrl = {{baseUrl}}/api/auth + +### NOTE: FastAPI app has root_path="/api" (line 150 in entrypoints/routers.py) +### So auth router mounted at "/auth" becomes "/api/auth" in final URLs + +################################################################################ +# 1. Discover Authentication Methods (ALWAYS AVAILABLE) +################################################################################ +# This endpoint works regardless of configuration +# Returns available authentication methods based on: +# - Environment variables (AGENTA_AUTHN_EMAIL, AGENTA_AUTHN_GOOGLE_ENABLED, etc.) 
+# - Organization policies (EE only) +# - User's existing identities + +POST {{apiBaseUrl}}/discover +Content-Type: application/json + +{ + "email": "jp@agenta.ai" +} + +# Expected response (default config with no env vars): +# { +# "user_exists": false, +# "primary_method": "email:password", +# "methods": { +# "email:password": true, +# "email:otp": false, +# "social:google": false, +# "social:github": false, +# "sso": false +# } +# } + +################################################################################ +# 2. EMAIL/PASSWORD AUTHENTICATION (DEFAULT) +################################################################################ +# Available when: AGENTA_AUTHN_EMAIL is NOT set OR = "password" (default) +# This is the DEFAULT authentication method when no env vars are configured + +### 2a. Sign Up (Email/Password) +POST {{apiBaseUrl}}/signup +Content-Type: application/json + +{ + "formFields": [ + { + "id": "email", + "value": "test@example.com" + }, + { + "id": "password", + "value": "SecurePassword123!" + } + ] +} + +### 2b. Sign In (Email/Password) +POST {{apiBaseUrl}}/signin +Content-Type: application/json + +{ + "formFields": [ + { + "id": "email", + "value": "test@example.com" + }, + { + "id": "password", + "value": "SecurePassword123!" + } + ] +} + +################################################################################ +# 3. EMAIL OTP AUTHENTICATION (PASSWORDLESS) +################################################################################ +# Available when: AGENTA_AUTHN_EMAIL="otp" +# NOTE: These endpoints return 404 if email OTP is NOT enabled! + +### 3a. Create OTP Code (Email OTP) +POST {{apiBaseUrl}}/signinup/code +Content-Type: application/json + +{ + "email": "jp@agenta.ai" +} + +# Response contains: +# { +# "status": "OK", +# "deviceId": "...", +# "preAuthSessionId": "...", +# "flowType": "USER_INPUT_CODE" +# } + +### 3b. Consume OTP Code (Email OTP) +POST {{apiBaseUrl}}/signinup/code/consume +Content-Type: application/json + +{ + "preAuthSessionId": "your-pre-session-id", + "deviceId": "your-device-id", + "userInputCode": "599869" +} + +################################################################################ +# 4. SOCIAL OAUTH AUTHENTICATION +################################################################################ +# Available when: +# - Google: AGENTA_AUTHN_GOOGLE_ENABLED=true + client ID/secret configured +# - GitHub: AGENTA_AUTHN_GITHUB_ENABLED=true + client ID/secret configured + +### 4a. Google - Get Authorization URL +GET {{apiBaseUrl}}/authorisationurl?thirdPartyId=google&redirectURIOnProviderDashboard={{baseUrl}}/auth/callback/google + +# Response: +# { +# "status": "OK", +# "urlWithQueryParams": "https://accounts.google.com/o/oauth2/v2/auth?..." +# } + +### 4b. GitHub - Get Authorization URL +GET {{apiBaseUrl}}/authorisationurl?thirdPartyId=github&redirectURIOnProviderDashboard={{baseUrl}}/auth/callback/github + +### 4c. OAuth Callback (handled automatically by SuperTokens) +# After user authorizes on provider: +# Provider redirects to: {{baseUrl}}/auth/callback/google +# SuperTokens handles the callback, creates session, and redirects to frontend + +################################################################################ +# 5. OIDC/SSO AUTHENTICATION (EE ONLY) +################################################################################ +# Available when: EE edition + organization has configured OIDC providers +# Requires organization_providers table with enabled providers + +### 5a. 
OIDC - Initiate Authorization +GET {{apiBaseUrl}}/authorize/oidc?provider_id=PROVIDER_UUID&redirect=/dashboard + +# This endpoint: +# 1. Validates provider access +# 2. Redirects to SuperTokens OIDC flow +# 3. SuperTokens handles OIDC exchange +# 4. Creates session with user identities +# 5. Redirects to specified redirect path + +### 5b. OIDC Callback (handled automatically by SuperTokens) +# Provider redirects to: {{baseUrl}}/auth/callback/custom +# SuperTokens processes OIDC callback and creates session + +################################################################################ +# 6. SESSION MANAGEMENT (ALWAYS AVAILABLE) +################################################################################ +# These endpoints are available regardless of which auth method was used + +### 6a. Verify Session +GET {{apiBaseUrl}}/session/verify +Cookie: sAccessToken=YOUR_ACCESS_TOKEN; sRefreshToken=YOUR_REFRESH_TOKEN + +### 6b. Refresh Session +POST {{apiBaseUrl}}/session/refresh +Cookie: sRefreshToken=YOUR_REFRESH_TOKEN + +### 6c. Sign Out +POST {{apiBaseUrl}}/signout +Cookie: sAccessToken=YOUR_ACCESS_TOKEN + +### 6d. Get Session User Info +GET {{apiBaseUrl}}/session +Cookie: sAccessToken=YOUR_ACCESS_TOKEN + +################################################################################ +# 7. SUPERTOKENS DASHBOARD (ADMIN) +################################################################################ +# Available when: Dashboard recipe is enabled (default) +GET {{baseUrl}}/auth/dashboard + + +################################################################################ +# CURL COMMANDS +################################################################################ + +### 1. Discover (custom endpoint - always works) +# curl -X POST http://localhost/api/auth/discover \ +# -H "Content-Type: application/json" \ +# -d '{"email": "test@example.com"}' + +### 2a. Email/Password - Sign Up (DEFAULT - works without env vars) +# curl -X POST http://localhost/api/auth/signup \ +# -H "Content-Type: application/json" \ +# -d '{ +# "formFields": [ +# {"id": "email", "value": "test@example.com"}, +# {"id": "password", "value": "SecurePassword123!"} +# ] +# }' + +### 2b. Email/Password - Sign In (DEFAULT) +# curl -X POST http://localhost/api/auth/signin \ +# -H "Content-Type: application/json" \ +# -d '{ +# "formFields": [ +# {"id": "email", "value": "test@example.com"}, +# {"id": "password", "value": "SecurePassword123!"} +# ] +# }' + +### 3a. Email OTP - Create Code (only if AGENTA_AUTHN_EMAIL=otp) +# curl -X POST http://localhost/api/auth/signinup/code \ +# -H "Content-Type: application/json" \ +# -d '{"email": "test@example.com"}' + +### 3b. Email OTP - Consume Code (only if AGENTA_AUTHN_EMAIL=otp) +# curl -X POST http://localhost/api/auth/signinup/code/consume \ +# -H "Content-Type: application/json" \ +# -d '{ +# "preAuthSessionId": "SESSION_ID", +# "deviceId": "DEVICE_ID", +# "userInputCode": "123456" +# }' + +### 4a. Google OAuth - Get URL (only if AGENTA_AUTHN_GOOGLE_ENABLED=true) +# curl -X GET "http://localhost/api/auth/authorisationurl?thirdPartyId=google&redirectURIOnProviderDashboard=http://localhost/auth/callback/google" + +### 4b. GitHub OAuth - Get URL (only if AGENTA_AUTHN_GITHUB_ENABLED=true) +# curl -X GET "http://localhost/api/auth/authorisationurl?thirdPartyId=github&redirectURIOnProviderDashboard=http://localhost/auth/callback/github" + +### 5. 
OIDC/SSO - Authorize (EE only, requires configured providers) +# curl -X GET "http://localhost/api/auth/authorize/oidc?provider_id=UUID&redirect=/dashboard" + +### 6a. Session - Verify +# curl -X GET http://localhost/api/auth/session/verify \ +# -H "Cookie: sAccessToken=YOUR_TOKEN" + +### 6b. Session - Refresh +# curl -X POST http://localhost/api/auth/session/refresh \ +# -H "Cookie: sRefreshToken=YOUR_REFRESH_TOKEN" + +### 6c. Session - Sign Out +# curl -X POST http://localhost/api/auth/signout \ +# -H "Cookie: sAccessToken=YOUR_TOKEN" + + +################################################################################ +# AUTHENTICATION FLOW DOCUMENTATION +################################################################################ + +# ============================================================================ +# DEFAULT CONFIGURATION (No environment variables set) +# ============================================================================ +# When AGENTA_AUTHN_EMAIL is NOT set or = "password": +# - Email/Password authentication is ENABLED (default) +# - Available endpoints: /discover, /signup, /signin, /signout, /session/* +# - SuperTokens emailpassword recipe is initialized +# +# Expected discover response: +# { +# "primary_method": "email:password", +# "methods": { +# "email:password": true, +# "email:otp": false, +# "social:google": false, +# "social:github": false, +# "sso": false +# } +# } + +# ============================================================================ +# EMAIL OTP CONFIGURATION +# ============================================================================ +# Set: AGENTA_AUTHN_EMAIL=otp +# - Email OTP authentication is ENABLED +# - Email/Password authentication is DISABLED +# - Available endpoints: /discover, /signinup/code, /signinup/code/consume, /signout, /session/* +# - SuperTokens passwordless recipe is initialized +# +# Expected discover response: +# { +# "primary_method": "email:otp", +# "methods": { +# "email:password": false, +# "email:otp": true, +# "social:google": false, +# "social:github": false, +# "sso": false +# } +# } + +# ============================================================================ +# SOCIAL OAUTH CONFIGURATION +# ============================================================================ +# Set: AGENTA_AUTHN_GOOGLE_ENABLED=true +# AGENTA_AUTHN_GOOGLE_CLIENT_ID=your_client_id +# AGENTA_AUTHN_GOOGLE_CLIENT_SECRET=your_client_secret +# - Google OAuth is ENABLED +# - Available endpoints: /authorisationurl?thirdPartyId=google +# - SuperTokens thirdparty recipe is initialized with Google provider +# +# Set: AGENTA_AUTHN_GITHUB_ENABLED=true +# AGENTA_AUTHN_GITHUB_CLIENT_ID=your_client_id +# AGENTA_AUTHN_GITHUB_CLIENT_SECRET=your_client_secret +# - GitHub OAuth is ENABLED +# - Available endpoints: /authorisationurl?thirdPartyId=github +# - SuperTokens thirdparty recipe is initialized with GitHub provider + +# ============================================================================ +# OIDC/SSO CONFIGURATION (EE ONLY) +# ============================================================================ +# Requires: +# 1. EE edition enabled +# 2. Organization with verified domain in organization_domains table +# 3. OIDC provider configured in organization_providers table +# 4. 
Provider must be enabled and linked to domain +# +# Available endpoints: /authorize/oidc?provider_id=UUID +# SuperTokens thirdparty recipe handles OIDC exchange +# +# Expected discover response (when domain has SSO): +# { +# "primary_method": "sso", +# "methods": { +# "email:password": false, +# "email:otp": false, +# "social:google": false, +# "social:github": false, +# "sso": { +# "available": true, +# "required_by_some_orgs": false, +# "providers": [ +# { +# "slug": "okta", +# "name": "ACME SSO", +# "recommended": true +# } +# ] +# } +# } +# } + +# ============================================================================ +# COMMON ERRORS +# ============================================================================ +# 404 on /signup or /signin: +# - Email/Password recipe is NOT enabled +# - Check: AGENTA_AUTHN_EMAIL should be "password" or unset +# +# 404 on /signinup/code: +# - Email OTP recipe is NOT enabled +# - Check: AGENTA_AUTHN_EMAIL should be "otp" +# +# 404 on /authorisationurl?thirdPartyId=google: +# - Google OAuth is NOT enabled +# - Check: AGENTA_AUTHN_GOOGLE_ENABLED=true and credentials configured +# +# 404 on /authorize/oidc: +# - OIDC endpoints are EE only +# - Check: EE edition enabled and provider configured +# +# All methods false in discover response: +# - No authentication methods are configured +# - This should NOT happen - email:password is always default +# - Check SuperTokens initialization in oss/src/__init__.py + +# ============================================================================ +# SUPERTOKENS RECIPE INITIALIZATION +# ============================================================================ +# Location: api/oss/src/__init__.py +# +# Email/Password (default): +# - Initialized when: env.auth.email_method == "password" OR not set +# - Recipe: emailpassword.init() +# - Endpoints: /signup, /signin +# +# Email OTP: +# - Initialized when: env.auth.email_method == "otp" +# - Recipe: passwordless.init() +# - Endpoints: /signinup/code, /signinup/code/consume +# +# Third-Party (Social OAuth): +# - Initialized when: env.auth.google_enabled OR env.auth.github_enabled +# - Recipe: thirdparty.init() +# - Endpoints: /authorisationurl, /callback/{provider} +# +# Session: +# - Always initialized +# - Recipe: session.init() +# - Endpoints: /session/verify, /session/refresh +# +# Dashboard: +# - Always initialized +# - Recipe: dashboard.init() +# - Endpoints: /dashboard + +# ============================================================================ +# SESSION PAYLOAD +# ============================================================================ +# After successful authentication, SuperTokens creates a session with: +# - User ID (from SuperTokens) +# - Email +# - Identities (list of authentication methods used) +# Example: ["email:password"], ["email:otp"], ["social:google", "email:otp"] +# +# Session cookies: +# - sAccessToken: Short-lived access token +# - sRefreshToken: Long-lived refresh token +# - Both are httpOnly, secure, sameSite=lax diff --git a/docs/docs/evaluation/configure-evaluators/07-custom-evaluator.mdx b/docs/docs/evaluation/configure-evaluators/07-custom-evaluator.mdx index 423b5239b9..e138fa8a29 100644 --- a/docs/docs/evaluation/configure-evaluators/07-custom-evaluator.mdx +++ b/docs/docs/evaluation/configure-evaluators/07-custom-evaluator.mdx @@ -2,11 +2,7 @@ title: "Custom Code Evaluator" --- -Sometimes, the default evaluators in **Agenta** may not be sufficient for your specific use case.
In such cases, you can create a custom evaluator to suit your specific needs. Custom evaluators are written in Python. - -:::info -For the moment, there are limitation on the code that can be written in the custom evaluator. Our backend uses `RestrictedPython` to execute the code which limits the libraries that can be used. -::: +Sometimes, the default evaluators in **Agenta** may not be sufficient for your specific use case. In such cases, you can create a custom evaluator to suit your specific needs. Custom evaluators are written in Python, JavaScript, or TypeScript. ## Evaluation code diff --git a/docs/docs/observability/integrations/02-langchain.mdx b/docs/docs/observability/integrations/02-langchain.mdx index 8d365ce859..3f04a1e6a1 100644 --- a/docs/docs/observability/integrations/02-langchain.mdx +++ b/docs/docs/observability/integrations/02-langchain.mdx @@ -19,7 +19,7 @@ This guide shows you how to instrument LangChain applications using Agenta's obs Install the required packages: ```bash -pip install -U agenta openai opentelemetry-instrumentation-langchain langchain langchain_community +pip install -U agenta openai opentelemetry-instrumentation-langchain langchain langchain-openai ``` ## Configure Environment Variables @@ -48,15 +48,17 @@ os.environ["AGENTA_HOST"] = "http://localhost" ## Code Example +This example uses [LangChain Expression Language (LCEL)](https://python.langchain.com/docs/concepts/lcel/) to build a multi-step workflow that generates a joke and then translates it. + ```python # highlight-next-line import agenta as ag # highlight-next-line from opentelemetry.instrumentation.langchain import LangchainInstrumentor -from langchain.schema import SystemMessage, HumanMessage -from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate -from langchain_community.chat_models import ChatOpenAI -from langchain.chains import LLMChain, SequentialChain, TransformChain +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.output_parsers import StrOutputParser +from langchain_core.runnables import RunnablePassthrough, RunnableLambda +from langchain_openai import ChatOpenAI # highlight-next-line ag.init() @@ -66,43 +68,39 @@ LangchainInstrumentor().instrument() def langchain_app(): # Initialize the chat model - chat = ChatOpenAI(temperature=0) - - # Define a transformation chain to create the prompt - transform = TransformChain( - input_variables=["subject"], - output_variables=["prompt"], - transform=lambda inputs: {"prompt": f"Tell me a joke about {inputs['subject']}."}, - ) - - # Define the first LLM chain to generate a joke - first_prompt_messages = [ - SystemMessage(content="You are a funny sarcastic nerd."), - HumanMessage(content="{prompt}"), - ] - first_prompt_template = ChatPromptTemplate.from_messages(first_prompt_messages) - first_chain = LLMChain(llm=chat, prompt=first_prompt_template, output_key="joke") - - # Define the second LLM chain to translate the joke - second_prompt_messages = [ - SystemMessage(content="You are an Elf."), - HumanMessagePromptTemplate.from_template( - "Translate the joke below into Sindarin language:\n{joke}" - ), - ] - second_prompt_template = ChatPromptTemplate.from_messages(second_prompt_messages) - second_chain = LLMChain(llm=chat, prompt=second_prompt_template) - - # Chain everything together in a sequential workflow - workflow = SequentialChain( - chains=[transform, first_chain, second_chain], - input_variables=["subject"], + llm = ChatOpenAI(temperature=0) + + # Create prompt for joke generation + 
joke_prompt = ChatPromptTemplate.from_messages([ + ("system", "You are a funny sarcastic nerd."), + ("human", "Tell me a joke about {subject}."), + ]) + + # Create prompt for translation + translate_prompt = ChatPromptTemplate.from_messages([ + ("system", "You are an Elf."), + ("human", "Translate the joke below into Sindarin language:\n{joke}"), + ]) + + # Build the chain using LCEL (LangChain Expression Language) + # First chain: generate a joke + joke_chain = joke_prompt | llm | StrOutputParser() + + # Second chain: translate the joke + translate_chain = translate_prompt | llm | StrOutputParser() + + # Combine the chains: generate joke, then translate it + full_chain = ( + {"subject": RunnablePassthrough()} + | RunnableLambda(lambda x: {"joke": joke_chain.invoke(x["subject"])}) + | translate_chain ) # Execute the workflow and print the result - result = workflow({"subject": "OpenTelemetry"}) + result = full_chain.invoke("OpenTelemetry") print(result) + # Run the LangChain application langchain_app() ``` @@ -111,6 +109,7 @@ langchain_app() - **Initialize Agenta**: `ag.init()` sets up the Agenta SDK. - **Instrument LangChain**: `LangchainInstrumentor().instrument()` instruments LangChain for tracing. This must be called **before** running your application to ensure all components are traced. +- **LCEL Chains**: The pipe operator (`|`) chains components together. Each step's output becomes the next step's input, making it easy to compose complex workflows. ## Using Workflows diff --git a/docs/docs/observability/trace-with-python-sdk/09-track-chat-sessions.mdx b/docs/docs/observability/trace-with-python-sdk/09-track-chat-sessions.mdx index b710d7052b..67d182d485 100644 --- a/docs/docs/observability/trace-with-python-sdk/09-track-chat-sessions.mdx +++ b/docs/docs/observability/trace-with-python-sdk/09-track-chat-sessions.mdx @@ -5,6 +5,10 @@ description: "Learn how to track multi-turn conversations and chat sessions" sidebar_position: 9 --- +:::info +This guide covers tracking chat sessions with the Agenta Python SDK. For JavaScript/TypeScript or other OpenTelemetry-based clients, see the [session tracking with OpenTelemetry guide](/observability/trace-with-opentelemetry/session-tracking). +::: + Chat applications often span multiple requests and traces. Session tracking groups related interactions together so you can analyze complete conversations and user journeys. ## What are sessions? diff --git a/docs/docs/prompt-engineering/playground/02-custom-providers.mdx b/docs/docs/prompt-engineering/playground/02-custom-providers.mdx index bbb65252e8..007233c221 100644 --- a/docs/docs/prompt-engineering/playground/02-custom-providers.mdx +++ b/docs/docs/prompt-engineering/playground/02-custom-providers.mdx @@ -166,6 +166,24 @@ Region: (e.g eu-central-1) Model name: (e.g anthropic.claude-3-sonnet-20240229-v1:0) ``` +### Troubleshooting AWS Bedrock + +If a model works in the AWS console but fails in Agenta, the most common cause is that the model does not support on-demand throughput in your Region. In that case, you must invoke the model through an inference profile. + +You will usually see an error similar to: + +`Invocation of model ID with on-demand throughput isn't supported. Retry your request with the ID or ARN of an inference profile that contains this model.` + +To fix it: + +1. In AWS Bedrock, open the **Cross-region inference** page. +2. Create or select an **inference profile** that includes your model. +3. Copy the **Inference profile ID**.
It looks like `eu.anthropic.claude-3-haiku-20240307-v1:0`. +4. In Agenta, set **Model name** to the inference profile ID (not the base model ID). +5. Keep your Bedrock **Region** set to a Region supported by your account and the model. If you are not sure, use the same Region you used to create the inference profile. + +If you see a Bedrock error about a malformed request and an extraneous key (for example, `textGenerationConfig`), verify that you selected the right model identifier. The inference profile ID is often the correct choice for cross-region inference models. + ## Configuring OpenAI-Compatible Endpoints (e.g., Ollama) diff --git a/docs/docs/self-host/01-quick-start.mdx b/docs/docs/self-host/01-quick-start.mdx index 9c3b02cfe8..13ceb0c90f 100644 --- a/docs/docs/self-host/01-quick-start.mdx +++ b/docs/docs/self-host/01-quick-start.mdx @@ -69,7 +69,8 @@ If Agenta doesn't start properly, check these common issues: docker logs agenta-oss-gh-api ``` 4. SDK connectivity issues: If you're using the Agenta SDK from outside Docker to connect to your localhost Agenta instance and experiencing connection failures, ensure the `DOCKER_NETWORK_MODE` environment variable is unset (this is the default behavior). -5. Lack of memory provided to docker: If you are experiencing the web container restarting and dying unexpectedly, the most likely cause is that you are running out of memory. You may need to increase the memory allocated to docker (desktop). +5. Docker network layout: The Docker networks are defined in the compose files. See `hosting/docker-compose/oss/docker-compose.gh.yml` (OSS) or `hosting/docker-compose/ee/docker-compose.dev.yml` (EE) for the network names and service attachments. +6. Lack of memory provided to docker: If you are experiencing the web container restarting and dying unexpectedly, the most likely cause is that you are running out of memory. You may need to increase the memory allocated to docker (desktop). :::info To set up a development environment with features like hot-reloading, refer to our [Development Guide](/misc/contributing/development-mode). @@ -78,5 +79,3 @@ To set up a development environment with features like hot-reloading, refer to o Need help? 
Either: - [Create a GitHub issue](https://github.com/Agenta-AI/agenta/issues/new/choose) - Join our [Slack community](https://join.slack.com/t/agenta-hq/shared_invite/zt-37pnbp5s6-mbBrPL863d_oLB61GSNFjw) for quick support - - diff --git a/docs/docs/self-host/02-configuration.mdx b/docs/docs/self-host/02-configuration.mdx index 749debb58f..7eb5dac203 100644 --- a/docs/docs/self-host/02-configuration.mdx +++ b/docs/docs/self-host/02-configuration.mdx @@ -49,7 +49,7 @@ Optional Agenta-specific configurations: | Variable | Description | Default | |----------|-------------|---------| -| `AGENTA_AUTO_MIGRATIONS` | Enable automatic database migrations | `true` | +| `ALEMBIC_AUTO_MIGRATIONS` | Enable automatic database migrations (legacy: `AGENTA_AUTO_MIGRATIONS`) | `true` | | `AGENTA_PRICING` | Enable pricing features | _(empty)_ | | `AGENTA_DEMOS` | Enable demo applications | _(empty)_ | | `AGENTA_RUNTIME_PREFIX` | Prefix for runtime containers | _(empty)_ | @@ -60,6 +60,20 @@ Optional Agenta-specific configurations: | `AGENTA_ALLOWED_DOMAINS` | Comma-separated list of email domains allowed to authenticate; when set, all other domains are rejected | _(empty)_ | | `AGENTA_OTLP_MAX_BATCH_BYTES` | Max OTLP batch size before requests are rejected with 413 | `10485760` (10MB) | +### Sandbox Runner (Custom Evaluators) + +Agenta executes custom evaluator code in a sandboxed environment. You can choose between local execution or remote execution using Daytona sandboxes. + +| Variable | Description | Default | +|----------|-------------|---------| +| `AGENTA_SERVICES_SANDBOX_RUNNER` | Code execution backend for custom evaluators. Set to `local` or `daytona`. | `local` | +| `DAYTONA_API_KEY` | Your Daytona API key. Required when using Daytona. Get one from https://app.daytona.io | _(empty)_ | +| `DAYTONA_API_URL` | Daytona API endpoint. | `https://app.daytona.io/api` | +| `DAYTONA_TARGET` | Daytona region for sandbox execution (e.g., `eu`, `us`). | Value of `AGENTA_REGION`, or `eu` | +| `DAYTONA_SNAPSHOT` | Snapshot ID that defines the sandbox environment. Required when using Daytona. | _(empty)_ | + +**When to use Daytona**: Local execution runs code directly on the API server. This is simpler but runs in the same process as the API. Daytona runs code in isolated containers with their own dependencies. Use Daytona for production deployments or when you need stronger isolation. 
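+A minimal sketch of how these variables interact, based on the defaults documented in the table above (illustrative only, not the actual runner implementation):
+
+```python
+import os
+
+# "local" is the default code execution backend.
+runner = os.environ.get("AGENTA_SERVICES_SANDBOX_RUNNER", "local")
+
+if runner == "daytona":
+    # Required when using Daytona (see table above).
+    api_key = os.environ["DAYTONA_API_KEY"]
+    snapshot = os.environ["DAYTONA_SNAPSHOT"]
+    api_url = os.environ.get("DAYTONA_API_URL", "https://app.daytona.io/api")
+    # DAYTONA_TARGET falls back to AGENTA_REGION, then "eu".
+    target = os.environ.get("DAYTONA_TARGET") or os.environ.get("AGENTA_REGION", "eu")
+```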
+ ### Third-party (Required) Essential third-party service configurations: diff --git a/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx b/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx index 237fee4f68..638d413d86 100644 --- a/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx +++ b/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx @@ -73,9 +73,7 @@ This Langchain RAG application: ```python from langchain_openai import ChatOpenAI - import bs4 -from langchain import hub from langchain_chroma import Chroma from langchain_community.document_loaders import WebBaseLoader from langchain_core.output_parsers import StrOutputParser diff --git a/docs/package-lock.json b/docs/package-lock.json index 4a5e1e5339..205fad92b4 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -7031,23 +7031,23 @@ } }, "node_modules/body-parser": { - "version": "1.20.3", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", - "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "version": "1.20.4", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz", + "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==", "license": "MIT", "dependencies": { - "bytes": "3.1.2", + "bytes": "~3.1.2", "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", - "destroy": "1.2.0", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "on-finished": "2.4.1", - "qs": "6.13.0", - "raw-body": "2.5.2", + "destroy": "~1.2.0", + "http-errors": "~2.0.1", + "iconv-lite": "~0.4.24", + "on-finished": "~2.4.1", + "qs": "~6.14.0", + "raw-body": "~2.5.3", "type-is": "~1.6.18", - "unpipe": "1.0.0" + "unpipe": "~1.0.0" }, "engines": { "node": ">= 0.8", @@ -7072,6 +7072,26 @@ "ms": "2.0.0" } }, + "node_modules/body-parser/node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "license": "MIT", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, "node_modules/body-parser/node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -7090,19 +7110,13 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "license": "MIT" }, - "node_modules/body-parser/node_modules/qs": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", - "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", - "license": "BSD-3-Clause", - "dependencies": { - "side-channel": "^1.0.6" - }, + "node_modules/body-parser/node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "license": "MIT", "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "node": ">= 0.8" } }, "node_modules/bonjour-service": { @@ 
-18510,9 +18524,9 @@ } }, "node_modules/qs": { - "version": "6.14.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz", - "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==", + "version": "6.14.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz", + "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==", "license": "BSD-3-Clause", "dependencies": { "side-channel": "^1.1.0" @@ -18575,15 +18589,15 @@ } }, "node_modules/raw-body": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", - "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "version": "2.5.3", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz", + "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==", "license": "MIT", "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.4.24", + "unpipe": "~1.0.0" }, "engines": { "node": ">= 0.8" @@ -18598,6 +18612,26 @@ "node": ">= 0.8" } }, + "node_modules/raw-body/node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "license": "MIT", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, "node_modules/raw-body/node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -18610,6 +18644,15 @@ "node": ">=0.10.0" } }, + "node_modules/raw-body/node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/rc": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", diff --git a/examples/jupyter/integrations/google-adk-integration.ipynb b/examples/jupyter/integrations/google-adk-integration.ipynb index f35064cc54..bf8c90e28d 100644 --- a/examples/jupyter/integrations/google-adk-integration.ipynb +++ b/examples/jupyter/integrations/google-adk-integration.ipynb @@ -77,10 +77,10 @@ "# Load configuration from environment\n", "os.environ[\"AGENTA_API_KEY\"] = \"YOUR AGENTA API KEY\"\n", "os.environ[\"AGENTA_HOST\"] = \"https://cloud.agenta.ai\"\n", - "os.environ[\"GOOGLE_API_KEY\"] = \"YOUR GOOGLE API KEY\" # Required for Google ADK / Gemini\n", + "os.environ[\"GOOGLE_API_KEY\"] = \"YOUR GOOGLE API KEY\" # Required for Google ADK / Gemini\n", "\n", "# Initialize Agenta (uses AGENTA_* env vars)\n", - "ag.init()\n" + "ag.init()" ] }, { @@ -135,6 +135,7 @@ "APP_NAME = \"weather_app\"\n", "USER_ID = \"demo_user\"\n", "\n", + "\n", "def get_weather(city: str) -> dict:\n", " \"\"\"Toy tool used to generate spans in our traces.\"\"\"\n", " normalized = city.strip().lower()\n", @@ -144,7 +145,7 @@ " \"report\": 
\"The weather in New York is sunny with a temperature of 25°C.\",\n", " }\n", " if normalized == \"london\":\n", - " return{\n", + " return {\n", " \"status\": \"success\",\n", " \"report\": \"The weather in London is cloudy with a temperature of 18°C.\",\n", " }\n", @@ -154,6 +155,7 @@ " \"error_message\": f\"Weather information for '{city}' is not available.\",\n", " }\n", "\n", + "\n", "weather_agent = Agent(\n", " name=\"weather_agent\",\n", " model=\"gemini-2.0-flash-exp\",\n", @@ -163,7 +165,9 @@ ")\n", "\n", "session_service = InMemorySessionService()\n", - "weather_runner = Runner(agent=weather_agent , app_name=APP_NAME, session_service=session_service)\n" + "weather_runner = Runner(\n", + " agent=weather_agent, app_name=APP_NAME, session_service=session_service\n", + ")" ] }, { @@ -183,8 +187,9 @@ "from google.genai import types\n", "import agenta as ag\n", "\n", + "\n", "@ag.instrument(spankind=\"workflow\")\n", - "async def ask_weather(question: str, user_id: str = \"demo_user\")-> str:\n", + "async def ask_weather(question: str, user_id: str = \"demo_user\") -> str:\n", " \"\"\"\n", " Run a single weather question through the Google ADK agent.\n", " This appears as a top-level span inside Agenta observability.\n", @@ -201,13 +206,12 @@ " parts=[types.Part.from_text(text=question)],\n", " )\n", "\n", - "\n", " try:\n", " events = weather_runner.run_async(\n", " user_id=user_id,\n", " session_id=session.id,\n", " new_message=content,\n", - " )\n", + " )\n", "\n", " final_text = \"\"\n", " async for event in events:\n", @@ -220,11 +224,9 @@ " # Basic handling for Gemini quota / resource exhaustion\n", " msg = str(exc).lower()\n", " if \"exhausted\" in msg:\n", - " return (\n", - " \"The model is temporarily exhausted or over quota. (Check you 'google gemini' subscription) \"\n", - " )\n", + " return \"The model is temporarily exhausted or over quota. (Check you 'google gemini' subscription) \"\n", " # Re-raise all other errors so you still see real issues\n", - " raise\n" + " raise" ] }, { @@ -240,12 +242,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Example usage \n", - "async def main(): \n", - " response = await ask_weather(\"What is the weather in New York?\") \n", - " print(\"Response:\", response) \n", + "# Example usage\n", + "async def main():\n", + " response = await ask_weather(\"What is the weather in New York?\")\n", + " print(\"Response:\", response)\n", "\n", - "# Run the example \n", + "\n", + "# Run the example\n", "await main()" ] }, @@ -283,11 +286,11 @@ "# Example with custom span classification:\n", "import agenta as ag\n", "\n", + "\n", "@ag.instrument(spankind=\"agent\")\n", "def specialized_agent_function(input_data: str):\n", " # Agent-specific logic implementation (placeholder)\n", - " return input_data.upper()\n", - " " + " return input_data.upper()" ] }, { diff --git a/examples/jupyter/observability/observability_langchain.ipynb b/examples/jupyter/observability/observability_langchain.ipynb index dfc7d6ae09..3390878816 100644 --- a/examples/jupyter/observability/observability_langchain.ipynb +++ b/examples/jupyter/observability/observability_langchain.ipynb @@ -135,78 +135,8 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'To save a new version of a prompt in Agenta, you need to create a variant, which acts like a branch in git for versioning. After making your changes, commit them to the variant. 
Finally, you can deploy the specific version of your variant to the desired environment.'" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langchain_openai import ChatOpenAI\n", - "\n", - "\n", - "import bs4\n", - "from langchain import hub\n", - "from langchain_chroma import Chroma\n", - "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnablePassthrough\n", - "from langchain_openai import OpenAIEmbeddings\n", - "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "\n", - "prompt = \"\"\"\n", - "You are an assistant for question-answering tasks.\n", - "Use the following pieces of retrieved context to answer the question.\n", - "If you don't know the answer, just say that you don't know.\n", - "Use three sentences maximum and keep the answer concise and to the point.\n", - "\n", - "Question: {question} \n", - "\n", - "Context: {context} \n", - "\n", - "Answer:\n", - "\"\"\"\n", - "\n", - "prompt_template = ChatPromptTemplate(\n", - " [\n", - " (\"human\", prompt),\n", - " ]\n", - ")\n", - "\n", - "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n", - "\n", - "loader = WebBaseLoader(\n", - " web_paths=(\n", - " \"https://agenta.ai/docs/prompt-engineering/managing-prompts-programatically/create-and-commit\",\n", - " ),\n", - " bs_kwargs=dict(parse_only=bs4.SoupStrainer(\"article\")), # Only parse the core\n", - ")\n", - "docs = loader.load()\n", - "\n", - "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", - "splits = text_splitter.split_documents(docs)\n", - "vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n", - "\n", - "# Retrieve and generate using the relevant snippets of the blog.\n", - "retriever = vectorstore.as_retriever()\n", - "\n", - "\n", - "rag_chain = (\n", - " {\"context\": retriever, \"question\": RunnablePassthrough()}\n", - " | prompt_template\n", - " | llm\n", - " | StrOutputParser()\n", - ")\n", - "\n", - "rag_chain.invoke(\"How can I save a new version of a prompt in Agenta?\")" - ] + "outputs": [], + "source": "from langchain_openai import ChatOpenAI\n\nimport bs4\nfrom langchain_chroma import Chroma\nfrom langchain_community.document_loaders import WebBaseLoader\nfrom langchain_core.output_parsers import StrOutputParser\nfrom langchain_core.runnables import RunnablePassthrough\nfrom langchain_openai import OpenAIEmbeddings\nfrom langchain_text_splitters import RecursiveCharacterTextSplitter\nfrom langchain_core.prompts import ChatPromptTemplate\n\nprompt = \"\"\"\nYou are an assistant for question-answering tasks.\nUse the following pieces of retrieved context to answer the question.\nIf you don't know the answer, just say that you don't know.\nUse three sentences maximum and keep the answer concise and to the point.\n\nQuestion: {question} \n\nContext: {context} \n\nAnswer:\n\"\"\"\n\nprompt_template = ChatPromptTemplate(\n [\n (\"human\", prompt),\n ]\n)\n\nllm = ChatOpenAI(model=\"gpt-4o-mini\")\n\nloader = WebBaseLoader(\n web_paths=(\n \"https://agenta.ai/docs/prompt-engineering/managing-prompts-programatically/create-and-commit\",\n ),\n bs_kwargs=dict(parse_only=bs4.SoupStrainer(\"article\")), # Only parse the core\n)\ndocs = loader.load()\n\ntext_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, 
chunk_overlap=200)\nsplits = text_splitter.split_documents(docs)\nvectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n\n# Retrieve and generate using the relevant snippets of the blog.\nretriever = vectorstore.as_retriever()\n\n\nrag_chain = (\n {\"context\": retriever, \"question\": RunnablePassthrough()}\n | prompt_template\n | llm\n | StrOutputParser()\n)\n\nrag_chain.invoke(\"How can I save a new version of a prompt in Agenta?\")" } ], "metadata": { @@ -230,4 +160,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/examples/python/evaluators/ag/store_internals.py b/examples/python/evaluators/ag/store_internals.py index ce527657a1..b1559a3271 100644 --- a/examples/python/evaluators/ag/store_internals.py +++ b/examples/python/evaluators/ag/store_internals.py @@ -33,10 +33,12 @@ def evaluate( return 1.0 if str(output).lower() == str(correct_answer).lower() else 0.0 # Store a simple hello world message in internals - ag.tracing.store_internals({ - "message": "Hello World from evaluator internals!", - "evaluator_name": "internals_demo", - }) + ag.tracing.store_internals( + { + "message": "Hello World from evaluator internals!", + "evaluator_name": "internals_demo", + } + ) # Perform actual evaluation output_str = str(output).lower().strip() @@ -47,11 +49,13 @@ def evaluate( # Store evaluation details as internals # These will be visible in the observability drawer - ag.tracing.store_internals({ - "output_processed": output_str, - "correct_answer_processed": correct_str, - "exact_match": match, - "score": score, - }) + ag.tracing.store_internals( + { + "output_processed": output_str, + "correct_answer_processed": correct_str, + "exact_match": match, + "score": score, + } + ) return score diff --git a/examples/python/evaluators/basic/json_structure.py b/examples/python/evaluators/basic/json_structure.py index fea48debe3..0e123463a7 100644 --- a/examples/python/evaluators/basic/json_structure.py +++ b/examples/python/evaluators/basic/json_structure.py @@ -13,7 +13,7 @@ def evaluate( app_params: Dict[str, str], inputs: Dict[str, str], output: Union[str, Dict[str, Any]], - correct_answer: str + correct_answer: str, ) -> float: """ Evaluator that validates JSON structure and required fields. @@ -40,7 +40,7 @@ def evaluate( return 0.0 # Get required fields - required_fields = app_params.get('required_fields', '').split(',') + required_fields = app_params.get("required_fields", "").split(",") required_fields = [f.strip() for f in required_fields if f.strip()] if not required_fields: diff --git a/examples/python/evaluators/basic/length_check.py b/examples/python/evaluators/basic/length_check.py index e86e3177df..01cd8cf947 100644 --- a/examples/python/evaluators/basic/length_check.py +++ b/examples/python/evaluators/basic/length_check.py @@ -13,7 +13,7 @@ def evaluate( app_params: Dict[str, str], inputs: Dict[str, str], output: Union[str, Dict[str, Any]], - correct_answer: str + correct_answer: str, ) -> float: """ Evaluator that checks if output length is within expected range. 
@@ -36,8 +36,8 @@ def evaluate( output_str = str(output) # Get length constraints from app_params - min_length = int(app_params.get('min_length', 0)) - max_length = int(app_params.get('max_length', 10000)) + min_length = int(app_params.get("min_length", 0)) + max_length = int(app_params.get("max_length", 10000)) output_length = len(output_str) diff --git a/examples/python/evaluators/basic/string_contains.py b/examples/python/evaluators/basic/string_contains.py index b807c1e2da..aca84c3c82 100644 --- a/examples/python/evaluators/basic/string_contains.py +++ b/examples/python/evaluators/basic/string_contains.py @@ -13,7 +13,7 @@ def evaluate( app_params: Dict[str, str], inputs: Dict[str, str], output: Union[str, Dict[str, Any]], - correct_answer: str + correct_answer: str, ) -> float: """ Evaluator that checks if the output contains expected keywords. diff --git a/examples/python/evaluators/basic/word_count.py b/examples/python/evaluators/basic/word_count.py index 8dd4446656..346b88cfa7 100644 --- a/examples/python/evaluators/basic/word_count.py +++ b/examples/python/evaluators/basic/word_count.py @@ -13,7 +13,7 @@ def evaluate( app_params: Dict[str, str], inputs: Dict[str, str], output: Union[str, Dict[str, Any]], - correct_answer: str + correct_answer: str, ) -> float: """ Evaluator that checks word count is within target range. @@ -31,7 +31,7 @@ def evaluate( """ # Convert output to string if isinstance(output, dict): - output_str = str(output.get('text', json.dumps(output))) + output_str = str(output.get("text", json.dumps(output))) else: output_str = str(output) @@ -40,14 +40,14 @@ def evaluate( word_count = len(words) # Check target or range - if 'target_words' in app_params: - target = int(app_params['target_words']) + if "target_words" in app_params: + target = int(app_params["target_words"]) # Allow 10% variance min_words = int(target * 0.9) max_words = int(target * 1.1) else: - min_words = int(app_params.get('min_words', 0)) - max_words = int(app_params.get('max_words', 10000)) + min_words = int(app_params.get("min_words", 0)) + max_words = int(app_params.get("max_words", 10000)) if min_words <= word_count <= max_words: return 1.0 diff --git a/examples/python/evaluators/numpy/dependency_check.py b/examples/python/evaluators/numpy/dependency_check.py index 70bcdcf30e..46d8bfebcb 100644 --- a/examples/python/evaluators/numpy/dependency_check.py +++ b/examples/python/evaluators/numpy/dependency_check.py @@ -12,7 +12,7 @@ def evaluate( app_params: Dict[str, str], inputs: Dict[str, str], output: Union[str, Dict[str, Any]], - correct_answer: str + correct_answer: str, ) -> float: """ Tests if NumPy is available in the environment. diff --git a/examples/python/evaluators/numpy/exact_match.py b/examples/python/evaluators/numpy/exact_match.py index 254a6f0884..058bd6d9ec 100644 --- a/examples/python/evaluators/numpy/exact_match.py +++ b/examples/python/evaluators/numpy/exact_match.py @@ -13,7 +13,7 @@ def evaluate( app_params: Dict[str, str], inputs: Dict[str, str], output: Union[str, Dict[str, Any]], - correct_answer: str + correct_answer: str, ) -> float: """ Tests NumPy functionality by counting characters in strings. 
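For reference, all of the evaluator examples above share the same entry point. A minimal sketch of that interface, taking only the signature from the files in this diff (the exact-match logic below is illustrative, not part of the repository):

```python
from typing import Any, Dict, Union


def evaluate(
    app_params: Dict[str, str],
    inputs: Dict[str, str],
    output: Union[str, Dict[str, Any]],
    correct_answer: str,
) -> float:
    """Illustrative evaluator: case-insensitive exact match against correct_answer."""
    output_str = str(output).lower().strip()
    return 1.0 if output_str == str(correct_answer).lower().strip() else 0.0
```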
diff --git a/examples/test_daytona_scripts.py b/examples/test_daytona_scripts.py index 5b8d5b8568..aa7da58fb0 100644 --- a/examples/test_daytona_scripts.py +++ b/examples/test_daytona_scripts.py @@ -33,7 +33,13 @@ def _load_files() -> dict[str, list[Path]]: for runtime, folder in BASIC_DIRS.items(): if not folder.exists(): continue - pattern = "*.py" if runtime == "python" else "*.js" if runtime == "javascript" else "*.ts" + pattern = ( + "*.py" + if runtime == "python" + else "*.js" + if runtime == "javascript" + else "*.ts" + ) candidates = sorted(folder.glob(pattern)) files[runtime] = [ path @@ -57,9 +63,7 @@ def _wrap_js(code: str) -> str: "const app_params = params.app_params;\n" "const inputs = params.inputs;\n" "const output = params.output;\n" - "const correct_answer = params.correct_answer;\n" - + code - + "\n" + "const correct_answer = params.correct_answer;\n" + code + "\n" "let result = evaluate(app_params, inputs, output, correct_answer);\n" "result = Number(result);\n" "if (!Number.isFinite(result)) { result = 0.0; }\n" @@ -82,9 +86,7 @@ def _wrap_python(code: str) -> str: "app_params = params['app_params']\n" "inputs = params['inputs']\n" "output = params['output']\n" - "correct_answer = params['correct_answer']\n" - + code - + "\n" + "correct_answer = params['correct_answer']\n" + code + "\n" "result = evaluate(app_params, inputs, output, correct_answer)\n" "if isinstance(result, (float, int, str)):\n" " try:\n" diff --git a/hosting/docker-compose/ee/docker-compose.dev.yml b/hosting/docker-compose/ee/docker-compose.dev.yml index 9bb435c188..9382983c04 100644 --- a/hosting/docker-compose/ee/docker-compose.dev.yml +++ b/hosting/docker-compose/ee/docker-compose.dev.yml @@ -1,78 +1,58 @@ name: agenta-ee-dev services: - # Build services - keep for caching but not strictly needed .api: + # === IMAGE ================================================ # image: agenta-ee-dev-api:latest build: context: ../../../api dockerfile: ee/docker/Dockerfile.dev + # === EXECUTION ============================================ # command: ["true"] .web: + # === IMAGE ================================================ # image: agenta-ee-dev-web:latest build: context: ../../../web dockerfile: ee/docker/Dockerfile.dev + # === EXECUTION ============================================ # command: ["true"] web: + # === ACTIVATION =========================================== # profiles: - with-web - + # === IMAGE ================================================ # image: agenta-ee-dev-web:latest - + # === EXECUTION ============================================ # + command: sh -c "pnpm dev-ee" + # === STORAGE ============================================== # volumes: - ../../../web/ee/src:/app/ee/src - ../../../web/ee/public:/app/ee/public - ../../../web/oss/src:/app/oss/src - ../../../web/oss/public:/app/oss/public - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.ee.dev} - - ports: - - "3000:3000" - - restart: always - + environment: + DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge} + # === NETWORK ============================================== # networks: - agenta-network - + # === LABELS =============================================== # labels: - "traefik.http.routers.agenta-web.rule=PathPrefix(`/`)" - "traefik.http.routers.agenta-web.entrypoints=web" - "traefik.http.services.agenta-web.loadbalancer.server.port=3000" - - command: sh -c "pnpm dev-ee" + # === LIFECYCLE ============================================ # + restart: always api: + # === IMAGE 
================================================ # image: agenta-ee-dev-api:latest - - volumes: - - ../../../api:/app - - ../../../sdk:/sdk - - env_file: - - ${ENV_FILE:-./.env.ee.dev} - - labels: - - "traefik.http.routers.api.rule=PathPrefix(`/api/`)" - - "traefik.http.routers.api.entrypoints=web" - - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api" - - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true" - - "traefik.http.routers.api.middlewares=api-strip" - - "traefik.http.services.api.loadbalancer.server.port=8000" - - "traefik.http.routers.api.service=api" - - restart: always - - networks: - - agenta-network - - extra_hosts: - - "host.docker.internal:host-gateway" - + # === EXECUTION ============================================ # command: [ "uvicorn", @@ -85,7 +65,23 @@ services: "--root-path", "/api", ] - + # === STORAGE ============================================== # + volumes: + - ../../../api/ee:/app/ee + - ../../../api/oss:/app/oss + - ../../../api/entrypoints:/app/entrypoints + - ../../../sdk:/sdk + # === CONFIGURATION ======================================== # + env_file: + - ${ENV_FILE:-./.env.ee.dev} + environment: + DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge} + # === NETWORK ============================================== # + networks: + - agenta-network + extra_hosts: + - "host.docker.internal:host-gateway" + # === ORCHESTRATION ======================================== # depends_on: postgres: condition: service_healthy @@ -97,125 +93,173 @@ services: condition: service_healthy redis-durable: condition: service_healthy + # === LABELS =============================================== # + labels: + - "traefik.http.routers.api.rule=PathPrefix(`/api/`)" + - "traefik.http.routers.api.entrypoints=web" + - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api" + - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true" + - "traefik.http.routers.api.middlewares=api-strip" + - "traefik.http.services.api.loadbalancer.server.port=8000" + - "traefik.http.routers.api.service=api" + # === LIFECYCLE ============================================ # + restart: always worker-evaluations: + # === IMAGE ================================================ # image: agenta-ee-dev-api:latest - + # === EXECUTION ============================================ # + command: > + watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive -- + python -m entrypoints.worker_evaluations + # === STORAGE ============================================== # volumes: - - ../../../api:/app + - ../../../api/ee:/app/ee + - ../../../api/oss:/app/oss + - ../../../api/entrypoints:/app/entrypoints - ../../../sdk:/sdk - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.ee.dev} - + environment: + DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge} + # === NETWORK ============================================== # + networks: + - agenta-network + extra_hosts: + - "host.docker.internal:host-gateway" + # === ORCHESTRATION ======================================== # depends_on: postgres: condition: service_healthy + alembic: + condition: service_completed_successfully redis-volatile: condition: service_healthy redis-durable: condition: service_healthy - - extra_hosts: - - "host.docker.internal:host-gateway" - + # === LIFECYCLE ============================================ # restart: always - networks: - - agenta-network - - command: > - watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive -- - python -m 
entrypoints.worker_evaluations - worker-tracing: + # === IMAGE ================================================ # image: agenta-ee-dev-api:latest - + # === EXECUTION ============================================ # + command: > + watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive -- + python -m entrypoints.worker_tracing + # === STORAGE ============================================== # volumes: - - ../../../api:/app + - ../../../api/ee:/app/ee + - ../../../api/oss:/app/oss + - ../../../api/entrypoints:/app/entrypoints - ../../../sdk:/sdk - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.ee.dev} - + environment: + DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge} + # === NETWORK ============================================== # + networks: + - agenta-network + extra_hosts: + - "host.docker.internal:host-gateway" + # === ORCHESTRATION ======================================== # depends_on: postgres: condition: service_healthy + alembic: + condition: service_completed_successfully redis-volatile: condition: service_healthy redis-durable: condition: service_healthy - - extra_hosts: - - "host.docker.internal:host-gateway" - + # === LIFECYCLE ============================================ # restart: always - networks: - - agenta-network - - command: > - watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive -- - python -m entrypoints.worker_tracing - cron: + # === IMAGE ================================================ # image: agenta-ee-dev-api:latest - + # === EXECUTION ============================================ # + command: cron -f + # === STORAGE ============================================== # volumes: - ../../../api/ee/src/crons/meters.sh:/meters.sh - ../../../api/oss/src/crons/queries.sh:/queries.sh - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.ee.dev} - + environment: + DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge} + # === NETWORK ============================================== # + networks: + - agenta-network + extra_hosts: + - "host.docker.internal:host-gateway" + # === ORCHESTRATION ======================================== # depends_on: - postgres - api - - extra_hosts: - - "host.docker.internal:host-gateway" - + # === LIFECYCLE ============================================ # restart: always - - networks: - - agenta-network - - command: cron -f alembic: + # === IMAGE ================================================ # image: agenta-ee-dev-api:latest - + # === EXECUTION ============================================ # + command: sh -c "python -m ee.databases.postgres.migrations.runner" + # === STORAGE ============================================== # volumes: - - ../../../api:/app + - ../../../api/ee:/app/ee + - ../../../api/oss:/app/oss + - ../../../api/entrypoints:/app/entrypoints - ../../../sdk:/sdk - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.ee.dev} - + environment: + DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge} + # === NETWORK ============================================== # + networks: + - agenta-network + # === ORCHESTRATION ======================================== # depends_on: postgres: condition: service_healthy - - networks: - - agenta-network - - command: sh -c "python -m ee.databases.postgres.migrations.runner" completion: + # === IMAGE ================================================ # build: context: ../../../services/completion dockerfile: ee/docker/Dockerfile.dev - + # === EXECUTION 
============================================ # + command: + [ + "uvicorn", + "oss.src.main:app", + "--host", + "0.0.0.0", + "--port", + "80", + "--reload", + "--root-path", + "/services/completion", + ] + # === STORAGE ============================================== # volumes: - ../../../services/completion:/app - ../../../sdk:/sdk - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.ee.dev} - + environment: + DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge} + # === NETWORK ============================================== # + networks: + - agenta-network extra_hosts: - "host.docker.internal:host-gateway" - + # === LABELS =============================================== # labels: - "traefik.http.routers.completion.rule=PathPrefix(`/services/completion/`)" - "traefik.http.routers.completion.entrypoints=web" @@ -224,12 +268,15 @@ services: - "traefik.http.routers.completion.middlewares=completion-strip" - "traefik.http.services.completion.loadbalancer.server.port=80" - "traefik.http.routers.completion.service=completion" - + # === LIFECYCLE ============================================ # restart: always - networks: - - agenta-network - + chat: + # === IMAGE ================================================ # + build: + context: ../../../services/chat + dockerfile: ee/docker/Dockerfile.dev + # === EXECUTION ============================================ # command: [ "uvicorn", @@ -240,24 +287,23 @@ services: "80", "--reload", "--root-path", - "/services/completion", + "/services/chat", ] - - chat: - build: - context: ../../../services/chat - dockerfile: ee/docker/Dockerfile.dev - + # === STORAGE ============================================== # volumes: - ../../../services/chat:/app - ../../../sdk:/sdk - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.ee.dev} - + environment: + DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge} + # === NETWORK ============================================== # + networks: + - agenta-network extra_hosts: - "host.docker.internal:host-gateway" - + # === LABELS =============================================== # labels: - "traefik.http.routers.chat.rule=PathPrefix(`/services/chat/`)" - "traefik.http.routers.chat.entrypoints=web" @@ -266,46 +312,29 @@ services: - "traefik.http.routers.chat.middlewares=chat-strip" - "traefik.http.services.chat.loadbalancer.server.port=80" - "traefik.http.routers.chat.service=chat" - + # === LIFECYCLE ============================================ # restart: always - networks: - - agenta-network - - command: - [ - "uvicorn", - "oss.src.main:app", - "--host", - "0.0.0.0", - "--port", - "80", - "--reload", - "--root-path", - "/services/chat", - ] - postgres: + # === IMAGE ================================================ # image: postgres:16 - + # === STORAGE ============================================== # + volumes: + - postgres-data:/var/lib/postgresql/data/ + - ../../../api/ee/databases/postgres/init-db-ee.sql:/docker-entrypoint-initdb.d/init-db.sql + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.ee.dev} environment: POSTGRES_USER: ${POSTGRES_USER:-username} POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password} - - ports: - - "5432:5432" - - restart: always - + # === NETWORK ============================================== # networks: - agenta-network - - volumes: - - postgres-data:/var/lib/postgresql/data/ - - ../../../api/ee/databases/postgres/init-db-ee.sql:/docker-entrypoint-initdb.d/init-db.sql - + ports: 
+ - "${POSTGRES_PORT:-5432}:5432" + # === LIFECYCLE ============================================ # + restart: always healthcheck: test: ["CMD-SHELL", "pg_isready -U username -d agenta_ee_core"] interval: 10s @@ -313,8 +342,9 @@ services: retries: 5 redis-volatile: + # === IMAGE ================================================ # image: redis:8 - + # === EXECUTION ============================================ # command: > redis-server --appendonly no @@ -322,18 +352,14 @@ services: --maxmemory 512mb --maxmemory-policy volatile-lru --port 6379 - - ports: - - "6379:6379" - - networks: - - agenta-network - + # === STORAGE ============================================== # volumes: - redis-volatile-data:/data - + # === NETWORK ============================================== # + networks: + - agenta-network + # === LIFECYCLE ============================================ # restart: always - healthcheck: test: ["CMD", "redis-cli", "-p", "6379", "ping"] interval: 10s @@ -342,8 +368,9 @@ services: start_period: 5s redis-durable: + # === IMAGE ================================================ # image: redis:8 - + # === EXECUTION ============================================ # command: > redis-server --appendonly yes @@ -352,18 +379,14 @@ services: --maxmemory 512mb --maxmemory-policy noeviction --port 6381 - - ports: - - "6381:6381" - - networks: - - agenta-network - + # === STORAGE ============================================== # volumes: - redis-durable-data:/data - + # === NETWORK ============================================== # + networks: + - agenta-network + # === LIFECYCLE ============================================ # restart: always - healthcheck: test: ["CMD", "redis-cli", "-p", "6381", "ping"] interval: 10s @@ -372,28 +395,28 @@ services: start_period: 5s traefik: + # === IMAGE ================================================ # image: traefik:2 - - command: + # === EXECUTION ============================================ # + command: - --api.dashboard=true - --api.insecure=true - --providers.docker + - --providers.docker.constraints=Label(`com.docker.compose.project`,`${COMPOSE_PROJECT_NAME:-agenta-ee-dev}`) - --entrypoints.web.address=:80 - --ping=true - - --accesslog=true # Enable access logs for debugging - - ports: - - "80:80" # ALB forwards to this port - - "8080:8080" # Dashboard (optional, can be internal only) - + - --accesslog=true + # === STORAGE ============================================== # volumes: - /var/run/docker.sock:/var/run/docker.sock - + # === NETWORK ============================================== # networks: - agenta-network - + ports: + - "${TRAEFIK_PORT:-80}:80" + - "${TRAEFIK_UI_PORT:-8080}:8080" + # === LIFECYCLE ============================================ # restart: always - healthcheck: test: ["CMD", "traefik", "healthcheck", "--ping"] interval: 10s @@ -402,28 +425,24 @@ services: start_period: 10s supertokens: + # === IMAGE ================================================ # image: registry.supertokens.io/supertokens/supertokens-postgresql - + # === CONFIGURATION ======================================== # + env_file: + - ${ENV_FILE:-./.env.ee.dev} + environment: + POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS:-postgresql://username:password@postgres:5432/agenta_ee_supertokens} + # === NETWORK ============================================== # + networks: + - agenta-network + # === ORCHESTRATION ======================================== # depends_on: postgres: condition: service_healthy alembic: condition: service_completed_successfully - - ports: - - "3567:3567" - 
-    env_file:
-      - ${ENV_FILE:-./.env.ee.dev}
-
-    environment:
-      POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS}
-
+    # === LIFECYCLE ============================================ #
     restart: always
-
-    networks:
-      - agenta-network
-
     healthcheck:
       test: >
         bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"'
@@ -432,23 +451,25 @@
       retries: 5

   stripe:
+    # === IMAGE ================================================ #
     image: stripe/stripe-cli:latest
-
-    command: [
-        listen,
-        --forward-to,
-        http://api:8000/billing/stripe/events/,
-        --events,
-        "customer.subscription.created,customer.subscription.deleted,invoice.updated,invoice.upcoming,invoice.payment_failed,invoice.payment_succeeded"
-    ]
-
+    # === EXECUTION ============================================ #
+    command:
+      [
+        listen,
+        --forward-to,
+        http://api:8000/billing/stripe/events/,
+        --events,
+        "customer.subscription.created,customer.subscription.deleted,invoice.updated,invoice.upcoming,invoice.payment_failed,invoice.payment_succeeded",
+      ]
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.ee.dev}
-
-    restart: always
-
+    # === NETWORK ============================================== #
     networks:
       - agenta-network
+    # === LIFECYCLE ============================================ #
+    restart: always

 networks:
   agenta-network:
diff --git a/hosting/docker-compose/ee/env.ee.dev.example b/hosting/docker-compose/ee/env.ee.dev.example
index c51bad5e19..77a442f622 100644
--- a/hosting/docker-compose/ee/env.ee.dev.example
+++ b/hosting/docker-compose/ee/env.ee.dev.example
@@ -1,93 +1,167 @@
-# First-party (required)
+# ============================================================================ #
+# License - https://agenta.ai/pricing
+# ============================================================================ #
 AGENTA_LICENSE=ee
-AGENTA_STAGE=dev
-AGENTA_PROVIDER=local
-AGENTA_WEB_URL=http://localhost
-AGENTA_API_URL=http://localhost/api
-AGENTA_SERVICES_URL=http://localhost/services
-AGENTA_AUTH_KEY=change-me
-AGENTA_CRYPT_KEY=change-me
-AGENTA_API_IMAGE_NAME=agenta-api
-AGENTA_API_IMAGE_TAG=latest
-AGENTA_WEB_IMAGE_NAME=agenta-web
-AGENTA_WEB_IMAGE_TAG=latest
-AGENTA_SERVICES_COMPLETION_IMAGE_NAME=agenta-completion
-AGENTA_SERVICES_COMPLETION_IMAGE_TAG=latest
-AGENTA_SERVICES_CHAT_IMAGE_NAME=agenta-chat
-AGENTA_SERVICES_CHAT_IMAGE_TAG=latest
-
-# First-party (registry & service)
-DOCKER_NETWORK_MODE=bridge
-POSTGRES_USERNAME=username
-POSTGRES_PASSWORD=password
-
-# First-party (optional)
-AGENTA_AUTO_MIGRATIONS=true
-AGENTA_PRICING=
-AGENTA_DEMOS=
-AGENTA_RUNTIME_PREFIX=
-AGENTA_API_INTERNAL_URL=
-AGENTA_LITELLM_MOCK=
-POSTGRES_USERNAME_ADMIN=
-POSTGRES_PASSWORD_ADMIN=
-AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true
-AGENTA_OTLP_MAX_BATCH_BYTES=10485760
-
-# Third-party (required)
-TRAEFIK_DOMAIN=
-TRAEFIK_PROTOCOL=
-TRAEFIK_PORT=
-
-# Redis: set REDIS_URI for a single instance, or override with the split URIs below
-REDIS_URI=
-REDIS_URI_VOLATILE=
-REDIS_URI_DURABLE=
-
-POSTGRES_URI_SUPERTOKENS="postgresql://username:password@postgres:5432/agenta_ee_supertokens"
-POSTGRES_URI_CORE="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_core"
-POSTGRES_URI_TRACING="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_tracing"
-
-ALEMBIC_CFG_PATH_CORE=/app/ee/databases/postgres/migrations/core/alembic.ini
-ALEMBIC_CFG_PATH_TRACING=/app/ee/databases/postgres/migrations/tracing/alembic.ini
-
-SUPERTOKENS_CONNECTION_URI=http://supertokens:3567
-
-# Third-party (optional)
-AWS_ECR_URL=
-AWS_RDS_SECRET=
+# ============================================================================ #
+# Secrets - REPLACE ME IN PRODUCTION!
+# ============================================================================ #
+AGENTA_AUTH_KEY=replace-me
+AGENTA_CRYPT_KEY=replace-me
+
+# ============================================================================ #
+# Endpoints
+# ============================================================================ #
+# AGENTA_WEB_URL=http://localhost
+# AGENTA_API_URL=http://localhost/api
+# AGENTA_SERVICES_URL=http://localhost/services
+# AGENTA_API_INTERNAL_URL=
+
+# ============================================================================ #
+# Images
+# ============================================================================ #
+# AGENTA_WEB_IMAGE_NAME=agenta-web
+# AGENTA_WEB_IMAGE_TAG=latest
+# AGENTA_API_IMAGE_NAME=agenta-api
+# AGENTA_API_IMAGE_TAG=latest
+# AGENTA_COMPLETION_IMAGE_NAME=agenta-completion
+# AGENTA_COMPLETION_IMAGE_TAG=latest
+# AGENTA_CHAT_IMAGE_NAME=agenta-chat
+# AGENTA_CHAT_IMAGE_TAG=latest
+
+# ============================================================================ #
+# OTLP
+# ============================================================================ #
+# AGENTA_OTLP_MAX_BATCH_BYTES=10485760
+
+# ============================================================================ #
+# Proxy - LLM Providers
+# ============================================================================ #
+# OPENAI_API_KEY=
+# ANTHROPIC_API_KEY=
+# COHERE_API_KEY=
+# GROQ_API_KEY=
+# GEMINI_API_KEY=
+# MISTRAL_API_KEY=
+# ALEPHALPHA_API_KEY=
+# ANYSCALE_API_KEY=
+# DEEPINFRA_API_KEY=
+# OPENROUTER_API_KEY=
+# PERPLEXITYAI_API_KEY=
+# TOGETHERAI_API_KEY=
+
+# ============================================================================ #
+# Docker - Compose
+# ============================================================================ #
+# COMPOSE_PROJECT_NAME=agenta-ee-dev
+
+# ============================================================================ #
+# Network - Traefik
+# ============================================================================ #
+# TRAEFIK_PROTOCOL=http
+# TRAEFIK_DOMAIN=localhost
+# TRAEFIK_PORT=80
+# TRAEFIK_SSL_DIR=
+
+# ============================================================================ #
+# Network - Nginx
+# ============================================================================ #
+# NGINX_PORT=80
+
+# ============================================================================ #
+# Databases - Postgres
+# ============================================================================ #
+# POSTGRES_USER=username
+# POSTGRES_PASSWORD=password
+
+# POSTGRES_PORT=5432
+# POSTGRES_URI_CORE=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_core
+# POSTGRES_URI_TRACING=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_tracing
+# POSTGRES_URI_SUPERTOKENS=postgresql://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_supertokens
+
+# ============================================================================ #
+# Databases - Alembic (migrations)
+# ============================================================================ #
+# ALEMBIC_AUTO_MIGRATIONS=true
+# ALEMBIC_CFG_PATH_CORE=/app/{LICENSE}/databases/postgres/migrations/core/alembic.ini
+# ALEMBIC_CFG_PATH_TRACING=/app/{LICENSE}/databases/postgres/migrations/tracing/alembic.ini
+
+# ============================================================================ #
+# Databases - Redis
+# ============================================================================ #
+# REDIS_URI_VOLATILE=redis://localhost:6379/0
+# REDIS_URI_DURABLE=redis://localhost:6381/0
+
+# ============================================================================ #
+# Authentication - SuperTokens
+# ============================================================================ #
+# SUPERTOKENS_EMAIL_DISABLED=false
+
+# ============================================================================ #
+# Authentication - Email providers
+# ============================================================================ #
+# SENDGRID_API_KEY=
+# SENDGRID_FROM_ADDRESS=
+
+# ============================================================================ #
+# Authentication - OIDC providers
+# ============================================================================ #
+# GOOGLE_OAUTH_CLIENT_ID=
+# GOOGLE_OAUTH_CLIENT_SECRET=
+
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_ID=
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET=
+# GOOGLE_WORKSPACES_HD=
+
+# APPLE_OAUTH_CLIENT_ID=
+# APPLE_OAUTH_CLIENT_SECRET=
+# APPLE_KEY_ID=
+# APPLE_TEAM_ID=
+# APPLE_PRIVATE_KEY=
+
+# DISCORD_OAUTH_CLIENT_ID=
+# DISCORD_OAUTH_CLIENT_SECRET=
+
+# FACEBOOK_OAUTH_CLIENT_ID=
+# FACEBOOK_OAUTH_CLIENT_SECRET=
+
+# GITHUB_OAUTH_CLIENT_ID=
+# GITHUB_OAUTH_CLIENT_SECRET=
+
+# GITLAB_OAUTH_CLIENT_ID=
+# GITLAB_OAUTH_CLIENT_SECRET=
+# GITLAB_BASE_URL=
+
+# BITBUCKET_OAUTH_CLIENT_ID=
+# BITBUCKET_OAUTH_CLIENT_SECRET=
+
+# LINKEDIN_OAUTH_CLIENT_ID=
+# LINKEDIN_OAUTH_CLIENT_SECRET=
+
+# OKTA_OAUTH_CLIENT_ID=
+# OKTA_OAUTH_CLIENT_SECRET=
+# OKTA_DOMAIN=
+
+# AZURE_AD_OAUTH_CLIENT_ID=
+# AZURE_AD_OAUTH_CLIENT_SECRET=
+# AZURE_AD_DIRECTORY_ID=
+
+# BOXY_SAML_OAUTH_CLIENT_ID=
+# BOXY_SAML_OAUTH_CLIENT_SECRET=
+# BOXY_SAML_URL=
+
+# TWITTER_OAUTH_CLIENT_ID=
+# TWITTER_OAUTH_CLIENT_SECRET=
+
+# ============================================================================ #
+# Billing - Stripe [ee-only]
+# ============================================================================ #
+# STRIPE_API_KEY=
+# STRIPE_WEBHOOK_SECRET=
+# STRIPE_WEBHOOK_TARGET=
+# STRIPE_PRICING=
+
+# ============================================================================ #
+# Analytics - PostHog
+# ============================================================================ #
 POSTHOG_API_KEY=phc_3urGRy5TL1HhaHnRYL0JSHxJxigRVackhphHtozUmdp
-
-GITHUB_OAUTH_CLIENT_ID=
-GITHUB_OAUTH_CLIENT_SECRET=
-GOOGLE_OAUTH_CLIENT_ID=
-GOOGLE_OAUTH_CLIENT_SECRET=
-
-SUPERTOKENS_API_KEY=replace-me
-
-NEW_RELIC_LICENSE_KEY=
-NRIA_LICENSE_KEY=
-
-LOOPS_API_KEY=
-
-SENDGRID_API_KEY=
-
-CRISP_WEBSITE_ID=
-
-STRIPE_API_KEY=
-STRIPE_WEBHOOK_SECRET=
-STRIPE_WEBHOOK_TARGET=
-
-# Third-party - LLM (optional)
-ALEPHALPHA_API_KEY=
-ANTHROPIC_API_KEY=
-ANYSCALE_API_KEY=
-COHERE_API_KEY=
-DEEPINFRA_API_KEY=
-GEMINI_API_KEY=
-GROQ_API_KEY=
-MISTRAL_API_KEY=
-OPENAI_API_KEY=
-OPENROUTER_API_KEY=
-PERPLEXITYAI_API_KEY=
-TOGETHERAI_API_KEY=
diff --git a/hosting/docker-compose/ee/env.ee.gh.example b/hosting/docker-compose/ee/env.ee.gh.example
index 7485edba05..77a442f622 100644
--- a/hosting/docker-compose/ee/env.ee.gh.example
+++ b/hosting/docker-compose/ee/env.ee.gh.example
@@ -1,86 +1,167 @@
-# First-party (required)
+# ============================================================================ #
+# License - https://agenta.ai/pricing
+# ============================================================================ #
 AGENTA_LICENSE=ee
-AGENTA_STAGE=dev
-AGENTA_PROVIDER=local
-AGENTA_API_URL=http://localhost/api
-AGENTA_WEB_URL=http://localhost
-AGENTA_SERVICES_URL=http://localhost/services
-AGENTA_AUTH_KEY=change-me
-AGENTA_CRYPT_KEY=change-me
-AGENTA_API_IMAGE_NAME=agenta-api
-AGENTA_API_IMAGE_TAG=latest
-AGENTA_WEB_IMAGE_NAME=agenta-web
-AGENTA_WEB_IMAGE_TAG=latest
-AGENTA_SERVICES_COMPLETION_IMAGE_NAME=agenta-completion
-AGENTA_SERVICES_COMPLETION_IMAGE_TAG=latest
-AGENTA_SERVICES_CHAT_IMAGE_NAME=agenta-chat
-AGENTA_SERVICES_CHAT_IMAGE_TAG=latest
-
-# First-party (registry & service)
-DOCKER_NETWORK_MODE=bridge
-POSTGRES_PASSWORD=password
-POSTGRES_USERNAME=username
-
-# First-party (optional)
-AGENTA_AUTO_MIGRATIONS=true
-AGENTA_PRICING=
-AGENTA_DEMOS=
-AGENTA_RUNTIME_PREFIX=
-AGENTA_API_INTERNAL_URL=
-AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true
-AGENTA_OTLP_MAX_BATCH_BYTES=10485760
-
-# Third-party (required)
-TRAEFIK_DOMAIN=
-TRAEFIK_PROTOCOL=
-TRAEFIK_PORT=
-
-# Redis: set REDIS_URI for a single instance, or override with the split URIs below
-REDIS_URI=
-REDIS_URI_VOLATILE=
-REDIS_URI_DURABLE=
-
-POSTGRES_URI_SUPERTOKENS="postgresql://username:password@postgres:5432/agenta_ee_supertokens"
-POSTGRES_URI_CORE="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_core"
-POSTGRES_URI_TRACING="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_tracing"
-
-ALEMBIC_CFG_PATH_CORE=/app/ee/databases/postgres/migrations/core/alembic.ini
-ALEMBIC_CFG_PATH_TRACING=/app/ee/databases/postgres/migrations/tracing/alembic.ini
-
-SUPERTOKENS_API_KEY=replace-me
-SUPERTOKENS_CONNECTION_URI=http://supertokens:3567
-
-# Third-party (optional)
-POSTHOG_API_KEY=phc_3urGRy5TL1HhaHnRYL0JSHxJxigRVackhphHtozUmdp
-
-GITHUB_OAUTH_CLIENT_ID=
-GITHUB_OAUTH_CLIENT_SECRET=
-
-GOOGLE_OAUTH_CLIENT_ID=
-GOOGLE_OAUTH_CLIENT_SECRET=
-
-NEW_RELIC_LICENSE_KEY=
-NRIA_LICENSE_KEY=
-
-LOOPS_API_KEY=
-SENDGRID_API_KEY=
-
-CRISP_WEBSITE_ID=
-
-STRIPE_API_KEY=
-STRIPE_WEBHOOK_SECRET=
-
-# Third-party - LLM (optional)
-ALEPHALPHA_API_KEY=
-ANTHROPIC_API_KEY=
-ANYSCALE_API_KEY=
-COHERE_API_KEY=
-DEEPINFRA_API_KEY=
-GEMINI_API_KEY=
-GROQ_API_KEY=
-MISTRAL_API_KEY=
-OPENAI_API_KEY=
-OPENROUTER_API_KEY=
-PERPLEXITYAI_API_KEY=
-TOGETHERAI_API_KEY=
+# ============================================================================ #
+# Secrets - REPLACE ME IN PRODUCTION!
+# ============================================================================ #
+AGENTA_AUTH_KEY=replace-me
+AGENTA_CRYPT_KEY=replace-me
+
+# ============================================================================ #
+# Endpoints
+# ============================================================================ #
+# AGENTA_WEB_URL=http://localhost
+# AGENTA_API_URL=http://localhost/api
+# AGENTA_SERVICES_URL=http://localhost/services
+# AGENTA_API_INTERNAL_URL=
+
+# ============================================================================ #
+# Images
+# ============================================================================ #
+# AGENTA_WEB_IMAGE_NAME=agenta-web
+# AGENTA_WEB_IMAGE_TAG=latest
+# AGENTA_API_IMAGE_NAME=agenta-api
+# AGENTA_API_IMAGE_TAG=latest
+# AGENTA_COMPLETION_IMAGE_NAME=agenta-completion
+# AGENTA_COMPLETION_IMAGE_TAG=latest
+# AGENTA_CHAT_IMAGE_NAME=agenta-chat
+# AGENTA_CHAT_IMAGE_TAG=latest
+
+# ============================================================================ #
+# OTLP
+# ============================================================================ #
+# AGENTA_OTLP_MAX_BATCH_BYTES=10485760
+
+# ============================================================================ #
+# Proxy - LLM Providers
+# ============================================================================ #
+# OPENAI_API_KEY=
+# ANTHROPIC_API_KEY=
+# COHERE_API_KEY=
+# GROQ_API_KEY=
+# GEMINI_API_KEY=
+# MISTRAL_API_KEY=
+# ALEPHALPHA_API_KEY=
+# ANYSCALE_API_KEY=
+# DEEPINFRA_API_KEY=
+# OPENROUTER_API_KEY=
+# PERPLEXITYAI_API_KEY=
+# TOGETHERAI_API_KEY=
+
+# ============================================================================ #
+# Docker - Compose
+# ============================================================================ #
+# COMPOSE_PROJECT_NAME=agenta-ee-dev
+
+# ============================================================================ #
+# Network - Traefik
+# ============================================================================ #
+# TRAEFIK_PROTOCOL=http
+# TRAEFIK_DOMAIN=localhost
+# TRAEFIK_PORT=80
+# TRAEFIK_SSL_DIR=
+
+# ============================================================================ #
+# Network - Nginx
+# ============================================================================ #
+# NGINX_PORT=80
+
+# ============================================================================ #
+# Databases - Postgres
+# ============================================================================ #
+# POSTGRES_USER=username
+# POSTGRES_PASSWORD=password
+
+# POSTGRES_PORT=5432
+# POSTGRES_URI_CORE=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_core
+# POSTGRES_URI_TRACING=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_tracing
+# POSTGRES_URI_SUPERTOKENS=postgresql://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_supertokens
+
+# ============================================================================ #
+# Databases - Alembic (migrations)
+# ============================================================================ #
+# ALEMBIC_AUTO_MIGRATIONS=true
+# ALEMBIC_CFG_PATH_CORE=/app/{LICENSE}/databases/postgres/migrations/core/alembic.ini
+# ALEMBIC_CFG_PATH_TRACING=/app/{LICENSE}/databases/postgres/migrations/tracing/alembic.ini
+
+# ============================================================================ #
+# Databases - Redis
+# ============================================================================ #
+# REDIS_URI_VOLATILE=redis://localhost:6379/0
+# REDIS_URI_DURABLE=redis://localhost:6381/0
+
+# ============================================================================ #
+# Authentication - SuperTokens
+# ============================================================================ #
+# SUPERTOKENS_EMAIL_DISABLED=false
+
+# ============================================================================ #
+# Authentication - Email providers
+# ============================================================================ #
+# SENDGRID_API_KEY=
+# SENDGRID_FROM_ADDRESS=
+
+# ============================================================================ #
+# Authentication - OIDC providers
+# ============================================================================ #
+# GOOGLE_OAUTH_CLIENT_ID=
+# GOOGLE_OAUTH_CLIENT_SECRET=
+
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_ID=
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET=
+# GOOGLE_WORKSPACES_HD=
+
+# APPLE_OAUTH_CLIENT_ID=
+# APPLE_OAUTH_CLIENT_SECRET=
+# APPLE_KEY_ID=
+# APPLE_TEAM_ID=
+# APPLE_PRIVATE_KEY=
+
+# DISCORD_OAUTH_CLIENT_ID=
+# DISCORD_OAUTH_CLIENT_SECRET=
+
+# FACEBOOK_OAUTH_CLIENT_ID=
+# FACEBOOK_OAUTH_CLIENT_SECRET=
+
+# GITHUB_OAUTH_CLIENT_ID=
+# GITHUB_OAUTH_CLIENT_SECRET=
+
+# GITLAB_OAUTH_CLIENT_ID=
+# GITLAB_OAUTH_CLIENT_SECRET=
+# GITLAB_BASE_URL=
+
+# BITBUCKET_OAUTH_CLIENT_ID=
+# BITBUCKET_OAUTH_CLIENT_SECRET=
+
+# LINKEDIN_OAUTH_CLIENT_ID=
+# LINKEDIN_OAUTH_CLIENT_SECRET=
+
+# OKTA_OAUTH_CLIENT_ID=
+# OKTA_OAUTH_CLIENT_SECRET=
+# OKTA_DOMAIN=
+
+# AZURE_AD_OAUTH_CLIENT_ID=
+# AZURE_AD_OAUTH_CLIENT_SECRET=
+# AZURE_AD_DIRECTORY_ID=
+
+# BOXY_SAML_OAUTH_CLIENT_ID=
+# BOXY_SAML_OAUTH_CLIENT_SECRET=
+# BOXY_SAML_URL=
+
+# TWITTER_OAUTH_CLIENT_ID=
+# TWITTER_OAUTH_CLIENT_SECRET=
+
+# ============================================================================ #
+# Billing - Stripe [ee-only]
+# ============================================================================ #
+# STRIPE_API_KEY=
+# STRIPE_WEBHOOK_SECRET=
+# STRIPE_WEBHOOK_TARGET=
+# STRIPE_PRICING=
+
+# ============================================================================ #
+# Analytics - PostHog
+# ============================================================================ #
 POSTHOG_API_KEY=phc_3urGRy5TL1HhaHnRYL0JSHxJxigRVackhphHtozUmdp
diff --git a/hosting/docker-compose/oss/docker-compose.dev.yml b/hosting/docker-compose/oss/docker-compose.dev.yml
index c880ff2cc2..97d45c3527 100644
--- a/hosting/docker-compose/oss/docker-compose.dev.yml
+++ b/hosting/docker-compose/oss/docker-compose.dev.yml
@@ -1,61 +1,58 @@
 name: agenta-oss-dev

 services:
-  web:
-    profiles:
-      - with-web
+  .api:
+    # === IMAGE ================================================ #
+    image: agenta-oss-dev-api:latest
+    build:
+      context: ../../../api
+      dockerfile: oss/docker/Dockerfile.dev
+    # === EXECUTION ============================================ #
+    command: ["true"]
+  .web:
+    # === IMAGE ================================================ #
+    image: agenta-oss-dev-web:latest
     build:
       context: ../../../web
       dockerfile: oss/docker/Dockerfile.dev
-
+    # === EXECUTION ============================================ #
+    command: ["true"]
+
+  web:
+    # === ACTIVATION =========================================== #
+    profiles:
+      - with-web
+    # === IMAGE ================================================ #
+    image: agenta-oss-dev-web:latest
+    # === EXECUTION ============================================ #
+    command: sh -c "pnpm dev-oss"
+    # === STORAGE ============================================== #
     volumes:
+      #
+      #
       - ../../../web/oss/src:/app/oss/src
       - ../../../web/oss/public:/app/oss/public
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
-
-    ports:
-      - "3000:3000"
+    environment:
+      DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+    # === NETWORK ============================================== #
     networks:
       - agenta-network
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.agenta-web.rule=PathPrefix(`/`)"
       - "traefik.http.routers.agenta-web.entrypoints=web"
       - "traefik.http.services.agenta-web.loadbalancer.server.port=3000"
-
-    command: sh -c "pnpm dev-oss"
-
+    # === LIFECYCLE ============================================ #
     restart: always

   api:
-    build:
-      context: ../../../api
-      dockerfile: oss/docker/Dockerfile.dev
-
-    volumes:
-      - ../../../api/entrypoints:/app/entrypoints
-      - ../../../api/oss:/app/oss
-      - ../../../sdk:/sdk
-
-    env_file:
-      - ${ENV_FILE:-./.env.oss.dev}
-
-    labels:
-      - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
-      - "traefik.http.routers.api.entrypoints=web"
-      - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
-      - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
-      - "traefik.http.routers.api.middlewares=api-strip"
-      - "traefik.http.services.api.loadbalancer.server.port=8000"
-      - "traefik.http.routers.api.service=api"
-
-    networks:
-      - agenta-network
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
+    # === IMAGE ================================================ #
+    image: agenta-oss-dev-api:latest
+    # === EXECUTION ============================================ #
     command:
       [
        "uvicorn",
@@ -68,143 +65,201 @@ services:
         "--root-path",
         "/api",
       ]
-
+    # === STORAGE ============================================== #
+    volumes:
+      #
+      - ../../../api/oss:/app/oss
+      - ../../../api/entrypoints:/app/entrypoints
+      - ../../../sdk:/sdk
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.dev}
+    environment:
+      DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
       alembic:
         condition: service_completed_successfully
+      supertokens:
+        condition: service_healthy
       redis-volatile:
         condition: service_healthy
       redis-durable:
         condition: service_healthy
+    # === LABELS =============================================== #
+    labels:
+      - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
+      - "traefik.http.routers.api.entrypoints=web"
+      - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
+      - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
+      - "traefik.http.routers.api.middlewares=api-strip"
+      - "traefik.http.services.api.loadbalancer.server.port=8000"
+      - "traefik.http.routers.api.service=api"
+    # === LIFECYCLE ============================================ #
     restart: always

   worker-evaluations:
-    build:
-      context: ../../../api
-      dockerfile: oss/docker/Dockerfile.dev
-
+    # === IMAGE ================================================ #
+    image: agenta-oss-dev-api:latest
+    # === EXECUTION ============================================ #
+    command: >
+      watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
+      python -m entrypoints.worker_evaluations
+    # === STORAGE ============================================== #
     volumes:
-      - ../../../api/entrypoints:/app/entrypoints
+      #
       - ../../../api/oss:/app/oss
+      - ../../../api/entrypoints:/app/entrypoints
       - ../../../sdk:/sdk
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
-
+    environment:
+      DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
    depends_on:
       postgres:
         condition: service_healthy
+      alembic:
+        condition: service_completed_successfully
       redis-volatile:
         condition: service_healthy
       redis-durable:
         condition: service_healthy
-
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    networks:
-      - agenta-network
+    # === LIFECYCLE ============================================ #
     restart: always

+  worker-tracing:
+    # === IMAGE ================================================ #
+    image: agenta-oss-dev-api:latest
+    # === EXECUTION ============================================ #
     command: >
       watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
-      python -m entrypoints.worker_evaluations
-
-  worker-tracing:
-    build:
-      context: ../../../api
-      dockerfile: oss/docker/Dockerfile.dev
-
+      python -m entrypoints.worker_tracing
+    # === STORAGE ============================================== #
     volumes:
-      - ../../../api/entrypoints:/app/entrypoints
+      #
       - ../../../api/oss:/app/oss
+      - ../../../api/entrypoints:/app/entrypoints
       - ../../../sdk:/sdk
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
-
+    environment:
+      DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
+      alembic:
+        condition: service_completed_successfully
       redis-volatile:
         condition: service_healthy
       redis-durable:
         condition: service_healthy
-
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    networks:
-      - agenta-network
+    # === LIFECYCLE ============================================ #
     restart: always
-    command: >
-      watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
-      python -m entrypoints.worker_tracing
-

   cron:
-    build:
-      context: ../../../api
-      dockerfile: oss/docker/Dockerfile.dev
-
+    # === IMAGE ================================================ #
+    image: agenta-oss-dev-api:latest
+    # === EXECUTION ============================================ #
+    command: cron -f
+    # === STORAGE ============================================== #
     volumes:
       #
       - ../../../api/oss/src/crons/queries.sh:/queries.sh
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
-
+    environment:
+      DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       - postgres
       - api
-
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
+    # === LIFECYCLE ============================================ #
     restart: always
-
-    networks:
-      - agenta-network
-
-    command: cron -f

   alembic:
-    build:
-      context: ../../../api
-      dockerfile: oss/docker/Dockerfile.dev
-
+    # === IMAGE ================================================ #
+    image: agenta-oss-dev-api:latest
+    # === EXECUTION ============================================ #
+    command: sh -c "python -m oss.databases.postgres.migrations.runner"
+    # === STORAGE ============================================== #
     volumes:
-      - ../../../api/routes.py:/app/routes.py
+      #
       - ../../../api/oss:/app/oss
+      - ../../../api/entrypoints:/app/entrypoints
       - ../../../sdk:/sdk
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
-
+    environment:
+      DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-network
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
-    networks:
-      - agenta-network
-
-    command: sh -c "python -m oss.databases.postgres.migrations.runner"

   completion:
+    # === IMAGE ================================================ #
     build:
       context: ../../../services/completion
       dockerfile: oss/docker/Dockerfile.dev
-
+    # === EXECUTION ============================================ #
+    command:
+      [
+        "uvicorn",
+        "oss.src.main:app",
+        "--host",
+        "0.0.0.0",
+        "--port",
+        "80",
+        "--reload",
+        "--root-path",
+        "/services/completion",
+      ]
+    # === STORAGE ============================================== #
     volumes:
       - ../../../services/completion:/app
       - ../../../sdk:/sdk
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
-
+    environment:
+      DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+    # === NETWORK ============================================== #
     networks:
       - agenta-network
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.completion.rule=PathPrefix(`/services/completion/`)"
       - "traefik.http.routers.completion.entrypoints=web"
@@ -213,7 +268,15 @@ services:
       - "traefik.http.routers.completion.middlewares=completion-strip"
       - "traefik.http.services.completion.loadbalancer.server.port=80"
       - "traefik.http.routers.completion.service=completion"
+    # === LIFECYCLE ============================================ #
+    restart: always

+  chat:
+    # === IMAGE ================================================ #
+    build:
+      context: ../../../services/chat
+      dockerfile: oss/docker/Dockerfile.dev
+    # === EXECUTION ============================================ #
     command:
       [
         "uvicorn",
@@ -224,27 +287,23 @@ services:
         "80",
         "--reload",
         "--root-path",
-        "/services/completion",
+        "/services/chat",
       ]
-
-    restart: always
-
-  chat:
-    build:
-      context: ../../../services/chat
-      dockerfile: oss/docker/Dockerfile.dev
-
+    # === STORAGE ============================================== #
     volumes:
       - ../../../services/chat:/app
       - ../../../sdk:/sdk
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
-
+    environment:
+      DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+    # === NETWORK ============================================== #
     networks:
       - agenta-network
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.chat.rule=PathPrefix(`/services/chat/`)"
       - "traefik.http.routers.chat.entrypoints=web"
@@ -253,38 +312,29 @@ services:
       - "traefik.http.routers.chat.middlewares=chat-strip"
       - "traefik.http.services.chat.loadbalancer.server.port=80"
       - "traefik.http.routers.chat.service=chat"
-
-    command:
-      [
-        "uvicorn",
-        "oss.src.main:app",
-        "--host",
-        "0.0.0.0",
-        "--port",
-        "80",
-        "--reload",
-        "--root-path",
-        "/services/chat",
-      ]
-
+    # === LIFECYCLE ============================================ #
     restart: always

   postgres:
+    # === IMAGE ================================================ #
     image: postgres:16
-
-    restart: always
-    ports:
-      - "5432:5432"
-    networks:
-      - agenta-network
+    # === STORAGE ============================================== #
+    volumes:
+      - postgres-data:/var/lib/postgresql/data/
+      - ../../../api/oss/databases/postgres/init-db-oss.sql:/docker-entrypoint-initdb.d/init-db.sql
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
     environment:
       POSTGRES_USER: ${POSTGRES_USER:-username}
       POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
-    volumes:
-      - postgres-data:/var/lib/postgresql/data/
-      - ../../../api/oss/databases/postgres/init-db-oss.sql:/docker-entrypoint-initdb.d/init-db.sql
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-network
+    ports:
+      - "${POSTGRES_PORT:-5432}:5432"
+    # === LIFECYCLE ============================================ #
+    restart: always
     healthcheck:
       test: ["CMD-SHELL", "pg_isready -U username -d agenta_oss_core"]
       interval: 10s
@@ -292,8 +342,9 @@
       retries: 5

   redis-volatile:
+    # === IMAGE ================================================ #
     image: redis:8
-
+    # === EXECUTION ============================================ #
     command: >
       redis-server
       --appendonly no
@@ -301,18 +352,14 @@
       --maxmemory 512mb
       --maxmemory-policy volatile-lru
       --port 6379
-
-    ports:
-      - "6379:6379"
-
-    networks:
-      - agenta-network
-
+    # === STORAGE ============================================== #
     volumes:
       - redis-volatile-data:/data
-
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-network
+    # === LIFECYCLE ============================================ #
     restart: always
-
     healthcheck:
       test: ["CMD", "redis-cli", "-p", "6379", "ping"]
       interval: 10s
@@ -321,8 +368,9 @@
       start_period: 5s

   redis-durable:
+    # === IMAGE ================================================ #
     image: redis:8
-
+    # === EXECUTION ============================================ #
     command: >
       redis-server
       --appendonly yes
@@ -331,18 +379,14 @@
       --maxmemory 512mb
       --maxmemory-policy noeviction
       --port 6381
-
-    ports:
-      - "6381:6381"
-
-    networks:
-      - agenta-network
-
+    # === STORAGE ============================================== #
     volumes:
       - redis-durable-data:/data
-
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-network
+    # === LIFECYCLE ============================================ #
     restart: always
-
     healthcheck:
       test: ["CMD", "redis-cli", "-p", "6381", "ping"]
       interval: 10s
@@ -351,42 +395,78 @@
       start_period: 5s

   traefik:
+    # === IMAGE ================================================ #
     image: traefik:2
-
-    command: --api.dashboard=true --api.insecure=true --providers.docker --entrypoints.web.address=:${TRAEFIK_PORT:-80}
-    ports:
-      - "${TRAEFIK_PORT:-80}:${TRAEFIK_PORT:-80}"
-      - "${TRAEFIK_UI_PORT:-8080}:8080"
+    # === EXECUTION ============================================ #
+    command:
+      - --api.dashboard=true
+      - --api.insecure=true
+      - --providers.docker
+      - --providers.docker.constraints=Label(`com.docker.compose.project`,`${COMPOSE_PROJECT_NAME:-agenta-oss-dev}`)
+      - --entrypoints.web.address=:80
+      - --ping=true
+      - --accesslog=true
+    # === STORAGE ============================================== #
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
+    # === NETWORK ============================================== #
     networks:
       - agenta-network
+    ports:
+      - "${TRAEFIK_PORT:-80}:80"
+      - "${TRAEFIK_UI_PORT:-8080}:8080"
+    # === LIFECYCLE ============================================ #
     restart: always
+    healthcheck:
+      test: ["CMD", "traefik", "healthcheck", "--ping"]
+      interval: 10s
+      timeout: 5s
+      retries: 3
+      start_period: 10s

   supertokens:
+    # === IMAGE ================================================ #
     image: registry.supertokens.io/supertokens/supertokens-postgresql
-
-    depends_on:
-      postgres:
-        condition: service_healthy
-    ports:
-      - 3567:3567
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.dev}
-
     environment:
-      POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS}
-
+      POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS:-postgresql://username:password@postgres:5432/agenta_oss_supertokens}
+    # === NETWORK ============================================== #
     networks:
       - agenta-network
+    # === ORCHESTRATION ======================================== #
+    depends_on:
+      postgres:
+        condition: service_healthy
+      alembic:
+        condition: service_completed_successfully
+    # === LIFECYCLE ============================================ #
+    restart: always
     healthcheck:
       test: >
         bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"'
       interval: 10s
       timeout: 5s
       retries: 5
-    restart: always
+
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #
+  #

 networks:
   agenta-network:
diff --git a/hosting/docker-compose/oss/docker-compose.gh.ssl.yml b/hosting/docker-compose/oss/docker-compose.gh.ssl.yml
index 5d5aa4ce61..c5618234be 100644
--- a/hosting/docker-compose/oss/docker-compose.gh.ssl.yml
+++ b/hosting/docker-compose/oss/docker-compose.gh.ssl.yml
@@ -2,52 +2,37 @@
 services:
   web:
+    # === ACTIVATION =========================================== #
     profiles:
       - with-web
-
+    # === IMAGE ================================================ #
     build:
       context: ../../../web
       dockerfile: oss/docker/Dockerfile.gh
-
+    # === EXECUTION ============================================ #
+    command: sh -c "node ./oss/server.js"
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    # === NETWORK ============================================== #
     networks:
       - agenta-gh-ssl-network
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.web.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/`)"
       - "traefik.http.routers.web.entrypoints=web,web-secure"
       - "traefik.http.services.web.loadbalancer.server.port=3000"
       - "traefik.http.routers.web.tls=true"
       - "traefik.http.routers.web.tls.certresolver=myResolver"
-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-    command: sh -c "node ./oss/server.js"
+    # === LIFECYCLE ============================================ #
     restart: always

   api:
-    build:
+    # === IMAGE ================================================ #
+    build:
       context: ../../../api
       dockerfile: oss/docker/Dockerfile.gh
-
-    volumes:
-      - /var/run/docker.sock:/var/run/docker.sock
-
-    networks:
-      - agenta-gh-ssl-network
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    labels:
-      - "traefik.http.routers.api.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/api/`)"
-      - "traefik.http.routers.api.entrypoints=web,web-secure"
-      - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
-      - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
-      - "traefik.http.routers.api.middlewares=api-strip"
-      - "traefik.http.services.api.loadbalancer.server.port=8000"
-      - "traefik.http.routers.api.service=api"
-      - "traefik.http.routers.api.tls=true"
-      - "traefik.http.routers.api.tls.certresolver=myResolver"
-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-    environment:
-      - SCRIPT_NAME=/api
+    # === EXECUTION ============================================ #
     command: >
       newrelic-admin run-program gunicorn entrypoints.routers:app
       --bind 0.0.0.0:8000
@@ -60,7 +45,20 @@ services:
       --log-level info
       --access-logfile -
       --error-logfile -
-
+    # === STORAGE ============================================== #
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    environment:
+      - SCRIPT_NAME=/api
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-gh-ssl-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
@@ -70,128 +68,175 @@
        condition: service_healthy
       redis-durable:
         condition: service_healthy
+    # === LABELS =============================================== #
+    labels:
+      - "traefik.http.routers.api.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/api/`)"
+      - "traefik.http.routers.api.entrypoints=web,web-secure"
+      - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
+      - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
+      - "traefik.http.routers.api.middlewares=api-strip"
+      - "traefik.http.services.api.loadbalancer.server.port=8000"
+      - "traefik.http.routers.api.service=api"
+      - "traefik.http.routers.api.tls=true"
+      - "traefik.http.routers.api.tls.certresolver=myResolver"
+    # === LIFECYCLE ============================================ #
     restart: always

   worker-evaluations:
+    # === IMAGE ================================================ #
     build:
       context: ../../../api
       dockerfile: oss/docker/Dockerfile.gh
-
-    volumes:
-      - /var/run/docker.sock:/var/run/docker.sock
-
-    networks:
-      - agenta-gh-ssl-network
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-
+    # === EXECUTION ============================================ #
     command:
       [
         "newrelic-admin",
         "run-program",
         "python",
         "-m",
-        "entrypoints.worker_evaluations"
+        "entrypoints.worker_evaluations",
       ]
+    # === STORAGE ============================================== #
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-gh-ssl-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
+      alembic:
+        condition: service_completed_successfully
       redis-volatile:
         condition: service_healthy
       redis-durable:
         condition: service_healthy
+    # === LIFECYCLE ============================================ #
     restart: always

   worker-tracing:
+    # === IMAGE ================================================ #
     build:
       context: ../../../api
       dockerfile: oss/docker/Dockerfile.gh
-
-    volumes:
-      - /var/run/docker.sock:/var/run/docker.sock
-
-    networks:
-      - agenta-gh-ssl-network
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-
+    # === EXECUTION ============================================ #
     command:
       [
         "newrelic-admin",
         "run-program",
         "python",
         "-m",
-        "entrypoints.worker_tracing"
+        "entrypoints.worker_tracing",
       ]
+    # === STORAGE ============================================== #
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-gh-ssl-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
+      alembic:
+        condition: service_completed_successfully
       redis-volatile:
         condition: service_healthy
       redis-durable:
         condition: service_healthy
+    # === LIFECYCLE ============================================ #
     restart: always

   cron:
+    # === IMAGE ================================================ #
     build:
       context: ../../../api
       dockerfile: oss/docker/Dockerfile.gh
-
+    # === EXECUTION ============================================ #
+    command: cron -f
+    # === STORAGE ============================================== #
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
-
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-gh-ssl-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       - postgres
       - api
-
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
+    # === LIFECYCLE ============================================ #
     restart: always
-
-    networks:
-      - agenta-gh-ssl-network
-
-    command: cron -f

   alembic:
-    build:
+    # === IMAGE ================================================ #
+    build:
       context: ../../../api
       dockerfile: oss/docker/Dockerfile.gh
-
+    # === EXECUTION ============================================ #
+    command: sh -c "python -m oss.databases.postgres.migrations.runner"
+    # === STORAGE ============================================== #
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
-
-    networks:
-      - agenta-gh-ssl-network
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
-
-    command: sh -c "python -m oss.databases.postgres.migrations.runner"
-
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-gh-ssl-network
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy

   completion:
+    # === IMAGE ================================================ #
     build:
       context: ../../../services/completion
       dockerfile: oss/docker/Dockerfile.gh
-
+    # === EXECUTION ============================================ #
+    command: >
+      newrelic-admin run-program gunicorn oss.src.main:app
+      --bind 0.0.0.0:80
+      --worker-class uvicorn.workers.UvicornWorker
+      --workers 2
+      --max-requests 10000
+      --max-requests-jitter 1000
+      --timeout 60
+      --graceful-timeout 60
+      --log-level info
+      --access-logfile -
+      --error-logfile -
+    # === STORAGE ============================================== #
     volumes:
       - ../../../services/completion:/app
       - ../../../sdk:/sdk
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    environment:
+      - SCRIPT_NAME=/services/completion
+    # === NETWORK ============================================== #
     networks:
       - agenta-gh-ssl-network
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.completion.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/services/completion/`)"
       - "traefik.http.routers.completion.entrypoints=web,web-secure"
@@ -202,11 +247,15 @@ services:
       - "traefik.http.routers.completion.service=completion"
       - "traefik.http.routers.completion.tls=true"
       - "traefik.http.routers.completion.tls.certresolver=myResolver"
-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-    environment:
-      - SCRIPT_NAME=/services/completion
+    # === LIFECYCLE ============================================ #
+    restart: always

+  chat:
+    # === IMAGE ================================================ #
+    build:
+      context: ../../../services/chat
+      dockerfile: oss/docker/Dockerfile.gh
+    # === EXECUTION ============================================ #
     command: >
       newrelic-admin run-program gunicorn oss.src.main:app
       --bind 0.0.0.0:80
@@ -219,22 +268,21 @@ services:
       --log-level info
       --access-logfile -
       --error-logfile -
-
-    restart: always
-
-  chat:
-    build:
-      context: ../../../services/chat
-      dockerfile: oss/docker/Dockerfile.gh
+    # === STORAGE ============================================== #
     volumes:
       - ../../../services/chat:/app
       - ../../../sdk:/sdk
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
     environment:
       - SCRIPT_NAME=/services/chat
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-gh-ssl-network
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.chat.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/services/chat/`)"
       - "traefik.http.routers.chat.entrypoints=web,web-secure"
@@ -245,51 +293,39 @@ services:
       - "traefik.http.routers.chat.service=chat"
       - "traefik.http.routers.chat.tls=true"
       - "traefik.http.routers.chat.tls.certresolver=myResolver"
-    networks:
-      - agenta-gh-ssl-network
-
-    command: >
-      newrelic-admin run-program gunicorn oss.src.main:app
-      --bind 0.0.0.0:80
-      --worker-class uvicorn.workers.UvicornWorker
-      --workers 2
-      --max-requests 10000
-      --max-requests-jitter 1000
-      --timeout 60
-      --graceful-timeout 60
-      --log-level info
-      --access-logfile -
-      --error-logfile -
-
+    # === LIFECYCLE ============================================ #
     restart: always

   postgres:
+    # === IMAGE ================================================ #
     image: postgres:16
-
+    # === STORAGE ============================================== #
     volumes:
       - postgres-data:/var/lib/postgresql/data/
       - ../../../api/oss/databases/postgres/init-db-oss.sql:/docker-entrypoint-initdb.d/init-db.sql
-
-    restart: always
-    networks:
-      - agenta-gh-ssl-network
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
     environment:
       POSTGRES_USER: ${POSTGRES_USER:-username}
       POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-gh-ssl-network
     ports:
       - "${POSTGRES_PORT:-5432}:5432"
-
+    # === LIFECYCLE ============================================ #
+    restart: always
    healthcheck:
-      test: ["CMD-SHELL", "pg_isready -U postgres"]
+      test: ["CMD-SHELL", "pg_isready -U username -d agenta_oss_core"]
      interval: 10s
       timeout: 5s
       retries: 5

   redis-volatile:
+    # === IMAGE ================================================ #
     image: redis:8
-
+    # === EXECUTION ============================================ #
     command: >
       redis-server
       --appendonly no
@@ -297,21 +333,17 @@
       --maxmemory 512mb
       --maxmemory-policy volatile-lru
       --port 6379
-
-    ports:
-      - "6379:6379"
-
+    # === STORAGE ============================================== #
     volumes:
       - redis-volatile-data:/data
-
+    # === NETWORK ============================================== #
     networks:
       - agenta-gh-ssl-network
-
+    # === LABELS =============================================== #
     labels:
       - "traefik.enable=false"
-
+    # === LIFECYCLE ============================================ #
     restart: always
-
     healthcheck:
       test: ["CMD", "redis-cli", "-p", "6379", "ping"]
       interval: 10s
@@ -320,8 +352,9 @@
       start_period: 5s

   redis-durable:
+    # === IMAGE ================================================ #
     image: redis:8
-
+    # === EXECUTION ============================================ #
     command: >
       redis-server
       --appendonly yes
@@ -330,21 +363,17 @@
       --maxmemory 512mb
       --maxmemory-policy noeviction
       --port 6381
-
-    ports:
-      - "6381:6381"
-
+    # === STORAGE ============================================== #
     volumes:
       - redis-durable-data:/data
-
+    # === NETWORK ============================================== #
     networks:
       - agenta-gh-ssl-network
-
+    # === LABELS =============================================== #
     labels:
       - "traefik.enable=false"
-
+    # === LIFECYCLE ============================================ #
     restart: always
-
     healthcheck:
       test: ["CMD", "redis-cli", "-p", "6381", "ping"]
       interval: 10s
@@ -353,38 +382,46 @@
       start_period: 5s

   traefik:
+    # === IMAGE ================================================ #
     image: traefik:2
+    # === STORAGE ============================================== #
     volumes:
       - ./ssl/traefik.yml:/traefik.yml
       - /var/run/docker.sock:/var/run/docker.sock
       - ${AGENTA_SSL_DIR:-/home/ubuntu/ssl_certificates}/acme.json:/acme.json
+    # === NETWORK ============================================== #
     networks:
       - agenta-gh-ssl-network
     ports:
-      - "${TRAEFIK_PORT:-80}:${TRAEFIK_PORT:-80}"
+      - "${TRAEFIK_PORT:-80}:80"
       - "${TRAEFIK_UI_PORT:-8080}:8080"
-      - "${TRAEFIK_HTTPS_PORT:-443}:${TRAEFIK_HTTPS_PORT:-443}"
+      - "${TRAEFIK_HTTPS_PORT:-443}:443"
+    # === LIFECYCLE ============================================ #
     restart: always

   supertokens:
+    # === IMAGE ================================================ #
     image: registry.supertokens.io/supertokens/supertokens-postgresql
-
-    depends_on:
-      postgres:
-        condition: service_healthy
-    ports:
-      - "${SUPERTOKENS_PORT:-3567}:3567"
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
+    # === NETWORK ============================================== #
     networks:
       - agenta-gh-ssl-network
+    # === ORCHESTRATION ======================================== #
+    depends_on:
+      postgres:
+        condition: service_healthy
+      alembic:
+        condition: service_completed_successfully
+    # === LIFECYCLE ============================================ #
+    restart: always
     healthcheck:
       test: >
         bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"'
       interval: 10s
       timeout: 5s
       retries: 5
-    restart: always

 networks:
   agenta-gh-ssl-network:
diff --git a/hosting/docker-compose/oss/docker-compose.gh.yml b/hosting/docker-compose/oss/docker-compose.gh.yml
index 14bc0f8db2..c75963160a 100644
--- a/hosting/docker-compose/oss/docker-compose.gh.yml
+++ b/hosting/docker-compose/oss/docker-compose.gh.yml
@@ -2,53 +2,37 @@
 name: agenta-oss-gh

 services:
   web:
+    # === ACTIVATION =========================================== #
     profiles:
       - with-web
-
+    # === IMAGE ================================================ #
     # build:
     #   context: ../../../web
     #   dockerfile: oss/docker/Dockerfile.gh
-
     image: ghcr.io/agenta-ai/${AGENTA_WEB_IMAGE_NAME:-agenta-web}:${AGENTA_WEB_IMAGE_TAG:-latest}
-
+    # === EXECUTION ============================================ #
+    command: sh -c "node ./oss/server.js"
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
-
+    # === NETWORK ============================================== #
     networks:
       - agenta-oss-gh-network
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.web.rule=PathPrefix(`/`)"
       - "traefik.http.routers.web.entrypoints=web"
       - "traefik.http.services.web.loadbalancer.server.port=3000"
-
-    command: sh -c "node ./oss/server.js"
+    # === LIFECYCLE ============================================ #
     restart: always

   api:
+    # === IMAGE ================================================ #
     # build:
     #   context: ../../../api
     #   dockerfile: oss/docker/Dockerfile.gh
-
     image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
-    networks:
-      - agenta-oss-gh-network
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-    labels:
-      - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
-      - "traefik.http.routers.api.entrypoints=web"
-      - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
-      - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
-      - "traefik.http.routers.api.middlewares=api-strip"
-      - "traefik.http.services.api.loadbalancer.server.port=8000"
-      - "traefik.http.routers.api.service=api"
-
-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-    environment:
-      - SCRIPT_NAME=/api
-
+    # === EXECUTION ============================================ #
     command: >
       newrelic-admin run-program gunicorn entrypoints.routers:app
       --bind 0.0.0.0:8000
@@ -61,7 +45,17 @@ services:
       --log-level info
       --access-logfile -
       --error-logfile -
-
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    environment:
+      - SCRIPT_NAME=/api
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-oss-gh-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
@@ -71,128 +65,162 @@
         condition: service_healthy
       redis-durable:
         condition: service_healthy
+    # === LABELS =============================================== #
+    labels:
+      - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
+      - "traefik.http.routers.api.entrypoints=web"
+      - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
+      - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
+      - "traefik.http.routers.api.middlewares=api-strip"
+      - "traefik.http.services.api.loadbalancer.server.port=8000"
+      - "traefik.http.routers.api.service=api"
+    # === LIFECYCLE ============================================ #
     restart: always

   worker-evaluations:
+    # === IMAGE ================================================ #
     # build:
     #   context: ../../../api
     #   dockerfile: oss/docker/Dockerfile.gh
-
     image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
-    networks:
-      - agenta-oss-gh-network
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-
+    # === EXECUTION ============================================ #
     command:
       [
         "newrelic-admin",
         "run-program",
         "python",
         "-m",
-        "entrypoints.worker_evaluations"
+        "entrypoints.worker_evaluations",
      ]
-
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-oss-gh-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
+      alembic:
+        condition: service_completed_successfully
       redis-volatile:
         condition: service_healthy
       redis-durable:
         condition: service_healthy
+    # === LIFECYCLE ============================================ #
     restart: always

   worker-tracing:
+    # === IMAGE ================================================ #
     # build:
     #   context: ../../../api
     #   dockerfile: oss/docker/Dockerfile.gh
-
     image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
-    networks:
-      - agenta-oss-gh-network
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-
+    # === EXECUTION ============================================ #
     command:
       [
         "newrelic-admin",
         "run-program",
         "python",
         "-m",
-        "entrypoints.worker_tracing"
+        "entrypoints.worker_tracing",
       ]
-
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-oss-gh-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy
+      alembic:
+        condition: service_completed_successfully
       redis-volatile:
         condition: service_healthy
       redis-durable:
         condition: service_healthy
+    # === LIFECYCLE ============================================ #
     restart: always

   cron:
+    # === IMAGE ================================================ #
     # build:
     #   context: ../../../api
     #   dockerfile: oss/docker/Dockerfile.gh
-
     image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
+    # === EXECUTION ============================================ #
+    command: cron -f
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
-
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-oss-gh-network
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    # === ORCHESTRATION ======================================== #
     depends_on:
       - postgres
       - api
-
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
+    # === LIFECYCLE ============================================ #
     restart: always
-
-    networks:
-      - agenta-oss-gh-network
-
-    command: cron -f

   alembic:
+    # === IMAGE ================================================ #
     # build:
     #   context: ../../../api
     #   dockerfile: oss/docker/Dockerfile.gh
-
     image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
-    networks:
-      - agenta-oss-gh-network
-
+    # === EXECUTION ============================================ #
+    command: sh -c "python -m oss.databases.postgres.migrations.runner"
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
-
-    command: sh -c "python -m oss.databases.postgres.migrations.runner"
-
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-oss-gh-network
+    # === ORCHESTRATION ======================================== #
     depends_on:
       postgres:
         condition: service_healthy

   completion:
+    # === IMAGE ================================================ #
     # build:
     #   context: ../../../services/completion
     #   dockerfile: oss/docker/Dockerfile.gh
-
     image: ghcr.io/agenta-ai/${AGENTA_COMPLETION_IMAGE_NAME:-agenta-completion}:${AGENTA_COMPLETION_IMAGE_TAG:-latest}
-
+    # === EXECUTION ============================================ #
+    command: >
+      newrelic-admin run-program gunicorn oss.src.main:app
+      --bind 0.0.0.0:80
+      --worker-class uvicorn.workers.UvicornWorker
+      --workers 2
+      --max-requests 10000
+      --max-requests-jitter 1000
+      --timeout 60
+      --graceful-timeout 60
+      --log-level info
+      --access-logfile -
+      --error-logfile -
+    # === CONFIGURATION ======================================== #
+    env_file:
+      - ${ENV_FILE:-./.env.oss.gh}
+    environment:
+      - SCRIPT_NAME=/services/completion
+    # === NETWORK ============================================== #
     networks:
       - agenta-oss-gh-network
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.completion.rule=PathPrefix(`/services/completion/`)"
       - "traefik.http.routers.completion.entrypoints=web"
@@ -201,12 +229,16 @@ services:
       - "traefik.http.routers.completion.middlewares=completion-strip"
       - "traefik.http.services.completion.loadbalancer.server.port=80"
       - "traefik.http.routers.completion.service=completion"
+    # === LIFECYCLE ============================================ #
+    restart: always

-    env_file:
-      - ${ENV_FILE:-./.env.oss.gh}
-    environment:
-      - SCRIPT_NAME=/services/completion
-
+  chat:
+    # === IMAGE ================================================ #
+    # build:
+    #   context: ../../../services/chat
+    #   dockerfile: oss/docker/Dockerfile.gh
+    image: ghcr.io/agenta-ai/${AGENTA_CHAT_IMAGE_NAME:-agenta-chat}:${AGENTA_CHAT_IMAGE_TAG:-latest}
+    # === EXECUTION ============================================ #
     command: >
       newrelic-admin run-program gunicorn oss.src.main:app
       --bind 0.0.0.0:80
@@ -219,22 +251,17 @@ services:
       --log-level info
       --access-logfile -
       --error-logfile -
-
-    restart: always
-
-  chat:
-    # build:
-    #   context: ../../../services/chat
-    #   dockerfile: oss/docker/Dockerfile.gh
-
-    image: ghcr.io/agenta-ai/${AGENTA_CHAT_IMAGE_NAME:-agenta-chat}:${AGENTA_CHAT_IMAGE_TAG:-latest}
-
+    # === CONFIGURATION ======================================== #
     env_file:
       - ${ENV_FILE:-./.env.oss.gh}
     environment:
       - SCRIPT_NAME=/services/chat
+    # === NETWORK ============================================== #
+    networks:
+      - agenta-oss-gh-network
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    # === LABELS =============================================== #
     labels:
       - "traefik.http.routers.chat.rule=PathPrefix(`/services/chat/`)"
       - "traefik.http.routers.chat.entrypoints=web"
@@ -243,53 +270,39 @@ services:
       - "traefik.http.routers.chat.middlewares=chat-strip"
       - "traefik.http.services.chat.loadbalancer.server.port=80"
       - "traefik.http.routers.chat.service=chat"
-    networks:
-      - agenta-oss-gh-network
-
-    command: >
-      newrelic-admin run-program gunicorn oss.src.main:app
-      --bind 0.0.0.0:80
-      --worker-class uvicorn.workers.UvicornWorker
-      --workers 2
-      --max-requests 10000
-      --max-requests-jitter 1000
-      --timeout 60
-      --graceful-timeout 60
-      --log-level info
-      --access-logfile -
-      --error-logfile -
-
+    # === LIFECYCLE ============================================ #
     restart: always

   postgres:
+    # === IMAGE
================================================ # image: postgres:16 - + # === STORAGE ============================================== # volumes: - postgres-data:/var/lib/postgresql/data/ - ../../../api/oss/databases/postgres/init-db-oss.sql:/docker-entrypoint-initdb.d/init-db.sql - - restart: always - networks: - - agenta-oss-gh-network - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.oss.gh} environment: POSTGRES_USER: ${POSTGRES_USER:-username} POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password} - + # === NETWORK ============================================== # + networks: + - agenta-oss-gh-network ports: - "${POSTGRES_PORT:-5432}:5432" - + # === LIFECYCLE ============================================ # + restart: always healthcheck: - test: [ "CMD-SHELL", "pg_isready -U username -d agenta_oss_core" ] + test: ["CMD-SHELL", "pg_isready -U username -d agenta_oss_core"] interval: 10s timeout: 5s retries: 5 redis-volatile: + # === IMAGE ================================================ # image: redis:8 - + # === EXECUTION ============================================ # command: > redis-server --appendonly no @@ -297,21 +310,17 @@ services: --maxmemory 512mb --maxmemory-policy volatile-lru --port 6379 - - ports: - - "6379:6379" - + # === STORAGE ============================================== # volumes: - redis-volatile-data:/data - + # === NETWORK ============================================== # networks: - agenta-oss-gh-network - + # === LABELS =============================================== # labels: - "traefik.enable=false" - + # === LIFECYCLE ============================================ # restart: always - healthcheck: test: ["CMD", "redis-cli", "-p", "6379", "ping"] interval: 10s @@ -320,8 +329,9 @@ services: start_period: 5s redis-durable: + # === IMAGE ================================================ # image: redis:8 - + # === EXECUTION ============================================ # command: > redis-server --appendonly yes @@ -330,21 +340,17 @@ services: --maxmemory 512mb --maxmemory-policy noeviction --port 6381 - - ports: - - "6381:6381" - + # === STORAGE ============================================== # volumes: - redis-durable-data:/data - + # === NETWORK ============================================== # networks: - agenta-oss-gh-network - + # === LABELS =============================================== # labels: - "traefik.enable=false" - + # === LIFECYCLE ============================================ # restart: always - healthcheck: test: ["CMD", "redis-cli", "-p", "6381", "ping"] interval: 10s @@ -353,72 +359,80 @@ services: start_period: 5s traefik: - image: traefik:2 + # === ACTIVATION =========================================== # profiles: - with-traefik - volumes: - - /var/run/docker.sock:/var/run/docker.sock - networks: - - agenta-oss-gh-network - + # === IMAGE ================================================ # + image: traefik:2 + # === EXECUTION ============================================ # command: - --api.dashboard=true - --providers.docker - - --entrypoints.web.address=:${TRAEFIK_PORT:-80} + - --entrypoints.web.address=:80 + # === STORAGE ============================================== # + volumes: + - /var/run/docker.sock:/var/run/docker.sock + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.oss.gh} - + # === NETWORK ============================================== # + networks: + - agenta-oss-gh-network ports: - - "${TRAEFIK_PORT:-80}:${TRAEFIK_PORT:-80}" + - "${TRAEFIK_PORT:-80}:80" 
- "${TRAEFIK_UI_PORT:-8080}:8080" - + # === LIFECYCLE ============================================ # restart: always nginx: - image: nginx:latest + # === ACTIVATION =========================================== # profiles: - with-nginx + # === IMAGE ================================================ # + image: nginx:latest + # === STORAGE ============================================== # volumes: - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro - - networks: - - agenta-oss-gh-network - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.oss.gh} - + # === NETWORK ============================================== # + networks: + - agenta-oss-gh-network ports: - "${NGINX_PORT:-80}:80" - restart: always - + # === ORCHESTRATION ======================================== # depends_on: - api - web + # === LIFECYCLE ============================================ # + restart: always supertokens: + # === IMAGE ================================================ # image: registry.supertokens.io/supertokens/supertokens-postgresql - - depends_on: - postgres: - condition: service_healthy - ports: - - "${SUPERTOKENS_PORT:-3567}:3567" - + # === CONFIGURATION ======================================== # env_file: - ${ENV_FILE:-./.env.oss.gh} - environment: POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS} - + # === NETWORK ============================================== # networks: - agenta-oss-gh-network + # === ORCHESTRATION ======================================== # + depends_on: + postgres: + condition: service_healthy + alembic: + condition: service_completed_successfully + # === LIFECYCLE ============================================ # + restart: always healthcheck: test: > bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"' interval: 10s timeout: 5s retries: 5 - restart: always networks: agenta-oss-gh-network: diff --git a/hosting/docker-compose/oss/docker-compose.otel.yml b/hosting/docker-compose/oss/docker-compose.otel.yml index 19570f9353..28a820576e 100644 --- a/hosting/docker-compose/oss/docker-compose.otel.yml +++ b/hosting/docker-compose/oss/docker-compose.otel.yml @@ -1,11 +1,15 @@ services: - otel-collector: - image: otel/opentelemetry-collector-contrib - volumes: - - ./otel-collector-config.yml:/etc/otelcol-contrib/config.yaml - environment: - - AGENTA_OTLP_ENDPOINT=${AGENTA_OTLP_ENDPOINT} - - AGENTA_API_KEY=${AGENTA_API_KEY} - ports: - - "4317:4317" # OTLP gRPC receiver - - "4318:4318" # OTLP HTTP receiver + otel-collector: + # === IMAGE ================================================ # + image: otel/opentelemetry-collector-contrib + # === STORAGE ============================================== # + volumes: + - ./otel-collector-config.yml:/etc/otelcol-contrib/config.yaml + # === CONFIGURATION ======================================== # + environment: + - AGENTA_OTLP_ENDPOINT=${AGENTA_OTLP_ENDPOINT} + - AGENTA_API_KEY=${AGENTA_API_KEY} + # === NETWORK ============================================== # + ports: + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP HTTP receiver diff --git a/hosting/docker-compose/oss/env.oss.dev.example b/hosting/docker-compose/oss/env.oss.dev.example index 9840592f72..5d01703cfb 100644 --- a/hosting/docker-compose/oss/env.oss.dev.example +++ b/hosting/docker-compose/oss/env.oss.dev.example @@ -1,95 +1,159 @@ -# First-party (required) +# ============================================================================ # +# 
License - https://agenta.ai/pricing +# ============================================================================ # AGENTA_LICENSE=oss -AGENTA_API_URL=http://localhost/api -AGENTA_WEB_URL=http://localhost -AGENTA_SERVICES_URL=http://localhost/services + +# ============================================================================ # +# Secrets - REPLACE ME IN PRODUCTION! +# ============================================================================ # AGENTA_AUTH_KEY=replace-me AGENTA_CRYPT_KEY=replace-me -# First-party (registry & service) -POSTGRES_PASSWORD=password -POSTGRES_USERNAME=username - -# First-party (optional) -AGENTA_AUTO_MIGRATIONS=true -AGENTA_PRICING= -AGENTA_DEMOS= -DOCKER_NETWORK_MODE=bridge -AGENTA_RUNTIME_PREFIX= -AGENTA_SEND_EMAIL_FROM_ADDRESS=mail@example.com -AGENTA_API_INTERNAL_URL= -AGENTA_TELEMETRY_ENABLED=true -AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true -AGENTA_OTLP_MAX_BATCH_BYTES=10485760 - -# Third-party (required) -TRAEFIK_DOMAIN=localhost -TRAEFIK_PORT=80 -TRAEFIK_PROTOCOL=http -TRAEFIK_UI_PORT=8080 -TRAEFIK_HTTPS_PORT=443 - -POSTGRES_URI_SUPERTOKENS=postgresql://username:password@postgres:5432/agenta_oss_supertokens -POSTGRES_URI_CORE=postgresql+asyncpg://username:password@postgres:5432/agenta_oss_core -POSTGRES_URI_TRACING=postgresql+asyncpg://username:password@postgres:5432/agenta_oss_tracing - -ALEMBIC_CFG_PATH_CORE=/app/oss/databases/postgres/migrations/core/alembic.ini -ALEMBIC_CFG_PATH_TRACING=/app/oss/databases/postgres/migrations/tracing/alembic.ini - -SUPERTOKENS_CONNECTION_URI=http://supertokens:3567 - +# ============================================================================ # +# Endpoints +# ============================================================================ # +# AGENTA_WEB_URL=http://localhost +# AGENTA_API_URL=http://localhost/api +# AGENTA_SERVICES_URL=http://localhost/services +# AGENTA_API_INTERNAL_URL= + +# ============================================================================ # +# Images +# ============================================================================ # +# AGENTA_WEB_IMAGE_NAME=agenta-web +# AGENTA_WEB_IMAGE_TAG=latest +# AGENTA_API_IMAGE_NAME=agenta-api +# AGENTA_API_IMAGE_TAG=latest +# AGENTA_COMPLETION_IMAGE_NAME=agenta-completion +# AGENTA_COMPLETION_IMAGE_TAG=latest +# AGENTA_CHAT_IMAGE_NAME=agenta-chat +# AGENTA_CHAT_IMAGE_TAG=latest + +# ============================================================================ # +# OTLP +# ============================================================================ # +# AGENTA_OTLP_MAX_BATCH_BYTES=10485760 + +# ============================================================================ # +# Proxy - LLM Providers +# ============================================================================ # +# OPENAI_API_KEY= +# ANTHROPIC_API_KEY= +# COHERE_API_KEY= +# GROQ_API_KEY= +# GEMINI_API_KEY= +# MISTRAL_API_KEY= +# ALEPHALPHA_API_KEY= +# ANYSCALE_API_KEY= +# DEEPINFRA_API_KEY= +# OPENROUTER_API_KEY= +# PERPLEXITYAI_API_KEY= +# TOGETHERAI_API_KEY= + +# ============================================================================ # +# Docker - Compose +# ============================================================================ # +# COMPOSE_PROJECT_NAME=agenta-oss-dev + +# ============================================================================ # +# Network - Traefik +# ============================================================================ # +# TRAEFIK_PROTOCOL=http +# TRAEFIK_DOMAIN=localhost +# TRAEFIK_PORT=80 +# TRAEFIK_SSL_DIR= + +# 
============================================================================ # +# Network - Nginx +# ============================================================================ # +# NGINX_PORT=80 + +# ============================================================================ # +# Databases - Postgres +# ============================================================================ # +# POSTGRES_USER=username +# POSTGRES_PASSWORD=password + +# POSTGRES_PORT=5432 +# POSTGRES_URI_CORE=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_core +# POSTGRES_URI_TRACING=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_tracing +# POSTGRES_URI_SUPERTOKENS=postgresql://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_supertokens + +# ============================================================================ # +# Databases - Alembic (migrations) +# ============================================================================ # +# ALEMBIC_AUTO_MIGRATIONS=true +# ALEMBIC_CFG_PATH_CORE=/app/{LICENSE}/databases/postgres/migrations/core/alembic.ini +# ALEMBIC_CFG_PATH_TRACING=/app/{LICENSE}/databases/postgres/migrations/tracing/alembic.ini + +# ============================================================================ # +# Databases - Redis +# ============================================================================ # +# REDIS_URI_VOLATILE=redis://localhost:6379/0 +# REDIS_URI_DURABLE=redis://localhost:6381/0 + +# ============================================================================ # +# Authentication - SuperTokens +# ============================================================================ # +# SUPERTOKENS_EMAIL_DISABLED=false + +# ============================================================================ # +# Authentication - Email providers +# ============================================================================ # +# SENDGRID_API_KEY= +# SENDGRID_FROM_ADDRESS= + +# ============================================================================ # +# Authentication - OIDC providers +# ============================================================================ # +# GOOGLE_OAUTH_CLIENT_ID= +# GOOGLE_OAUTH_CLIENT_SECRET= + +# GOOGLE_WORKSPACES_OAUTH_CLIENT_ID= +# GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET= +# GOOGLE_WORKSPACES_HD= + +# APPLE_OAUTH_CLIENT_ID= +# APPLE_OAUTH_CLIENT_SECRET= +# APPLE_KEY_ID= +# APPLE_TEAM_ID= +# APPLE_PRIVATE_KEY= + +# DISCORD_OAUTH_CLIENT_ID= +# DISCORD_OAUTH_CLIENT_SECRET= + +# FACEBOOK_OAUTH_CLIENT_ID= +# FACEBOOK_OAUTH_CLIENT_SECRET= + +# GITHUB_OAUTH_CLIENT_ID= +# GITHUB_OAUTH_CLIENT_SECRET= + +# GITLAB_OAUTH_CLIENT_ID= +# GITLAB_OAUTH_CLIENT_SECRET= +# GITLAB_BASE_URL= + +# BITBUCKET_OAUTH_CLIENT_ID= +# BITBUCKET_OAUTH_CLIENT_SECRET= + +# LINKEDIN_OAUTH_CLIENT_ID= +# LINKEDIN_OAUTH_CLIENT_SECRET= + +# OKTA_OAUTH_CLIENT_ID= +# OKTA_OAUTH_CLIENT_SECRET= +# OKTA_DOMAIN= + +# AZURE_AD_OAUTH_CLIENT_ID= +# AZURE_AD_OAUTH_CLIENT_SECRET= +# AZURE_AD_DIRECTORY_ID= + +# BOXY_SAML_OAUTH_CLIENT_ID= +# BOXY_SAML_OAUTH_CLIENT_SECRET= +# BOXY_SAML_URL= + +# TWITTER_OAUTH_CLIENT_ID= +# TWITTER_OAUTH_CLIENT_SECRET= + +# ============================================================================ # +# Analytics - PostHog +# ============================================================================ # POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7 - -# Third-party (required for TLS/SSL) -AGENTA_SSL_DIR= - -# Third-party (optional) -NGINX_PORT=80 -SUPERTOKENS_API_KEY= -# Redis: set REDIS_URI for a single instance, or 
override with the split URIs below -REDIS_URI= -REDIS_URI_VOLATILE= -REDIS_URI_DURABLE= -POSTGRES_PORT= -SUPERTOKENS_PORT= - -GOOGLE_OAUTH_CLIENT_ID= -GOOGLE_OAUTH_CLIENT_SECRET= -GITHUB_OAUTH_CLIENT_ID= -GITHUB_OAUTH_CLIENT_SECRET= - -NEW_RELIC_LICENSE_KEY= -NRIA_LICENSE_KEY= - -LOOPS_API_KEY= - -SENDGRID_API_KEY= - -CRISP_WEBSITE_ID= - -STRIPE_API_KEY= -STRIPE_WEBHOOK_SECRET= - -# Third-party — LLM (optional) -ALEPHALPHA_API_KEY= -ANTHROPIC_API_KEY= -ANYSCALE_API_KEY= -COHERE_API_KEY= -DEEPINFRA_API_KEY= -GEMINI_API_KEY= -GROQ_API_KEY= -MISTRAL_API_KEY= -OPENAI_API_KEY= -OPENROUTER_API_KEY= -PERPLEXITYAI_API_KEY= -TOGETHERAI_API_KEY= - -# Legacy (deprecated, to be removed) -AGENTA_PORT=80 -BARE_DOMAIN_NAME=localhost -DOMAIN_NAME=http://localhost -WEBSITE_DOMAIN_NAME=http://localhost -SERVICE_URL_TEMPLATE=http://localhost:80/services/{path} -POSTGRES_DB=agenta_oss -POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_oss -ALEMBIC_CFG_PATH=/app/oss/databases/postgres/migrations/alembic.oss.ini diff --git a/hosting/docker-compose/oss/env.oss.gh.example b/hosting/docker-compose/oss/env.oss.gh.example index 9840592f72..5d01703cfb 100644 --- a/hosting/docker-compose/oss/env.oss.gh.example +++ b/hosting/docker-compose/oss/env.oss.gh.example @@ -1,95 +1,159 @@ -# First-party (required) +# ============================================================================ # +# License - https://agenta.ai/pricing +# ============================================================================ # AGENTA_LICENSE=oss -AGENTA_API_URL=http://localhost/api -AGENTA_WEB_URL=http://localhost -AGENTA_SERVICES_URL=http://localhost/services + +# ============================================================================ # +# Secrets - REPLACE ME IN PRODUCTION! 
+# ============================================================================ # AGENTA_AUTH_KEY=replace-me AGENTA_CRYPT_KEY=replace-me -# First-party (registry & service) -POSTGRES_PASSWORD=password -POSTGRES_USERNAME=username - -# First-party (optional) -AGENTA_AUTO_MIGRATIONS=true -AGENTA_PRICING= -AGENTA_DEMOS= -DOCKER_NETWORK_MODE=bridge -AGENTA_RUNTIME_PREFIX= -AGENTA_SEND_EMAIL_FROM_ADDRESS=mail@example.com -AGENTA_API_INTERNAL_URL= -AGENTA_TELEMETRY_ENABLED=true -AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true -AGENTA_OTLP_MAX_BATCH_BYTES=10485760 - -# Third-party (required) -TRAEFIK_DOMAIN=localhost -TRAEFIK_PORT=80 -TRAEFIK_PROTOCOL=http -TRAEFIK_UI_PORT=8080 -TRAEFIK_HTTPS_PORT=443 - -POSTGRES_URI_SUPERTOKENS=postgresql://username:password@postgres:5432/agenta_oss_supertokens -POSTGRES_URI_CORE=postgresql+asyncpg://username:password@postgres:5432/agenta_oss_core -POSTGRES_URI_TRACING=postgresql+asyncpg://username:password@postgres:5432/agenta_oss_tracing - -ALEMBIC_CFG_PATH_CORE=/app/oss/databases/postgres/migrations/core/alembic.ini -ALEMBIC_CFG_PATH_TRACING=/app/oss/databases/postgres/migrations/tracing/alembic.ini - -SUPERTOKENS_CONNECTION_URI=http://supertokens:3567 - +# ============================================================================ # +# Endpoints +# ============================================================================ # +# AGENTA_WEB_URL=http://localhost +# AGENTA_API_URL=http://localhost/api +# AGENTA_SERVICES_URL=http://localhost/services +# AGENTA_API_INTERNAL_URL= + +# ============================================================================ # +# Images +# ============================================================================ # +# AGENTA_WEB_IMAGE_NAME=agenta-web +# AGENTA_WEB_IMAGE_TAG=latest +# AGENTA_API_IMAGE_NAME=agenta-api +# AGENTA_API_IMAGE_TAG=latest +# AGENTA_COMPLETION_IMAGE_NAME=agenta-completion +# AGENTA_COMPLETION_IMAGE_TAG=latest +# AGENTA_CHAT_IMAGE_NAME=agenta-chat +# AGENTA_CHAT_IMAGE_TAG=latest + +# ============================================================================ # +# OTLP +# ============================================================================ # +# AGENTA_OTLP_MAX_BATCH_BYTES=10485760 + +# ============================================================================ # +# Proxy - LLM Providers +# ============================================================================ # +# OPENAI_API_KEY= +# ANTHROPIC_API_KEY= +# COHERE_API_KEY= +# GROQ_API_KEY= +# GEMINI_API_KEY= +# MISTRAL_API_KEY= +# ALEPHALPHA_API_KEY= +# ANYSCALE_API_KEY= +# DEEPINFRA_API_KEY= +# OPENROUTER_API_KEY= +# PERPLEXITYAI_API_KEY= +# TOGETHERAI_API_KEY= + +# ============================================================================ # +# Docker - Compose +# ============================================================================ # +# COMPOSE_PROJECT_NAME=agenta-oss-dev + +# ============================================================================ # +# Network - Traefik +# ============================================================================ # +# TRAEFIK_PROTOCOL=http +# TRAEFIK_DOMAIN=localhost +# TRAEFIK_PORT=80 +# TRAEFIK_SSL_DIR= + +# ============================================================================ # +# Network - Nginx +# ============================================================================ # +# NGINX_PORT=80 + +# ============================================================================ # +# Databases - Postgres +# ============================================================================ # +# 
POSTGRES_USER=username +# POSTGRES_PASSWORD=password + +# POSTGRES_PORT=5432 +# POSTGRES_URI_CORE=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_core +# POSTGRES_URI_TRACING=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_tracing +# POSTGRES_URI_SUPERTOKENS=postgresql://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_supertokens + +# ============================================================================ # +# Databases - Alembic (migrations) +# ============================================================================ # +# ALEMBIC_AUTO_MIGRATIONS=true +# ALEMBIC_CFG_PATH_CORE=/app/{LICENSE}/databases/postgres/migrations/core/alembic.ini +# ALEMBIC_CFG_PATH_TRACING=/app/{LICENSE}/databases/postgres/migrations/tracing/alembic.ini + +# ============================================================================ # +# Databases - Redis +# ============================================================================ # +# REDIS_URI_VOLATILE=redis://localhost:6379/0 +# REDIS_URI_DURABLE=redis://localhost:6381/0 + +# ============================================================================ # +# Authentication - SuperTokens +# ============================================================================ # +# SUPERTOKENS_EMAIL_DISABLED=false + +# ============================================================================ # +# Authentication - Email providers +# ============================================================================ # +# SENDGRID_API_KEY= +# SENDGRID_FROM_ADDRESS= + +# ============================================================================ # +# Authentication - OIDC providers +# ============================================================================ # +# GOOGLE_OAUTH_CLIENT_ID= +# GOOGLE_OAUTH_CLIENT_SECRET= + +# GOOGLE_WORKSPACES_OAUTH_CLIENT_ID= +# GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET= +# GOOGLE_WORKSPACES_HD= + +# APPLE_OAUTH_CLIENT_ID= +# APPLE_OAUTH_CLIENT_SECRET= +# APPLE_KEY_ID= +# APPLE_TEAM_ID= +# APPLE_PRIVATE_KEY= + +# DISCORD_OAUTH_CLIENT_ID= +# DISCORD_OAUTH_CLIENT_SECRET= + +# FACEBOOK_OAUTH_CLIENT_ID= +# FACEBOOK_OAUTH_CLIENT_SECRET= + +# GITHUB_OAUTH_CLIENT_ID= +# GITHUB_OAUTH_CLIENT_SECRET= + +# GITLAB_OAUTH_CLIENT_ID= +# GITLAB_OAUTH_CLIENT_SECRET= +# GITLAB_BASE_URL= + +# BITBUCKET_OAUTH_CLIENT_ID= +# BITBUCKET_OAUTH_CLIENT_SECRET= + +# LINKEDIN_OAUTH_CLIENT_ID= +# LINKEDIN_OAUTH_CLIENT_SECRET= + +# OKTA_OAUTH_CLIENT_ID= +# OKTA_OAUTH_CLIENT_SECRET= +# OKTA_DOMAIN= + +# AZURE_AD_OAUTH_CLIENT_ID= +# AZURE_AD_OAUTH_CLIENT_SECRET= +# AZURE_AD_DIRECTORY_ID= + +# BOXY_SAML_OAUTH_CLIENT_ID= +# BOXY_SAML_OAUTH_CLIENT_SECRET= +# BOXY_SAML_URL= + +# TWITTER_OAUTH_CLIENT_ID= +# TWITTER_OAUTH_CLIENT_SECRET= + +# ============================================================================ # +# Analytics - PostHog +# ============================================================================ # POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7 - -# Third-party (required for TLS/SSL) -AGENTA_SSL_DIR= - -# Third-party (optional) -NGINX_PORT=80 -SUPERTOKENS_API_KEY= -# Redis: set REDIS_URI for a single instance, or override with the split URIs below -REDIS_URI= -REDIS_URI_VOLATILE= -REDIS_URI_DURABLE= -POSTGRES_PORT= -SUPERTOKENS_PORT= - -GOOGLE_OAUTH_CLIENT_ID= -GOOGLE_OAUTH_CLIENT_SECRET= -GITHUB_OAUTH_CLIENT_ID= -GITHUB_OAUTH_CLIENT_SECRET= - -NEW_RELIC_LICENSE_KEY= -NRIA_LICENSE_KEY= - -LOOPS_API_KEY= - -SENDGRID_API_KEY= - -CRISP_WEBSITE_ID= - -STRIPE_API_KEY= -STRIPE_WEBHOOK_SECRET= - -# 
Third-party — LLM (optional) -ALEPHALPHA_API_KEY= -ANTHROPIC_API_KEY= -ANYSCALE_API_KEY= -COHERE_API_KEY= -DEEPINFRA_API_KEY= -GEMINI_API_KEY= -GROQ_API_KEY= -MISTRAL_API_KEY= -OPENAI_API_KEY= -OPENROUTER_API_KEY= -PERPLEXITYAI_API_KEY= -TOGETHERAI_API_KEY= - -# Legacy (deprecated, to be removed) -AGENTA_PORT=80 -BARE_DOMAIN_NAME=localhost -DOMAIN_NAME=http://localhost -WEBSITE_DOMAIN_NAME=http://localhost -SERVICE_URL_TEMPLATE=http://localhost:80/services/{path} -POSTGRES_DB=agenta_oss -POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_oss -ALEMBIC_CFG_PATH=/app/oss/databases/postgres/migrations/alembic.oss.ini diff --git a/sdk/agenta/client/backend/types/organization.py b/sdk/agenta/client/backend/types/organization.py index fef8794c5e..4b6f891ee0 100644 --- a/sdk/agenta/client/backend/types/organization.py +++ b/sdk/agenta/client/backend/types/organization.py @@ -8,9 +8,9 @@ class Organization(UniversalBaseModel): id: str - name: str + name: typing.Optional[str] = None owner: str - description: str + description: typing.Optional[str] = None type: typing.Optional[str] = None workspaces: typing.Optional[typing.List[str]] = None diff --git a/sdk/agenta/sdk/agenta_init.py b/sdk/agenta/sdk/agenta_init.py index 0cae1429c2..5b2efc82ea 100644 --- a/sdk/agenta/sdk/agenta_init.py +++ b/sdk/agenta/sdk/agenta_init.py @@ -123,9 +123,7 @@ def init( ) if self.api_key is None: - log.warning( - "API key is required (in most cases). Please set AGENTA_API_KEY environment variable or pass api_key parameter in ag.init()." - ) + log.warning("Agenta - API key: missing") log.info("Agenta - API URL: %s", self.api_url) diff --git a/sdk/agenta/sdk/assets.py b/sdk/agenta/sdk/assets.py index 4457ab357b..a584371842 100644 --- a/sdk/agenta/sdk/assets.py +++ b/sdk/agenta/sdk/assets.py @@ -1,3 +1,8 @@ +from typing import Dict, Optional, Tuple + +from litellm import cost_calculator + + supported_llm_models = { "anthropic": [ "anthropic/claude-sonnet-4-5", @@ -206,6 +211,58 @@ providers_list = list(supported_llm_models.keys()) + +def _get_model_costs(model: str) -> Optional[Tuple[float, float]]: + """ + Get the input and output costs per 1M tokens for a model. + + Uses litellm's cost_calculator (same as tracing/inline.py) for consistency. + + Args: + model: The model name (e.g., "gpt-4o" or "anthropic/claude-3-opus-20240229") + + Returns: + Tuple of (input_cost, output_cost) per 1M tokens, or None if not found. + """ + try: + costs = cost_calculator.cost_per_token( + model=model, + prompt_tokens=1_000_000, + completion_tokens=1_000_000, + ) + if costs: + input_cost, output_cost = costs + if input_cost > 0 or output_cost > 0: + return (input_cost, output_cost) + except Exception: + pass + return None + + +def _build_model_metadata() -> Dict[str, Dict[str, Dict[str, float]]]: + """ + Build metadata dictionary with costs for all supported models. 
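+
+    Example (illustrative only; keys and figures depend on litellm's pricing
+    table at import time, and assume "gpt-4o" is among the supported models):
+
+        model_metadata["openai"]["gpt-4o"]
+        # -> {"input": 2.5, "output": 10.0}   (USD per 1M tokens)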
+ + Returns: + Nested dict: {provider: {model: {"input": cost, "output": cost}}} + """ + metadata: Dict[str, Dict[str, Dict[str, float]]] = {} + + for provider, models in supported_llm_models.items(): + metadata[provider] = {} + for model in models: + costs = _get_model_costs(model) + if costs: + metadata[provider][model] = { + "input": costs[0], + "output": costs[1], + } + + return metadata + + +model_metadata = _build_model_metadata() + model_to_provider_mapping = { model: provider for provider, models in supported_llm_models.items() diff --git a/sdk/agenta/sdk/middleware/config.py b/sdk/agenta/sdk/middleware/config.py index cda301ff07..98d960a411 100644 --- a/sdk/agenta/sdk/middleware/config.py +++ b/sdk/agenta/sdk/middleware/config.py @@ -224,6 +224,7 @@ async def _parse_variant_ref( baggage.get("ag.refs.variant.slug") # ALTERNATIVE or request.query_params.get("variant_slug") + or body.get("variant_slug") # LEGACY or baggage.get("variant_slug") or request.query_params.get("config") @@ -234,6 +235,7 @@ async def _parse_variant_ref( baggage.get("ag.refs.variant.version") # ALTERNATIVE or request.query_params.get("variant_version") + or body.get("variant_version") # LEGACY or baggage.get("variant_version") ) @@ -244,7 +246,7 @@ async def _parse_variant_ref( return Reference( id=variant_id, slug=variant_slug, - version=variant_version, + version=str(variant_version) if variant_version is not None else None, ) async def _parse_environment_ref( diff --git a/sdk/agenta/sdk/types.py b/sdk/agenta/sdk/types.py index 29e1fc8c9c..b3e634c574 100644 --- a/sdk/agenta/sdk/types.py +++ b/sdk/agenta/sdk/types.py @@ -8,7 +8,7 @@ from starlette.responses import StreamingResponse -from agenta.sdk.assets import supported_llm_models +from agenta.sdk.assets import supported_llm_models, model_metadata from agenta.client.backend.types import AgentaNodesResponse, AgentaNodeDto @@ -23,7 +23,11 @@ def MCField( # pylint: disable=invalid-name ) -> Field: # Pydantic 2.12+ no longer allows post-creation mutation of field properties if isinstance(choices, dict): - json_extra = {"choices": choices, "x-parameter": "grouped_choice"} + json_extra = { + "choices": choices, + "x-parameter": "grouped_choice", + "x-model-metadata": model_metadata, + } elif isinstance(choices, list): json_extra = {"choices": choices, "x-parameter": "choice"} else: diff --git a/sdk/agenta/sdk/utils/lazy.py b/sdk/agenta/sdk/utils/lazy.py index f208e9f8b6..b37b073297 100644 --- a/sdk/agenta/sdk/utils/lazy.py +++ b/sdk/agenta/sdk/utils/lazy.py @@ -10,9 +10,6 @@ from fastapi import APIRouter, Body, FastAPI, HTTPException, Request from jinja2 import Template, TemplateError from openai import AsyncOpenAI, OpenAIError - from RestrictedPython import safe_builtins, compile_restricted, utility_builtins - from RestrictedPython.Eval import default_guarded_getiter, default_guarded_getitem - from RestrictedPython.Guards import guarded_iter_unpack_sequence, full_write_guard from starlette.responses import Response as StarletteResponse, StreamingResponse from jsonpath import JSONPointer import jsonpath as jsonpath_module @@ -50,19 +47,6 @@ def safe_load(self, *args: Any, **kwargs: Any) -> Any: ... 
_openai_cached: Optional[Tuple[type["AsyncOpenAI"], type["OpenAIError"]]] = None _openai_checked = False -_restrictedpython_cached: Optional[ - Tuple[ - dict, - Callable[..., Any], - dict, - Callable[..., Any], - Callable[..., Any], - Callable[..., Any], - Callable[..., Any], - ] -] = None -_restrictedpython_checked = False - _yaml_module: Optional[_YamlModule] = None _yaml_checked = False @@ -157,55 +141,6 @@ def _load_openai() -> Tuple[type["AsyncOpenAI"], type["OpenAIError"]]: return _openai_cached -def _load_restrictedpython() -> Tuple[ - dict, - Callable[..., Any], - dict, - Callable[..., Any], - Callable[..., Any], - Callable[..., Any], - Callable[..., Any], -]: - global _restrictedpython_cached, _restrictedpython_checked # pylint: disable=global-statement - - if _restrictedpython_checked: - if _restrictedpython_cached is None: - raise ImportError( - "RestrictedPython is required for local sandbox execution. " - "Install it with `pip install restrictedpython`." - ) - return _restrictedpython_cached - - _restrictedpython_checked = True - try: - from RestrictedPython import safe_builtins, compile_restricted, utility_builtins - from RestrictedPython.Eval import ( - default_guarded_getiter, - default_guarded_getitem, - ) - from RestrictedPython.Guards import ( - guarded_iter_unpack_sequence, - full_write_guard, - ) - except Exception as exc: - _restrictedpython_cached = None - raise ImportError( - "RestrictedPython is required for local sandbox execution. " - "Install it with `pip install restrictedpython`." - ) from exc - - _restrictedpython_cached = ( - safe_builtins, - compile_restricted, - utility_builtins, - default_guarded_getiter, - default_guarded_getitem, - guarded_iter_unpack_sequence, - full_write_guard, - ) - return _restrictedpython_cached - - def _load_yaml() -> _YamlModule: global _yaml_module, _yaml_checked # pylint: disable=global-statement diff --git a/sdk/agenta/sdk/workflows/builtin.py b/sdk/agenta/sdk/workflows/builtin.py index 96fe546f66..d4139e7675 100644 --- a/sdk/agenta/sdk/workflows/builtin.py +++ b/sdk/agenta/sdk/workflows/builtin.py @@ -166,11 +166,13 @@ def auto_custom_code_run( # correct_answer_key: Optional[str] = "correct_answer", threshold: Optional[float] = 0.5, + runtime: Optional[str] = "python", ) -> Workflow: parameters = dict( code=code, correct_answer_key=correct_answer_key, threshold=threshold, + runtime=runtime, ) return evaluator( diff --git a/sdk/agenta/sdk/workflows/configurations.py b/sdk/agenta/sdk/workflows/configurations.py index 9086047c53..42310b9368 100644 --- a/sdk/agenta/sdk/workflows/configurations.py +++ b/sdk/agenta/sdk/workflows/configurations.py @@ -5,6 +5,7 @@ auto_exact_match_v0_configuration = WorkflowServiceConfiguration() auto_regex_test_v0_configuration = WorkflowServiceConfiguration() field_match_test_v0_configuration = WorkflowServiceConfiguration() +json_multi_field_match_v0_configuration = WorkflowServiceConfiguration() auto_webhook_test_v0_configuration = WorkflowServiceConfiguration() auto_custom_code_run_v0_configuration = WorkflowServiceConfiguration() auto_ai_critique_v0_configuration = WorkflowServiceConfiguration() diff --git a/sdk/agenta/sdk/workflows/handlers.py b/sdk/agenta/sdk/workflows/handlers.py index 3ecd036459..fa95fa9654 100644 --- a/sdk/agenta/sdk/workflows/handlers.py +++ b/sdk/agenta/sdk/workflows/handlers.py @@ -1,14 +1,14 @@ -from typing import List, Any, Optional, Any, Dict, Union -from json import dumps, loads -import traceback import json -import re import math +import re +import traceback +from 
difflib import SequenceMatcher +from json import dumps, loads +from typing import Any, Dict, List, Optional, Union import httpx from pydantic import BaseModel, Field -from difflib import SequenceMatcher from agenta.sdk.utils.logging import get_module_logger from agenta.sdk.utils.lazy import ( @@ -21,33 +21,30 @@ from agenta.sdk.litellm import mockllm from agenta.sdk.types import PromptTemplate, Message from agenta.sdk.managers.secrets import SecretsManager - from agenta.sdk.decorators.tracing import instrument - +from agenta.sdk.litellm.litellm import litellm_handler from agenta.sdk.models.shared import Data -from agenta.sdk.models.tracing import Trace from agenta.sdk.workflows.sandbox import execute_code_safely from agenta.sdk.workflows.templates import EVALUATOR_TEMPLATES from agenta.sdk.workflows.errors import ( + CustomCodeServerV0Error, InvalidConfigurationParametersV0Error, - MissingConfigurationParameterV0Error, InvalidConfigurationParameterV0Error, InvalidInputsV0Error, - MissingInputV0Error, InvalidInputV0Error, InvalidOutputsV0Error, - MissingOutputV0Error, InvalidSecretsV0Error, JSONDiffV0Error, LevenshteinDistanceV0Error, - SyntacticSimilarityV0Error, + MissingConfigurationParameterV0Error, + MissingInputV0Error, + PromptCompletionV0Error, + PromptFormattingV0Error, + RegexPatternV0Error, SemanticSimilarityV0Error, - WebhookServerV0Error, + SyntacticSimilarityV0Error, WebhookClientV0Error, - CustomCodeServerV0Error, - RegexPatternV0Error, - PromptFormattingV0Error, - PromptCompletionV0Error, + WebhookServerV0Error, ) log = get_module_logger(__name__) @@ -58,7 +55,6 @@ def _configure_litellm(): litellm = _load_litellm() if not litellm: raise ImportError("litellm is required for completion handling.") - from agenta.sdk.litellm.litellm import litellm_handler litellm.logging = False litellm.set_verbose = False @@ -86,9 +82,7 @@ def _compute_similarity(embedding_1: List[float], embedding_2: List[float]) -> f return dot / (norm1 * norm2) -import json -import re -from typing import Any, Dict, Iterable, Tuple, Optional +from typing import Any, Iterable, Tuple # ========= Scheme detection ========= @@ -393,7 +387,7 @@ def auto_exact_match_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "correct_answer_key" in parameters: + if "correct_answer_key" not in parameters: raise MissingConfigurationParameterV0Error(path="correct_answer_key") correct_answer_key = str(parameters["correct_answer_key"]) @@ -401,7 +395,7 @@ def auto_exact_match_v0( if inputs is None or not isinstance(inputs, dict): raise InvalidInputsV0Error(expected="dict", got=inputs) - if not correct_answer_key in inputs: + if correct_answer_key not in inputs: raise MissingInputV0Error(path=correct_answer_key) correct_answer = inputs[correct_answer_key] @@ -438,7 +432,7 @@ def auto_regex_test_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "regex_pattern" in parameters: + if "regex_pattern" not in parameters: raise MissingConfigurationParameterV0Error(path="regex_pattern") regex_pattern = parameters["regex_pattern"] @@ -496,12 +490,12 @@ def field_match_test_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "json_field" in parameters: + if "json_field" not in parameters: raise MissingConfigurationParameterV0Error(path="json_field") 
json_field = str(parameters["json_field"]) - if not "correct_answer_key" in parameters: + if "correct_answer_key" not in parameters: raise MissingConfigurationParameterV0Error(path="correct_answer_key") correct_answer_key = str(parameters["correct_answer_key"]) @@ -509,7 +503,7 @@ def field_match_test_v0( if inputs is None or not isinstance(inputs, dict): raise InvalidInputsV0Error(expected="dict", got=inputs) - if not correct_answer_key in inputs: + if correct_answer_key not in inputs: raise MissingInputV0Error(path=correct_answer_key) correct_answer = inputs[correct_answer_key] @@ -522,7 +516,7 @@ def field_match_test_v0( if isinstance(outputs, str): try: outputs_dict = loads(outputs) - except json.JSONDecodeError as e: + except json.JSONDecodeError: # raise InvalidOutputsV0Error(expected="dict", got=outputs) from e return {"success": False} @@ -530,7 +524,7 @@ def field_match_test_v0( # raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs) return {"success": False} - if not json_field in outputs_dict: + if json_field not in outputs_dict: # raise MissingOutputV0Error(path=json_field) return {"success": False} @@ -541,6 +535,148 @@ def field_match_test_v0( return {"success": success} +def _get_nested_value(obj: Any, path: str) -> Any: + """ + Get value from nested object using resolve_any() with graceful None on failure. + + Supports multiple path formats: + - Dot notation: "user.address.city", "items.0.name" + - JSON Path: "$.user.address.city", "$.items[0].name" + - JSON Pointer: "/user/address/city", "/items/0/name" + + Args: + obj: The object to traverse (dict or list) + path: Path expression in any supported format + + Returns: + The value at the path, or None if path doesn't exist or resolution fails + """ + if obj is None: + return None + + try: + return resolve_any(path, obj) + except (KeyError, IndexError, ValueError, TypeError, ImportError): + return None + + +@instrument(annotate=True) +def json_multi_field_match_v0( + parameters: Optional[Data] = None, + inputs: Optional[Data] = None, + outputs: Optional[Union[Data, str]] = None, +) -> Any: + """ + Multi-field JSON match evaluator for comparing multiple fields between expected and actual JSON. + + Each configured field becomes a separate score (0 or 1), and an aggregate_score shows + the percentage of matching fields. Useful for entity extraction validation. 
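+
+    Example (illustrative values; dot-notation paths resolved via _get_nested_value):
+
+        parameters = {"fields": ["name", "user.city"], "correct_answer_key": "correct_answer"}
+        inputs = {"correct_answer": '{"name": "Ada", "user": {"city": "London"}}'}
+        outputs = '{"name": "Ada", "user": {"city": "Paris"}}'
+        # -> {"name": 1.0, "user.city": 0.0, "aggregate_score": 0.5}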
+ + Args: + inputs: Testcase data with ground truth JSON + outputs: Output from the workflow execution (expected to be JSON string or dict) + parameters: Configuration with: + - fields: List of field paths to compare (e.g., ["name", "user.address.city"]) + - correct_answer_key: Key in inputs containing the expected JSON + + Returns: + Dict with per-field scores and aggregate_score, e.g.: + {"name": 1.0, "email": 0.0, "aggregate_score": 0.5} + """ + if parameters is None or not isinstance(parameters, dict): + raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) + + if "fields" not in parameters: + raise MissingConfigurationParameterV0Error(path="fields") + + fields = parameters["fields"] + + if not isinstance(fields, list) or len(fields) == 0: + raise InvalidConfigurationParameterV0Error( + path="fields", + expected="non-empty list", + got=fields, + ) + + if "correct_answer_key" not in parameters: + raise MissingConfigurationParameterV0Error(path="correct_answer_key") + + correct_answer_key = str(parameters["correct_answer_key"]) + + if inputs is None or not isinstance(inputs, dict): + raise InvalidInputsV0Error(expected="dict", got=inputs) + + if correct_answer_key not in inputs: + raise MissingInputV0Error(path=correct_answer_key) + + correct_answer = inputs[correct_answer_key] + + # Parse ground truth JSON + if isinstance(correct_answer, str): + try: + expected = json.loads(correct_answer) + except json.JSONDecodeError: + raise InvalidInputV0Error( + path=correct_answer_key, + expected="valid JSON string", + got=correct_answer, + ) + elif isinstance(correct_answer, dict): + expected = correct_answer + else: + raise InvalidInputV0Error( + path=correct_answer_key, + expected=["dict", "str"], + got=correct_answer, + ) + + # Parse output JSON + if not isinstance(outputs, str) and not isinstance(outputs, dict): + # Return all zeros if output is invalid + results: Dict[str, Any] = {field: 0.0 for field in fields} + results["aggregate_score"] = 0.0 + return results + + if isinstance(outputs, str): + try: + actual = json.loads(outputs) + except json.JSONDecodeError: + # Return all zeros if output is not valid JSON + results = {field: 0.0 for field in fields} + results["aggregate_score"] = 0.0 + return results + else: + actual = outputs + + if not isinstance(actual, dict): + # Return all zeros if parsed output is not a dict + results = {field: 0.0 for field in fields} + results["aggregate_score"] = 0.0 + return results + + # -------------------------------------------------------------------------- + # Compare each configured field + results = {} + matches = 0 + + for field_path in fields: + expected_val = _get_nested_value(expected, field_path) + actual_val = _get_nested_value(actual, field_path) + + # Exact match comparison + match = expected_val == actual_val + + results[field_path] = 1.0 if match else 0.0 + if match: + matches += 1 + + # Aggregate score is the percentage of matching fields + results["aggregate_score"] = matches / len(fields) if fields else 0.0 + # -------------------------------------------------------------------------- + + return results + + @instrument(annotate=True) async def auto_webhook_test_v0( parameters: Optional[Data] = None, @@ -561,12 +697,12 @@ async def auto_webhook_test_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "webhook_url" in parameters: + if "webhook_url" not in parameters: raise 
MissingConfigurationParameterV0Error(path="webhook_url") webhook_url = str(parameters["webhook_url"]) - if not "correct_answer_key" in parameters: + if "correct_answer_key" not in parameters: raise MissingConfigurationParameterV0Error(path="correct_answer_key") correct_answer_key = str(parameters["correct_answer_key"]) @@ -574,7 +710,7 @@ async def auto_webhook_test_v0( if inputs is None or not isinstance(inputs, dict): raise InvalidInputsV0Error(expected="dict", got=inputs) - if not correct_answer_key in inputs: + if correct_answer_key not in inputs: raise MissingInputV0Error(path=correct_answer_key) correct_answer = inputs[correct_answer_key] @@ -666,12 +802,12 @@ async def auto_custom_code_run_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "code" in parameters: + if "code" not in parameters: raise MissingConfigurationParameterV0Error(path="code") code = str(parameters["code"]) - if not "correct_answer_key" in parameters: + if "correct_answer_key" not in parameters: raise MissingConfigurationParameterV0Error(path="correct_answer_key") correct_answer_key = str(parameters["correct_answer_key"]) @@ -679,7 +815,7 @@ async def auto_custom_code_run_v0( if inputs is None or not isinstance(inputs, dict): raise InvalidInputsV0Error(expected="dict", got=inputs) - if not correct_answer_key in inputs: + if correct_answer_key not in inputs: raise MissingInputV0Error(path=correct_answer_key) correct_answer = inputs[correct_answer_key] @@ -768,7 +904,7 @@ async def auto_ai_critique_v0( correct_answer_key = parameters.get("correct_answer_key") - if not "prompt_template" in parameters: + if "prompt_template" not in parameters: raise MissingConfigurationParameterV0Error(path="prompt_template") prompt_template = parameters.get("prompt_template") @@ -799,7 +935,7 @@ async def auto_ai_critique_v0( "json_schema" if template_version == "4" else "text" ) - if not response_type in ["text", "json_object", "json_schema"]: + if response_type not in ["text", "json_object", "json_schema"]: raise InvalidConfigurationParameterV0Error( path="response_type", expected=["text", "json_object", "json_schema"], @@ -1004,7 +1140,7 @@ def auto_starts_with_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "prefix" in parameters: + if "prefix" not in parameters: raise MissingConfigurationParameterV0Error(path="prefix") prefix = parameters["prefix"] @@ -1053,7 +1189,7 @@ def auto_ends_with_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "suffix" in parameters: + if "suffix" not in parameters: raise MissingConfigurationParameterV0Error(path="suffix") suffix = parameters["suffix"] @@ -1102,7 +1238,7 @@ def auto_contains_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "substring" in parameters: + if "substring" not in parameters: raise MissingConfigurationParameterV0Error(path="substring") substring = parameters["substring"] @@ -1151,7 +1287,7 @@ def auto_contains_any_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "substrings" in parameters: + if "substrings" not in parameters: raise MissingConfigurationParameterV0Error(path="substrings") 
substrings = parameters["substrings"] @@ -1209,7 +1345,7 @@ def auto_contains_all_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "substrings" in parameters: + if "substrings" not in parameters: raise MissingConfigurationParameterV0Error(path="substrings") substrings = parameters["substrings"] @@ -1309,7 +1445,7 @@ def auto_json_diff_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "correct_answer_key" in parameters: + if "correct_answer_key" not in parameters: raise MissingConfigurationParameterV0Error(path="correct_answer_key") correct_answer_key = str(parameters["correct_answer_key"]) @@ -1317,7 +1453,7 @@ def auto_json_diff_v0( if inputs is None or not isinstance(inputs, dict): raise InvalidInputsV0Error(expected="dict", got=inputs) - if not correct_answer_key in inputs: + if correct_answer_key not in inputs: raise MissingInputV0Error(path=correct_answer_key) correct_answer = inputs[correct_answer_key] @@ -1401,7 +1537,7 @@ def auto_levenshtein_distance_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "correct_answer_key" in parameters: + if "correct_answer_key" not in parameters: raise MissingConfigurationParameterV0Error(path="correct_answer_key") correct_answer_key = str(parameters["correct_answer_key"]) @@ -1411,7 +1547,7 @@ def auto_levenshtein_distance_v0( if inputs is None or not isinstance(inputs, dict): raise InvalidInputsV0Error(expected="dict", got=inputs) - if not correct_answer_key in inputs: + if correct_answer_key not in inputs: raise MissingInputV0Error(path=correct_answer_key) correct_answer = inputs[correct_answer_key] @@ -1506,7 +1642,7 @@ def auto_similarity_match_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "correct_answer_key" in parameters: + if "correct_answer_key" not in parameters: raise MissingConfigurationParameterV0Error(path="correct_answer_key") correct_answer_key = str(parameters["correct_answer_key"]) @@ -1516,7 +1652,7 @@ def auto_similarity_match_v0( if inputs is None or not isinstance(inputs, dict): raise InvalidInputsV0Error(expected="dict", got=inputs) - if not correct_answer_key in inputs: + if correct_answer_key not in inputs: raise MissingInputV0Error(path=correct_answer_key) correct_answer = inputs[correct_answer_key] @@ -1599,7 +1735,7 @@ async def auto_semantic_similarity_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if not "correct_answer_key" in parameters: + if "correct_answer_key" not in parameters: raise MissingConfigurationParameterV0Error(path="correct_answer_key") correct_answer_key = str(parameters["correct_answer_key"]) @@ -1612,7 +1748,7 @@ async def auto_semantic_similarity_v0( if inputs is None or not isinstance(inputs, dict): raise InvalidInputsV0Error(expected="dict", got=inputs) - if not correct_answer_key in inputs: + if correct_answer_key not in inputs: raise MissingInputV0Error(path=correct_answer_key) correct_answer = inputs[correct_answer_key] @@ -1715,7 +1851,7 @@ async def completion_v0( if parameters is None or not isinstance(parameters, dict): raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters) - if 
not "prompt" in parameters: + if "prompt" not in parameters: raise MissingConfigurationParameterV0Error(path="prompt") params: Dict[str, Any] = {**(parameters or {})} diff --git a/sdk/agenta/sdk/workflows/interfaces.py b/sdk/agenta/sdk/workflows/interfaces.py index 85334ab6cb..6c1e5edfbf 100644 --- a/sdk/agenta/sdk/workflows/interfaces.py +++ b/sdk/agenta/sdk/workflows/interfaces.py @@ -169,6 +169,53 @@ ), ) +json_multi_field_match_v0_interface = WorkflowServiceInterface( + uri="agenta:built-in:json_multi_field_match:v0", + schemas=dict( # type: ignore + parameters={ + "type": "object", + "title": "JSON Multi-Field Match Parameters", + "description": "Settings for comparing multiple JSON fields against expected values from a ground truth column.", + "properties": { + "correct_answer_key": { + "type": "string", + "title": "Ground Truth Column", + "description": "Column in test data containing the JSON ground truth.", + "default": "correct_answer", + }, + "fields": { + "type": "array", + "title": "Fields to Compare", + "description": "List of JSON field paths (dot notation) to compare. Each field becomes a separate score.", + "items": {"type": "string"}, + "default": [], + }, + }, + "required": ["correct_answer_key", "fields"], + "additionalProperties": False, + }, + inputs={ + "type": "object", + "title": "JSON Multi-Field Match Inputs", + "description": "Testcase data including the JSON ground truth.", + }, + outputs={ + "type": "object", + "title": "JSON Multi-Field Match Outputs", + "description": "Per-field match scores and aggregate score. Each field produces a 0 or 1 output.", + "properties": { + "aggregate_score": { + "type": "number", + "title": "Aggregate Score", + "description": "Percentage of matched fields (0-1).", + }, + }, + "required": ["aggregate_score"], + "additionalProperties": True, # Allows dynamic field outputs + }, + ), +) + auto_webhook_test_v0_interface = WorkflowServiceInterface( uri="agenta:built-in:auto_webhook_test:v0", schemas=dict( # type: ignore diff --git a/sdk/agenta/sdk/workflows/runners/daytona.py b/sdk/agenta/sdk/workflows/runners/daytona.py index eaf6fea1d2..a05a57db1b 100644 --- a/sdk/agenta/sdk/workflows/runners/daytona.py +++ b/sdk/agenta/sdk/workflows/runners/daytona.py @@ -1,6 +1,7 @@ import os import json -from typing import Any, Dict, Union, Optional, TYPE_CHECKING +from contextlib import contextmanager +from typing import Any, Dict, Generator, Union, Optional, TYPE_CHECKING import agenta as ag from agenta.sdk.workflows.runners.base import CodeRunner @@ -15,6 +16,42 @@ log = get_module_logger(__name__) +def _extract_error_message(error_text: str) -> str: + """Extract a clean error message from a Python traceback. + + Given a full traceback string, extracts just the final error line + (e.g., "NameError: name 'foo' is not defined") instead of the full + noisy traceback with base64-encoded code. + + Args: + error_text: Full error/traceback string + + Returns: + Clean error message, or original text if extraction fails + """ + if not error_text: + return "Unknown error" + + lines = error_text.strip().split("\n") + + # Look for common Python error patterns from the end + for line in reversed(lines): + line = line.strip() + # Match patterns like "NameError: ...", "ValueError: ...", etc. 
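+        # Assumption: the last "<ErrorType>: <message>" line of a traceback names
+        # the raised exception; "File ..." location lines also contain ": " and
+        # are filtered out below.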
+ if ": " in line and not line.startswith("File "): + # Check if it looks like an error line (ErrorType: message) + parts = line.split(": ", 1) + if parts[0].replace(".", "").replace("_", "").isalnum(): + return line + + # Fallback: return last non-empty line + for line in reversed(lines): + if line.strip(): + return line.strip() + + return error_text[:200] if len(error_text) > 200 else error_text + + class DaytonaRunner(CodeRunner): """Remote code runner using Daytona sandbox for execution.""" @@ -186,6 +223,29 @@ def _create_sandbox(self, runtime: Optional[str] = None) -> Any: except Exception as e: raise RuntimeError(f"Failed to create sandbox from snapshot: {e}") + @contextmanager + def _sandbox_context( + self, runtime: Optional[str] = None + ) -> Generator["Sandbox", None, None]: + """Context manager for sandbox lifecycle. + + Ensures sandbox is deleted even if an error occurs during execution. + + Args: + runtime: Runtime environment (python, javascript, typescript), None = python + + Yields: + Sandbox instance + """ + sandbox = self._create_sandbox(runtime=runtime) + try: + yield sandbox + finally: + try: + sandbox.delete() + except Exception as e: + log.error("Failed to delete sandbox: %s", e) + def run( self, code: str, @@ -218,95 +278,98 @@ def run( runtime = runtime or "python" self._initialize_client() - sandbox: Sandbox = self._create_sandbox(runtime=runtime) - try: - # Prepare all parameters as a single dict - params = { - "app_params": app_params, - "inputs": inputs, - "output": output, - "correct_answer": correct_answer, - } - params_json = json.dumps(params) - - if not templates: - raise RuntimeError("Missing evaluator templates for Daytona execution") - - template = templates.get(runtime) - if template is None: - raise RuntimeError( - f"Missing evaluator template for runtime '{runtime}'" + with self._sandbox_context(runtime=runtime) as sandbox: + try: + # Prepare all parameters as a single dict + params = { + "app_params": app_params, + "inputs": inputs, + "output": output, + "correct_answer": correct_answer, + } + params_json = json.dumps(params) + + if not templates: + raise RuntimeError( + "Missing evaluator templates for Daytona execution" + ) + + template = templates.get(runtime) + if template is None: + raise RuntimeError( + f"Missing evaluator template for runtime '{runtime}'" + ) + + # Wrap the user code with the necessary context and evaluation + wrapped_code = template.format( + params_json=params_json, + user_code=code, ) - # Wrap the user code with the necessary context and evaluation - wrapped_code = template.format( - params_json=params_json, - user_code=code, - ) - - # Execute the code in the Daytona sandbox - response = sandbox.process.code_run(wrapped_code) - response_stdout = response.result if hasattr(response, "result") else "" - response_exit_code = getattr(response, "exit_code", 0) - response_error = getattr(response, "error", None) or getattr( - response, "stderr", None - ) - - sandbox.delete() - - if response_exit_code and response_exit_code != 0: - error_details = response_error or response_stdout or "Unknown error" - log.error( - "Sandbox execution error (exit_code=%s): %s", - response_exit_code, - error_details, - ) - raise RuntimeError( - f"Sandbox execution failed (exit_code={response_exit_code}): " - f"{error_details}" + # Execute the code in the Daytona sandbox + response = sandbox.process.code_run(wrapped_code) + response_stdout = response.result if hasattr(response, "result") else "" + response_exit_code = getattr(response, "exit_code", 
0) + response_error = getattr(response, "error", None) or getattr( + response, "stderr", None ) - # Parse the result from stdout - output_lines = response_stdout.strip().split("\n") - for line in reversed(output_lines): - if not line.strip(): - continue - try: - result_obj = json.loads(line) + if response_exit_code and response_exit_code != 0: + raw_error = response_error or response_stdout or "Unknown error" + # Log full error for debugging + # log.warning( + # "Sandbox execution error (exit_code=%s): %s", + # response_exit_code, + # raw_error, + # ) + # Extract clean error message for user display + clean_error = _extract_error_message(raw_error) + raise RuntimeError(clean_error) + + # Parse the result from stdout + output_lines = response_stdout.strip().split("\n") + for line in reversed(output_lines): + if not line.strip(): + continue + try: + result_obj = json.loads(line) + if isinstance(result_obj, dict) and "result" in result_obj: + result = result_obj["result"] + if isinstance(result, (float, int, type(None))): + return float(result) if result is not None else None + except json.JSONDecodeError: + continue + + # Fallback: attempt to extract a JSON object containing "result" + for line in reversed(output_lines): + if "result" not in line: + continue + start = line.find("{") + end = line.rfind("}") + if start == -1 or end == -1 or end <= start: + continue + try: + result_obj = json.loads(line[start : end + 1]) + except json.JSONDecodeError: + continue if isinstance(result_obj, dict) and "result" in result_obj: result = result_obj["result"] if isinstance(result, (float, int, type(None))): return float(result) if result is not None else None - except json.JSONDecodeError: - continue - - # Fallback: attempt to extract a JSON object containing "result" - for line in reversed(output_lines): - if "result" not in line: - continue - start = line.find("{") - end = line.rfind("}") - if start == -1 or end == -1 or end <= start: - continue - try: - result_obj = json.loads(line[start : end + 1]) - except json.JSONDecodeError: - continue - if isinstance(result_obj, dict) and "result" in result_obj: - result = result_obj["result"] - if isinstance(result, (float, int, type(None))): - return float(result) if result is not None else None - - log.error( - "Evaluation output did not include JSON result: %s", response_stdout - ) - raise ValueError("Could not parse evaluation result from Daytona output") - except Exception as e: - log.error(f"Error during Daytona code execution: {e}", exc_info=True) - # print(f"Exception details: {type(e).__name__}: {e}") - raise RuntimeError(f"Error during Daytona code execution: {e}") + # log.warning( + # "Evaluation output did not include JSON result: %s", response_stdout + # ) + raise ValueError( + "Could not parse evaluation result from Daytona output" + ) + + except Exception as e: + # log.warning( + # f"Error during Daytona code execution:\n {e}", exc_info=True + # ) + raise RuntimeError(e) def cleanup(self) -> None: """Clean up Daytona client resources.""" diff --git a/sdk/agenta/sdk/workflows/runners/local.py b/sdk/agenta/sdk/workflows/runners/local.py index 4e0aca20a2..f9309d8bba 100644 --- a/sdk/agenta/sdk/workflows/runners/local.py +++ b/sdk/agenta/sdk/workflows/runners/local.py @@ -1,7 +1,6 @@ from typing import Any, Dict, Union, Optional from agenta.sdk.workflows.runners.base import CodeRunner -from agenta.sdk.utils.lazy import _load_restrictedpython class LocalRunner(CodeRunner): diff --git a/sdk/agenta/sdk/workflows/runners/registry.py 
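A quick sanity check of the `_extract_error_message` helper added above; the traceback text here is made up for illustration:

```python
from agenta.sdk.workflows.runners.daytona import _extract_error_message

traceback_text = (
    "Traceback (most recent call last):\n"
    '  File "<string>", line 10, in <module>\n'
    "NameError: name 'foo' is not defined"
)

# The helper walks the lines from the end, skips "File ..." frames, and
# returns the first "ErrorType: message" line it finds.
assert _extract_error_message(traceback_text) == "NameError: name 'foo' is not defined"
```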
diff --git a/sdk/agenta/sdk/workflows/runners/local.py b/sdk/agenta/sdk/workflows/runners/local.py
index 4e0aca20a2..f9309d8bba 100644
--- a/sdk/agenta/sdk/workflows/runners/local.py
+++ b/sdk/agenta/sdk/workflows/runners/local.py
@@ -1,7 +1,6 @@
 from typing import Any, Dict, Union, Optional
 
 from agenta.sdk.workflows.runners.base import CodeRunner
-from agenta.sdk.utils.lazy import _load_restrictedpython
 
 
 class LocalRunner(CodeRunner):
diff --git a/sdk/agenta/sdk/workflows/runners/registry.py b/sdk/agenta/sdk/workflows/runners/registry.py
index 57c944e653..9984e9ad68 100644
--- a/sdk/agenta/sdk/workflows/runners/registry.py
+++ b/sdk/agenta/sdk/workflows/runners/registry.py
@@ -19,7 +19,7 @@ def get_runner() -> CodeRunner:
     Registry to get the appropriate code runner based on environment configuration.
 
     Uses AGENTA_SERVICES_SANDBOX_RUNNER environment variable:
-    - "local" (default): Uses RestrictedPython for local execution
+    - "local" (default): Uses current container for local execution
     - "daytona": Uses Daytona remote sandbox
 
     Returns:
diff --git a/sdk/agenta/sdk/workflows/templates.py b/sdk/agenta/sdk/workflows/templates.py
index 7dc030c2f9..9e131d5ce3 100644
--- a/sdk/agenta/sdk/workflows/templates.py
+++ b/sdk/agenta/sdk/workflows/templates.py
@@ -48,7 +48,7 @@
 // Ensure result is a number
 result = Number(result);
 if (!Number.isFinite(result)) {{
-    result = 0.0;
+    result = null;
 }}
 
 // Print result for capture
@@ -71,7 +71,7 @@
 // Ensure result is a number
 result = Number(result);
 if (!Number.isFinite(result)) {{
-    result = 0.0;
+    result = null;
 }}
 
 // Print result for capture
diff --git a/sdk/agenta/sdk/workflows/utils.py b/sdk/agenta/sdk/workflows/utils.py
index d86f499da4..2ecd57d219 100644
--- a/sdk/agenta/sdk/workflows/utils.py
+++ b/sdk/agenta/sdk/workflows/utils.py
@@ -9,6 +9,7 @@
     auto_exact_match_v0,
     auto_regex_test_v0,
     field_match_test_v0,
+    json_multi_field_match_v0,
     auto_webhook_test_v0,
     auto_custom_code_run_v0,
     auto_ai_critique_v0,
@@ -31,6 +32,7 @@
     auto_exact_match_v0_interface,
     auto_regex_test_v0_interface,
     field_match_test_v0_interface,
+    json_multi_field_match_v0_interface,
     auto_webhook_test_v0_interface,
     auto_custom_code_run_v0_interface,
     auto_ai_critique_v0_interface,
@@ -54,6 +56,7 @@
     auto_exact_match_v0_configuration,
     auto_regex_test_v0_configuration,
     field_match_test_v0_configuration,
+    json_multi_field_match_v0_configuration,
     auto_webhook_test_v0_configuration,
     auto_custom_code_run_v0_configuration,
     auto_ai_critique_v0_configuration,
@@ -78,6 +81,7 @@
     auto_exact_match=dict(v0=auto_exact_match_v0_interface),
     auto_regex_test=dict(v0=auto_regex_test_v0_interface),
     field_match_test=dict(v0=field_match_test_v0_interface),
+    json_multi_field_match=dict(v0=json_multi_field_match_v0_interface),
     auto_webhook_test=dict(v0=auto_webhook_test_v0_interface),
     auto_custom_code_run=dict(v0=auto_custom_code_run_v0_interface),
     auto_ai_critique=dict(v0=auto_ai_critique_v0_interface),
@@ -104,6 +108,7 @@
     auto_exact_match=dict(v0=auto_exact_match_v0_configuration),
     auto_regex_test=dict(v0=auto_regex_test_v0_configuration),
     field_match_test=dict(v0=field_match_test_v0_configuration),
+    json_multi_field_match=dict(v0=json_multi_field_match_v0_configuration),
     auto_webhook_test=dict(v0=auto_webhook_test_v0_configuration),
     auto_custom_code_run=dict(v0=auto_custom_code_run_v0_configuration),
     auto_ai_critique=dict(v0=auto_ai_critique_v0_configuration),
@@ -160,6 +165,7 @@
     auto_exact_match=dict(v0=auto_exact_match_v0),
     auto_regex_test=dict(v0=auto_regex_test_v0),
     field_match_test=dict(v0=field_match_test_v0),
+    json_multi_field_match=dict(v0=json_multi_field_match_v0),
     auto_webhook_test=dict(v0=auto_webhook_test_v0),
     auto_custom_code_run=dict(v0=auto_custom_code_run_v0),
     auto_ai_critique=dict(v0=auto_ai_critique_v0),
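The runner selection that the registry.py docstring above describes presumably reduces to the sketch below. The body of `get_runner` is not shown in this diff, so the dispatch logic here is an assumption; only the environment variable name, the import paths, and the two runner classes come from the patch:

```python
import os

from agenta.sdk.workflows.runners.base import CodeRunner
from agenta.sdk.workflows.runners.daytona import DaytonaRunner
from agenta.sdk.workflows.runners.local import LocalRunner


def get_runner() -> CodeRunner:
    """Pick a code runner from the AGENTA_SERVICES_SANDBOX_RUNNER env var."""
    kind = os.getenv("AGENTA_SERVICES_SANDBOX_RUNNER", "local").lower()
    if kind == "daytona":
        return DaytonaRunner()  # remote Daytona sandbox
    return LocalRunner()  # default: execute in the current container
```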
framework (asyncio)" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2372b15a5f62ed37789a6b383ff7344fc5b9f243999b0cd9b629d8bc5f5b4155"}, - {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7f8659a48995edee7229522984bd1009c1213929c769c2daa80b40fe49a180c"}, - {file = "aiohttp-3.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:939ced4a7add92296b0ad38892ce62b98c619288a081170695c6babe4f50e636"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6315fb6977f1d0dd41a107c527fee2ed5ab0550b7d885bc15fee20ccb17891da"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e7352512f763f760baaed2637055c49134fd1d35b37c2dedfac35bfe5cf8725"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e09a0a06348a2dd73e7213353c90d709502d9786219f69b731f6caa0efeb46f5"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a09a6d073fb5789456545bdee2474d14395792faa0527887f2f4ec1a486a59d3"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b59d13c443f8e049d9e94099c7e412e34610f1f49be0f230ec656a10692a5802"}, - {file = "aiohttp-3.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:20db2d67985d71ca033443a1ba2001c4b5693fe09b0e29f6d9358a99d4d62a8a"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:960c2fc686ba27b535f9fd2b52d87ecd7e4fd1cf877f6a5cba8afb5b4a8bd204"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6c00dbcf5f0d88796151e264a8eab23de2997c9303dd7c0bf622e23b24d3ce22"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fed38a5edb7945f4d1bcabe2fcd05db4f6ec7e0e82560088b754f7e08d93772d"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:b395bbca716c38bef3c764f187860e88c724b342c26275bc03e906142fc5964f"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:204ffff2426c25dfda401ba08da85f9c59525cdc42bda26660463dd1cbcfec6f"}, - {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:05c4dd3c48fb5f15db31f57eb35374cb0c09afdde532e7fb70a75aede0ed30f6"}, - {file = "aiohttp-3.13.2-cp310-cp310-win32.whl", hash = "sha256:e574a7d61cf10351d734bcddabbe15ede0eaa8a02070d85446875dc11189a251"}, - {file = "aiohttp-3.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:364f55663085d658b8462a1c3f17b2b84a5c2e1ba858e1b79bff7b2e24ad1514"}, - {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0"}, - {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb"}, - {file = "aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613"}, - {file = 
"aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592"}, - {file = "aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782"}, - {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8"}, - {file = "aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec"}, - {file = "aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c"}, - {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b"}, - {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc"}, - {file = "aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6"}, - {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e"}, - {file = 
"aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169"}, - {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248"}, - {file = "aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e"}, - {file = "aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45"}, - {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be"}, - {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742"}, - {file = "aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e"}, - {file = "aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04"}, - {file = 
"aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476"}, - {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23"}, - {file = "aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254"}, - {file = "aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a"}, - {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b"}, - {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61"}, - {file = "aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011"}, - {file = "aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4"}, - {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a"}, - {file = "aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940"}, - {file = "aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4"}, - {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673"}, - {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = 
"sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd"}, - {file = "aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e"}, - {file = "aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be"}, - {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c"}, - {file = "aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734"}, - {file = "aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f"}, - {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7fbdf5ad6084f1940ce88933de34b62358d0f4a0b6ec097362dcd3e5a65a4989"}, - {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7c3a50345635a02db61792c85bb86daffac05330f6473d524f1a4e3ef9d0046d"}, - {file = "aiohttp-3.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e87dff73f46e969af38ab3f7cb75316a7c944e2e574ff7c933bc01b10def7f5"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2adebd4577724dcae085665f294cc57c8701ddd4d26140504db622b8d566d7aa"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e036a3a645fe92309ec34b918394bb377950cbb43039a97edae6c08db64b23e2"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:23ad365e30108c422d0b4428cf271156dd56790f6dd50d770b8e360e6c5ab2e6"}, - {file = 
"aiohttp-3.13.2-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1f9b2c2d4b9d958b1f9ae0c984ec1dd6b6689e15c75045be8ccb4011426268ca"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a92cf4b9bea33e15ecbaa5c59921be0f23222608143d025c989924f7e3e0c07"}, - {file = "aiohttp-3.13.2-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:070599407f4954021509193404c4ac53153525a19531051661440644728ba9a7"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:29562998ec66f988d49fb83c9b01694fa927186b781463f376c5845c121e4e0b"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4dd3db9d0f4ebca1d887d76f7cdbcd1116ac0d05a9221b9dad82c64a62578c4d"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d7bc4b7f9c4921eba72677cd9fedd2308f4a4ca3e12fab58935295ad9ea98700"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dacd50501cd017f8cccb328da0c90823511d70d24a323196826d923aad865901"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8b2f1414f6a1e0683f212ec80e813f4abef94c739fd090b66c9adf9d2a05feac"}, - {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04c3971421576ed24c191f610052bcb2f059e395bc2489dd99e397f9bc466329"}, - {file = "aiohttp-3.13.2-cp39-cp39-win32.whl", hash = "sha256:9f377d0a924e5cc94dc620bc6366fc3e889586a7f18b748901cf016c916e2084"}, - {file = "aiohttp-3.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:9c705601e16c03466cb72011bd1af55d68fa65b045356d8f96c216e5f6db0fa5"}, - {file = "aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca"}, + {file = "aiohttp-3.13.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5a372fd5afd301b3a89582817fdcdb6c34124787c70dbcc616f259013e7eef7"}, + {file = "aiohttp-3.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:147e422fd1223005c22b4fe080f5d93ced44460f5f9c105406b753612b587821"}, + {file = "aiohttp-3.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:859bd3f2156e81dd01432f5849fc73e2243d4a487c4fd26609b1299534ee1845"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dca68018bf48c251ba17c72ed479f4dafe9dbd5a73707ad8d28a38d11f3d42af"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fee0c6bc7db1de362252affec009707a17478a00ec69f797d23ca256e36d5940"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c048058117fd649334d81b4b526e94bde3ccaddb20463a815ced6ecbb7d11160"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:215a685b6fbbfcf71dfe96e3eba7a6f58f10da1dfdf4889c7dd856abe430dca7"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2c184bb1fe2cbd2cefba613e9db29a5ab559323f994b6737e370d3da0ac455"}, + {file = "aiohttp-3.13.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:75ca857eba4e20ce9f546cd59c7007b33906a4cd48f2ff6ccf1ccfc3b646f279"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81e97251d9298386c2b7dbeb490d3d1badbdc69107fb8c9299dd04eb39bddc0e"}, + {file = 
"aiohttp-3.13.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c0e2d366af265797506f0283487223146af57815b388623f0357ef7eac9b209d"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4e239d501f73d6db1522599e14b9b321a7e3b1de66ce33d53a765d975e9f4808"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0db318f7a6f065d84cb1e02662c526294450b314a02bd9e2a8e67f0d8564ce40"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:bfc1cc2fe31a6026a8a88e4ecfb98d7f6b1fec150cfd708adbfd1d2f42257c29"}, + {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af71fff7bac6bb7508956696dce8f6eec2bbb045eceb40343944b1ae62b5ef11"}, + {file = "aiohttp-3.13.3-cp310-cp310-win32.whl", hash = "sha256:37da61e244d1749798c151421602884db5270faf479cf0ef03af0ff68954c9dd"}, + {file = "aiohttp-3.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:7e63f210bc1b57ef699035f2b4b6d9ce096b5914414a49b0997c839b2bd2223c"}, + {file = "aiohttp-3.13.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b6073099fb654e0a068ae678b10feff95c5cae95bbfcbfa7af669d361a8aa6b"}, + {file = "aiohttp-3.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cb93e166e6c28716c8c6aeb5f99dfb6d5ccf482d29fe9bf9a794110e6d0ab64"}, + {file = "aiohttp-3.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e027cf2f6b641693a09f631759b4d9ce9165099d2b5d92af9bd4e197690eea"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b61b7169ababd7802f9568ed96142616a9118dd2be0d1866e920e77ec8fa92a"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:80dd4c21b0f6237676449c6baaa1039abae86b91636b6c91a7f8e61c87f89540"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65d2ccb7eabee90ce0503c17716fc77226be026dcc3e65cce859a30db715025b"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5b179331a481cb5529fca8b432d8d3c7001cb217513c94cd72d668d1248688a3"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d4c940f02f49483b18b079d1c27ab948721852b281f8b015c058100e9421dd1"}, + {file = "aiohttp-3.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f9444f105664c4ce47a2a7171a2418bce5b7bae45fb610f4e2c36045d85911d3"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:694976222c711d1d00ba131904beb60534f93966562f64440d0c9d41b8cdb440"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f33ed1a2bf1997a36661874b017f5c4b760f41266341af36febaf271d179f6d7"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e636b3c5f61da31a92bf0d91da83e58fdfa96f178ba682f11d24f31944cdd28c"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5d2d94f1f5fcbe40838ac51a6ab5704a6f9ea42e72ceda48de5e6b898521da51"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2be0e9ccf23e8a94f6f0650ce06042cefc6ac703d0d7ab6c7a917289f2539ad4"}, + {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9af5e68ee47d6534d36791bbe9b646d2a7c7deb6fc24d7943628edfbb3581f29"}, + {file = "aiohttp-3.13.3-cp311-cp311-win32.whl", hash = 
"sha256:a2212ad43c0833a873d0fb3c63fa1bacedd4cf6af2fee62bf4b739ceec3ab239"}, + {file = "aiohttp-3.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:642f752c3eb117b105acbd87e2c143de710987e09860d674e068c4c2c441034f"}, + {file = "aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c"}, + {file = "aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168"}, + {file = "aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc"}, + {file = "aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce"}, + {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a"}, + {file = "aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046"}, + {file = "aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57"}, + {file = "aiohttp-3.13.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5dff64413671b0d3e7d5918ea490bdccb97a4ad29b3f311ed423200b2203e01c"}, + {file = "aiohttp-3.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:87b9aab6d6ed88235aa2970294f496ff1a1f9adcd724d800e9b952395a80ffd9"}, + {file = "aiohttp-3.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:425c126c0dc43861e22cb1c14ba4c8e45d09516d0a3ae0a3f7494b79f5f233a3"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:7f9120f7093c2a32d9647abcaf21e6ad275b4fbec5b55969f978b1a97c7c86bf"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:697753042d57f4bf7122cab985bf15d0cef23c770864580f5af4f52023a56bd6"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6de499a1a44e7de70735d0b39f67c8f25eb3d91eb3103be99ca0fa882cdd987d"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:37239e9f9a7ea9ac5bf6b92b0260b01f8a22281996da609206a84df860bc1261"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f76c1e3fe7d7c8afad7ed193f89a292e1999608170dcc9751a7462a87dfd5bc0"}, + {file = "aiohttp-3.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fc290605db2a917f6e81b0e1e0796469871f5af381ce15c604a3c5c7e51cb730"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4021b51936308aeea0367b8f006dc999ca02bc118a0cc78c303f50a2ff6afb91"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:49a03727c1bba9a97d3e93c9f93ca03a57300f484b6e935463099841261195d3"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3d9908a48eb7416dc1f4524e69f1d32e5d90e3981e4e37eb0aa1cd18f9cfa2a4"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2712039939ec963c237286113c68dbad80a82a4281543f3abf766d9d73228998"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7bfdc049127717581866fa4708791220970ce291c23e28ccf3922c700740fdc0"}, + {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8057c98e0c8472d8846b9c79f56766bcc57e3e8ac7bfd510482332366c56c591"}, + {file = "aiohttp-3.13.3-cp313-cp313-win32.whl", hash = "sha256:1449ceddcdbcf2e0446957863af03ebaaa03f94c090f945411b61269e2cb5daf"}, + {file = "aiohttp-3.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:693781c45a4033d31d4187d2436f5ac701e7bbfe5df40d917736108c1cc7436e"}, + {file = "aiohttp-3.13.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:ea37047c6b367fd4bd632bff8077449b8fa034b69e812a18e0132a00fae6e808"}, + {file = "aiohttp-3.13.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6fc0e2337d1a4c3e6acafda6a78a39d4c14caea625124817420abceed36e2415"}, + {file = "aiohttp-3.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c685f2d80bb67ca8c3837823ad76196b3694b0159d232206d1e461d3d434666f"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e377758516d262bde50c2584fc6c578af272559c409eecbdd2bae1601184d6"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:34749271508078b261c4abb1767d42b8d0c0cc9449c73a4df494777dc55f0687"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:82611aeec80eb144416956ec85b6ca45a64d76429c1ed46ae1b5f86c6e0c9a26"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fff83cfc93f18f215896e3a190e8e5cb413ce01553901aca925176e7568963a"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:bbe7d4cecacb439e2e2a8a1a7b935c25b812af7a5fd26503a66dadf428e79ec1"}, + {file = "aiohttp-3.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b928f30fe49574253644b1ca44b1b8adbd903aa0da4b9054a6c20fc7f4092a25"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7b5e8fe4de30df199155baaf64f2fcd604f4c678ed20910db8e2c66dc4b11603"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:8542f41a62bcc58fc7f11cf7c90e0ec324ce44950003feb70640fc2a9092c32a"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5e1d8c8b8f1d91cd08d8f4a3c2b067bfca6ec043d3ff36de0f3a715feeedf926"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:90455115e5da1c3c51ab619ac57f877da8fd6d73c05aacd125c5ae9819582aba"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:042e9e0bcb5fba81886c8b4fbb9a09d6b8a00245fd8d88e4d989c1f96c74164c"}, + {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2eb752b102b12a76ca02dff751a801f028b4ffbbc478840b473597fc91a9ed43"}, + {file = "aiohttp-3.13.3-cp314-cp314-win32.whl", hash = "sha256:b556c85915d8efaed322bf1bdae9486aa0f3f764195a0fb6ee962e5c71ef5ce1"}, + {file = "aiohttp-3.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9bf9f7a65e7aa20dd764151fb3d616c81088f91f8df39c3893a536e279b4b984"}, + {file = "aiohttp-3.13.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:05861afbbec40650d8a07ea324367cb93e9e8cc7762e04dd4405df99fa65159c"}, + {file = "aiohttp-3.13.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2fc82186fadc4a8316768d61f3722c230e2c1dcab4200d52d2ebdf2482e47592"}, + {file = "aiohttp-3.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0add0900ff220d1d5c5ebbf99ed88b0c1bbf87aa7e4262300ed1376a6b13414f"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:568f416a4072fbfae453dcf9a99194bbb8bdeab718e08ee13dfa2ba0e4bebf29"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:add1da70de90a2569c5e15249ff76a631ccacfe198375eead4aadf3b8dc849dc"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b47b7ba335d2e9b1239fa571131a87e2d8ec96b333e68b2a305e7a98b0bae2"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3dd4dce1c718e38081c8f35f323209d4c1df7d4db4bab1b5c88a6b4d12b74587"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34bac00a67a812570d4a460447e1e9e06fae622946955f939051e7cc895cfab8"}, + {file = "aiohttp-3.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a19884d2ee70b06d9204b2727a7b9f983d0c684c650254679e716b0b77920632"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ca7f2bb6ba8348a3614c7918cc4bb73268c5ac2a207576b7afea19d3d9f64"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:b0d95340658b9d2f11d9697f59b3814a9d3bb4b7a7c20b131df4bcef464037c0"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1e53262fd202e4b40b70c3aff944a8155059beedc8a89bba9dc1f9ef06a1b56"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = 
"sha256:d60ac9663f44168038586cab2157e122e46bdef09e9368b37f2d82d354c23f72"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:90751b8eed69435bac9ff4e3d2f6b3af1f57e37ecb0fbeee59c0174c9e2d41df"}, + {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa"}, + {file = "aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767"}, + {file = "aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344"}, + {file = "aiohttp-3.13.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31a83ea4aead760dfcb6962efb1d861db48c34379f2ff72db9ddddd4cda9ea2e"}, + {file = "aiohttp-3.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:988a8c5e317544fdf0d39871559e67b6341065b87fceac641108c2096d5506b7"}, + {file = "aiohttp-3.13.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b174f267b5cfb9a7dba9ee6859cecd234e9a681841eb85068059bc867fb8f02"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:947c26539750deeaee933b000fb6517cc770bbd064bad6033f1cff4803881e43"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9ebf57d09e131f5323464bd347135a88622d1c0976e88ce15b670e7ad57e4bd6"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4ae5b5a0e1926e504c81c5b84353e7a5516d8778fbbff00429fe7b05bb25cbce"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2ba0eea45eb5cc3172dbfc497c066f19c41bac70963ea1a67d51fc92e4cf9a80"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bae5c2ed2eae26cc382020edad80d01f36cb8e746da40b292e68fec40421dc6a"}, + {file = "aiohttp-3.13.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a60e60746623925eab7d25823329941aee7242d559baa119ca2b253c88a7bd6"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e50a2e1404f063427c9d027378472316201a2290959a295169bcf25992d04558"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:9a9dc347e5a3dc7dfdbc1f82da0ef29e388ddb2ed281bfce9dd8248a313e62b7"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b46020d11d23fe16551466c77823df9cc2f2c1e63cc965daf67fa5eec6ca1877"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:69c56fbc1993fa17043e24a546959c0178fe2b5782405ad4559e6c13975c15e3"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b99281b0704c103d4e11e72a76f1b543d4946fea7dd10767e7e1b5f00d4e5704"}, + {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:40c5e40ecc29ba010656c18052b877a1c28f84344825efa106705e835c28530f"}, + {file = "aiohttp-3.13.3-cp39-cp39-win32.whl", hash = "sha256:56339a36b9f1fc708260c76c87e593e2afb30d26de9ae1eb445b5e051b98a7a1"}, + {file = "aiohttp-3.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:c6b8568a3bb5819a0ad087f16d40e5a3fb6099f39ea1d5625a3edc1e923fc538"}, + {file = "aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88"}, ] [package.dependencies] @@ -164,7 +164,7 @@ propcache = ">=0.2.0" yarl = 
">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi"] +speedups = ["Brotli (>=1.2)", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi (>=1.2)"] [[package]] name = "aiohttp-retry" @@ -223,14 +223,14 @@ files = [ [[package]] name = "anyio" -version = "4.12.0" +version = "4.12.1" description = "High-level concurrency and networking framework on top of asyncio or Trio" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb"}, - {file = "anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0"}, + {file = "anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c"}, + {file = "anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703"}, ] [package.dependencies] @@ -266,18 +266,18 @@ files = [ [[package]] name = "boto3" -version = "1.42.16" +version = "1.42.23" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "boto3-1.42.16-py3-none-any.whl", hash = "sha256:37a43d42aebd06a8f93ee801ea1b7b5181ac42a30869ef403c9dadc160a748e5"}, - {file = "boto3-1.42.16.tar.gz", hash = "sha256:811391611db88c8a061f6e6fabbd7ca784ad9de04490a879f091cbaa9de7de74"}, + {file = "boto3-1.42.23-py3-none-any.whl", hash = "sha256:2ed797bdb394b08550f6269babf0a31bbeb853684bb2cb67116620df0ed632dc"}, + {file = "boto3-1.42.23.tar.gz", hash = "sha256:f681a8d43b46b3d8acf0be4f3894eb85e40e75945431d0dfe0542edda7025512"}, ] [package.dependencies] -botocore = ">=1.42.16,<1.43.0" +botocore = ">=1.42.23,<1.43.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.16.0,<0.17.0" @@ -286,14 +286,14 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.42.16" +version = "1.42.23" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "botocore-1.42.16-py3-none-any.whl", hash = "sha256:b1f584a0f8645c12e07bf6ec9c18e05221a789f2a9b2d3c6291deb42f8c1c542"}, - {file = "botocore-1.42.16.tar.gz", hash = "sha256:29ee8555cd5d5023350405387cedcf3fe1c7f02fcb8060bf9e01602487482c25"}, + {file = "botocore-1.42.23-py3-none-any.whl", hash = "sha256:d5042e0252b81f25ca1152fff9ed25463bab2438fbc4530ba53d5390d00ca1b1"}, + {file = "botocore-1.42.23.tar.gz", hash = "sha256:453ce449bd1021acd67e75c814aae1b132b1ab3ee0ecff248de863bf19e58be8"}, ] [package.dependencies] @@ -306,14 +306,14 @@ crt = ["awscrt (==0.29.2)"] [[package]] name = "certifi" -version = "2025.11.12" +version = "2026.1.4" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.7" groups = ["main", "dev"] files = [ - {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"}, - {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"}, + {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, + {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, ] [[package]] @@ -638,14 +638,14 @@ testing = ["hatch", "pre-commit", "pytest", "tox"] [[package]] name = "fastapi" -version = "0.127.0" +version = "0.128.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "fastapi-0.127.0-py3-none-any.whl", hash = "sha256:725aa2bb904e2eff8031557cf4b9b77459bfedd63cae8427634744fd199f6a49"}, - {file = "fastapi-0.127.0.tar.gz", hash = "sha256:5a9246e03dcd1fdb19f1396db30894867c1d630f5107dc167dcbc5ed1ea7d259"}, + {file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"}, + {file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"}, ] [package.dependencies] @@ -749,14 +749,14 @@ files = [ [[package]] name = "filelock" -version = "3.20.1" +version = "3.20.2" description = "A platform independent file lock." optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a"}, - {file = "filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c"}, + {file = "filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8"}, + {file = "filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64"}, ] [[package]] @@ -1201,14 +1201,14 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "huggingface-hub" -version = "1.2.3" +version = "1.2.4" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.9.0" groups = ["main"] files = [ - {file = "huggingface_hub-1.2.3-py3-none-any.whl", hash = "sha256:c9b7a91a9eedaa2149cdc12bdd8f5a11780e10de1f1024718becf9e41e5a4642"}, - {file = "huggingface_hub-1.2.3.tar.gz", hash = "sha256:4ba57f17004fd27bb176a6b7107df579865d4cde015112db59184c51f5602ba7"}, + {file = "huggingface_hub-1.2.4-py3-none-any.whl", hash = "sha256:2db69b91877d9d34825f5cd2a63b94f259011a77dcf761b437bf510fbe9522e9"}, + {file = "huggingface_hub-1.2.4.tar.gz", hash = "sha256:7a1d9ec4802e64372d1d152d69fb8e26d943f15a2289096fbc8e09e7b90c21a5"}, ] [package.dependencies] @@ -1221,13 +1221,13 @@ pyyaml = ">=5.1" shellingham = "*" tqdm = ">=4.42.1" typer-slim = "*" -typing-extensions = ">=3.7.4.3" +typing-extensions = ">=4.1.0" [package.extras] all = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", 
"types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] dev = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -hf-xet = ["hf-xet (>=1.1.3,<2.0.0)"] +hf-xet = ["hf-xet (>=1.2.0,<2.0.0)"] mcp = ["mcp (>=1.8.0)"] oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"] quality = ["libcst (>=1.4.0)", "mypy (==1.15.0)", "ruff (>=0.9.0)", "ty"] @@ -1467,89 +1467,89 @@ referencing = ">=0.31.0" [[package]] name = "librt" -version = "0.7.5" +version = "0.7.7" description = "Mypyc runtime library" optional = false python-versions = ">=3.9" groups = ["dev"] markers = "platform_python_implementation != \"PyPy\"" files = [ - {file = "librt-0.7.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:81056e01bba1394f1d92904ec61a4078f66df785316275edbaf51d90da8c6e26"}, - {file = "librt-0.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d7c72c8756eeb3aefb1b9e3dac7c37a4a25db63640cac0ab6fc18e91a0edf05a"}, - {file = "librt-0.7.5-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ddc4a16207f88f9597b397fc1f60781266d13b13de922ff61c206547a29e4bbd"}, - {file = "librt-0.7.5-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:63055d3dda433ebb314c9f1819942f16a19203c454508fdb2d167613f7017169"}, - {file = "librt-0.7.5-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f85f9b5db87b0f52e53c68ad2a0c5a53e00afa439bd54a1723742a2b1021276"}, - {file = "librt-0.7.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c566a4672564c5d54d8ab65cdaae5a87ee14c1564c1a2ddc7a9f5811c750f023"}, - {file = "librt-0.7.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fee15c2a190ef389f14928135c6fb2d25cd3fdb7887bfd9a7b444bbdc8c06b96"}, - {file = "librt-0.7.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:584cb3e605ec45ba350962cec853e17be0a25a772f21f09f1e422f7044ae2a7d"}, - {file = "librt-0.7.5-cp310-cp310-win32.whl", hash = "sha256:9c08527055fbb03c641c15bbc5b79dd2942fb6a3bd8dabf141dd7e97eeea4904"}, - {file = "librt-0.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:dd810f2d39c526c42ea205e0addad5dc08ef853c625387806a29d07f9d150d9b"}, - {file = "librt-0.7.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f952e1a78c480edee8fb43aa2bf2e84dcd46c917d44f8065b883079d3893e8fc"}, - {file = "librt-0.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75965c1f4efb7234ff52a58b729d245a21e87e4b6a26a0ec08052f02b16274e4"}, - {file = "librt-0.7.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:732e0aa0385b59a1b2545159e781c792cc58ce9c134249233a7c7250a44684c4"}, - {file = "librt-0.7.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cdde31759bd8888f3ef0eebda80394a48961328a17c264dce8cc35f4b9cde35d"}, - {file = "librt-0.7.5-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:df3146d52465b3b6397d25d513f428cb421c18df65b7378667bb5f1e3cc45805"}, - {file = 
"librt-0.7.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29c8d2fae11d4379ea207ba7fc69d43237e42cf8a9f90ec6e05993687e6d648b"}, - {file = "librt-0.7.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bb41f04046b4f22b1e7ba5ef513402cd2e3477ec610e5f92d38fe2bba383d419"}, - {file = "librt-0.7.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8bb7883c1e94ceb87c2bf81385266f032da09cd040e804cc002f2c9d6b842e2f"}, - {file = "librt-0.7.5-cp311-cp311-win32.whl", hash = "sha256:84d4a6b9efd6124f728558a18e79e7cc5c5d4efc09b2b846c910de7e564f5bad"}, - {file = "librt-0.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:ab4b0d3bee6f6ff7017e18e576ac7e41a06697d8dea4b8f3ab9e0c8e1300c409"}, - {file = "librt-0.7.5-cp311-cp311-win_arm64.whl", hash = "sha256:730be847daad773a3c898943cf67fb9845a3961d06fb79672ceb0a8cd8624cfa"}, - {file = "librt-0.7.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ba1077c562a046208a2dc6366227b3eeae8f2c2ab4b41eaf4fd2fa28cece4203"}, - {file = "librt-0.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:654fdc971c76348a73af5240d8e2529265b9a7ba6321e38dd5bae7b0d4ab3abe"}, - {file = "librt-0.7.5-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:6b7b58913d475911f6f33e8082f19dd9b120c4f4a5c911d07e395d67b81c6982"}, - {file = "librt-0.7.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8e0fd344bad57026a8f4ccfaf406486c2fc991838050c2fef156170edc3b775"}, - {file = "librt-0.7.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:46aa91813c267c3f60db75d56419b42c0c0b9748ec2c568a0e3588e543fb4233"}, - {file = "librt-0.7.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ddc0ab9dbc5f9ceaf2bf7a367bf01f2697660e908f6534800e88f43590b271db"}, - {file = "librt-0.7.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7a488908a470451338607650f1c064175094aedebf4a4fa37890682e30ce0b57"}, - {file = "librt-0.7.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e47fc52602ffc374e69bf1b76536dc99f7f6dd876bd786c8213eaa3598be030a"}, - {file = "librt-0.7.5-cp312-cp312-win32.whl", hash = "sha256:cda8b025875946ffff5a9a7590bf9acde3eb02cb6200f06a2d3e691ef3d9955b"}, - {file = "librt-0.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:b591c094afd0ffda820e931148c9e48dc31a556dc5b2b9b3cc552fa710d858e4"}, - {file = "librt-0.7.5-cp312-cp312-win_arm64.whl", hash = "sha256:532ddc6a8a6ca341b1cd7f4d999043e4c71a212b26fe9fd2e7f1e8bb4e873544"}, - {file = "librt-0.7.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b1795c4b2789b458fa290059062c2f5a297ddb28c31e704d27e161386469691a"}, - {file = "librt-0.7.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2fcbf2e135c11f721193aa5f42ba112bb1046afafbffd407cbc81d8d735c74d0"}, - {file = "librt-0.7.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c039bbf79a9a2498404d1ae7e29a6c175e63678d7a54013a97397c40aee026c5"}, - {file = "librt-0.7.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3919c9407faeeee35430ae135e3a78acd4ecaaaa73767529e2c15ca1d73ba325"}, - {file = "librt-0.7.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:26b46620e1e0e45af510d9848ea0915e7040605dd2ae94ebefb6c962cbb6f7ec"}, - {file = "librt-0.7.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9bbb8facc5375476d392990dd6a71f97e4cb42e2ac66f32e860f6e47299d5e89"}, - {file = "librt-0.7.5-cp313-cp313-musllinux_1_2_i686.whl", 
hash = "sha256:e9e9c988b5ffde7be02180f864cbd17c0b0c1231c235748912ab2afa05789c25"}, - {file = "librt-0.7.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:edf6b465306215b19dbe6c3fb63cf374a8f3e1ad77f3b4c16544b83033bbb67b"}, - {file = "librt-0.7.5-cp313-cp313-win32.whl", hash = "sha256:060bde69c3604f694bd8ae21a780fe8be46bb3dbb863642e8dfc75c931ca8eee"}, - {file = "librt-0.7.5-cp313-cp313-win_amd64.whl", hash = "sha256:a82d5a0ee43aeae2116d7292c77cc8038f4841830ade8aa922e098933b468b9e"}, - {file = "librt-0.7.5-cp313-cp313-win_arm64.whl", hash = "sha256:3c98a8d0ac9e2a7cb8ff8c53e5d6e8d82bfb2839abf144fdeaaa832f2a12aa45"}, - {file = "librt-0.7.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:9937574e6d842f359b8585903d04f5b4ab62277a091a93e02058158074dc52f2"}, - {file = "librt-0.7.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5cd3afd71e9bc146203b6c8141921e738364158d4aa7cdb9a874e2505163770f"}, - {file = "librt-0.7.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9cffa3ef0af29687455161cb446eff059bf27607f95163d6a37e27bcb37180f6"}, - {file = "librt-0.7.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:82f3f088482e2229387eadf8215c03f7726d56f69cce8c0c40f0795aebc9b361"}, - {file = "librt-0.7.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7aa33153a5bb0bac783d2c57885889b1162823384e8313d47800a0e10d0070e"}, - {file = "librt-0.7.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:265729b551a2dd329cc47b323a182fb7961af42abf21e913c9dd7d3331b2f3c2"}, - {file = "librt-0.7.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:168e04663e126416ba712114050f413ac306759a1791d87b7c11d4428ba75760"}, - {file = "librt-0.7.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:553dc58987d1d853adda8aeadf4db8e29749f0b11877afcc429a9ad892818ae2"}, - {file = "librt-0.7.5-cp314-cp314-win32.whl", hash = "sha256:263f4fae9eba277513357c871275b18d14de93fd49bf5e43dc60a97b81ad5eb8"}, - {file = "librt-0.7.5-cp314-cp314-win_amd64.whl", hash = "sha256:85f485b7471571e99fab4f44eeb327dc0e1f814ada575f3fa85e698417d8a54e"}, - {file = "librt-0.7.5-cp314-cp314-win_arm64.whl", hash = "sha256:49c596cd18e90e58b7caa4d7ca7606049c1802125fcff96b8af73fa5c3870e4d"}, - {file = "librt-0.7.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:54d2aef0b0f5056f130981ad45081b278602ff3657fe16c88529f5058038e802"}, - {file = "librt-0.7.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0b4791202296ad51ac09a3ff58eb49d9da8e3a4009167a6d76ac418a974e5fd4"}, - {file = "librt-0.7.5-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:6e860909fea75baef941ee6436e0453612505883b9d0d87924d4fda27865b9a2"}, - {file = "librt-0.7.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f02c4337bf271c4f06637f5ff254fad2238c0b8e32a3a480ebb2fc5e26f754a5"}, - {file = "librt-0.7.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7f51ffe59f4556243d3cc82d827bde74765f594fa3ceb80ec4de0c13ccd3416"}, - {file = "librt-0.7.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0b7f080ba30601dfa3e3deed3160352273e1b9bc92e652f51103c3e9298f7899"}, - {file = "librt-0.7.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:fb565b4219abc8ea2402e61c7ba648a62903831059ed3564fa1245cc245d58d7"}, - {file = "librt-0.7.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = 
"sha256:8a3cfb15961e7333ea6ef033dc574af75153b5c230d5ad25fbcd55198f21e0cf"}, - {file = "librt-0.7.5-cp314-cp314t-win32.whl", hash = "sha256:118716de5ad6726332db1801bc90fa6d94194cd2e07c1a7822cebf12c496714d"}, - {file = "librt-0.7.5-cp314-cp314t-win_amd64.whl", hash = "sha256:3dd58f7ce20360c6ce0c04f7bd9081c7f9c19fc6129a3c705d0c5a35439f201d"}, - {file = "librt-0.7.5-cp314-cp314t-win_arm64.whl", hash = "sha256:08153ea537609d11f774d2bfe84af39d50d5c9ca3a4d061d946e0c9d8bce04a1"}, - {file = "librt-0.7.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:df2e210400b28e50994477ebf82f055698c79797b6ee47a1669d383ca33263e1"}, - {file = "librt-0.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d2cc7d187e8c6e9b7bdbefa9697ce897a704ea7a7ce844f2b4e0e2aa07ae51d3"}, - {file = "librt-0.7.5-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:39183abee670bc37b85f11e86c44a9cad1ed6efa48b580083e89ecee13dd9717"}, - {file = "librt-0.7.5-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:191cbd42660446d67cf7a95ac7bfa60f49b8b3b0417c64f216284a1d86fc9335"}, - {file = "librt-0.7.5-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ea1b60b86595a5dc1f57b44a801a1c4d8209c0a69518391d349973a4491408e6"}, - {file = "librt-0.7.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:af69d9e159575e877c7546d1ee817b4ae089aa221dd1117e20c24ad8dc8659c7"}, - {file = "librt-0.7.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0e2bf8f91093fac43e3eaebacf777f12fd539dce9ec5af3efc6d8424e96ccd49"}, - {file = "librt-0.7.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8dcae24de1bc9da93aa689cb6313c70e776d7cea2fcf26b9b6160fedfe6bd9af"}, - {file = "librt-0.7.5-cp39-cp39-win32.whl", hash = "sha256:cdb001a1a0e4f41e613bca2c0fc147fc8a7396f53fc94201cbfd8ec7cd69ca4b"}, - {file = "librt-0.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:a9eacbf983319b26b5f340a2e0cd47ac1ee4725a7f3a72fd0f15063c934b69d6"}, - {file = "librt-0.7.5.tar.gz", hash = "sha256:de4221a1181fa9c8c4b5f35506ed6f298948f44003d84d2a8b9885d7e01e6cfa"}, + {file = "librt-0.7.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4836c5645f40fbdc275e5670819bde5ab5f2e882290d304e3c6ddab1576a6d0"}, + {file = "librt-0.7.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae8aec43117a645a31e5f60e9e3a0797492e747823b9bda6972d521b436b4e8"}, + {file = "librt-0.7.7-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:aea05f701ccd2a76b34f0daf47ca5068176ff553510b614770c90d76ac88df06"}, + {file = "librt-0.7.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b16ccaeff0ed4355dfb76fe1ea7a5d6d03b5ad27f295f77ee0557bc20a72495"}, + {file = "librt-0.7.7-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c48c7e150c095d5e3cea7452347ba26094be905d6099d24f9319a8b475fcd3e0"}, + {file = "librt-0.7.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4dcee2f921a8632636d1c37f1bbdb8841d15666d119aa61e5399c5268e7ce02e"}, + {file = "librt-0.7.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:14ef0f4ac3728ffd85bfc58e2f2f48fb4ef4fa871876f13a73a7381d10a9f77c"}, + {file = "librt-0.7.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e4ab69fa37f8090f2d971a5d2bc606c7401170dbdae083c393d6cbf439cb45b8"}, + {file = "librt-0.7.7-cp310-cp310-win32.whl", hash = "sha256:4bf3cc46d553693382d2abf5f5bd493d71bb0f50a7c0beab18aa13a5545c8900"}, + {file = 
"librt-0.7.7-cp310-cp310-win_amd64.whl", hash = "sha256:f0c8fe5aeadd8a0e5b0598f8a6ee3533135ca50fd3f20f130f9d72baf5c6ac58"}, + {file = "librt-0.7.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a487b71fbf8a9edb72a8c7a456dda0184642d99cd007bc819c0b7ab93676a8ee"}, + {file = "librt-0.7.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f4d4efb218264ecf0f8516196c9e2d1a0679d9fb3bb15df1155a35220062eba8"}, + {file = "librt-0.7.7-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b8bb331aad734b059c4b450cd0a225652f16889e286b2345af5e2c3c625c3d85"}, + {file = "librt-0.7.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:467dbd7443bda08338fc8ad701ed38cef48194017554f4c798b0a237904b3f99"}, + {file = "librt-0.7.7-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50d1d1ee813d2d1a3baf2873634ba506b263032418d16287c92ec1cc9c1a00cb"}, + {file = "librt-0.7.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c7e5070cf3ec92d98f57574da0224f8c73faf1ddd6d8afa0b8c9f6e86997bc74"}, + {file = "librt-0.7.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bdb9f3d865b2dafe7f9ad7f30ef563c80d0ddd2fdc8cc9b8e4f242f475e34d75"}, + {file = "librt-0.7.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8185c8497d45164e256376f9da5aed2bb26ff636c798c9dabe313b90e9f25b28"}, + {file = "librt-0.7.7-cp311-cp311-win32.whl", hash = "sha256:44d63ce643f34a903f09ff7ca355aae019a3730c7afd6a3c037d569beeb5d151"}, + {file = "librt-0.7.7-cp311-cp311-win_amd64.whl", hash = "sha256:7d13cc340b3b82134f8038a2bfe7137093693dcad8ba5773da18f95ad6b77a8a"}, + {file = "librt-0.7.7-cp311-cp311-win_arm64.whl", hash = "sha256:983de36b5a83fe9222f4f7dcd071f9b1ac6f3f17c0af0238dadfb8229588f890"}, + {file = "librt-0.7.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a85a1fc4ed11ea0eb0a632459ce004a2d14afc085a50ae3463cd3dfe1ce43fc"}, + {file = "librt-0.7.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c87654e29a35938baead1c4559858f346f4a2a7588574a14d784f300ffba0efd"}, + {file = "librt-0.7.7-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c9faaebb1c6212c20afd8043cd6ed9de0a47d77f91a6b5b48f4e46ed470703fe"}, + {file = "librt-0.7.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1908c3e5a5ef86b23391448b47759298f87f997c3bd153a770828f58c2bb4630"}, + {file = "librt-0.7.7-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbc4900e95a98fc0729523be9d93a8fedebb026f32ed9ffc08acd82e3e181503"}, + {file = "librt-0.7.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7ea4e1fbd253e5c68ea0fe63d08577f9d288a73f17d82f652ebc61fa48d878d"}, + {file = "librt-0.7.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ef7699b7a5a244b1119f85c5bbc13f152cd38240cbb2baa19b769433bae98e50"}, + {file = "librt-0.7.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:955c62571de0b181d9e9e0a0303c8bc90d47670a5eff54cf71bf5da61d1899cf"}, + {file = "librt-0.7.7-cp312-cp312-win32.whl", hash = "sha256:1bcd79be209313b270b0e1a51c67ae1af28adad0e0c7e84c3ad4b5cb57aaa75b"}, + {file = "librt-0.7.7-cp312-cp312-win_amd64.whl", hash = "sha256:4353ee891a1834567e0302d4bd5e60f531912179578c36f3d0430f8c5e16b456"}, + {file = "librt-0.7.7-cp312-cp312-win_arm64.whl", hash = "sha256:a76f1d679beccccdf8c1958e732a1dfcd6e749f8821ee59d7bec009ac308c029"}, + {file = "librt-0.7.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:8f4a0b0a3c86ba9193a8e23bb18f100d647bf192390ae195d84dfa0a10fb6244"}, + {file = "librt-0.7.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5335890fea9f9e6c4fdf8683061b9ccdcbe47c6dc03ab8e9b68c10acf78be78d"}, + {file = "librt-0.7.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b4346b1225be26def3ccc6c965751c74868f0578cbcba293c8ae9168483d811"}, + {file = "librt-0.7.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a10b8eebdaca6e9fdbaf88b5aefc0e324b763a5f40b1266532590d5afb268a4c"}, + {file = "librt-0.7.7-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:067be973d90d9e319e6eb4ee2a9b9307f0ecd648b8a9002fa237289a4a07a9e7"}, + {file = "librt-0.7.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:23d2299ed007812cccc1ecef018db7d922733382561230de1f3954db28433977"}, + {file = "librt-0.7.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6b6f8ea465524aa4c7420c7cc4ca7d46fe00981de8debc67b1cc2e9957bb5b9d"}, + {file = "librt-0.7.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8df32a99cc46eb0ee90afd9ada113ae2cafe7e8d673686cf03ec53e49635439"}, + {file = "librt-0.7.7-cp313-cp313-win32.whl", hash = "sha256:86f86b3b785487c7760247bcdac0b11aa8bf13245a13ed05206286135877564b"}, + {file = "librt-0.7.7-cp313-cp313-win_amd64.whl", hash = "sha256:4862cb2c702b1f905c0503b72d9d4daf65a7fdf5a9e84560e563471e57a56949"}, + {file = "librt-0.7.7-cp313-cp313-win_arm64.whl", hash = "sha256:0996c83b1cb43c00e8c87835a284f9057bc647abd42b5871e5f941d30010c832"}, + {file = "librt-0.7.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:23daa1ab0512bafdd677eb1bfc9611d8ffbe2e328895671e64cb34166bc1b8c8"}, + {file = "librt-0.7.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:558a9e5a6f3cc1e20b3168fb1dc802d0d8fa40731f6e9932dcc52bbcfbd37111"}, + {file = "librt-0.7.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2567cb48dc03e5b246927ab35cbb343376e24501260a9b5e30b8e255dca0d1d2"}, + {file = "librt-0.7.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6066c638cdf85ff92fc6f932d2d73c93a0e03492cdfa8778e6d58c489a3d7259"}, + {file = "librt-0.7.7-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a609849aca463074c17de9cda173c276eb8fee9e441053529e7b9e249dc8b8ee"}, + {file = "librt-0.7.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:add4e0a000858fe9bb39ed55f31085506a5c38363e6eb4a1e5943a10c2bfc3d1"}, + {file = "librt-0.7.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:a3bfe73a32bd0bdb9a87d586b05a23c0a1729205d79df66dee65bb2e40d671ba"}, + {file = "librt-0.7.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0ecce0544d3db91a40f8b57ae26928c02130a997b540f908cefd4d279d6c5848"}, + {file = "librt-0.7.7-cp314-cp314-win32.whl", hash = "sha256:8f7a74cf3a80f0c3b0ec75b0c650b2f0a894a2cec57ef75f6f72c1e82cdac61d"}, + {file = "librt-0.7.7-cp314-cp314-win_amd64.whl", hash = "sha256:3d1fe2e8df3268dd6734dba33ededae72ad5c3a859b9577bc00b715759c5aaab"}, + {file = "librt-0.7.7-cp314-cp314-win_arm64.whl", hash = "sha256:2987cf827011907d3dfd109f1be0d61e173d68b1270107bb0e89f2fca7f2ed6b"}, + {file = "librt-0.7.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8e92c8de62b40bfce91d5e12c6e8b15434da268979b1af1a6589463549d491e6"}, + {file = "librt-0.7.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = 
"sha256:f683dcd49e2494a7535e30f779aa1ad6e3732a019d80abe1309ea91ccd3230e3"}, + {file = "librt-0.7.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b15e5d17812d4d629ff576699954f74e2cc24a02a4fc401882dd94f81daba45"}, + {file = "librt-0.7.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c084841b879c4d9b9fa34e5d5263994f21aea7fd9c6add29194dbb41a6210536"}, + {file = "librt-0.7.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c8fb9966f84737115513fecbaf257f9553d067a7dd45a69c2c7e5339e6a8dc"}, + {file = "librt-0.7.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9b5fb1ecb2c35362eab2dbd354fd1efa5a8440d3e73a68be11921042a0edc0ff"}, + {file = "librt-0.7.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:d1454899909d63cc9199a89fcc4f81bdd9004aef577d4ffc022e600c412d57f3"}, + {file = "librt-0.7.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7ef28f2e7a016b29792fe0a2dd04dec75725b32a1264e390c366103f834a9c3a"}, + {file = "librt-0.7.7-cp314-cp314t-win32.whl", hash = "sha256:5e419e0db70991b6ba037b70c1d5bbe92b20ddf82f31ad01d77a347ed9781398"}, + {file = "librt-0.7.7-cp314-cp314t-win_amd64.whl", hash = "sha256:d6b7d93657332c817b8d674ef6bf1ab7796b4f7ce05e420fd45bd258a72ac804"}, + {file = "librt-0.7.7-cp314-cp314t-win_arm64.whl", hash = "sha256:142c2cd91794b79fd0ce113bd658993b7ede0fe93057668c2f98a45ca00b7e91"}, + {file = "librt-0.7.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c8ffe3431d98cc043a14e88b21288b5ec7ee12cb01260e94385887f285ef9389"}, + {file = "librt-0.7.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e40d20ae1722d6b8ea6acf4597e789604649dcd9c295eb7361a28225bc2e9e12"}, + {file = "librt-0.7.7-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f2cb63c49bc96847c3bb8dca350970e4dcd19936f391cfdfd057dcb37c4fa97e"}, + {file = "librt-0.7.7-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8f2f8dcf5ab9f80fb970c6fd780b398efb2f50c1962485eb8d3ab07788595a48"}, + {file = "librt-0.7.7-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a1f5cc41a570269d1be7a676655875e3a53de4992a9fa38efb7983e97cf73d7c"}, + {file = "librt-0.7.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ff1fb2dfef035549565a4124998fadcb7a3d4957131ddf004a56edeb029626b3"}, + {file = "librt-0.7.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ab2a2a9cd7d044e1a11ca64a86ad3361d318176924bbe5152fbc69f99be20b8c"}, + {file = "librt-0.7.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ad3fc2d859a709baf9dd9607bb72f599b1cfb8a39eafd41307d0c3c4766763cb"}, + {file = "librt-0.7.7-cp39-cp39-win32.whl", hash = "sha256:f83c971eb9d2358b6a18da51dc0ae00556ac7c73104dde16e9e14c15aaf685ca"}, + {file = "librt-0.7.7-cp39-cp39-win_amd64.whl", hash = "sha256:264720fc288c86039c091a4ad63419a5d7cabbf1c1c9933336a957ed2483e570"}, + {file = "librt-0.7.7.tar.gz", hash = "sha256:81d957b069fed1890953c3b9c3895c7689960f233eea9a1d9607f71ce7f00b2c"}, ] [[package]] @@ -1691,19 +1691,19 @@ files = [ [[package]] name = "marshmallow" -version = "4.1.2" +version = "4.2.0" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "marshmallow-4.1.2-py3-none-any.whl", hash = "sha256:a8cfa18bd8d0e5f7339e734edf84815fe8db1bdb57358c7ccc05472b746eeadc"}, - {file = "marshmallow-4.1.2.tar.gz", hash = "sha256:083f250643d2e75fd363f256aeb6b1af369a7513ad37647ce4a601f6966e3ba5"}, + {file = "marshmallow-4.2.0-py3-none-any.whl", hash = "sha256:1dc369bd13a8708a9566d6f73d1db07d50142a7580f04fd81e1c29a4d2e10af4"}, + {file = "marshmallow-4.2.0.tar.gz", hash = "sha256:908acabd5aa14741419d3678d3296bda6abe28a167b7dcd05969ceb8256943ac"}, ] [package.extras] dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] -docs = ["autodocsumm (==0.2.14)", "furo (==2025.9.25)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.13.0)"] +docs = ["autodocsumm (==0.2.14)", "furo (==2025.12.19)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.13.0)"] tests = ["pytest", "simplejson"] [[package]] @@ -2314,16 +2314,22 @@ files = [ [[package]] name = "pathspec" -version = "0.12.1" +version = "1.0.1" description = "Utility library for gitignore style pattern matching of file paths." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, - {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, + {file = "pathspec-1.0.1-py3-none-any.whl", hash = "sha256:8870061f22c58e6d83463cfce9a7dd6eca0512c772c1001fb09ac64091816721"}, + {file = "pathspec-1.0.1.tar.gz", hash = "sha256:e2769b508d0dd47b09af6ee2c75b2744a2cb1f474ae4b1494fd6a1b7a841613c"}, ] +[package.extras] +hyperscan = ["hyperscan (>=0.7)"] +optional = ["typing-extensions (>=4)"] +re2 = ["google-re2 (>=1.1)"] +tests = ["pytest (>=9)", "typing-extensions (>=4.15)"] + [[package]] name = "pexpect" version = "4.9.0" @@ -2357,14 +2363,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "posthog" -version = "7.4.2" +version = "7.4.3" description = "Integrate PostHog into any python application." optional = false python-versions = ">=3.10" groups = ["dev"] files = [ - {file = "posthog-7.4.2-py3-none-any.whl", hash = "sha256:36954f06f4adede905d97faeb24926a705a4d86f4a308506b15b41b661ef064c"}, - {file = "posthog-7.4.2.tar.gz", hash = "sha256:5953f31a21c5e2485ac57eb5d600a231a70118f884f438c0e8b493c30373c409"}, + {file = "posthog-7.4.3-py3-none-any.whl", hash = "sha256:ae068f8954ee7a56d10ce35261580f1b8d99c6a2b6e878964eeacea1ec906b4a"}, + {file = "posthog-7.4.3.tar.gz", hash = "sha256:02484a32c8bf44ab489dcef270ada46e5ce324021258c322f0d1b567c2d6f174"}, ] [package.dependencies] @@ -2825,14 +2831,14 @@ cli = ["click (>=5.0)"] [[package]] name = "python-jsonpath" -version = "2.0.1" +version = "2.0.2" description = "JSONPath, JSON Pointer and JSON Patch for Python." 
optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "python_jsonpath-2.0.1-py3-none-any.whl", hash = "sha256:ebd518b7c883acc5b976518d76b6c96288405edec7d9ef838641869c1e1a5eb7"}, - {file = "python_jsonpath-2.0.1.tar.gz", hash = "sha256:32a84ebb2dc0ec1b42a6e165b0f9174aef8310bad29154ad9aee31ac37cca18f"}, + {file = "python_jsonpath-2.0.2-py3-none-any.whl", hash = "sha256:3f8ab612f815ce10c03bf0deaede87235f3381b109a60b4a22744069953627e3"}, + {file = "python_jsonpath-2.0.2.tar.gz", hash = "sha256:41abb6660b3ee54d5ae77e4b0e901049fb1662ad90de241f038df47edc75ee60"}, ] [package.extras] @@ -3407,27 +3413,36 @@ blobfile = ["blobfile (>=2)"] [[package]] name = "tokenizers" -version = "0.22.1" +version = "0.22.2" description = "" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73"}, - {file = "tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f"}, - {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4"}, - {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879"}, - {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446"}, - {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a"}, - {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390"}, - {file = "tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82"}, - {file = "tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138"}, - {file = "tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9"}, + {file = "tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c"}, + {file = "tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5"}, + {file = "tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92"}, + {file = "tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48"}, + {file = "tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:753d47ebd4542742ef9261d9da92cd545b2cacbb48349a1225466745bb866ec4"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e10bf9113d209be7cd046d40fbabbaf3278ff6d18eb4da4c500443185dc1896c"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64d94e84f6660764e64e7e0b22baa72f6cd942279fdbb21d46abd70d179f0195"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f01a9c019878532f98927d2bacb79bbb404b43d3437455522a00a30718cdedb5"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319f659ee992222f04e58f84cbf407cfa66a65fe3a8de44e8ad2bc53e7d99012"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e50f8554d504f617d9e9d6e4c2c2884a12b388a97c5c77f0bc6cf4cd032feee"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a62ba2c5faa2dd175aaeed7b15abf18d20266189fb3406c5d0550dd34dd5f37"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143b999bdc46d10febb15cbffb4207ddd1f410e2c755857b5a0797961bbdc113"}, + {file = "tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917"}, ] [package.dependencies] @@ -3436,7 +3451,7 @@ huggingface-hub = 
">=0.16.4,<2.0" [package.extras] dev = ["tokenizers[testing]"] docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] -testing = ["black (==22.3)", "datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff"] +testing = ["datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff", "ty"] [[package]] name = "toml" @@ -3474,14 +3489,14 @@ telegram = ["requests"] [[package]] name = "typer-slim" -version = "0.21.0" +version = "0.21.1" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "typer_slim-0.21.0-py3-none-any.whl", hash = "sha256:92aee2188ac6fc2b2924bd75bb61a340b78bd8cd51fd9735533ce5a856812c8e"}, - {file = "typer_slim-0.21.0.tar.gz", hash = "sha256:f2dbd150cfa0fead2242e21fa9f654dfc64773763ddf07c6be9a49ad34f79557"}, + {file = "typer_slim-0.21.1-py3-none-any.whl", hash = "sha256:6e6c31047f171ac93cc5a973c9e617dbc5ab2bddc4d0a3135dc161b4e2020e0d"}, + {file = "typer_slim-0.21.1.tar.gz", hash = "sha256:73495dd08c2d0940d611c5a8c04e91c2a0a98600cbd4ee19192255a233b6dbfd"}, ] [package.dependencies] diff --git a/sdk/pyproject.toml b/sdk/pyproject.toml index 8577876238..d6e1c588e3 100644 --- a/sdk/pyproject.toml +++ b/sdk/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "agenta" -version = "0.72.1" +version = "0.76.0" description = "The SDK for agenta is an open-source LLMOps platform." readme = "README.md" authors = [ diff --git a/web/ee/package.json b/web/ee/package.json index c9f4581604..d5b52fc6c6 100644 --- a/web/ee/package.json +++ b/web/ee/package.json @@ -1,6 +1,6 @@ { "name": "@agenta/ee", - "version": "0.72.1", + "version": "0.76.0", "private": true, "engines": { "node": ">=18" @@ -26,8 +26,8 @@ "@lexical/code-shiki": "^0.38.2", "@monaco-editor/react": "^4.7.0-rc.0", "@phosphor-icons/react": "^2.1.10", - "@tanstack/query-core": "^5.90.12", - "@tanstack/react-query": "^5.90.12", + "@tanstack/query-core": "^5.90.16", + "@tanstack/react-query": "^5.90.16", "@tremor/react": "^3.18.7", "@types/js-yaml": "^4.0.9", "@types/lodash": "^4.17.18", @@ -37,7 +37,7 @@ "@types/react-window": "^1.8.8", "@types/recharts": "^2.0.1", "@types/uuid": "^10.0.0", - "antd": "^6.1.0", + "antd": "^6.1.3", "autoprefixer": "10.4.20", "axios": "^1.12.2", "classnames": "^2.3.2", @@ -47,7 +47,7 @@ "dotenv": "^16.5.0", "fast-deep-equal": "^3.1.3", "immer": "^10.1.1", - "jotai": "^2.16.0", + "jotai": "^2.16.1", "jotai-eager": "^0.2.3", "jotai-immer": "^0.4.1", "jotai-scheduler": "^0.0.5", diff --git a/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset/components/LoadEvaluatorPresetContent.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset/components/LoadEvaluatorPresetContent.tsx index 5de2069511..12eabd7732 100644 --- a/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset/components/LoadEvaluatorPresetContent.tsx +++ b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset/components/LoadEvaluatorPresetContent.tsx @@ -17,7 +17,7 @@ const LoadEvaluatorPresetContent = ({ const [searchTerm, setSearchTerm] = useState("") const [format, setFormat] = useState<"yaml" | "json">("yaml") - const filteredTestset = !searchTerm + const filteredPresets = !searchTerm ? 
settingsPresets : settingsPresets.filter((preset: SettingsPreset) => preset.name.toLowerCase().includes(searchTerm.toLowerCase()), @@ -45,7 +45,7 @@ const LoadEvaluatorPresetContent = ({ ({ + items={filteredPresets.map((preset) => ({ key: preset.key, label: preset.name, }))} diff --git a/web/ee/src/state/billing/atoms.ts b/web/ee/src/state/billing/atoms.ts index 974e9a8dec..95f6f3e80e 100644 --- a/web/ee/src/state/billing/atoms.ts +++ b/web/ee/src/state/billing/atoms.ts @@ -7,6 +7,7 @@ import {User} from "@/oss/lib/Types" import {selectedOrgIdAtom} from "@/oss/state/org" import {profileQueryAtom} from "@/oss/state/profile/selectors/user" import {projectIdAtom} from "@/oss/state/project" +import {sessionExistsAtom} from "@/oss/state/session" import {BillingPlan, DataUsageType, SubscriptionType} from "../../services/billing/types" @@ -51,6 +52,7 @@ export const subscriptionQueryAtom = atomWithQuery((get) => { const user = profileQuery.data as User | undefined const projectId = get(projectIdAtom) const organizationId = get(selectedOrgIdAtom) + const sessionExists = get(sessionExistsAtom) return { queryKey: ["billing", "subscription", projectId, user?.id, organizationId], @@ -64,7 +66,7 @@ export const subscriptionQueryAtom = atomWithQuery((get) => { refetchOnWindowFocus: false, refetchOnReconnect: false, refetchOnMount: true, - enabled: !!organizationId && !!user && !!projectId, + enabled: sessionExists && !!organizationId && !!user && !!projectId, retry: (failureCount, error) => { // Don't retry on client errors if ((error as any)?.response?.status >= 400 && (error as any)?.response?.status < 500) { diff --git a/web/entrypoint.sh b/web/entrypoint.sh index 84a845bd2c..cfd76431a9 100755 --- a/web/entrypoint.sh +++ b/web/entrypoint.sh @@ -10,8 +10,9 @@ if [ "$ENTRYPOINT_DIR" != "." 
]; then ENTRYPOINT_DIR="/app" fi -# Infer AGENTA_SENDGRID_ENABLED from SENDGRID_API_KEY -if [ -n "$SENDGRID_API_KEY" ]; then +# Infer AGENTA_SENDGRID_ENABLED from SENDGRID_API_KEY and sender address +SENDGRID_FROM_ADDRESS_VALUE="${SENDGRID_FROM_ADDRESS:-${AGENTA_AUTHN_EMAIL_FROM:-${AGENTA_SEND_EMAIL_FROM_ADDRESS}}}" +if [ -n "$SENDGRID_API_KEY" ] && [ -n "$SENDGRID_FROM_ADDRESS_VALUE" ]; then export AGENTA_SENDGRID_ENABLED="true" else export AGENTA_SENDGRID_ENABLED="false" @@ -19,17 +20,130 @@ fi mkdir -p "${ENTRYPOINT_DIR}/${AGENTA_LICENSE}/public" +# Derive frontend auth feature flags +# First, check if OIDC providers are configured +AUTH_OIDC_ENABLED="false" +EFFECTIVE_GOOGLE_OAUTH_CLIENT_ID="" +if [ -n "${GOOGLE_OAUTH_CLIENT_ID}" ] && [ -n "${GOOGLE_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_GOOGLE_OAUTH_CLIENT_ID="${GOOGLE_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_GOOGLE_WORKSPACES_OAUTH_CLIENT_ID="" +if [ -n "${GOOGLE_WORKSPACES_OAUTH_CLIENT_ID}" ] && [ -n "${GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_GOOGLE_WORKSPACES_OAUTH_CLIENT_ID="${GOOGLE_WORKSPACES_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_GITHUB_OAUTH_CLIENT_ID="" +if [ -n "${GITHUB_OAUTH_CLIENT_ID}" ] && [ -n "${GITHUB_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_GITHUB_OAUTH_CLIENT_ID="${GITHUB_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_FACEBOOK_OAUTH_CLIENT_ID="" +if [ -n "${FACEBOOK_OAUTH_CLIENT_ID}" ] && [ -n "${FACEBOOK_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_FACEBOOK_OAUTH_CLIENT_ID="${FACEBOOK_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_APPLE_OAUTH_CLIENT_ID="" +if [ -n "${APPLE_OAUTH_CLIENT_ID}" ] && { [ -n "${APPLE_OAUTH_CLIENT_SECRET}" ] || { [ -n "${APPLE_KEY_ID}" ] && [ -n "${APPLE_TEAM_ID}" ] && [ -n "${APPLE_PRIVATE_KEY}" ]; }; }; then + EFFECTIVE_APPLE_OAUTH_CLIENT_ID="${APPLE_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_DISCORD_OAUTH_CLIENT_ID="" +if [ -n "${DISCORD_OAUTH_CLIENT_ID}" ] && [ -n "${DISCORD_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_DISCORD_OAUTH_CLIENT_ID="${DISCORD_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_TWITTER_OAUTH_CLIENT_ID="" +if [ -n "${TWITTER_OAUTH_CLIENT_ID}" ] && [ -n "${TWITTER_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_TWITTER_OAUTH_CLIENT_ID="${TWITTER_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_GITLAB_OAUTH_CLIENT_ID="" +if [ -n "${GITLAB_OAUTH_CLIENT_ID}" ] && [ -n "${GITLAB_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_GITLAB_OAUTH_CLIENT_ID="${GITLAB_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_BITBUCKET_OAUTH_CLIENT_ID="" +if [ -n "${BITBUCKET_OAUTH_CLIENT_ID}" ] && [ -n "${BITBUCKET_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_BITBUCKET_OAUTH_CLIENT_ID="${BITBUCKET_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_LINKEDIN_OAUTH_CLIENT_ID="" +if [ -n "${LINKEDIN_OAUTH_CLIENT_ID}" ] && [ -n "${LINKEDIN_OAUTH_CLIENT_SECRET}" ]; then + EFFECTIVE_LINKEDIN_OAUTH_CLIENT_ID="${LINKEDIN_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_OKTA_OAUTH_CLIENT_ID="" +if [ -n "${OKTA_OAUTH_CLIENT_ID}" ] && [ -n "${OKTA_OAUTH_CLIENT_SECRET}" ] && [ -n "${OKTA_DOMAIN}" ]; then + EFFECTIVE_OKTA_OAUTH_CLIENT_ID="${OKTA_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_AZURE_AD_OAUTH_CLIENT_ID="" +AZURE_AD_OAUTH_CLIENT_ID_VALUE="${AZURE_AD_OAUTH_CLIENT_ID:-${ACTIVE_DIRECTORY_OAUTH_CLIENT_ID}}" +AZURE_AD_OAUTH_CLIENT_SECRET_VALUE="${AZURE_AD_OAUTH_CLIENT_SECRET:-${ACTIVE_DIRECTORY_OAUTH_CLIENT_SECRET}}" 
+AZURE_AD_DIRECTORY_ID_VALUE="${AZURE_AD_DIRECTORY_ID:-${ACTIVE_DIRECTORY_DIRECTORY_ID}}" +if [ -n "${AZURE_AD_OAUTH_CLIENT_ID_VALUE}" ] && [ -n "${AZURE_AD_OAUTH_CLIENT_SECRET_VALUE}" ] && [ -n "${AZURE_AD_DIRECTORY_ID_VALUE}" ]; then + EFFECTIVE_AZURE_AD_OAUTH_CLIENT_ID="${AZURE_AD_OAUTH_CLIENT_ID_VALUE}" + AUTH_OIDC_ENABLED="true" +fi + +EFFECTIVE_BOXY_SAML_OAUTH_CLIENT_ID="" +if [ -n "${BOXY_SAML_OAUTH_CLIENT_ID}" ] && [ -n "${BOXY_SAML_OAUTH_CLIENT_SECRET}" ] && [ -n "${BOXY_SAML_URL}" ]; then + EFFECTIVE_BOXY_SAML_OAUTH_CLIENT_ID="${BOXY_SAML_OAUTH_CLIENT_ID}" + AUTH_OIDC_ENABLED="true" +fi + +# Derive email auth method from SUPERTOKENS_EMAIL_DISABLED and SendGrid status +SUPERTOKENS_EMAIL_DISABLED_VALUE="${SUPERTOKENS_EMAIL_DISABLED:-false}" +EFFECTIVE_AUTHN_EMAIL="" +if [ "${SUPERTOKENS_EMAIL_DISABLED_VALUE}" = "true" ]; then + EFFECTIVE_AUTHN_EMAIL="" +elif [ "${AGENTA_SENDGRID_ENABLED}" = "true" ]; then + EFFECTIVE_AUTHN_EMAIL="otp" +else + EFFECTIVE_AUTHN_EMAIL="password" +fi + +AUTH_EMAIL_ENABLED="false" +if [ "${EFFECTIVE_AUTHN_EMAIL}" = "password" ] || [ "${EFFECTIVE_AUTHN_EMAIL}" = "otp" ]; then + AUTH_EMAIL_ENABLED="true" +fi + cat > "${ENTRYPOINT_DIR}/${AGENTA_LICENSE}/public/__env.js" <=18" @@ -50,7 +50,7 @@ "@scalar/openapi-types": "^0.1.5", "@tailwindcss/container-queries": "^0.1.1", "@tailwindcss/forms": "^0.5.7", - "@tanstack/react-query": "^5.90.12", + "@tanstack/react-query": "^5.90.16", "@tremor/react": "^3.18.7", "@types/diff": "^5.0.9", "@types/js-beautify": "^1.14.0", @@ -65,7 +65,7 @@ "@types/react-window": "^1.8.8", "@types/uuid": "^10.0.0", "ajv": "^8.17.1", - "antd": "^6.1.0", + "antd": "^6.1.3", "autoprefixer": "10.4.20", "axios": "^1.12.2", "blakejs": "^1.2.1", @@ -77,7 +77,7 @@ "fast-deep-equal": "^3.1.3", "highlight.js": "^11.11.1", "immer": "^10.1.3", - "jotai": "^2.16.0", + "jotai": "^2.16.1", "jotai-eager": "^0.2.3", "jotai-immer": "^0.4.1", "jotai-scheduler": "^0.0.5", diff --git a/web/oss/src/assets/custom-resize-handle.css b/web/oss/src/assets/custom-resize-handle.css index c72529fe9b..2b46924618 100644 --- a/web/oss/src/assets/custom-resize-handle.css +++ b/web/oss/src/assets/custom-resize-handle.css @@ -2,8 +2,8 @@ position: absolute; right: -9px; top: 50%; - width: 0px; - height: 50%; + width: 18px; + height: 100%; cursor: col-resize; z-index: 2; transform: translateY(-50%); diff --git a/web/oss/src/code_snippets/endpoints/fetch_config/python.ts b/web/oss/src/code_snippets/endpoints/fetch_config/python.ts index ff594c9c64..3be4fb23c9 100644 --- a/web/oss/src/code_snippets/endpoints/fetch_config/python.ts +++ b/web/oss/src/code_snippets/endpoints/fetch_config/python.ts @@ -1,18 +1,17 @@ import {getEnv} from "@/oss/lib/helpers/dynamicEnv" export default function pythonCode(appName: string, env_name: string, apiKey: string): string { - return ` -import os + return `import os import agenta as ag -os.environ["AGENTA_API_KEY"] = "${apiKey}" # Add your API key here +os.environ["AGENTA_API_KEY"] = "${apiKey}" os.environ["AGENTA_HOST"] = "${getEnv("NEXT_PUBLIC_AGENTA_API_URL")}" ag.init() config = ag.ConfigManager.get_from_registry( app_slug="${appName}", - environment_slug="${env_name}" - ) + environment_slug="${env_name}", +) print(config) ` } diff --git a/web/oss/src/code_snippets/endpoints/fetch_config/typescript.ts b/web/oss/src/code_snippets/endpoints/fetch_config/typescript.ts index b6b134ce9d..39e742436d 100644 --- a/web/oss/src/code_snippets/endpoints/fetch_config/typescript.ts +++ b/web/oss/src/code_snippets/endpoints/fetch_config/typescript.ts 
@@ -22,8 +22,8 @@ const getConfig = async (appName: string, environmentSlug: string) => { }, }, { headers: { - 'Content-Type': 'application/json', - 'Authorization': "ApiKey ${apiKey}", // Add your API key here + 'Content-Type': 'application/json', + 'Authorization': "ApiKey ${apiKey}", }, }); diff --git a/web/oss/src/code_snippets/endpoints/fetch_variant/curl.ts b/web/oss/src/code_snippets/endpoints/fetch_variant/curl.ts index 35cdc05cb7..b950a62422 100644 --- a/web/oss/src/code_snippets/endpoints/fetch_variant/curl.ts +++ b/web/oss/src/code_snippets/endpoints/fetch_variant/curl.ts @@ -1,13 +1,14 @@ +import {getEnv} from "@/oss/lib/helpers/dynamicEnv" + export const buildCurlSnippet = ( appSlug: string, variantSlug: string, variantVersion: number, apiKey: string, ) => { - return `# Fetch configuration by variant -curl -X POST "https://cloud.agenta.ai/api/variants/configs/fetch" \\ + return `curl -X POST "${getEnv("NEXT_PUBLIC_AGENTA_API_URL")}/variants/configs/fetch" \\ -H "Content-Type: application/json" \\ - -H "Authorization: Bearer ${apiKey}" \\ + -H "Authorization: ApiKey ${apiKey}" \\ -d '{ "variant_ref": { "slug": "${variantSlug}", diff --git a/web/oss/src/code_snippets/endpoints/fetch_variant/python.ts b/web/oss/src/code_snippets/endpoints/fetch_variant/python.ts index c6c744f3ce..ac86c5d77f 100644 --- a/web/oss/src/code_snippets/endpoints/fetch_variant/python.ts +++ b/web/oss/src/code_snippets/endpoints/fetch_variant/python.ts @@ -3,13 +3,12 @@ export const buildPythonSnippet = ( variantSlug: string, variantVersion: number, ) => { - return `# Fetch configuration by variant -import agenta as ag + return `import agenta as ag config = ag.ConfigManager.get_from_registry( app_slug="${appSlug}", variant_slug="${variantSlug}", - variant_version=${variantVersion} # Optional: If not provided, fetches the latest version + variant_version=${variantVersion}, ) print("Fetched configuration:") diff --git a/web/oss/src/code_snippets/endpoints/fetch_variant/typescript.ts b/web/oss/src/code_snippets/endpoints/fetch_variant/typescript.ts index 122581f586..0953ac4f7d 100644 --- a/web/oss/src/code_snippets/endpoints/fetch_variant/typescript.ts +++ b/web/oss/src/code_snippets/endpoints/fetch_variant/typescript.ts @@ -1,32 +1,33 @@ +import {getEnv} from "@/oss/lib/helpers/dynamicEnv" + export const buildTypescriptSnippet = ( appSlug: string, variantSlug: string, variantVersion: number, apiKey: string, ) => { - return `// Fetch configuration by variant - const fetchResponse = await fetch('https://cloud.agenta.ai/api/variants/configs/fetch', { + return `const fetchResponse = await fetch('${getEnv("NEXT_PUBLIC_AGENTA_API_URL")}/variants/configs/fetch', { method: 'POST', headers: { 'Content-Type': 'application/json', - 'Authorization': 'Bearer ${apiKey}' + 'Authorization': 'ApiKey ${apiKey}', }, body: JSON.stringify({ variant_ref: { - slug: '${variantSlug}', - version: ${variantVersion}, - id: null + slug: '${variantSlug}', + version: ${variantVersion}, + id: null, }, application_ref: { - slug: '${appSlug}', - version: null, - id: null - } - }) - }); + slug: '${appSlug}', + version: null, + id: null, + }, + }), +}); - const config = await fetchResponse.json(); - console.log('Fetched configuration:'); - console.log(config); - ` +const config = await fetchResponse.json(); +console.log('Fetched configuration:'); +console.log(config); +` } diff --git a/web/oss/src/code_snippets/endpoints/invoke_llm_app/curl.ts b/web/oss/src/code_snippets/endpoints/invoke_llm_app/curl.ts index 5169dcf0e2..12106a466c 100644 --- 
a/web/oss/src/code_snippets/endpoints/invoke_llm_app/curl.ts +++ b/web/oss/src/code_snippets/endpoints/invoke_llm_app/curl.ts @@ -2,10 +2,9 @@ export default function cURLCode(uri: string, params: string, apiKey: string): s const parsedParams = JSON.parse(params) const isChat = parsedParams.messages !== undefined - return `# Add your API key to the Authorization header -curl -X POST "${uri}" \\ + return `curl -X POST "${uri}" \\ -H "Content-Type: application/json" \\ --H "Authorization: ApiKey ${apiKey}" \\${isChat ? '\n-H "Baggage: ag.session.id=your_session_id" \\ # Optional: track chat sessions' : ""} +-H "Authorization: ApiKey ${apiKey}" \\${isChat ? '\n-H "Baggage: ag.session.id=your_session_id" \\' : ""} -d '${params}' ` } diff --git a/web/oss/src/code_snippets/endpoints/invoke_llm_app/python.ts b/web/oss/src/code_snippets/endpoints/invoke_llm_app/python.ts index d8eca52722..897deb0352 100644 --- a/web/oss/src/code_snippets/endpoints/invoke_llm_app/python.ts +++ b/web/oss/src/code_snippets/endpoints/invoke_llm_app/python.ts @@ -8,8 +8,8 @@ import json url = "${uri}" params = ${params} headers = { - "Content-Type": "application/json", - "Authorization": "ApiKey ${apiKey}", # Add your API key here${isChat ? '\n "Baggage": "ag.session.id=your_session_id", # Optional: track chat sessions' : ""} + "Content-Type": "application/json", + "Authorization": "ApiKey ${apiKey}",${isChat ? '\n "Baggage": "ag.session.id=your_session_id",' : ""} } response = requests.post(url, json=params, headers=headers) diff --git a/web/oss/src/code_snippets/endpoints/invoke_llm_app/typescript.ts b/web/oss/src/code_snippets/endpoints/invoke_llm_app/typescript.ts index d3ab9f8b4e..fa2fdeccd0 100644 --- a/web/oss/src/code_snippets/endpoints/invoke_llm_app/typescript.ts +++ b/web/oss/src/code_snippets/endpoints/invoke_llm_app/typescript.ts @@ -11,7 +11,7 @@ const generate = async () => { const data = ${params}; const headers = { "Content-Type": "application/json", - "Authorization": "ApiKey ${apiKey}", // Add your API key here${isChat ? '\n "Baggage": "ag.session.id=your_session_id" // Optional: track chat sessions' : ""} + "Authorization": "ApiKey ${apiKey}",${isChat ? 
'\n "Baggage": "ag.session.id=your_session_id",' : ""} }; const response = await axios.post(url, data, { headers }); diff --git a/web/oss/src/components/AppGlobalWrappers/index.tsx b/web/oss/src/components/AppGlobalWrappers/index.tsx index 824f7ee6b5..65fc01449d 100644 --- a/web/oss/src/components/AppGlobalWrappers/index.tsx +++ b/web/oss/src/components/AppGlobalWrappers/index.tsx @@ -49,7 +49,9 @@ const VariantDrawerWrapper = dynamic( const VariantComparisonModalWrapper = dynamic( () => - import("@/oss/components/VariantsComponents/Modals/VariantComparisonModal/VariantComparisonModalWrapper"), + import( + "@/oss/components/VariantsComponents/Modals/VariantComparisonModal/VariantComparisonModalWrapper" + ), {ssr: false}, ) @@ -60,13 +62,17 @@ const DeleteEvaluationModalWrapper = dynamic( const DeployVariantModalWrapper = dynamic( () => - import("@/oss/components/Playground/Components/Modals/DeployVariantModal/DeployVariantModalWrapper"), + import( + "@/oss/components/Playground/Components/Modals/DeployVariantModal/DeployVariantModalWrapper" + ), {ssr: false}, ) const DeleteVariantModalWrapper = dynamic( () => - import("@/oss/components/Playground/Components/Modals/DeleteVariantModal/DeleteVariantModalWrapper"), + import( + "@/oss/components/Playground/Components/Modals/DeleteVariantModal/DeleteVariantModalWrapper" + ), {ssr: false}, ) diff --git a/web/oss/src/components/CellRenderers/CellContentPopover.tsx b/web/oss/src/components/CellRenderers/CellContentPopover.tsx new file mode 100644 index 0000000000..052c98a551 --- /dev/null +++ b/web/oss/src/components/CellRenderers/CellContentPopover.tsx @@ -0,0 +1,101 @@ +import {memo, useCallback, type ReactNode} from "react" + +import {Copy} from "@phosphor-icons/react" +import {Button, Popover} from "antd" + +import {message} from "@/oss/components/AppMessageContext" + +interface PopoverContentProps { + children: ReactNode + onCopy?: () => void +} + +/** + * Popover content wrapper with copy button + */ +const PopoverContentWrapper = memo(({children, onCopy}: PopoverContentProps) => { + return ( +
+        <div className="relative" onClick={(e) => e.stopPropagation()}>
+            {onCopy && (
+                <Button
+                    size="small"
+                    type="text"
+                    className="absolute right-0 top-0"
+                    icon={<Copy size={14} />}
+                    onClick={onCopy}
+                />
+            )}
+            <div className="pr-6">{children}</div>
+        </div>
+    )
+})
+PopoverContentWrapper.displayName = "PopoverContentWrapper"
+
+interface CellContentPopoverProps {
+    /** The cell content to wrap */
+    children: ReactNode
+    /** Full content to show in popover */
+    fullContent: ReactNode
+    /** Raw text for copy functionality */
+    copyText?: string
+    /** Disable popover */
+    disabled?: boolean
+    /** Max width of popover */
+    maxWidth?: number
+    /** Show copy button */
+    showCopy?: boolean
+}
+
+/**
+ * Wraps table cell content with a hover popover that shows the full content.
+ * Used to preview truncated cell content without opening the focus drawer.
+ *
+ * Features:
+ * - Hover trigger with delay to prevent accidental opens
+ * - Copy button for easy content copying
+ * - Destroy on hidden for performance
+ */
+const CellContentPopover = memo(
+    ({
+        children,
+        fullContent,
+        copyText,
+        disabled,
+        maxWidth = 500,
+        showCopy = true,
+    }: CellContentPopoverProps) => {
+        const handleCopy = useCallback(() => {
+            if (copyText) {
+                navigator.clipboard.writeText(copyText)
+                message.success("Copied to clipboard")
+            }
+        }, [copyText])
+
+        if (disabled) {
+            return <>{children}</>
+        }
+
+        return (
+            <Popover
+                trigger="hover"
+                mouseEnterDelay={0.5}
+                destroyOnHidden
+                content={
+                    <div style={{maxWidth}}>
+                        <PopoverContentWrapper
+                            onCopy={showCopy && copyText ? handleCopy : undefined}
+                        >
+                            {fullContent}
+                        </PopoverContentWrapper>
+                    </div>
+                }
+            >
+                {children}
+            </Popover>
+        )
+    },
+)
+CellContentPopover.displayName = "CellContentPopover"
+
+export default CellContentPopover
diff --git a/web/oss/src/components/CellRenderers/ChatMessagesCellContent.tsx b/web/oss/src/components/CellRenderers/ChatMessagesCellContent.tsx
new file mode 100644
index 0000000000..f07da8e8f1
--- /dev/null
+++ b/web/oss/src/components/CellRenderers/ChatMessagesCellContent.tsx
@@ -0,0 +1,219 @@
+import {memo, useMemo} from "react"
+
+import {ROLE_COLORS} from "./constants"
+import {extractChatMessages, normalizeChatMessages, truncateContent, tryParseJson} from "./utils"
+
+interface ChatMessagesCellContentProps {
+    /** Value that may contain chat messages */
+    value: unknown
+    /** Unique key prefix for React keys */
+    keyPrefix: string
+    /** Max lines per message when truncated */
+    maxLines?: number
+    /** Max total lines for the entire cell (used to calculate how many messages fit) */
+    maxTotalLines?: number
+    /** Whether to truncate content (default: true for cell, false for popover) */
+    truncate?: boolean
+    /** Show dividers between messages */
+    showDividers?: boolean
+}
+
+/**
+ * Format tool calls for display
+ */
+const formatToolCalls = (toolCalls: unknown[]): string => {
+    return toolCalls
+        .map((tc: any) => {
+            const name = tc?.function?.name || tc?.name || "tool"
+            const args = tc?.function?.arguments || tc?.arguments || ""
+            return `${name}(${typeof args === "string" ?
args : JSON.stringify(args)})` + }) + .join("\n") +} + +/** + * Get content as string for display - uses compact JSON to minimize lines + */ +const getContentString = (content: unknown): string => { + if (content === null || content === undefined) return "" + if (typeof content === "string") return content + if (Array.isArray(content)) { + // Handle OpenAI content array format + const textPart = content.find((c: any) => c?.type === "text") + if (textPart?.text) return String(textPart.text) + } + // Use compact JSON (no pretty printing) to minimize rendered lines + try { + return JSON.stringify(content) + } catch { + return String(content) + } +} + +interface SingleMessageProps { + message: {role: string; content: unknown; tool_calls?: unknown[]} + keyPrefix: string + index: number + truncate: boolean + maxLines: number + showDivider: boolean +} + +// Chars per line estimate - use generous value to fill wider columns +// Actual truncation is controlled by maxLines, this just sets max chars +const CHARS_PER_LINE = 80 + +/** + * Renders a single chat message + */ +const SingleMessage = memo( + ({message, keyPrefix, index, truncate, maxLines, showDivider}: SingleMessageProps) => { + const contentString = useMemo(() => getContentString(message.content), [message.content]) + // Calculate max chars based on maxLines to prevent overflow + const maxChars = maxLines * CHARS_PER_LINE + const displayContent = useMemo( + () => (truncate ? truncateContent(contentString, maxLines, maxChars) : contentString), + [contentString, truncate, maxLines, maxChars], + ) + const roleColor = ROLE_COLORS[message.role.toLowerCase()] ?? "#6b7280" + + return ( +
+            <div className="flex flex-col">
+                <span className="text-[11px] font-semibold uppercase" style={{color: roleColor}}>
+                    {message.role}
+                </span>
+                {displayContent && (
+                    <span className="whitespace-pre-wrap break-words">{displayContent}</span>
+                )}
+                {message.tool_calls && message.tool_calls.length > 0 && (
+                    <div className="flex flex-col">
+                        <span className="font-semibold">Tool Calls:</span>
+                        <pre className="m-0 whitespace-pre-wrap break-words">
+                            {formatToolCalls(message.tool_calls)}
+                        </pre>
+                    </div>
+                )}
+                {showDivider && (
+                    <div className="my-1 border-b border-solid border-gray-200" />
+                )}
+            </div>
+ ) + }, +) +SingleMessage.displayName = "SingleMessage" + +/** + * Renders chat messages (OpenAI format) as lightweight plain text blocks. + * Uses plain HTML elements instead of heavy editor components for performance. + * + * Features: + * - Auto-detects chat message arrays in various formats + * - Role-based color coding + * - Tool calls display + * - Truncation for cell preview + * + * Returns null if value doesn't contain chat messages. + */ +/** + * Select messages that fit within maxTotalLines budget + * Each message takes: 1 line for role + content lines (capped by maxLinesPerMessage) + */ +const selectMessagesToFit = ( + messages: unknown[], + maxTotalLines: number, + maxLinesPerMessage: number, +): {selected: unknown[]; totalCount: number} => { + const totalCount = messages.length + if (!maxTotalLines) { + return {selected: messages, totalCount} + } + + const selected: unknown[] = [] + let usedLines = 0 + const ROLE_LINE = 1 + + for (const msg of messages) { + // Each message will use at most: 1 role line + maxLinesPerMessage content lines + // Content is truncated to maxLinesPerMessage * CHARS_PER_LINE chars + const msgLines = ROLE_LINE + maxLinesPerMessage + + if (usedLines + msgLines > maxTotalLines) { + break + } + + selected.push(msg) + usedLines += msgLines + } + + // Always show at least one message + if (selected.length === 0 && messages.length > 0) { + selected.push(messages[0]) + } + + return {selected, totalCount} +} + +const ChatMessagesCellContent = memo( + ({ + value, + keyPrefix, + maxLines = 4, + maxTotalLines, + truncate = true, + showDividers = true, + }: ChatMessagesCellContentProps) => { + // Memoize message extraction and smart selection together + const {displayMessages, totalCount} = useMemo(() => { + // Parse JSON string if needed, otherwise use value directly + const parsed = typeof value === "string" ? tryParseJson(value).parsed : value + const extracted = extractChatMessages(parsed) + if (!extracted) return {displayMessages: [], totalCount: 0} + + // Smart selection: pick messages that fit within line budget + const {selected, totalCount: total} = selectMessagesToFit( + extracted, + maxTotalLines ?? 0, + maxLines, + ) + + // Only normalize the selected messages + const normalized = normalizeChatMessages(selected) + + return {displayMessages: normalized, totalCount: total} + }, [value, maxTotalLines, maxLines]) + + if (displayMessages.length === 0) { + return null + } + + const hasMore = maxTotalLines && totalCount > displayMessages.length + + return ( +
+ {displayMessages.map((msg, i) => ( + + ))} + {hasMore && ( + + +{totalCount - displayMessages.length} more message + {totalCount - displayMessages.length > 1 ? "s" : ""} + + )} +
+ ) + }, +) +ChatMessagesCellContent.displayName = "ChatMessagesCellContent" + +export default ChatMessagesCellContent diff --git a/web/oss/src/components/CellRenderers/JsonCellContent.tsx b/web/oss/src/components/CellRenderers/JsonCellContent.tsx new file mode 100644 index 0000000000..0ed7a7713c --- /dev/null +++ b/web/oss/src/components/CellRenderers/JsonCellContent.tsx @@ -0,0 +1,46 @@ +import {memo, useMemo} from "react" + +import {JSON_HIGHLIGHT_COLOR} from "./constants" +import {safeJsonStringify, truncateContent} from "./utils" + +interface JsonCellContentProps { + /** JSON value to render */ + value: unknown + /** Max lines to show when truncated */ + maxLines?: number + /** Max characters to show when truncated */ + maxChars?: number + /** Whether to truncate content (default: true for cell, false for popover) */ + truncate?: boolean +} + +/** + * Renders JSON content as plain formatted text with syntax highlighting color. + * Uses plain
<pre> tags instead of heavy editor components for performance.
+ *
+ * Optimizations:
+ * - No Lexical/CodeMirror editor overhead
+ * - Truncation for cell preview
+ * - Memoized JSON stringification
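+ *
+ * @example
+ * // A minimal usage sketch (the value and props shown are illustrative):
+ * <JsonCellContent value={{input: "Hello", tags: ["a", "b"]}} maxLines={4} />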
+ */
+const JsonCellContent = memo(
+    ({value, maxLines = 10, maxChars = 500, truncate = true}: JsonCellContentProps) => {
+        const jsonString = useMemo(() => {
+            const full = safeJsonStringify(value)
+            if (!truncate) return full
+            return truncateContent(full, maxLines, maxChars)
+        }, [value, truncate, maxLines, maxChars])
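+        // Worked example (illustrative): with maxLines = 2 and the default
+        // maxChars = 500, safeJsonStringify({a: 1}) pretty-prints as three
+        // lines ('{', '  "a": 1', '}'), so truncateContent keeps the first two
+        // and appends an ellipsis line, yielding '{\n  "a": 1\n...'.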
+
+        return (
+            <pre style={{color: JSON_HIGHLIGHT_COLOR}}>
+                {jsonString}
+            </pre>
+ ) + }, +) +JsonCellContent.displayName = "JsonCellContent" + +export default JsonCellContent diff --git a/web/oss/src/components/CellRenderers/SmartCellContent.tsx b/web/oss/src/components/CellRenderers/SmartCellContent.tsx new file mode 100644 index 0000000000..df50c58a30 --- /dev/null +++ b/web/oss/src/components/CellRenderers/SmartCellContent.tsx @@ -0,0 +1,149 @@ +import {memo, useMemo} from "react" + +import {Typography} from "antd" + +import CellContentPopover from "./CellContentPopover" +import ChatMessagesCellContent from "./ChatMessagesCellContent" +import JsonCellContent from "./JsonCellContent" +import TextCellContent from "./TextCellContent" +import {extractChatMessages, normalizeValue, safeJsonStringify, tryParseJson} from "./utils" + +const {Text} = Typography + +interface SmartCellContentProps { + /** Value to render - auto-detects type */ + value: unknown + /** Unique key prefix for React keys (used for chat messages) */ + keyPrefix?: string + /** Max lines to show in cell preview */ + maxLines?: number + /** CSS class for the container */ + className?: string + /** Whether to show popover on hover */ + showPopover?: boolean +} + +/** + * Smart cell content renderer that auto-detects value type and renders appropriately. + * + * Detection order: + * 1. Empty/null → placeholder + * 2. Chat messages array → ChatMessagesCellContent + * 3. JSON object/array → JsonCellContent + * 4. Plain text → TextCellContent + * + * Features: + * - Auto-detection of content type + * - Truncation for cell preview + * - Full content in popover on hover + * - Copy functionality in popover + */ +const SmartCellContent = memo( + ({ + value, + keyPrefix = "cell", + maxLines = 10, + className = "", + showPopover = true, + }: SmartCellContentProps) => { + // Parse JSON if needed + const {parsed: jsonValue, isJson} = useMemo(() => tryParseJson(value), [value]) + + // Check for chat messages + const chatMessages = useMemo(() => extractChatMessages(jsonValue), [jsonValue]) + const isChatMessages = chatMessages !== null && chatMessages.length > 0 + + // Get display value for plain text + const displayValue = useMemo(() => normalizeValue(value), [value]) + + // Get copy text for popover + const copyText = useMemo(() => { + if (value === undefined || value === null) return undefined + if (isChatMessages || isJson) return safeJsonStringify(jsonValue) + return displayValue + }, [value, isChatMessages, isJson, jsonValue, displayValue]) + + // Handle empty values + if (value === undefined || value === null || value === "") { + return ( +
+ + — + +
+ ) + } + + // Render chat messages + if (isChatMessages) { + const cellContent = ( +
+ +
+ ) + + if (!showPopover) return cellContent + + return ( + + } + copyText={copyText} + > + {cellContent} + + ) + } + + // Render JSON + if (isJson) { + const cellContent = ( +
+ +
+ ) + + if (!showPopover) return cellContent + + return ( + } + copyText={copyText} + > + {cellContent} + + ) + } + + // Render plain text + const cellContent = ( +
+ +
+ ) + + if (!showPopover) return cellContent + + return ( + } + copyText={copyText} + > + {cellContent} + + ) + }, +) +SmartCellContent.displayName = "SmartCellContent" + +export default SmartCellContent diff --git a/web/oss/src/components/CellRenderers/TextCellContent.tsx b/web/oss/src/components/CellRenderers/TextCellContent.tsx new file mode 100644 index 0000000000..e5513ed8b4 --- /dev/null +++ b/web/oss/src/components/CellRenderers/TextCellContent.tsx @@ -0,0 +1,43 @@ +import {memo, useMemo} from "react" + +import {Typography} from "antd" + +import {truncateContent} from "./utils" + +const {Text} = Typography + +interface TextCellContentProps { + /** Text value to render */ + value: string + /** Max lines to show when truncated */ + maxLines?: number + /** Max characters to show when truncated */ + maxChars?: number + /** Whether to truncate content (default: true for cell, false for popover) */ + truncate?: boolean + /** Additional CSS class */ + className?: string +} + +/** + * Renders plain text content with truncation support. + * + * Optimizations: + * - Truncation for cell preview + * - Memoized truncation + */ +const TextCellContent = memo( + ({value, maxLines = 10, maxChars = 500, truncate = true, className}: TextCellContentProps) => { + const displayValue = useMemo(() => { + if (!truncate) return value + return truncateContent(value, maxLines, maxChars) + }, [value, truncate, maxLines, maxChars]) + + return ( + {displayValue} + ) + }, +) +TextCellContent.displayName = "TextCellContent" + +export default TextCellContent diff --git a/web/oss/src/components/CellRenderers/constants.ts b/web/oss/src/components/CellRenderers/constants.ts new file mode 100644 index 0000000000..869d7bcc8b --- /dev/null +++ b/web/oss/src/components/CellRenderers/constants.ts @@ -0,0 +1,22 @@ +/** + * Shared constants for cell content rendering + */ + +// Default max lines for cell preview based on row height +// Small (80px): ~4 lines, Medium (160px): ~10 lines, Large (280px): ~18 lines +export const DEFAULT_MAX_LINES = 10 + +// Max characters for cell preview - prevents rendering huge text blocks +export const MAX_CELL_CHARS = 500 + +// JSON syntax highlighting color +export const JSON_HIGHLIGHT_COLOR = "#9d4edd" + +// Chat message role colors +export const ROLE_COLORS: Record = { + user: "#3b82f6", + assistant: "#10b981", + system: "#f59e0b", + function: "#8b5cf6", + tool: "#ec4899", +} diff --git a/web/oss/src/components/CellRenderers/index.ts b/web/oss/src/components/CellRenderers/index.ts new file mode 100644 index 0000000000..91369b7717 --- /dev/null +++ b/web/oss/src/components/CellRenderers/index.ts @@ -0,0 +1,21 @@ +/** + * Shared cell content renderers for table cells + * + * These components provide optimized, consistent rendering for testcase/scenario data + * across different tables (TestcasesTable, EvalRunDetails scenarios table, etc.) 
+ * + * Key optimizations: + * - Plain text rendering instead of heavy editor components + * - Truncation for cell preview (lines + characters) + * - Full content in popover on hover + * - Memoization for performance + */ + +export {default as CellContentPopover} from "./CellContentPopover" +export {default as JsonCellContent} from "./JsonCellContent" +export {default as TextCellContent} from "./TextCellContent" +export {default as ChatMessagesCellContent} from "./ChatMessagesCellContent" +export {default as SmartCellContent} from "./SmartCellContent" + +export * from "./utils" +export * from "./constants" diff --git a/web/oss/src/components/CellRenderers/utils.ts b/web/oss/src/components/CellRenderers/utils.ts new file mode 100644 index 0000000000..b98333395c --- /dev/null +++ b/web/oss/src/components/CellRenderers/utils.ts @@ -0,0 +1,228 @@ +/** + * Shared utility functions for cell content rendering + */ + +import {DEFAULT_MAX_LINES, MAX_CELL_CHARS} from "./constants" + +/** + * Truncate string to first N lines for cell preview + */ +export const truncateToLines = (str: string, maxLines: number = DEFAULT_MAX_LINES): string => { + const lines = str.split("\n") + if (lines.length <= maxLines) return str + return lines.slice(0, maxLines).join("\n") + "\n..." +} + +/** + * Truncate string to max characters for cell preview + * This is critical for performance - prevents rendering huge text blocks + */ +export const truncateToChars = (str: string, maxChars: number = MAX_CELL_CHARS): string => { + if (str.length <= maxChars) return str + return str.slice(0, maxChars) + "..." +} + +/** + * Apply both line and character truncation + */ +export const truncateContent = ( + str: string, + maxLines: number = DEFAULT_MAX_LINES, + maxChars: number = MAX_CELL_CHARS, +): string => { + const linesTruncated = truncateToLines(str, maxLines) + return truncateToChars(linesTruncated, maxChars) +} + +/** + * Safely stringify a value to JSON + */ +export const safeJsonStringify = (value: unknown): string => { + try { + return JSON.stringify(value, null, 2) + } catch { + return String(value) + } +} + +/** + * Try to parse a JSON string, returns the parsed value and whether it's JSON + */ +export const tryParseJson = (value: unknown): {parsed: unknown; isJson: boolean} => { + if (value === null || value === undefined) { + return {parsed: value, isJson: false} + } + // Already an object/array + if (typeof value === "object") { + return {parsed: value, isJson: true} + } + // Try to parse string as JSON + if (typeof value === "string") { + const trimmed = value.trim() + if ( + (trimmed.startsWith("{") && trimmed.endsWith("}")) || + (trimmed.startsWith("[") && trimmed.endsWith("]")) + ) { + try { + const parsed = JSON.parse(trimmed) + return {parsed, isJson: true} + } catch { + return {parsed: value, isJson: false} + } + } + } + return {parsed: value, isJson: false} +} + +/** + * Normalize value to display string + */ +export const normalizeValue = (value: unknown): string => { + if (value === null || value === undefined) return "—" + if (typeof value === "string") return value + if (typeof value === "number" || typeof value === "boolean") { + return String(value) + } + return safeJsonStringify(value) +} + +/** + * Check if a single entry looks like a chat message + */ +const isChatEntry = (entry: unknown): boolean => { + if (!entry || typeof entry !== "object") return false + const obj = entry as Record + + const hasRole = + typeof obj.role === "string" || + typeof obj.sender === "string" || + typeof obj.author === 
"string" + + if (!hasRole) return false + + // Check for content in various formats + return ( + obj.content !== undefined || + obj.text !== undefined || + obj.message !== undefined || + Array.isArray(obj.content) || + Array.isArray(obj.parts) || + Array.isArray(obj.tool_calls) || + typeof (obj.delta as Record)?.content === "string" + ) +} + +/** + * Check if a value looks like chat messages (array with role/content structure) + */ +export const isChatMessagesArray = (value: unknown): boolean => { + if (!Array.isArray(value)) return false + if (value.length === 0) return false + + // Check if at least one item looks like a chat message + return value.some(isChatEntry) +} + +/** + * Extract chat messages array from various formats + */ +const CHAT_ARRAY_KEYS = [ + "messages", + "message_history", + "history", + "chat", + "conversation", + "logs", + "responses", + "output_messages", +] + +export const extractChatMessages = (value: unknown): unknown[] | null => { + if (!value) return null + + // Direct array - check if it looks like chat messages + if (Array.isArray(value)) { + // Return array if it has chat-like entries + if (isChatMessagesArray(value)) { + return value + } + return null + } + + if (typeof value !== "object") return null + + // Object with known chat array keys - less strict, just check if array exists + for (const key of CHAT_ARRAY_KEYS) { + const arr = (value as Record)[key] + if (Array.isArray(arr)) { + return arr + } + } + + // OpenAI choices format + const choices = (value as Record).choices + if (Array.isArray(choices)) { + const messages = choices + .map((choice: any) => choice?.message || choice?.delta) + .filter(Boolean) + if (messages.length) { + return messages + } + } + + // Single message object - check if it looks like a chat entry + if (isChatMessagesArray([value])) { + return [value] + } + + return null +} + +/** + * Normalize chat messages to consistent format + */ +export interface NormalizedChatMessage { + role: string + content: unknown + tool_calls?: unknown[] +} + +export const normalizeChatMessages = (messages: unknown[]): NormalizedChatMessage[] => { + const result: NormalizedChatMessage[] = [] + + for (const entry of messages) { + if (!entry) continue + + if (typeof entry === "string") { + result.push({role: "assistant", content: entry}) + continue + } + + if (typeof entry !== "object") continue + + const obj = entry as Record + const role = + (typeof obj.role === "string" && obj.role) || + (typeof obj.sender === "string" && obj.sender) || + (typeof obj.author === "string" && obj.author) || + "assistant" + + const content = + obj.content ?? + obj.text ?? + obj.message ?? + (obj.delta as Record)?.content ?? + obj.response ?? + (Array.isArray(obj.parts) ? obj.parts : undefined) + + const toolCalls = Array.isArray(obj.tool_calls) ? 
obj.tool_calls : undefined + + if (content === undefined && !toolCalls) { + continue + } + + result.push({role, content, tool_calls: toolCalls}) + } + + return result +} diff --git a/web/oss/src/components/CustomUIs/CustomTreeComponent/assets/styles.ts b/web/oss/src/components/CustomUIs/CustomTreeComponent/assets/styles.ts index 7e156bb88b..3090759158 100644 --- a/web/oss/src/components/CustomUIs/CustomTreeComponent/assets/styles.ts +++ b/web/oss/src/components/CustomUIs/CustomTreeComponent/assets/styles.ts @@ -9,12 +9,13 @@ export const useStyles = createUseStyles((theme: JSSTheme) => ({ position: "absolute", left: 6, top: 0, - bottom: -6, + bottom: -12, width: 1, backgroundColor: theme.colorBorder, }, "&.last::before": { height: "50%", + bottom: "auto", }, }, nodeLabel: { @@ -22,8 +23,8 @@ export const useStyles = createUseStyles((theme: JSSTheme) => ({ cursor: "default", display: "flex", alignItems: "center", - marginTop: 4, - marginBottom: 4, + marginTop: 12, + marginBottom: 12, "&::before": { content: "''", position: "absolute", diff --git a/web/oss/src/components/CustomUIs/CustomTreeComponent/index.tsx b/web/oss/src/components/CustomUIs/CustomTreeComponent/index.tsx index 6fd0ec11cf..a7b78b0e1b 100644 --- a/web/oss/src/components/CustomUIs/CustomTreeComponent/index.tsx +++ b/web/oss/src/components/CustomUIs/CustomTreeComponent/index.tsx @@ -1,67 +1,96 @@ -import React, {useState} from "react" +import React, {useMemo, useState} from "react" import {MinusSquareOutlined, PlusSquareOutlined} from "@ant-design/icons" -import {TreeContent} from "@/oss/components/SharedDrawers/TraceDrawer/components/TraceTree" -import {TraceSpanNode} from "@/oss/services/tracing/types" - import {useStyles} from "./assets/styles" /** * CustomTree is a recursive tree view component for rendering a hierarchy of nodes. * * This component is highly customizable and highlights the selected node. - * It supports displaying additional metrics like latency, cost, and token usage. + * It supports custom node rendering and optional default expansion. * * Example usage: * ```tsx * node.id} + * getChildren={(node) => node.children} + * renderLabel={(node) => node.title} * selectedKey={selectedNodeId} - * onSelect={(key) => setSelectedNodeId(key)} + * onSelect={(key, node) => setSelectedNodeId(key)} * /> * ``` */ -interface TreeProps { +interface TreeProps { /** * Root node of the hierarchical data structure. */ - data: TraceSpanNode + data: TNode + + /** + * Returns a stable key for a node. + */ + getKey: (node: TNode) => string /** - * Settings for what additional metrics to show in each node. + * Returns child nodes for a node. */ - settings: { - latency: boolean - cost: boolean - tokens: boolean - } + getChildren: (node: TNode) => TNode[] | undefined + + /** + * Render the label content for a node. + */ + renderLabel: (node: TNode) => React.ReactNode /** * The currently selected node key (ID). */ - selectedKey: string | null + selectedKey?: string | null /** * Function to handle when a node is selected. */ - onSelect: (key: string) => void + onSelect?: (key: string, node: TNode) => void + + /** + * Default expansion state for nodes without explicit `expanded` metadata. 
+ */ + defaultExpanded?: boolean } -const TreeNodeComponent: React.FC<{ - node: TraceSpanNode +const TreeNodeComponent = ({ + node, + isLast, + getKey, + getChildren, + renderLabel, + selectedKey, + onSelect, + defaultExpanded = true, + isRoot = false, +}: { + node: TNode isLast: boolean - settings: {latency: boolean; cost: boolean; tokens: boolean} - selectedKey: string | null - onSelect: (key: string) => void + getKey: (node: TNode) => string + getChildren: (node: TNode) => TNode[] | undefined + renderLabel: (node: TNode) => React.ReactNode + selectedKey?: string | null + onSelect?: (key: string, node: TNode) => void + defaultExpanded?: boolean isRoot?: boolean -}> = ({node, isLast, settings, selectedKey, onSelect, isRoot = false}) => { +}) => { const classes = useStyles() - const [expanded, setExpanded] = useState( - typeof (node as any).expanded === "boolean" ? (node as any).expanded : true, - ) - const hasChildren = node.children && node.children.length > 0 + const initialExpanded = useMemo(() => { + if (typeof (node as {expanded?: boolean}).expanded === "boolean") { + return (node as {expanded?: boolean}).expanded as boolean + } + return defaultExpanded + }, [defaultExpanded, node]) + const [expanded, setExpanded] = useState(initialExpanded) + const children = getChildren(node) ?? [] + const hasChildren = children.length > 0 + const nodeKey = getKey(node) const toggle = () => setExpanded((prev) => !prev) @@ -81,7 +110,7 @@ const TreeNodeComponent: React.FC<{ ? `${classes.nodeLabel} ${shouldShowAsLast ? "last" : ""}` : "flex items-center" } - onClick={() => onSelect(node.span_id)} + onClick={() => onSelect?.(nodeKey, node)} > {hasChildren && ( - + {renderLabel(node)} {hasChildren && expanded && (
- {node.children!.map((child, index) => ( + {children.map((child, index) => ( ))} @@ -125,15 +157,26 @@ const TreeNodeComponent: React.FC<{ ) } -const CustomTree: React.FC = ({data, settings, selectedKey, onSelect}) => { +const CustomTree = ({ + data, + getKey, + getChildren, + renderLabel, + selectedKey, + onSelect, + defaultExpanded, +}: TreeProps) => { return ( -
+
diff --git a/web/oss/src/components/DeploymentsDashboard/assets/VariantUseApiContent.tsx b/web/oss/src/components/DeploymentsDashboard/assets/VariantUseApiContent.tsx index 915b8a7396..b68f4fdf08 100644 --- a/web/oss/src/components/DeploymentsDashboard/assets/VariantUseApiContent.tsx +++ b/web/oss/src/components/DeploymentsDashboard/assets/VariantUseApiContent.tsx @@ -1,19 +1,23 @@ import {useCallback, useEffect, useMemo, useState} from "react" import {PythonOutlined} from "@ant-design/icons" -import {FileCodeIcon, FileTsIcon} from "@phosphor-icons/react" -import {Tabs, Typography} from "antd" +import {FileCode, FileTs} from "@phosphor-icons/react" +import {Spin, Tabs, Typography} from "antd" import {useAtomValue} from "jotai" import dynamic from "next/dynamic" import {buildCurlSnippet} from "@/oss/code_snippets/endpoints/fetch_variant/curl" import {buildPythonSnippet} from "@/oss/code_snippets/endpoints/fetch_variant/python" import {buildTypescriptSnippet} from "@/oss/code_snippets/endpoints/fetch_variant/typescript" -import CopyButton from "@/oss/components/CopyButton/CopyButton" -import CodeBlock from "@/oss/components/DynamicCodeBlock/CodeBlock" +import invokeLlmAppcURLCode from "@/oss/code_snippets/endpoints/invoke_llm_app/curl" +import invokeLlmApppythonCode from "@/oss/code_snippets/endpoints/invoke_llm_app/python" +import invokeLlmApptsCode from "@/oss/code_snippets/endpoints/invoke_llm_app/typescript" +import LanguageCodeBlock from "@/oss/components/pages/overview/deployments/DeploymentDrawer/assets/LanguageCodeBlock" import SelectVariant from "@/oss/components/Playground/Components/Menus/SelectVariant" import VariantDetailsWithStatus from "@/oss/components/VariantDetailsWithStatus" -import {currentAppAtom} from "@/oss/state/app" +import {useAppId} from "@/oss/hooks/useAppId" +import {currentAppAtom, useURI} from "@/oss/state/app" +import {stablePromptVariablesAtomFamily} from "@/oss/state/newPlayground/core/prompts" import {revisionsByVariantIdAtomFamily, variantsAtom} from "@/oss/state/variant/atoms/fetcher" import { latestRevisionInfoByVariantIdAtomFamily, @@ -29,13 +33,8 @@ interface VariantUseApiContentProps { initialRevisionId?: string } -interface CodeSnippets { - python: string - typescript: string - bash: string -} - const VariantUseApiContent = ({initialRevisionId}: VariantUseApiContentProps) => { + const appId = useAppId() const variants = useAtomValue(variantsAtom) const revisionList = useAtomValue(revisionListAtom) const currentApp = useAtomValue(currentAppAtom) @@ -45,6 +44,15 @@ const VariantUseApiContent = ({initialRevisionId}: VariantUseApiContentProps) => const [selectedLang, setSelectedLang] = useState("python") const [apiKeyValue, setApiKeyValue] = useState("") + // Get URI for the selected variant + const {data: uri, isLoading: isUriQueryLoading} = useURI(appId, selectedVariantId) + const isLoading = Boolean(selectedVariantId) && isUriQueryLoading + + // Get variable names for the selected revision + const variableNames = useAtomValue( + stablePromptVariablesAtomFamily(selectedRevisionId || ""), + ) as string[] + const initialRevision = useMemo( () => revisionList.find((rev) => rev.id === initialRevisionId), [initialRevisionId, revisionList], @@ -120,13 +128,52 @@ const VariantUseApiContent = ({initialRevisionId}: VariantUseApiContentProps) => const variantSlug = (selectedVariant as any)?.variantSlug || selectedVariant?.variantName || - selectedRevision?.variantName || + (selectedRevision as any)?.variantName || "my-variant-slug" const variantVersion = 
selectedRevision?.revision ?? latestRevision?.revision ?? 1 const appSlug = (currentApp as any)?.app_slug || currentApp?.app_name || "my-app-slug" const apiKey = apiKeyValue || "YOUR_API_KEY" - const codeSnippets: CodeSnippets = useMemo( + const invokeLlmUrl = uri ?? "" + + // Build params for invoke LLM (with variant refs instead of environment) + const params = useMemo(() => { + const synthesized = variableNames.map((name) => ({name, input: name === "messages"})) + + const mainParams: Record = {} + const secondaryParams: Record = {} + + synthesized.forEach((item) => { + if (item.input) { + mainParams[item.name] = "add_a_value" + } else { + secondaryParams[item.name] = "add_a_value" + } + }) + + const hasMessagesParam = synthesized.some((p) => p?.name === "messages") + const isChat = currentApp?.app_type === "chat" || hasMessagesParam + if (isChat) { + mainParams["messages"] = [ + { + role: "user", + content: "", + }, + ] + mainParams["inputs"] = secondaryParams + } else if (Object.keys(secondaryParams).length > 0) { + mainParams["inputs"] = secondaryParams + } + + // Use variant refs instead of environment + mainParams["app"] = appSlug + mainParams["variant_slug"] = variantSlug + mainParams["variant_version"] = variantVersion + + return JSON.stringify(mainParams, null, 2) + }, [variableNames, currentApp?.app_type, appSlug, variantSlug, variantVersion]) + + const fetchConfigCodeSnippet = useMemo( () => ({ python: buildPythonSnippet(appSlug, variantSlug, variantVersion), typescript: buildTypescriptSnippet(appSlug, variantSlug, variantVersion, apiKey), @@ -135,48 +182,48 @@ const VariantUseApiContent = ({initialRevisionId}: VariantUseApiContentProps) => [apiKey, appSlug, variantSlug, variantVersion], ) - const renderTabChildren = useCallback(() => { - const activeSnippet = codeSnippets[selectedLang as keyof CodeSnippets] + const invokeLlmAppCodeSnippet = useMemo( + () => ({ + python: invokeLlmApppythonCode(invokeLlmUrl, params, apiKeyValue || "x.xxxxxxxx"), + bash: invokeLlmAppcURLCode(invokeLlmUrl, params, apiKeyValue || "x.xxxxxxxx"), + typescript: invokeLlmApptsCode(invokeLlmUrl, params, apiKeyValue || "x.xxxxxxxx"), + }), + [apiKeyValue, invokeLlmUrl, params], + ) + const renderTabChildren = useCallback(() => { return ( -
-
- Use API - -
- -
+ + {}} + invokeLlmUrl={invokeLlmUrl} + /> + ) - }, [ - apiKeyValue, - codeSnippets, - revisionList, - selectedLang, - selectedRevision?.id, - selectedRevision?.isLatestRevision, - selectedRevision?.revision, - selectedRevisionId, - ]) + }, [fetchConfigCodeSnippet, invokeLlmAppCodeSnippet, invokeLlmUrl, isLoading, selectedLang]) const tabItems = useMemo( () => [ { key: "python", label: "Python", - icon: , children: renderTabChildren(), + icon: , }, { key: "typescript", label: "TypeScript", - icon: , children: renderTabChildren(), + icon: , }, { key: "bash", label: "cURL", - icon: , children: renderTabChildren(), + icon: , }, ], [renderTabChildren], @@ -221,10 +268,10 @@ const VariantUseApiContent = ({initialRevisionId}: VariantUseApiContentProps) =>
) diff --git a/web/oss/src/components/DeploymentsDashboard/components/Drawer/index.tsx b/web/oss/src/components/DeploymentsDashboard/components/Drawer/index.tsx index 3e83ed37b6..d688a165d6 100644 --- a/web/oss/src/components/DeploymentsDashboard/components/Drawer/index.tsx +++ b/web/oss/src/components/DeploymentsDashboard/components/Drawer/index.tsx @@ -41,10 +41,11 @@ const useStyles = createUseStyles((theme: JSSTheme) => ({ }, })) -interface DeploymentsDrawerTitleProps extends Pick< - DeploymentsDrawerProps, - "onClose" | "expandable" | "initialWidth" | "selectedRevisionId" -> { +interface DeploymentsDrawerTitleProps + extends Pick< + DeploymentsDrawerProps, + "onClose" | "expandable" | "initialWidth" | "selectedRevisionId" + > { drawerWidth: number setDrawerWidth: (width: number) => void envName?: string diff --git a/web/oss/src/components/DeploymentsDashboard/components/Modal/SelectDeployVariantModal.tsx b/web/oss/src/components/DeploymentsDashboard/components/Modal/SelectDeployVariantModal.tsx index 3193e46c6b..807b5d2749 100644 --- a/web/oss/src/components/DeploymentsDashboard/components/Modal/SelectDeployVariantModal.tsx +++ b/web/oss/src/components/DeploymentsDashboard/components/Modal/SelectDeployVariantModal.tsx @@ -26,7 +26,7 @@ const useStyles = createUseStyles((theme: JSSTheme) => ({ textTransform: "capitalize", }, container: { - "& .ant-modal-body": { + "& .ant-modal-container": { height: 600, overflow: "auto", }, @@ -86,6 +86,7 @@ const SelectDeployVariantModal = ({ }} okText="Deploy" width={1200} + height={600} className={classes.container} {...props} > diff --git a/web/oss/src/components/DrillInView/DrillInBreadcrumb.tsx b/web/oss/src/components/DrillInView/DrillInBreadcrumb.tsx index 55b5a4989a..3daf9a427b 100644 --- a/web/oss/src/components/DrillInView/DrillInBreadcrumb.tsx +++ b/web/oss/src/components/DrillInView/DrillInBreadcrumb.tsx @@ -1,7 +1,7 @@ -import {memo, type ReactNode} from "react" +import {memo, type ReactNode, useMemo} from "react" -import {ArrowLeft, CaretRight} from "@phosphor-icons/react" -import {Button} from "antd" +import {ArrowLeft, CaretRight, DotsThree} from "@phosphor-icons/react" +import {Button, Dropdown} from "antd" export interface DrillInBreadcrumbProps { /** Current navigation path */ @@ -18,11 +18,33 @@ export interface DrillInBreadcrumbProps { prefix?: ReactNode /** Whether to show the back arrow button (default: true) */ showBackArrow?: boolean + /** Maximum visible segments before truncation (default: 4) */ + maxVisibleSegments?: number +} + +/** + * Format a path segment for display + * Converts numeric indices to human-friendly names based on parent key + * e.g., "0" after "messages" becomes "Message 1" + */ +function formatSegment(segment: string, parentSegment?: string): string { + // Check if this is a numeric index + const numericIndex = parseInt(segment, 10) + if (!isNaN(numericIndex) && String(numericIndex) === segment) { + // Get singular name from parent key + const parentKey = parentSegment || "" + const singularName = parentKey.endsWith("s") ? parentKey.slice(0, -1) : parentKey || "Item" + // Capitalize first letter + const displayName = singularName.charAt(0).toUpperCase() + singularName.slice(1) + return `${displayName} ${numericIndex + 1}` + } + return segment } /** * Reusable breadcrumb navigation component for drill-in views * Used by TestcaseEditDrawer and TraceDataDrillIn + * Supports smart truncation for long paths: root > ... 
> prevKey > currentKey */ const DrillInBreadcrumb = memo( ({ @@ -33,42 +55,116 @@ const DrillInBreadcrumb = memo( renderRoot, prefix, showBackArrow = true, + maxVisibleSegments = 3, }: DrillInBreadcrumbProps) => { + // Calculate which segments to show and which to hide + const {visibleSegments, hiddenSegments, showEllipsis} = useMemo(() => { + if (currentPath.length <= maxVisibleSegments) { + return { + visibleSegments: currentPath.map((seg, i) => ({ + segment: seg, + originalIndex: i, + })), + hiddenSegments: [], + showEllipsis: false, + } + } + + // Show only the last (maxVisibleSegments) segments, hide the rest + const lastSegments = currentPath.slice(-maxVisibleSegments).map((seg, i) => ({ + segment: seg, + originalIndex: currentPath.length - maxVisibleSegments + i, + })) + const hidden = currentPath.slice(0, -maxVisibleSegments).map((seg, i) => ({ + segment: seg, + originalIndex: i, + })) + + return { + visibleSegments: lastSegments, + hiddenSegments: hidden, + showEllipsis: true, + } + }, [currentPath, maxVisibleSegments]) + + // Build dropdown menu items for hidden segments + const dropdownItems = useMemo( + () => + hiddenSegments.map(({segment, originalIndex}) => { + const parentSegment = + originalIndex > 0 ? currentPath[originalIndex - 1] : undefined + const displaySegment = formatSegment(segment, parentSegment) + return { + key: String(originalIndex), + label: displaySegment, + onClick: () => onNavigateToIndex(originalIndex + 1), + } + }), + [hiddenSegments, currentPath, onNavigateToIndex], + ) + return ( -
- {prefix} - {showBackArrow && currentPath.length > 0 && ( - - )} - {currentPath.map((segment, index) => ( -
- +
+ {/* Fixed prefix (span navigation) - doesn't scroll */} + {prefix &&
{prefix}
} + + {/* Breadcrumb path */} +
+ {showBackArrow && currentPath.length > 0 && ( + -
- ))} + )} + + {/* Ellipsis dropdown for hidden segments - shown right after root */} + {showEllipsis && hiddenSegments.length > 0 && ( +
+ + + + +
+ )} + + {/* Visible segments (last N segments) */} + {visibleSegments.map(({segment, originalIndex}) => { + const parentSegment = + originalIndex > 0 ? currentPath[originalIndex - 1] : undefined + const displaySegment = formatSegment(segment, parentSegment) + return ( +
+ + +
+ ) + })} +
) }, diff --git a/web/oss/src/components/DrillInView/DrillInContent.tsx b/web/oss/src/components/DrillInView/DrillInContent.tsx new file mode 100644 index 0000000000..3fb57b5cd0 --- /dev/null +++ b/web/oss/src/components/DrillInView/DrillInContent.tsx @@ -0,0 +1,1079 @@ +import {type ReactNode, useCallback, useEffect, useMemo, useRef, useState} from "react" + +import {InputNumber, Select, Switch} from "antd" +import {useAtomValue} from "jotai" + +import {ChatMessageEditor, ChatMessageList} from "@/oss/components/ChatMessageEditor" +import {EditorProvider} from "@/oss/components/Editor/Editor" +import {DrillInProvider} from "@/oss/components/Editor/plugins/code/context/DrillInContext" +import {markdownViewAtom} from "@/oss/components/Editor/state/assets/atoms" +import SharedEditor from "@/oss/components/Playground/Components/SharedEditor" +import { + detectDataType, + getTextModeValue, + isChatMessageObject, + isMessagesArray, + parseMessages, + textModeToStorageValue, + type DataType, +} from "@/oss/components/TestcasesTableNew/components/TestcaseEditDrawer/fieldUtils" + +import DrillInBreadcrumb from "./DrillInBreadcrumb" +import {DrillInControls, type PropertyType} from "./DrillInControls" +import DrillInFieldHeader from "./DrillInFieldHeader" +import {EditorMarkdownToggleExposer} from "./EditorMarkdownToggleExposer" +import {JsonEditorWithLocalState} from "./JsonEditorWithLocalState" +import {canToggleRawMode} from "./utils" + +// Helper component to read markdown view state for a field +function MarkdownViewState({ + editorId, + children, +}: { + editorId: string + children: (isMarkdownView: boolean) => React.ReactNode +}) { + const isMarkdownView = useAtomValue(markdownViewAtom(editorId)) + return <>{children(isMarkdownView)} +} + +export interface PathItem { + key: string + name: string + value: unknown + /** If true, this item cannot be deleted (e.g., column definitions) */ + isColumn?: boolean +} + +export interface DrillInContentProps { + /** Function to get value at a specific path */ + getValue: (path: string[]) => unknown + /** Function to update value at a specific path */ + setValue: (path: string[], value: unknown) => void + /** Function to get root level items */ + getRootItems: () => PathItem[] + /** Root title for breadcrumb */ + rootTitle?: string + /** Optional prefix element for breadcrumb (e.g., span navigation) */ + breadcrumbPrefix?: ReactNode + /** Whether to show the back arrow in breadcrumb (default: true) */ + showBackArrow?: boolean + /** Whether editing is enabled (default: true) */ + editable?: boolean + /** Whether to show add item/property controls (default: false) */ + showAddControls?: boolean + /** Whether to show delete button for non-column items (default: false) */ + showDeleteControls?: boolean + /** Column options for mapping dropdown */ + columnOptions?: {value: string; label: string}[] + /** Callback when user wants to map a field to a column */ + onMapToColumn?: (dataPath: string, column: string) => void + /** Callback when user wants to remove a mapping */ + onUnmap?: (dataPath: string) => void + /** Map of data paths to column names (for visual indication) */ + mappedPaths?: Map + /** Path to focus/navigate to (e.g., "inputs.prompt" or "data.inputs.prompt") */ + focusPath?: string + /** Callback when focusPath has been handled */ + onFocusPathHandled?: () => void + /** Callback when a JSON property key is Cmd/Meta+clicked */ + onPropertyClick?: (fullPath: string) => void + /** How values are stored: 'string' (JSON stringified) or 'native' 
(direct types) */ + valueMode?: "string" | "native" + /** Optional header content shown above the drill-in view */ + headerContent?: ReactNode + /** Function to get default value for a property type (used when adding properties) */ + getDefaultValueForType?: (type: PropertyType) => unknown + /** Locked field types to prevent UI switching (key is path string) */ + lockedFieldTypes?: Record + /** Callback to update locked field types */ + onLockedFieldTypesChange?: (types: Record) => void + /** Initial path to start navigation at (e.g., "inputs.prompt" or ["inputs", "prompt"]) */ + initialPath?: string | string[] + /** Callback when navigation path changes */ + onPathChange?: (path: string[]) => void +} + +/** + * Reusable drill-in content component for navigating nested data structures. + * Supports both string-based storage (TestcaseEditDrawer) and native types (TraceDataDrillIn). + */ +export function DrillInContent({ + getValue, + setValue, + getRootItems, + rootTitle = "Root", + breadcrumbPrefix, + showBackArrow = true, + editable = true, + showAddControls = false, + showDeleteControls = false, + columnOptions, + onMapToColumn, + onUnmap, + mappedPaths, + focusPath, + onFocusPathHandled, + onPropertyClick, + valueMode = "native", + headerContent, + getDefaultValueForType, + lockedFieldTypes = {}, + onLockedFieldTypesChange, + initialPath, + onPathChange, +}: DrillInContentProps) { + // Parse initialPath to array format, removing rootTitle prefix if present + const parsedInitialPath = (() => { + if (!initialPath) return [] + const pathArray = typeof initialPath === "string" ? initialPath.split(".") : initialPath + // Remove the rootTitle prefix if present + const startIndex = pathArray[0] === rootTitle ? 1 : 0 + return pathArray.slice(startIndex) + })() + + const [currentPath, setCurrentPath] = useState(parsedInitialPath) + const [collapsedFields, setCollapsedFields] = useState>({}) + const [rawModeFields, setRawModeFields] = useState>({}) + + // Track markdown toggle functions per field (registered by EditorMarkdownToggleExposer) + const markdownToggleFnsRef = useRef void>>(new Map()) + + // Callback to register markdown toggle function for a field + const registerMarkdownToggle = useCallback((fieldKey: string, toggleFn: () => void) => { + markdownToggleFnsRef.current.set(fieldKey, toggleFn) + }, []) + + // Notify parent when path changes (for persistence across navigation) + useEffect(() => { + onPathChange?.(currentPath) + }, [currentPath, onPathChange]) + + // Handle focusPath - navigate directly to the clicked property path + useEffect(() => { + if (focusPath) { + // Parse the path (e.g., "data.parameters.prompt" or "parameters.prompt") + const pathParts = focusPath.split(".") + // Remove the rootTitle prefix if present + const startIndex = pathParts[0] === rootTitle ? 
1 : 0 + let targetPath = pathParts.slice(startIndex) + + // For trace span entities, the data is wrapped in "ag.data" structure + // If the focus path comes from mapping (starts with rootTitle like "data.inputs.country"), + // we need to prepend "ag.data" to navigate within the entity structure + if (startIndex > 0 && targetPath.length > 0) { + // Check if we're already inside ag.data by looking at parsedInitialPath + // If initialPath was "ag.data", we should prepend ["ag", "data"] to targetPath + if ( + parsedInitialPath.length >= 2 && + parsedInitialPath[0] === "ag" && + parsedInitialPath[1] === "data" + ) { + targetPath = ["ag", "data", ...targetPath] + } + } + + if (targetPath.length > 0) { + setCurrentPath(targetPath) + onFocusPathHandled?.() + } + } + }, [focusPath, rootTitle, onFocusPathHandled, parsedInitialPath]) + + // Navigation functions + const navigateInto = useCallback((key: string) => { + setCurrentPath((prev) => [...prev, key]) + }, []) + + const navigateBack = useCallback(() => { + setCurrentPath((prev) => prev.slice(0, -1)) + }, []) + + const navigateToIndex = useCallback((index: number) => { + setCurrentPath((prev) => prev.slice(0, index)) + }, []) + + // Toggle functions + const toggleFieldCollapse = useCallback((fieldKey: string) => { + setCollapsedFields((prev) => ({...prev, [fieldKey]: !prev[fieldKey]})) + }, []) + + const toggleRawMode = useCallback((fieldKey: string) => { + setRawModeFields((prev) => ({...prev, [fieldKey]: !prev[fieldKey]})) + }, []) + + // Get current value at path + const currentValue = useMemo(() => { + return getValue(currentPath) + }, [currentPath, getValue]) + + // Convert value to string for data type detection and editing + const valueToString = useCallback( + (value: unknown): string => { + if (valueMode === "string") { + // Already a string + return value as string + } + // Native mode - stringify if needed + if (typeof value === "string") return value + return JSON.stringify(value, null, 2) + }, + [valueMode], + ) + + // Parse string value back to native type + const _stringToValue = useCallback( + (str: string): unknown => { + if (valueMode === "string") { + // Keep as string + return str + } + // Native mode - try to parse + try { + return JSON.parse(str) + } catch { + return str + } + }, + [valueMode], + ) + + // Get current level items + const currentLevelItems = useMemo((): PathItem[] => { + if (currentPath.length === 0) { + return getRootItems() + } + + const value = currentValue + + // Handle undefined + if (value === undefined) { + return [] + } + + // For string mode, need to parse the value + if (valueMode === "string") { + const strValue = value as string + if (!strValue) return [] + + try { + const parsed = JSON.parse(strValue) + if (Array.isArray(parsed)) { + return parsed.map((item, index) => ({ + key: String(index), + name: `Item ${index + 1}`, + value: typeof item === "string" ? item : JSON.stringify(item, null, 2), + isColumn: false, + })) + } else if (typeof parsed === "object" && parsed !== null) { + return Object.keys(parsed) + .sort() + .map((key) => ({ + key, + name: key, + value: + typeof parsed[key] === "string" + ? 
parsed[key] + : JSON.stringify(parsed[key], null, 2), + isColumn: false, + })) + } + // Primitive - show as single item + const fieldName = currentPath[currentPath.length - 1] || "value" + return [{key: fieldName, name: fieldName, value: strValue, isColumn: false}] + } catch { + // Not valid JSON - treat as primitive string + const fieldName = currentPath[currentPath.length - 1] || "value" + return [{key: fieldName, name: fieldName, value: strValue, isColumn: false}] + } + } + + // Native mode + if (value === null) { + const fieldName = currentPath[currentPath.length - 1] || "value" + return [{key: fieldName, name: fieldName, value: null, isColumn: false}] + } + + if (Array.isArray(value)) { + const parentKey = currentPath[currentPath.length - 1] || "" + const singularName = parentKey.endsWith("s") + ? parentKey.slice(0, -1) + : parentKey || "Item" + const displayName = singularName.charAt(0).toUpperCase() + singularName.slice(1) + + return value.map((item, index) => ({ + key: String(index), + name: `${displayName} ${index + 1}`, + value: item, + isColumn: false, + })) + } + + if (typeof value === "object") { + return Object.keys(value) + .sort() + .map((key) => ({ + key, + name: key, + value: (value as Record)[key], + isColumn: false, + })) + } + + // Check if string value contains JSON (stringified JSON in native mode) + if (typeof value === "string") { + try { + const parsed = JSON.parse(value) + if (Array.isArray(parsed)) { + const parentKey = currentPath[currentPath.length - 1] || "" + const singularName = parentKey.endsWith("s") + ? parentKey.slice(0, -1) + : parentKey || "Item" + const displayName = singularName.charAt(0).toUpperCase() + singularName.slice(1) + + return parsed.map((item, index) => ({ + key: String(index), + name: `${displayName} ${index + 1}`, + value: item, + isColumn: false, + })) + } else if (typeof parsed === "object" && parsed !== null) { + return Object.keys(parsed) + .sort() + .map((key) => ({ + key, + name: key, + value: parsed[key], + isColumn: false, + })) + } + } catch { + // Not valid JSON, treat as primitive string + } + } + + // Primitive value + const fieldName = currentPath[currentPath.length - 1] || "value" + return [{key: fieldName, name: fieldName, value: value, isColumn: false}] + }, [currentPath, currentValue, getRootItems, valueMode]) + + // Check if a value is expandable + const isExpandable = useCallback( + (value: unknown): boolean => { + const strValue = valueToString(value) + try { + const parsed = JSON.parse(strValue) + return ( + (Array.isArray(parsed) && parsed.length > 0) || + (typeof parsed === "object" && + parsed !== null && + Object.keys(parsed).length > 0) + ) + } catch { + return false + } + }, + [valueToString], + ) + + // Get item count for arrays/objects + const getItemCount = useCallback( + (value: unknown): string => { + const strValue = valueToString(value) + try { + const parsed = JSON.parse(strValue) + if (Array.isArray(parsed)) return `${parsed.length} items` + if (typeof parsed === "object" && parsed !== null) + return `${Object.keys(parsed).length} properties` + } catch { + // Not JSON + } + return "" + }, + [valueToString], + ) + + // Get current path data type (for add controls) + const currentPathDataType = useMemo((): "array" | "object" | "root" | null => { + if (currentPath.length === 0) return "root" + const value = currentValue + if (value == null) return null + + const strValue = valueToString(value) + try { + const parsed = JSON.parse(strValue) + if (Array.isArray(parsed)) return "array" + if (typeof parsed 
=== "object" && parsed !== null) return "object" + } catch { + // Not JSON + } + return null + }, [currentPath, currentValue, valueToString]) + + // Add array item + const addArrayItem = useCallback(() => { + if (currentPath.length === 0) return + const value = currentValue + if (value == null) return + + const strValue = valueToString(value) + try { + const parsed = JSON.parse(strValue) + if (Array.isArray(parsed)) { + const updated = [...parsed, ""] + setValue( + currentPath, + valueMode === "string" ? JSON.stringify(updated, null, 2) : updated, + ) + } + } catch { + // Not valid JSON + } + }, [currentPath, currentValue, valueToString, setValue, valueMode]) + + // Add object property + const addObjectProperty = useCallback( + (propertyName: string, propertyType: PropertyType) => { + if (currentPath.length === 0) return + const value = currentValue + if (value == null) return + + const strValue = valueToString(value) + try { + const parsed = JSON.parse(strValue) + if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) { + // Get default value for type + const defaultValue = + getDefaultValueForType?.(propertyType) ?? getDefaultValue(propertyType) + const updated = {...parsed, [propertyName]: defaultValue} + setValue( + currentPath, + valueMode === "string" ? JSON.stringify(updated, null, 2) : updated, + ) + + // Lock the type for this new property + if (onLockedFieldTypesChange) { + const newFieldPath = [...currentPath, propertyName].join(".") + onLockedFieldTypesChange({ + ...lockedFieldTypes, + [newFieldPath]: propertyTypeToDataType(propertyType), + }) + } + } + } catch { + // Not valid JSON + } + }, + [ + currentPath, + currentValue, + valueToString, + setValue, + valueMode, + getDefaultValueForType, + lockedFieldTypes, + onLockedFieldTypesChange, + ], + ) + + // Delete item + const deleteItem = useCallback( + (itemKey: string) => { + const parentPath = currentPath + const parentValue = getValue(parentPath) + if (parentValue == null) return + + const strValue = valueToString(parentValue) + try { + const parsed = JSON.parse(strValue) + if (Array.isArray(parsed)) { + const index = parseInt(itemKey, 10) + const updated = parsed.filter((_, i) => i !== index) + setValue(parentPath, valueMode === "string" ? JSON.stringify(updated) : updated) + } else if (typeof parsed === "object" && parsed !== null) { + const {[itemKey]: _, ...rest} = parsed + setValue(parentPath, valueMode === "string" ? JSON.stringify(rest) : rest) + } + } catch { + // Ignore + } + }, + [currentPath, getValue, valueToString, setValue, valueMode], + ) + + // Drill-in is always enabled for navigation within the view + // Property click allows drilling into nested JSON properties + const drillInEnabled = true + + return ( + +
+ {/* Optional header content */} + {headerContent} + + {/* Breadcrumb navigation and add controls */} +
+
+
+ +
+ {showAddControls && ( + + )} +
+
+ + {/* Current level items */} + {currentLevelItems.length === 0 && ( +
No items to display
+ )} + +
+ {currentLevelItems.map((item) => { + const fieldKey = `${currentPath.join(".")}.${item.key}` + // When drilling into a primitive, currentPath already contains the full path + // and item.key is just the last segment (duplicate). Use currentPath directly. + // IMPORTANT: Must verify value is actually primitive, not just matching key name + // (e.g., nested objects like inputs.inputs should NOT trigger this) + const isDrilledPrimitive = + currentPath.length > 0 && + currentPath[currentPath.length - 1] === item.key && + currentLevelItems.length === 1 && + !isExpandable(item.value) + const fullPath = isDrilledPrimitive + ? currentPath + : [...currentPath, item.key] + const stringValue = valueToString(item.value) + + // Use locked type if available, otherwise detect from value + const dataType = lockedFieldTypes[fieldKey] ?? detectDataType(stringValue) + const isRawMode = rawModeFields[fieldKey] ?? false + const isCollapsed = collapsedFields[fieldKey] ?? false + const expandable = isExpandable(item.value) + const itemCount = getItemCount(item.value) + const showRawToggle = editable && canToggleRawMode(dataType) + + // Build full data path for mapping + // Skip "ag.data" prefix if present (trace span internal structure) + let pathForMapping = fullPath + let dataPath = "" + let checkPathForNested = "" + + if ( + fullPath.length >= 2 && + fullPath[0] === "ag" && + fullPath[1] === "data" + ) { + // Inside ag.data structure - skip the wrapper + pathForMapping = fullPath.slice(2) + dataPath = [rootTitle, ...pathForMapping].join(".") + checkPathForNested = dataPath + } else if (fullPath.length === 1 && fullPath[0] === "ag") { + // At the ag wrapper itself - check for any mappings under "data." + dataPath = [rootTitle, "ag"].join(".") + checkPathForNested = rootTitle + } else { + // Normal path + dataPath = [rootTitle, ...pathForMapping].join(".") + checkPathForNested = dataPath + } + + const mappedColumn = mappedPaths?.get(dataPath) + const isMapped = !!mappedColumn + + // Count nested mappings + const nestedMappingCount = expandable + ? Array.from(mappedPaths?.keys() || []).filter( + (path) => + path.startsWith(checkPathForNested + ".") && + path !== checkPathForNested, + ).length + : 0 + + // Determine if markdown toggle should be shown (only for string fields) + const showMarkdownToggle = + !expandable && (dataType === "string" || dataType === "null") + const editorId = `drill-field-${fieldKey}` + + return ( +
+ {/* Field header - wrap with markdown state if showing toggle */} + {showMarkdownToggle ? ( + + {(isMarkdownView) => ( + + toggleFieldCollapse(fieldKey) + } + itemCount={itemCount} + expandable={expandable} + onDrillIn={ + expandable + ? () => navigateInto(item.key) + : undefined + } + showRawToggle={showRawToggle} + isRawMode={isRawMode} + onToggleRawMode={ + showRawToggle + ? () => toggleRawMode(fieldKey) + : undefined + } + showDelete={showDeleteControls && !item.isColumn} + onDelete={ + showDeleteControls && !item.isColumn + ? () => deleteItem(item.key) + : undefined + } + alwaysShowCopy={false} + columnOptions={columnOptions} + onMapToColumn={ + onMapToColumn + ? (column: string) => + onMapToColumn(dataPath, column) + : undefined + } + onUnmap={ + onUnmap ? () => onUnmap(dataPath) : undefined + } + isMapped={isMapped} + mappedColumn={mappedColumn} + nestedMappingCount={nestedMappingCount} + showMarkdownToggle={showMarkdownToggle} + isMarkdownView={isMarkdownView} + onToggleMarkdownView={() => { + const fn = + markdownToggleFnsRef.current.get(fieldKey) + if (fn) fn() + }} + /> + )} + + ) : ( + toggleFieldCollapse(fieldKey)} + itemCount={itemCount} + expandable={expandable} + onDrillIn={ + expandable ? () => navigateInto(item.key) : undefined + } + showRawToggle={showRawToggle} + isRawMode={isRawMode} + onToggleRawMode={ + showRawToggle + ? () => toggleRawMode(fieldKey) + : undefined + } + showDelete={showDeleteControls && !item.isColumn} + onDelete={ + showDeleteControls && !item.isColumn + ? () => deleteItem(item.key) + : undefined + } + alwaysShowCopy={false} + columnOptions={columnOptions} + onMapToColumn={ + onMapToColumn + ? (column: string) => + onMapToColumn(dataPath, column) + : undefined + } + onUnmap={onUnmap ? () => onUnmap(dataPath) : undefined} + isMapped={isMapped} + mappedColumn={mappedColumn} + nestedMappingCount={nestedMappingCount} + /> + )} + + {/* Field content - collapsible */} + {!isCollapsed && ( +
+ {renderFieldContent({ + item, + stringValue, + dataType, + isRawMode, + fullPath, + fieldKey, + editable, + setValue, + valueMode, + setCurrentPath, + registerMarkdownToggle, + })} +
+ )} +
+ ) + })} +
+
+
+ ) +} + +// Helper functions + +function getDefaultValue(type: PropertyType): unknown { + switch (type) { + case "string": + return "" + case "number": + return 0 + case "boolean": + return false + case "object": + return {} + case "array": + return [] + default: + return "" + } +} + +function propertyTypeToDataType(propType: PropertyType): DataType { + switch (propType) { + case "string": + return "string" + case "number": + return "number" + case "boolean": + return "boolean" + case "object": + return "json-object" + case "array": + return "json-array" + default: + return "string" + } +} + +interface RenderFieldContentProps { + item: PathItem + stringValue: string + dataType: DataType + isRawMode: boolean + fullPath: string[] + fieldKey: string + editable: boolean + setValue: (path: string[], value: unknown) => void + valueMode: "string" | "native" + setCurrentPath: (path: string[]) => void + registerMarkdownToggle: (fieldKey: string, toggleFn: () => void) => void +} + +function renderFieldContent({ + item, + stringValue, + dataType, + isRawMode, + fullPath, + fieldKey, + editable, + setValue, + valueMode, + setCurrentPath, + registerMarkdownToggle, +}: RenderFieldContentProps) { + if (!editable) { + // Read-only preview + return ( +
+                {stringValue}
+            
+ ) + } + + if (isRawMode) { + // Raw mode - read-only view showing the value in its storage format + // If original was a JSON string (escaped), show escaped format + // If original was a nested object, show JSON editor view (read-only) + const originalWasString = typeof item.value === "string" + + // For nested objects/arrays (not originally strings), use JSON editor (read-only) + if ( + !originalWasString && + (dataType === "json-object" || dataType === "json-array" || dataType === "messages") + ) { + return ( + {}} + readOnly + /> + ) + } + + // For string-encoded JSON, show escaped format (read-only) + // For primitives, behavior depends on whether we're in string mode (stringified JSON structure) + let rawValue = stringValue + + if ( + originalWasString && + (dataType === "json-object" || dataType === "json-array" || dataType === "messages") + ) { + // String-encoded JSON: show as escaped string literal + try { + const parsed = JSON.parse(stringValue) + const compactJson = JSON.stringify(parsed) + rawValue = JSON.stringify(compactJson) // Escape to show as string literal + } catch { + // If parsing fails, use stringValue as-is + } + } else if (dataType === "string") { + if (valueMode === "string") { + // Part of stringified JSON structure: show double-escaped + // "system" -> "\"system\"" (shows how it appears in the JSON string) + const withQuotes = JSON.stringify(stringValue) + rawValue = JSON.stringify(withQuotes) + } else { + // Native mode: just show with quotes + // "system" -> "system" + rawValue = JSON.stringify(stringValue) + } + } + // Numbers and booleans stay as-is (no escaping needed) + + return ( +
+                {rawValue}
+            
+ ) + } + + // Type-specific rendering + // Note: "null" dataType is handled by the string editor at the end (using empty string) + // to avoid focus loss when null becomes a string value + + if (dataType === "boolean") { + const boolValue = + valueMode === "string" ? JSON.parse(stringValue) === true : item.value === true + return ( +
+ { + // Only stringify if editing a top-level column + // For nested fields, pass native value to preserve types in JSON + const value = + valueMode === "string" && fullPath.length === 1 + ? JSON.stringify(checked) + : checked + setValue(fullPath, value) + }} + /> + {boolValue ? "true" : "false"} +
+ ) + } + + if (dataType === "number") { + const numValue = valueMode === "string" ? JSON.parse(stringValue) : (item.value as number) + return ( + { + // Only stringify if editing a top-level column + // For nested fields, pass native value to preserve types in JSON + const finalValue = + valueMode === "string" && fullPath.length === 1 + ? JSON.stringify(value ?? 0) + : (value ?? 0) + setValue(fullPath, finalValue) + }} + className="w-full" + size="middle" + /> + ) + } + + if (dataType === "json-array") { + const arrayItems = JSON.parse(stringValue) as unknown[] + const originalWasString = typeof item.value === "string" + + const getPreview = (arrItem: unknown) => + typeof arrItem === "string" + ? arrItem.length > 60 + ? arrItem.substring(0, 60) + "..." + : arrItem + : typeof arrItem === "object" && arrItem !== null + ? JSON.stringify(arrItem).substring(0, 60) + "..." + : String(arrItem) + + return ( +
+ {/* Navigation select for drilling into items */} + {arrayItems.length > 0 && ( + setNewPropertyName(e.target.value)} + placeholder="Property name" + size="middle" + className="flex-1" + autoFocus + onKeyDown={(e) => { + if (e.key === "Enter") { + handleAddProperty() + } else if (e.key === "Escape") { + handleCancelAddProperty() + } + }} + /> + onItemChange?.(value)} + options={items} + popupMatchSelectWidth={false} + /> + / +
+ ) + }, [items, selectedItemId, entityId, onItemChange]) + + return ( +
+ {/* Header with toggle and actions */} +
+
+ {headerContent} + {showDirtyBadge && isDirty && ( + + edited + + )} +
+
+ {showViewToggle && ( + setEditMode(value as EditMode)} + options={[ + { + value: "fields", + icon: , + }, + { + value: "json", + icon: , + }, + ]} + /> + )} + {showRevertButton && isDirty && ( + +
+
+ + {/* Content */} + {editMode === "fields" ? ( + + ) : ( +
+ +
+ )} +
+ ) +} + +export const EntityDualViewEditor = memo( + EntityDualViewEditorInner, +) as typeof EntityDualViewEditorInner diff --git a/web/oss/src/components/DrillInView/JsonEditorWithLocalState.tsx b/web/oss/src/components/DrillInView/JsonEditorWithLocalState.tsx new file mode 100644 index 0000000000..67ac12006d --- /dev/null +++ b/web/oss/src/components/DrillInView/JsonEditorWithLocalState.tsx @@ -0,0 +1,73 @@ +import {useCallback, useEffect, useState} from "react" + +import {EditorProvider} from "@/oss/components/Editor/Editor" +import {DrillInProvider} from "@/oss/components/Editor/plugins/code/context/DrillInContext" +import SharedEditor from "@/oss/components/Playground/Components/SharedEditor" + +interface JsonEditorWithLocalStateProps { + initialValue: string + onValidChange: (value: string) => void + editorKey: string + /** Callback when a JSON property key is clicked */ + onPropertyClick?: (path: string) => void + /** Make the editor read-only */ + readOnly?: boolean +} + +/** + * JSON Editor wrapper that manages local state to prevent breaking on invalid JSON. + * Shows validation errors in real-time but only calls onValidChange when JSON is valid. + */ +export function JsonEditorWithLocalState({ + initialValue, + onValidChange, + editorKey, + onPropertyClick, + readOnly, +}: JsonEditorWithLocalStateProps) { + const [localValue, setLocalValue] = useState(initialValue) + + // Sync local value when initialValue changes (e.g., when toggling raw mode) + useEffect(() => { + setLocalValue(initialValue) + }, [initialValue]) + + const handleChange = useCallback( + (value: string) => { + setLocalValue(value) + try { + JSON.parse(value) + onValidChange(value) + } catch { + // Invalid JSON - keep local state but don't sync to parent + } + }, + [onValidChange], + ) + + return ( + + + + + + ) +} diff --git a/web/oss/src/components/DrillInView/TestcaseDrillInView.tsx b/web/oss/src/components/DrillInView/TestcaseDrillInView.tsx new file mode 100644 index 0000000000..94705991c7 --- /dev/null +++ b/web/oss/src/components/DrillInView/TestcaseDrillInView.tsx @@ -0,0 +1,66 @@ +import {memo} from "react" + +import {testcase, type TestcaseColumn} from "@/oss/state/entities/testcase" +import type {Column} from "@/oss/state/entities/testcase/columnState" + +import type {DrillInContentProps} from "./DrillInContent" +import {EntityDrillInView} from "./EntityDrillInView" + +// Re-export TestcaseColumn for convenience +export type {TestcaseColumn} from "@/oss/state/entities/testcase" + +// ============================================================================ +// TYPES +// ============================================================================ + +export interface TestcaseDrillInViewProps extends Omit< + DrillInContentProps, + "getValue" | "setValue" | "getRootItems" | "valueMode" +> { + /** Testcase ID to display/edit */ + testcaseId: string + /** Column definitions for the testcase (determines what fields to show) */ + columns: Column[] | TestcaseColumn[] +} + +// ============================================================================ +// COMPONENT +// ============================================================================ + +/** + * Drill-in viewer for testcase data. + * + * Uses the unified testcase entity API for all state management. + * This is a thin wrapper that passes the testcase controller to EntityDrillInView. 
+ * + * @example + * ```tsx + * + * ``` + */ +export const TestcaseDrillInView = memo( + ({testcaseId, columns, ...drillInProps}: TestcaseDrillInViewProps) => { + // Type assertion needed because testcase.drillIn is optional in the general type + // but we know it's configured for the testcase entity + const entityWithDrillIn = testcase as typeof testcase & { + drillIn: NonNullable + } + + return ( + + ) + }, +) + +TestcaseDrillInView.displayName = "TestcaseDrillInView" diff --git a/web/oss/src/components/DrillInView/TraceSpanDrillInView.tsx b/web/oss/src/components/DrillInView/TraceSpanDrillInView.tsx new file mode 100644 index 0000000000..17062b882e --- /dev/null +++ b/web/oss/src/components/DrillInView/TraceSpanDrillInView.tsx @@ -0,0 +1,124 @@ +import {memo, type ReactNode} from "react" + +import {traceSpan} from "@/oss/state/entities/trace" + +import type {DrillInContentProps} from "./DrillInContent" +import {EntityDrillInView} from "./EntityDrillInView" + +// ============================================================================ +// TYPES +// ============================================================================ + +export interface TraceSpanDrillInViewProps extends Omit< + DrillInContentProps, + "getValue" | "setValue" | "getRootItems" | "valueMode" +> { + /** The span ID to display */ + spanId: string + /** Optional title for the root level */ + title?: string + /** Optional prefix element for breadcrumb (e.g., span navigation) */ + breadcrumbPrefix?: ReactNode + /** Whether to show the back arrow in breadcrumb (default: true) */ + showBackArrow?: boolean + /** Whether editing is enabled (default: false for traces) */ + editable?: boolean + /** Column options for mapping dropdown */ + columnOptions?: {value: string; label: string}[] + /** Callback when user wants to map a field to a column */ + onMapToColumn?: (dataPath: string, column: string) => void + /** Callback when user wants to remove a mapping */ + onUnmap?: (dataPath: string) => void + /** Map of data paths to column names (for visual indication) */ + mappedPaths?: Map + /** Path to focus/navigate to (e.g., "data.inputs.prompt") */ + focusPath?: string + /** Callback when focusPath has been handled */ + onFocusPathHandled?: () => void + /** Callback when a JSON property key is Cmd/Meta+clicked */ + onPropertyClick?: (path: string) => void + /** Initial path to start navigation at */ + initialPath?: string | string[] +} + +// ============================================================================ +// COMPONENT +// ============================================================================ + +/** + * Drill-in viewer for trace span data. + * + * Uses the unified traceSpan entity API for all state management. + * This is a thin wrapper that passes the trace controller to EntityDrillInView. 
+ * + * Default behavior for traces: + * - Read-only (editable=false) + * - No add/delete controls + * - Root title is "data" + * + * @example + * ```tsx + * // Read-only trace viewing with column mapping + * + * + * // Editable trace + * + * ``` + */ +export const TraceSpanDrillInView = memo( + ({ + spanId, + title = "data", + breadcrumbPrefix, + showBackArrow = true, + editable = false, + columnOptions, + onMapToColumn, + onUnmap, + mappedPaths, + focusPath, + onFocusPathHandled, + onPropertyClick, + initialPath, + }: TraceSpanDrillInViewProps) => { + // Type assertion needed because traceSpan.drillIn is optional in the general type + // but we know it's configured for the trace entity + const entityWithDrillIn = traceSpan as typeof traceSpan & { + drillIn: NonNullable + } + + return ( + + ) + }, +) + +TraceSpanDrillInView.displayName = "TraceSpanDrillInView" diff --git a/web/oss/src/components/DrillInView/index.ts b/web/oss/src/components/DrillInView/index.ts index 91f9e5463c..4b0670d842 100644 --- a/web/oss/src/components/DrillInView/index.ts +++ b/web/oss/src/components/DrillInView/index.ts @@ -2,3 +2,17 @@ export {default as DrillInFieldHeader} from "./DrillInFieldHeader" export type {DrillInFieldHeaderProps} from "./DrillInFieldHeader" export {default as DrillInBreadcrumb} from "./DrillInBreadcrumb" export type {DrillInBreadcrumbProps} from "./DrillInBreadcrumb" +export {DrillInControls} from "./DrillInControls" +export type {DrillInControlsProps, PropertyType} from "./DrillInControls" +export {DrillInContent} from "./DrillInContent" +export type {DrillInContentProps} from "./DrillInContent" +export {JsonEditorWithLocalState} from "./JsonEditorWithLocalState" +export {canToggleRawMode} from "./utils" +export {EntityDrillInView} from "./EntityDrillInView" +export type {EntityDrillInViewProps, PathItem} from "./EntityDrillInView" +export {TestcaseDrillInView} from "./TestcaseDrillInView" +export type {TestcaseDrillInViewProps} from "./TestcaseDrillInView" +export {TraceSpanDrillInView} from "./TraceSpanDrillInView" +export type {TraceSpanDrillInViewProps} from "./TraceSpanDrillInView" +export {EntityDualViewEditor} from "./EntityDualViewEditor" +export type {EntityDualViewEditorProps} from "./EntityDualViewEditor" diff --git a/web/oss/src/components/DrillInView/utils.ts b/web/oss/src/components/DrillInView/utils.ts new file mode 100644 index 0000000000..4d87d3a3b1 --- /dev/null +++ b/web/oss/src/components/DrillInView/utils.ts @@ -0,0 +1,24 @@ +import type {DataType} from "@/oss/components/TestcasesTableNew/components/TestcaseEditDrawer/fieldUtils" + +/** + * Determines if a data type supports raw mode toggle. + * Raw mode shows the stringified JSON representation instead of a formatted/specialized view. 
+ * + * Data types that support raw mode: + * - string: Can toggle between text editor and JSON string view + * - messages: Can toggle between chat message list and raw JSON + * - json-object: Can toggle between formatted JSON and raw stringified view + * - json-array: Can toggle between formatted JSON and raw stringified view + * - boolean: Can toggle between switch and JSON primitive view + * - number: Can toggle between number input and JSON primitive view + */ +export function canToggleRawMode(dataType: DataType): boolean { + return ( + dataType === "string" || + dataType === "messages" || + dataType === "json-object" || + dataType === "json-array" || + dataType === "boolean" || + dataType === "number" + ) +} diff --git a/web/oss/src/components/Editor/Editor.tsx b/web/oss/src/components/Editor/Editor.tsx index b0c8245424..eb67d87c6b 100644 --- a/web/oss/src/components/Editor/Editor.tsx +++ b/web/oss/src/components/Editor/Editor.tsx @@ -83,6 +83,8 @@ const EditorInner = forwardRef( tokens = [], additionalCodePlugins = [], showLineNumbers = true, + onPropertyClick, + disableLongText, ...rest }: EditorProps, ref, @@ -359,6 +361,8 @@ const EditorInner = forwardRef( validationSchema={validationSchema} tokens={tokens} additionalCodePlugins={additionalCodePlugins} + onPropertyClick={onPropertyClick} + disableLongText={disableLongText} /> ) : ( { const {setContainerElm, dimensions: dimension} = useEditorResize({ @@ -519,6 +524,7 @@ const Editor = ({ tokens={tokens} additionalCodePlugins={additionalCodePlugins} showLineNumbers={showLineNumbers} + onPropertyClick={onPropertyClick} /> ) : ( )} diff --git a/web/oss/src/components/Editor/hooks/useEditorConfig/index.ts b/web/oss/src/components/Editor/hooks/useEditorConfig/index.ts index 6db5834d9f..f8cf72296e 100644 --- a/web/oss/src/components/Editor/hooks/useEditorConfig/index.ts +++ b/web/oss/src/components/Editor/hooks/useEditorConfig/index.ts @@ -50,6 +50,7 @@ const useEditorConfig = ({ import("../../plugins/code/nodes/CodeBlockErrorIndicatorNode"), import("../../plugins/code/nodes/CodeTabNode"), import("../../plugins/code/nodes/Base64Node"), + import("../../plugins/code/nodes/LongTextNode"), ]) initialNodes.push( @@ -60,6 +61,7 @@ const useEditorConfig = ({ initialNodesPromises[3].CodeBlockErrorIndicatorNode, initialNodesPromises[4].CodeTabNode, initialNodesPromises[5].Base64Node, + initialNodesPromises[6].LongTextNode, ], ) } else { diff --git a/web/oss/src/components/Editor/plugins/code/context/DrillInContext.tsx b/web/oss/src/components/Editor/plugins/code/context/DrillInContext.tsx new file mode 100644 index 0000000000..74d45b937f --- /dev/null +++ b/web/oss/src/components/Editor/plugins/code/context/DrillInContext.tsx @@ -0,0 +1,20 @@ +/** + * DrillInContext.tsx + * + * React Context for providing drill-in capability to nested components like LongTextNode. + * This allows LongTextNode to know whether drill-in is enabled without prop drilling. 
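+ *
+ * @example
+ * A minimal wiring sketch. The `EditorContent` child is hypothetical;
+ * `DrillInProvider` and `useDrillInContext` are the exports defined below.
+ * ```tsx
+ * <DrillInProvider value={{enabled: true}}>
+ *     <EditorContent />
+ * </DrillInProvider>
+ *
+ * // Inside any descendant (e.g. LongTextNode's component):
+ * const {enabled} = useDrillInContext()
+ * ```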
+ */ +import {createContext, useContext} from "react" + +interface DrillInContextValue { + /** Whether drill-in functionality is enabled */ + enabled: boolean +} + +const DrillInContext = createContext({enabled: false}) + +export const DrillInProvider = DrillInContext.Provider + +export const useDrillInContext = () => useContext(DrillInContext) + +export default DrillInContext diff --git a/web/oss/src/components/Editor/plugins/code/index.tsx b/web/oss/src/components/Editor/plugins/code/index.tsx index 1bbb218330..3802625a8a 100644 --- a/web/oss/src/components/Editor/plugins/code/index.tsx +++ b/web/oss/src/components/Editor/plugins/code/index.tsx @@ -24,16 +24,19 @@ import {INITIAL_CONTENT_COMMAND, InitialContentPayload} from "../../commands/Ini export const store = createStore() +import {DrillInProvider} from "./context/DrillInContext" import {$createBase64Node, isBase64String, parseBase64String} from "./nodes/Base64Node" import {$createCodeBlockNode, $isCodeBlockNode} from "./nodes/CodeBlockNode" import {$createCodeHighlightNode} from "./nodes/CodeHighlightNode" import {$createCodeLineNode, CodeLineNode, $isCodeLineNode} from "./nodes/CodeLineNode" import {$createCodeTabNode, $isCodeTabNode} from "./nodes/CodeTabNode" +import {$createLongTextNode, isLongTextString, parseLongTextString} from "./nodes/LongTextNode" import {AutoCloseBracketsPlugin} from "./plugins/AutoCloseBracketsPlugin" import {AutoFormatAndValidateOnPastePlugin} from "./plugins/AutoFormatAndValidateOnPastePlugin" import {ClosingBracketIndentationPlugin} from "./plugins/ClosingBracketIndentationPlugin" import {GlobalErrorIndicatorPlugin} from "./plugins/GlobalErrorIndicatorPlugin" import {IndentationPlugin} from "./plugins/IndentationPlugin" +import PropertyClickPlugin from "./plugins/PropertyClickPlugin" import {$getEditorCodeAsString} from "./plugins/RealTimeValidationPlugin" import {SyntaxHighlightPlugin} from "./plugins/SyntaxHighlightPlugin" import VerticalNavigationPlugin from "./plugins/VerticalNavigationPlugin" @@ -41,8 +44,12 @@ import {tryParsePartialJson} from "./tryParsePartialJson" import {createLogger} from "./utils/createLogger" import {tokenizeCodeLine} from "./utils/tokenizer" +export {PropertyClickPlugin} + export const TOGGLE_FORM_VIEW = createCommand("TOGGLE_FORM_VIEW") +export const DRILL_IN_TO_PATH = createCommand<{path: string}>("DRILL_IN_TO_PATH") + export const ON_CHANGE_LANGUAGE = createCommand<{ language: string }>("ON_CHANGE_LANGUAGE") @@ -96,9 +103,14 @@ function getTokenValidation( * @param text The input text to highlight. * @param language The language to use for highlighting. * @param validationSchema Optional schema for validation during node creation. + * @param disableLongText If true, disable long text node truncation (show full strings) * @returns An array of highlighted code line nodes. 
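+ * @example
+ * // Illustrative sketch, not part of this change: node creation must run inside
+ * // an editor.update() since this helper builds Lexical nodes. Passing true for
+ * // disableLongText keeps long string values expanded.
+ * editor.update(() => {
+ *     const lines = createHighlightedNodes('{"key": "value"}', "json", true)
+ * })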
*/ -export function createHighlightedNodes(text: string, language: "json" | "yaml"): CodeLineNode[] { +export function createHighlightedNodes( + text: string, + language: "json" | "yaml", + disableLongText?: boolean, +): CodeLineNode[] { // For JSON, avoid splitting on \n inside string values if (language === "json") { try { @@ -133,6 +145,15 @@ export function createHighlightedNodes(text: string, language: "json" | "yaml"): token.type, ) codeLine.append(base64Node) + } else if ( + token.type === "string" && + !disableLongText && + isLongTextString(token.content) + ) { + // Check if this is a long text string token + const parsed = parseLongTextString(token.content) + const longTextNode = $createLongTextNode(parsed.fullValue, token.type) + codeLine.append(longTextNode) } else { const {shouldHaveError, expectedMessage} = getTokenValidation( token.content.trim(), @@ -173,6 +194,15 @@ export function createHighlightedNodes(text: string, language: "json" | "yaml"): const parsed = parseBase64String(token.content) const base64Node = $createBase64Node(parsed.fullValue, parsed.mimeType, token.type) codeLine.append(base64Node) + } else if ( + token.type === "string" && + !disableLongText && + isLongTextString(token.content) + ) { + // Check if this is a long text string token + const parsed = parseLongTextString(token.content) + const longTextNode = $createLongTextNode(parsed.fullValue, token.type) + codeLine.append(longTextNode) } else { const {shouldHaveError, expectedMessage} = getTokenValidation( token.content.trim(), @@ -208,6 +238,8 @@ function InsertInitialCodeBlockPlugin({ validationSchema, additionalCodePlugins = [], editorId, + onPropertyClick, + disableLongText = false, }: { debug?: boolean initialValue: string @@ -215,6 +247,8 @@ function InsertInitialCodeBlockPlugin({ validationSchema: any additionalCodePlugins?: React.ReactNode[] editorId: string + onPropertyClick?: (path: string) => void + disableLongText?: boolean }) { const [editor] = useLexicalComposerContext() @@ -349,6 +383,7 @@ function InsertInitialCodeBlockPlugin({ const highlightedNodes = createHighlightedNodes( value, payload.language as "json" | "yaml", + disableLongText, ) highlightedNodes.forEach((node) => { existingCodeBlock.append(node) @@ -457,7 +492,7 @@ function InsertInitialCodeBlockPlugin({ } existingCodeBlock.clear() - const newNodes = createHighlightedNodes(newText, newLanguage) + const newNodes = createHighlightedNodes(newText, newLanguage, disableLongText) newNodes.forEach((n) => existingCodeBlock.append(n)) existingCodeBlock.setLanguage(newLanguage) @@ -610,19 +645,29 @@ function InsertInitialCodeBlockPlugin({ editor.dispatchCommand(INITIAL_CONTENT_COMMAND, payload) }, [initialValue, language]) + const drillInContextValue = {enabled: Boolean(onPropertyClick)} + return ( - <> + - + + {onPropertyClick && ( + + )} {additionalCodePlugins?.map((plugin, index) => ( {plugin} ))} - + ) } diff --git a/web/oss/src/components/Editor/plugins/code/nodes/LongTextNode.tsx b/web/oss/src/components/Editor/plugins/code/nodes/LongTextNode.tsx new file mode 100644 index 0000000000..9d28e5cd32 --- /dev/null +++ b/web/oss/src/components/Editor/plugins/code/nodes/LongTextNode.tsx @@ -0,0 +1,325 @@ +/** + * LongTextNode.tsx + * + * A custom Lexical node for rendering long text strings in a collapsed/truncated view. + * Shows a preview with character count and allows viewing the full content via drill-in. 
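+ *
+ * Illustrative sketch of the helpers defined below (token content includes the
+ * surrounding quotes; the 1,200-character string here is assumed):
+ * ```ts
+ * isLongTextString('"short"')        // false: at or under the 200-char minimum
+ * const parsed = parseLongTextString('"' + twelveHundredChars + '"')
+ * // parsed.preview has 80 chars; formatCharCount(parsed.charCount) === "1.2k chars"
+ * ```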
+ */
+import React, {useCallback, useState} from "react"
+
+import {useLexicalComposerContext} from "@lexical/react/LexicalComposerContext"
+import {TextAlignLeft, ArrowSquareOut} from "@phosphor-icons/react"
+import {Popover, Typography, Button, message} from "antd"
+import {
+    DecoratorNode,
+    EditorConfig,
+    LexicalNode,
+    NodeKey,
+    SerializedLexicalNode,
+    Spread,
+} from "lexical"
+
+import {useDrillInContext} from "../context/DrillInContext"
+
+const {Text} = Typography
+
+/** Minimum length for a string to be considered "long" and truncated */
+const MIN_LENGTH_FOR_TRUNCATION = 200
+
+/** Maximum length to show in truncated view */
+const TRUNCATE_LENGTH = 80
+
+/**
+ * Check if a string is long enough to be truncated
+ */
+export function isLongTextString(value: string): boolean {
+    // Must be a quoted string and exceed minimum length
+    if (!value.startsWith('"') || !value.endsWith('"')) return false
+    const content = value.slice(1, -1)
+    return content.length > MIN_LENGTH_FOR_TRUNCATION
+}
+
+/**
+ * Extract truncated preview from long text string
+ */
+export function parseLongTextString(value: string): {
+    preview: string
+    fullValue: string
+    charCount: number
+} {
+    // Remove surrounding quotes
+    const content = value.replace(/^"|"$/g, "")
+    const charCount = content.length
+
+    if (charCount <= MIN_LENGTH_FOR_TRUNCATION) {
+        return {
+            preview: content,
+            fullValue: content,
+            charCount,
+        }
+    }
+
+    const truncated = content.substring(0, TRUNCATE_LENGTH)
+    return {
+        preview: truncated,
+        fullValue: content,
+        charCount,
+    }
+}
+
+/**
+ * Format character count for display
+ */
+function formatCharCount(count: number): string {
+    if (count >= 1000) {
+        return `${(count / 1000).toFixed(1)}k chars`
+    }
+    return `${count} chars`
+}
+
+/**
+ * Serialized form of LongTextNode
+ */
+export type SerializedLongTextNode = Spread<
+    {
+        fullValue: string
+        highlightType: string
+    },
+    SerializedLexicalNode
+>
+
+/**
+ * React component for rendering the long text content
+ */
+function LongTextComponent({fullValue, nodeKey}: {fullValue: string; nodeKey: string}) {
+    useLexicalComposerContext() // Ensure we're in a Lexical context
+    const {enabled: drillInEnabled} = useDrillInContext()
+    const [copied, setCopied] = useState(false)
+    const [expanded, setExpanded] = useState(false)
+    const [popoverOpen, setPopoverOpen] = useState(false)
+    const parsed = parseLongTextString(`"${fullValue}"`)
+    const spanRef = React.useRef<HTMLSpanElement>(null)
+
+    const handleCopy = useCallback(async () => {
+        try {
+            await navigator.clipboard.writeText(parsed.fullValue)
+            setCopied(true)
+            message.success("Copied to clipboard")
+            setTimeout(() => setCopied(false), 2000)
+        } catch {
+            message.error("Failed to copy")
+        }
+    }, [parsed.fullValue])
+
+    const handleCollapse = useCallback(() => {
+        setExpanded(false)
+        setPopoverOpen(false)
+    }, [])
+
+    const handleDrillIn = useCallback(() => {
+        // Use the ref to find the property key on the same line and dispatch a custom event
+        if (spanRef.current) {
+            // The class is "editor-code-line", not "code-line"
+            const line = spanRef.current.closest(".editor-code-line")
+            if (line) {
+                const propertyKey = line.querySelector(".token-property") as HTMLElement
+                if (propertyKey) {
+                    // Dispatch a custom event with the property element as detail
+                    // This will be caught by PropertyClickPlugin
+                    const event = new CustomEvent("longtext-drill-in", {
+                        bubbles: true,
+                        detail: {propertyElement: propertyKey},
+                    })
+                    spanRef.current.dispatchEvent(event)
+                }
+            }
+        }
+    }, [])
+
+    // Collapsed state popover content
+    const collapsedPopoverContent = (
+
+
+
+ + Long Text + + ({formatCharCount(parsed.charCount)}) + +
+
+ + {drillInEnabled ? ( + + ) : ( + + )} +
+
+ + {/* Full Text Content */} +
+ + {parsed.fullValue} + +
+
+ ) + + // When expanded, show clickable text with inline collapse button at the start + if (expanded) { + return ( + + {/* Small inline collapse button */} + + {/* The text itself - also clickable to collapse */} + + "{parsed.fullValue}" + + + ) + } + + // Collapsed state with popover + return ( + + + "{parsed.preview}..." + + [{formatCharCount(parsed.charCount)}] + + + + ) +} + +/** + * LongTextNode - A decorator node for rendering long text strings + */ +export class LongTextNode extends DecoratorNode { + __fullValue: string + __highlightType: string + + static getType(): string { + return "longtext" + } + + static clone(node: LongTextNode): LongTextNode { + return new LongTextNode(node.__fullValue, node.__highlightType, node.__key) + } + + constructor(fullValue: string, highlightType = "string", key?: NodeKey) { + super(key) + this.__fullValue = fullValue + this.__highlightType = highlightType + } + + createDOM(_config: EditorConfig): HTMLElement { + const span = document.createElement("span") + span.className = "longtext-node-wrapper" + return span + } + + updateDOM(): boolean { + return false + } + + decorate(): React.ReactElement { + return + } + + exportJSON(): SerializedLongTextNode { + return { + type: "longtext", + version: 1, + fullValue: this.__fullValue, + highlightType: this.__highlightType, + } + } + + static importJSON(json: SerializedLongTextNode): LongTextNode { + return new LongTextNode(json.fullValue, json.highlightType) + } + + getTextContent(): string { + // Return the full value for copy/paste and serialization + return `"${this.__fullValue}"` + } + + getFullValue(): string { + return this.__fullValue + } +} + +/** + * Helper to create a LongTextNode + */ +export function $createLongTextNode(fullValue: string, highlightType = "string"): LongTextNode { + return new LongTextNode(fullValue, highlightType) +} + +/** + * Type guard for LongTextNode + */ +export function $isLongTextNode(node: LexicalNode | null | undefined): node is LongTextNode { + return node instanceof LongTextNode +} diff --git a/web/oss/src/components/Editor/plugins/code/plugins/PropertyClickPlugin.tsx b/web/oss/src/components/Editor/plugins/code/plugins/PropertyClickPlugin.tsx new file mode 100644 index 0000000000..4f889d6ebd --- /dev/null +++ b/web/oss/src/components/Editor/plugins/code/plugins/PropertyClickPlugin.tsx @@ -0,0 +1,198 @@ +/** + * PropertyClickPlugin + * + * Enables clicking on JSON property keys to trigger navigation. + * When a user clicks on a property key in the JSON editor, + * this plugin extracts the full JSON path and calls the onPropertyClick callback. + */ +import {useEffect} from "react" + +import {useLexicalComposerContext} from "@lexical/react/LexicalComposerContext" + +interface PropertyClickPluginProps { + /** Callback when a property key is clicked - receives the full JSON path */ + onPropertyClick?: (path: string) => void + /** Language of the code (only 'json' is supported) */ + language?: string +} + +/** + * Calculate the full JSON path to a property by parsing the editor content + * and tracking the nesting structure up to the clicked property. + * Handles both object keys and array indices. 
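+ *
+ * @example
+ * // Worked example (illustrative): in the document
+ * //   {"a": {"items": [{"name": 1}]}}
+ * // clicking the "name" property key resolves to the path "a.items.0.name".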
+ */ +function calculateJsonPath(rootElement: HTMLElement, targetElement: HTMLElement): string | null { + // Find the position of the target in the text + const range = document.createRange() + range.setStartBefore(rootElement.firstChild || rootElement) + range.setEndBefore(targetElement) + const textBefore = range.toString() + + // Stack to track context at each nesting level + // Each entry: { type: 'object' | 'array', key?: string, index: number } + const stack: {type: "object" | "array"; key?: string; index: number}[] = [] + let inString = false + let currentKey = "" + let collectingKey = false + let i = 0 + + while (i < textBefore.length) { + const char = textBefore[i] + + // Handle string boundaries + if (char === '"' && (i === 0 || textBefore[i - 1] !== "\\")) { + if (!inString) { + inString = true + collectingKey = true + currentKey = "" + } else { + inString = false + if (collectingKey && currentKey) { + // Check if this is followed by a colon (making it a key) + const afterQuote = textBefore.slice(i + 1).trimStart() + if (afterQuote.startsWith(":") && stack.length > 0) { + // This is a key in the current object + stack[stack.length - 1].key = currentKey + } + } + collectingKey = false + } + i++ + continue + } + + if (inString) { + if (collectingKey) { + currentKey += char + } + i++ + continue + } + + // Track nesting + if (char === "{") { + stack.push({type: "object", index: 0}) + } else if (char === "[") { + stack.push({type: "array", index: 0}) + } else if (char === "}" || char === "]") { + stack.pop() + } else if (char === ",") { + // Increment array index when we see a comma at array level + if (stack.length > 0 && stack[stack.length - 1].type === "array") { + stack[stack.length - 1].index++ + } + } + + i++ + } + + // Build the path from the stack + // Don't include the key from the innermost object frame - the clicked key replaces it + const path: string[] = [] + for (let j = 0; j < stack.length; j++) { + const frame = stack[j] + const isLast = j === stack.length - 1 + + if (frame.type === "array") { + path.push(String(frame.index)) + } else if (frame.type === "object" && frame.key && !isLast) { + // Only include keys from non-innermost object frames + path.push(frame.key) + } + } + + // Get the clicked property key + const propertyText = targetElement.textContent || "" + const clickedKey = propertyText.replace(/^"|"$/g, "") + + if (!clickedKey) return null + + // Add the clicked key to the path + path.push(clickedKey) + + return path.join(".") +} + +export default function PropertyClickPlugin({ + onPropertyClick, + language = "json", +}: PropertyClickPluginProps) { + const [editor] = useLexicalComposerContext() + + useEffect(() => { + if (!onPropertyClick || language !== "json") return + + const rootElement = editor.getRootElement() + if (!rootElement) return + + // Add tooltip on hover + const handleMouseEnter = (event: MouseEvent) => { + const target = event.target as HTMLElement + if (!target.classList.contains("token-property")) return + + const fullPath = calculateJsonPath(rootElement, target) + if (fullPath) { + target.setAttribute("title", `Click to drill into "${fullPath}"`) + } + } + + const handleMouseLeave = (event: MouseEvent) => { + const target = event.target as HTMLElement + if (target.classList.contains("token-property")) { + target.removeAttribute("title") + } + } + + const handleClick = (event: MouseEvent) => { + const target = event.target as HTMLElement + + // Check if clicked on a property token + if (!target.classList.contains("token-property")) return 
+            const fullPath = calculateJsonPath(rootElement, target)
+            if (!fullPath) return
+
+            // Prevent default behavior
+            event.preventDefault()
+            event.stopPropagation()
+
+            onPropertyClick(fullPath)
+        }
+
+        // Handle custom drill-in event from LongTextNode popover
+        const handleLongTextDrillIn = (event: Event) => {
+            const customEvent = event as CustomEvent<{propertyElement: HTMLElement}>
+            const propertyElement = customEvent.detail?.propertyElement
+            if (!propertyElement) return
+
+            const fullPath = calculateJsonPath(rootElement, propertyElement)
+            if (!fullPath) return
+
+            onPropertyClick(fullPath)
+        }
+
+        rootElement.addEventListener("click", handleClick)
+        rootElement.addEventListener("mouseover", handleMouseEnter)
+        rootElement.addEventListener("mouseout", handleMouseLeave)
+        rootElement.addEventListener("longtext-drill-in", handleLongTextDrillIn)
+
+        return () => {
+            rootElement.removeEventListener("click", handleClick)
+            rootElement.removeEventListener("mouseover", handleMouseEnter)
+            rootElement.removeEventListener("mouseout", handleMouseLeave)
+            rootElement.removeEventListener("longtext-drill-in", handleLongTextDrillIn)
+        }
+    }, [editor, onPropertyClick, language])
+
+    return null
+}
diff --git a/web/oss/src/components/Editor/plugins/code/plugins/RealTimeValidationPlugin.tsx b/web/oss/src/components/Editor/plugins/code/plugins/RealTimeValidationPlugin.tsx
index 66f9c04b12..10d7b24dd7 100644
--- a/web/oss/src/components/Editor/plugins/code/plugins/RealTimeValidationPlugin.tsx
+++ b/web/oss/src/components/Editor/plugins/code/plugins/RealTimeValidationPlugin.tsx
@@ -100,6 +100,13 @@ export function $getEditorCodeAsString(editor?: LexicalEditor): string {
             if (text !== "\u200B") {
                 parts.push(text)
             }
+        } else {
+            // Handle other node types (LongTextNode, Base64Node, etc.)
+ // by calling getTextContent() which returns the full value + const text = child.getTextContent() + if (text && text !== "\u200B") { + parts.push(text) + } } } diff --git a/web/oss/src/components/Editor/plugins/code/plugins/SyntaxHighlightPlugin.tsx b/web/oss/src/components/Editor/plugins/code/plugins/SyntaxHighlightPlugin.tsx index 9c8aa2f805..d46fed293e 100644 --- a/web/oss/src/components/Editor/plugins/code/plugins/SyntaxHighlightPlugin.tsx +++ b/web/oss/src/components/Editor/plugins/code/plugins/SyntaxHighlightPlugin.tsx @@ -33,6 +33,12 @@ import { } from "../nodes/CodeHighlightNode" import {$isCodeLineNode, CodeLineNode} from "../nodes/CodeLineNode" import {$isCodeTabNode} from "../nodes/CodeTabNode" +import { + $createLongTextNode, + $isLongTextNode, + isLongTextString, + parseLongTextString, +} from "../nodes/LongTextNode" import {createLogger} from "../utils/createLogger" import {getDiffRange} from "../utils/getDiffRange" import {isPluginLocked, lockPlugin, unlockPlugin} from "../utils/pluginLocks" @@ -199,12 +205,14 @@ interface SyntaxHighlightPluginProps { editorId: string schema?: any debug?: boolean + disableLongText?: boolean } export function SyntaxHighlightPlugin({ editorId, schema, debug = false, + disableLongText = false, }: SyntaxHighlightPluginProps) { const [editor] = useLexicalComposerContext() @@ -311,6 +319,13 @@ export function SyntaxHighlightPlugin({ return t.content === existing.content } + // Check if both are long text (existing is longtext node, new is string token with long text content) + const newIsLongText = t.type === "string" && isLongTextString(t.content) + const existingIsLongText = existing.type === "longtext" + if (newIsLongText && existingIsLongText) { + return t.content === existing.content + } + return t.content === existing.content && t.type === existing.type }) log(`🔍 [SyntaxHighlightPlugin] Token comparison:`, { @@ -346,11 +361,14 @@ export function SyntaxHighlightPlugin({ const current = lineNode.getChildren() const tabs = current.filter($isCodeTabNode) const highlights = current.filter( - (child) => $isCodeHighlightNode(child) || $isBase64Node(child), + (child) => + $isCodeHighlightNode(child) || + $isBase64Node(child) || + $isLongTextNode(child), ) // Create new highlight nodes from tokens (pure syntax highlighting) - // Check for base64 strings and create Base64Nodes for them + // Check for base64 strings and long text strings and create special nodes for them const newHighlights: LexicalNode[] = tokens.map(({content, type}) => { // Check if this is a base64 string token - create Base64Node for collapsed display if (type === "string" && isBase64String(content)) { @@ -358,6 +376,29 @@ export function SyntaxHighlightPlugin({ return $createBase64Node(parsed.fullValue, parsed.mimeType, type) } + // Check if this is a long text string - create LongTextNode for truncated display + // Skip if disableLongText is true + // ALSO skip if user is currently typing in this text (has active selection in this line) + if ( + type === "string" && + !disableLongText && + isLongTextString(content) + ) { + // Check if the current selection is within this line + // If user is actively typing, keep as regular text node for better UX + const currentSelection = $getSelection() + const isUserTypingInLine = + $isRangeSelection(currentSelection) && + currentSelection.anchor.getNode().getParent() === lineNode + + // Only convert to LongTextNode if user is NOT actively typing in this line + if (!isUserTypingInLine) { + const parsed = parseLongTextString(content) + 
return $createLongTextNode(parsed.fullValue, type) + } + // Otherwise fall through to create regular CodeHighlightNode + } + const node = $createCodeHighlightNode( content, type, diff --git a/web/oss/src/components/Editor/plugins/code/utils/getDiffRange.ts b/web/oss/src/components/Editor/plugins/code/utils/getDiffRange.ts index c664d29ca2..961a556dc3 100644 --- a/web/oss/src/components/Editor/plugins/code/utils/getDiffRange.ts +++ b/web/oss/src/components/Editor/plugins/code/utils/getDiffRange.ts @@ -2,13 +2,14 @@ import {LexicalNode} from "lexical" import {$isBase64Node} from "../nodes/Base64Node" import {$isCodeHighlightNode} from "../nodes/CodeHighlightNode" +import {$isLongTextNode} from "../nodes/LongTextNode" /** - * Checks if two nodes (CodeHighlightNode or Base64Node) are semantically equal. + * Checks if two nodes (CodeHighlightNode, Base64Node, or LongTextNode) are semantically equal. * * Two nodes are considered equal if they have: * 1. The same text content - * 2. The same type (both highlight or both base64) + * 2. The same type (both highlight, both base64, or both longtext) * 3. For highlight nodes: same highlight type, validation error state, and message * * @param a - First node to compare @@ -29,6 +30,19 @@ export function isEqual(a: LexicalNode, b: LexicalNode): boolean { return a.getTextContent() === b.getTextContent() } + // Check for LongTextNodes + const aIsLongText = $isLongTextNode(a) + const bIsLongText = $isLongTextNode(b) + + if (aIsLongText !== bIsLongText) { + return false + } + + // For LongTextNodes, just compare text content + if (aIsLongText && bIsLongText) { + return a.getTextContent() === b.getTextContent() + } + // For CodeHighlightNodes, compare all properties if ($isCodeHighlightNode(a) && $isCodeHighlightNode(b)) { return ( diff --git a/web/oss/src/components/Editor/plugins/code/utils/pasteUtils.ts b/web/oss/src/components/Editor/plugins/code/utils/pasteUtils.ts index 7baec46b33..7800a1554f 100644 --- a/web/oss/src/components/Editor/plugins/code/utils/pasteUtils.ts +++ b/web/oss/src/components/Editor/plugins/code/utils/pasteUtils.ts @@ -4,6 +4,7 @@ import {$createBase64Node, isBase64String, parseBase64String} from "../nodes/Bas import {$createCodeHighlightNode} from "../nodes/CodeHighlightNode" import {$createCodeLineNode, CodeLineNode} from "../nodes/CodeLineNode" import {$createCodeTabNode} from "../nodes/CodeTabNode" +import {$createLongTextNode, isLongTextString, parseLongTextString} from "../nodes/LongTextNode" import type {CodeLanguage} from "../types" import {normalizePastedLinesIndentation} from "./indentationUtils" @@ -282,6 +283,11 @@ export function $createNodeForLineWithTabs(line: string, language: CodeLanguage) const parsed = parseBase64String(token.content) const base64Node = $createBase64Node(parsed.fullValue, parsed.mimeType, token.type) codeLine.append(base64Node) + } else if (token.type === "string" && isLongTextString(token.content)) { + // Check if this is a long text string token - create LongTextNode for truncated display + const parsed = parseLongTextString(token.content) + const longTextNode = $createLongTextNode(parsed.fullValue, token.type) + codeLine.append(longTextNode) } else { codeLine.append($createCodeHighlightNode(token.content, token.type, false, null)) } diff --git a/web/oss/src/components/Editor/plugins/index.tsx b/web/oss/src/components/Editor/plugins/index.tsx index 51b10fdee2..6145044974 100644 --- a/web/oss/src/components/Editor/plugins/index.tsx +++ b/web/oss/src/components/Editor/plugins/index.tsx @@ -75,6 
+75,8 @@ const EditorPlugins = ({ tokens, templateFormat, additionalCodePlugins = [], + onPropertyClick, + disableLongText, }: EditorPluginsProps) => { const markdown = useAtomValue(markdownViewAtom(id)) @@ -127,9 +129,11 @@ const EditorPlugins = ({ editorId={id} validationSchema={validationSchema} initialValue={value !== undefined ? value : initialValue} - language={language} + language={language === "code" ? "json" : language} debug={debug} additionalCodePlugins={additionalCodePlugins} + onPropertyClick={onPropertyClick} + disableLongText={disableLongText} /> diff --git a/web/oss/src/components/Editor/types.d.ts b/web/oss/src/components/Editor/types.d.ts index 6ea1bbac6f..5d71cffb16 100644 --- a/web/oss/src/components/Editor/types.d.ts +++ b/web/oss/src/components/Editor/types.d.ts @@ -51,6 +51,10 @@ export interface EditorProps extends React.HTMLProps { validationSchema?: unknown /** Additional plugins to include in code editor */ additionalCodePlugins?: React.ReactNode[] + /** Callback when a JSON property key is Cmd/Meta+clicked (for drill-in navigation) */ + onPropertyClick?: (path: string) => void + /** Disable long text node truncation (show full content instead of [N chars]) */ + disableLongText?: boolean } export interface EditorPluginsProps { @@ -73,4 +77,8 @@ export interface EditorPluginsProps { handleUpdate: (editorState: EditorState, editor: LexicalEditor) => void /** Additional plugins to include in code editor */ additionalCodePlugins?: React.ReactNode[] + /** Callback when a JSON property key is Cmd/Meta+clicked (for drill-in navigation) */ + onPropertyClick?: (path: string) => void + /** Disable long text node truncation (show full content instead of [N chars]) */ + disableLongText?: boolean } diff --git a/web/oss/src/components/EnhancedUIs/Drawer/index.tsx b/web/oss/src/components/EnhancedUIs/Drawer/index.tsx index 8d199c648a..e7e95bad03 100644 --- a/web/oss/src/components/EnhancedUIs/Drawer/index.tsx +++ b/web/oss/src/components/EnhancedUIs/Drawer/index.tsx @@ -1,12 +1,35 @@ -import {useState, useEffect} from "react" +import {useState, useEffect, useMemo} from "react" import {Drawer} from "antd" import {EnhancedDrawerProps} from "./types" -const EnhancedDrawer = ({children, closeOnLayoutClick = true, ...props}: EnhancedDrawerProps) => { +const EnhancedDrawer = ({ + children, + closeOnLayoutClick = true, + width, + styles, + ...props +}: EnhancedDrawerProps) => { const [shouldRender, setShouldRender] = useState(false) - const {open: isVisible, onClose} = props + const {open: isVisible, onClose, mask} = props + + const drawerStyles = useMemo(() => { + if (!width) return styles + return { + ...styles, + wrapper: { + ...styles?.wrapper, + width, + }, + } + }, [width, styles]) + + const maskProps = useMemo(() => { + if (mask === false) return false + const maskObj = typeof mask === "object" ? 
mask : {} + return {blur: false, ...maskObj} + }, [mask]) useEffect(() => { if (isVisible) { @@ -42,7 +65,14 @@ const EnhancedDrawer = ({children, closeOnLayoutClick = true, ...props}: Enhance if (!shouldRender) return null return ( - + {children} ) diff --git a/web/oss/src/components/EnhancedUIs/Modal/index.tsx b/web/oss/src/components/EnhancedUIs/Modal/index.tsx index a21910adca..155af848c3 100644 --- a/web/oss/src/components/EnhancedUIs/Modal/index.tsx +++ b/web/oss/src/components/EnhancedUIs/Modal/index.tsx @@ -48,10 +48,7 @@ const EnhancedModal = ({children, ...props}: EnhancedModalProps) => { display: "flex", flexDirection: "column", // Only apply maxHeight if not explicitly overridden by customContainer - ...(customContainer?.maxHeight === undefined && - customContainer?.height === undefined - ? {maxHeight: "90vh"} - : {}), + ...(customContainer?.maxHeight === undefined ? {maxHeight: "90vh"} : {}), ...customContainer, }, body: { diff --git a/web/oss/src/components/EnhancedUIs/Table/assets/CustomCells.tsx b/web/oss/src/components/EnhancedUIs/Table/assets/CustomCells.tsx index 6906be9cb5..6c6de393ef 100644 --- a/web/oss/src/components/EnhancedUIs/Table/assets/CustomCells.tsx +++ b/web/oss/src/components/EnhancedUIs/Table/assets/CustomCells.tsx @@ -18,35 +18,6 @@ export const ResizableTitle = memo((props: any) => { setLiveWidth(width) }, [width]) - // const isSelectionColumn = - // typeof restProps.className === "string" && - // restProps.className.includes("ant-table-selection-column") - - // if (isSelectionColumn) { - // const styleWidth = - // typeof restProps.style?.width === "number" ? restProps.style.width : undefined - // const targetWidth = - // typeof width === "number" ? width : typeof styleWidth === "number" ? styleWidth : 48 - - // return ( - // - //
- // {restProps.children} - //
- // - // ) - // } - // Only enable resizable behavior when a resize handler is provided. // This ensures non-resizable columns (e.g., selection or fixed columns) // are not wrapped in the Resizable component and keep their native layout. diff --git a/web/oss/src/components/EvalRunDetails/Table.tsx b/web/oss/src/components/EvalRunDetails/Table.tsx index ee41dd7c13..e274a3d36a 100644 --- a/web/oss/src/components/EvalRunDetails/Table.tsx +++ b/web/oss/src/components/EvalRunDetails/Table.tsx @@ -1,11 +1,10 @@ import {useCallback, useMemo, useRef} from "react" -import {useAtomValue, useStore} from "jotai" import clsx from "clsx" +import {useAtomValue, useStore} from "jotai" import {message} from "@/oss/components/AppMessageContext" import VirtualizedScenarioTableAnnotateDrawer from "@/oss/components/EvalRunDetails/components/AnnotateDrawer/VirtualizedScenarioTableAnnotateDrawer" -import ScenarioColumnVisibilityPopoverContent from "@/oss/components/EvalRunDetails/components/columnVisibility/ColumnVisibilityPopoverContent" import { InfiniteVirtualTableFeatureShell, type TableFeaturePagination, @@ -24,6 +23,7 @@ import {runDisplayNameAtomFamily} from "./atoms/runDerived" import type {EvaluationTableColumn} from "./atoms/table" import {DEFAULT_SCENARIO_PAGE_SIZE} from "./atoms/table" import type {PreviewTableRow} from "./atoms/tableRows" +import ScenarioColumnVisibilityPopoverContent from "./components/columnVisibility/ColumnVisibilityPopoverContent" import { evaluationPreviewDatasetStore, evaluationPreviewTableStore, @@ -40,12 +40,6 @@ import {patchFocusDrawerQueryParams} from "./state/urlFocusDrawer" type TableRowData = PreviewTableRow -// Alternating background colors for timestamp-based batch grouping -const TIMESTAMP_GROUP_COLORS = [ - "rgba(59, 130, 246, 0.06)", // blue - "rgba(16, 185, 129, 0.06)", // green -] - interface EvalRunDetailsTableProps { runId: string evaluationType: "auto" | "human" | "online" @@ -93,26 +87,13 @@ const EvalRunDetailsTable = ({ const previewColumns = usePreviewColumns({columnResult, evaluationType}) - // Inject synthetic columns for comparison exports (hidden in table display) - const columnsWithSyntheticColumns = useMemo(() => { + // Inject synthetic columns for comparison exports (do not render in UI) + const exportColumns = useMemo(() => { const hasCompareRuns = compareSlots.some(Boolean) if (!hasCompareRuns) { return previewColumns.columns } - const hiddenColumnStyle = { - display: "none", - width: 0, - minWidth: 0, - maxWidth: 0, - padding: 0, - margin: 0, - border: "none", - visibility: "hidden", - position: "absolute", - left: "-9999px", - } as const - // Create synthetic "Run" column for export only (completely hidden in table) const runColumn = { key: "__run_type__", @@ -124,8 +105,6 @@ const EvalRunDetailsTable = ({ render: () => null, exportEnabled: true, exportLabel: "Run", - onHeaderCell: () => ({style: hiddenColumnStyle}), - onCell: () => ({style: hiddenColumnStyle}), } // Create synthetic "Run ID" column for export only (completely hidden in table) @@ -139,8 +118,6 @@ const EvalRunDetailsTable = ({ render: () => null, exportEnabled: true, exportLabel: "Run ID", - onHeaderCell: () => ({style: hiddenColumnStyle}), - onCell: () => ({style: hiddenColumnStyle}), } return [runColumn, runIdColumn, ...previewColumns.columns] @@ -306,21 +283,6 @@ const EvalRunDetailsTable = ({ [handleLoadMore, handleResetPages, mergedRows], ) - // Build timestamp color map for row grouping (only for online evaluations) - const timestampColorMap = useMemo(() => { - const map 
= new Map() - if (evaluationType !== "online") return map - - // Process rows in order to assign consistent colors - mergedRows.forEach((row) => { - if (row.timestamp && !map.has(row.timestamp)) { - const colorIndex = map.size % TIMESTAMP_GROUP_COLORS.length - map.set(row.timestamp, TIMESTAMP_GROUP_COLORS[colorIndex]) - } - }) - return map - }, [evaluationType, mergedRows]) - // Build group map for export label resolution const groupMap = useMemo(() => { return buildGroupMap(columnResult?.groups) @@ -851,17 +813,27 @@ const EvalRunDetailsTable = ({ resolveColumnLabel, filename: `${runDisplayName || runId}-scenarios.csv`, beforeExport: loadAllPagesBeforeExport, + columnsOverride: exportColumns, }), - [exportResolveValue, resolveColumnLabel, runId, runDisplayName, loadAllPagesBeforeExport], + [ + exportResolveValue, + resolveColumnLabel, + runId, + runDisplayName, + loadAllPagesBeforeExport, + exportColumns, + ], ) + const hasCompareRuns = compareSlots.some(Boolean) + return ( -
-
+
+
datasetStore={evaluationPreviewDatasetStore} tableScope={tableScope} - columns={columnsWithSyntheticColumns} + columns={previewColumns.columns} rowKey={(record) => record.key} tableClassName={clsx( "agenta-scenario-table", @@ -898,17 +870,13 @@ const EvalRunDetailsTable = ({ bordered: true, tableLayout: "fixed", onRow: (record) => { - // Determine background color: comparison color takes precedence, then timestamp grouping - let backgroundColor: string | undefined - if (record.compareIndex) { - backgroundColor = getComparisonColor(record.compareIndex) - } else if ( - evaluationType === "online" && - record.timestamp && - timestampColorMap.has(record.timestamp) - ) { - backgroundColor = timestampColorMap.get(record.timestamp) - } + const backgroundColor = hasCompareRuns + ? getComparisonColor( + typeof record.compareIndex === "number" + ? record.compareIndex + : 0, + ) + : "#fff" return { onClick: (event) => { diff --git a/web/oss/src/components/EvalRunDetails/atoms/annotations.ts b/web/oss/src/components/EvalRunDetails/atoms/annotations.ts index 0fc9df0800..2cc359c3ca 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/annotations.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/annotations.ts @@ -140,6 +140,7 @@ export const evaluationAnnotationQueryAtomFamily = atomFamily( gcTime: 5 * 60 * 1000, refetchOnWindowFocus: false, refetchOnReconnect: false, + structuralSharing: true, queryFn: async () => { if (!batcher) { throw new Error("Annotation batcher is not initialised") @@ -173,6 +174,7 @@ export const scenarioAnnotationsQueryAtomFamily = atomFamily( gcTime: 5 * 60 * 1000, refetchOnWindowFocus: false, refetchOnReconnect: false, + structuralSharing: true, queryFn: async () => { if (!batcher || uniqueTraceIds.length === 0) return [] const results = await Promise.all(uniqueTraceIds.map((id) => batcher(id))) diff --git a/web/oss/src/components/EvalRunDetails/atoms/compare.ts b/web/oss/src/components/EvalRunDetails/atoms/compare.ts index d86d1ecf79..281feefce6 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/compare.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/compare.ts @@ -168,7 +168,20 @@ export const deriveRunComparisonStructure = ({ } /** Terminal statuses that allow comparison */ -const TERMINAL_STATUSES = new Set(["success", "failure", "errors", "cancelled"]) +export const TERMINAL_STATUSES = new Set([ + "success", + "failure", + "failed", + "errors", + "cancelled", + "completed", + "finished", + "ok", + "evaluation_finished", + "evaluation_finished_with_errors", + "evaluation_failed", + "evaluation_aggregation_failed", +]) /** Check if a status is terminal (run has finished) */ export const isTerminalStatus = (status: string | undefined | null): boolean => { diff --git a/web/oss/src/components/EvalRunDetails/atoms/invocationTraceSummary.ts b/web/oss/src/components/EvalRunDetails/atoms/invocationTraceSummary.ts index d84d69917e..c61a1ac267 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/invocationTraceSummary.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/invocationTraceSummary.ts @@ -98,6 +98,55 @@ const collectMetricSources = (node?: TraceNode | null): any[] => { return sources } +/** + * Lightweight atom family that only checks if a scenario has an invocation. + * Much faster than invocationTraceSummaryAtomFamily as it doesn't extract metrics. + * Use this when you only need to know if an invocation exists. 
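+ *
+ * @example
+ * // Illustrative read from a component; useAtomValue comes from jotai and the
+ * // surrounding component is assumed:
+ * const hasInvocation = useAtomValue(
+ *     scenarioHasInvocationAtomFamily({scenarioId, runId}),
+ * )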
+ */ +export const scenarioHasInvocationAtomFamily = atomFamily( + ({scenarioId, runId}: {scenarioId?: string; runId?: string}) => + atom((get) => { + if (!scenarioId) return false + + const effectiveRunId = runId ?? get(activePreviewRunIdAtom) ?? undefined + const runIndex = get( + evaluationRunIndexAtomFamily(effectiveRunId ? effectiveRunId : null), + ) + + const stepsQuery = get(scenarioStepsQueryFamily({scenarioId, runId: effectiveRunId})) + if (stepsQuery.isLoading || stepsQuery.isFetching) { + return false // Treat loading as no invocation yet + } + + const allSteps = stepsQuery.data?.steps ?? [] + const candidateKeys: string[] = [] + if (runIndex) { + runIndex.invocationKeys.forEach((key) => { + if (!candidateKeys.includes(key)) candidateKeys.push(key) + }) + } + + // Quick check: does any invocation step exist with a traceId? + const invocationStep = candidateKeys + .map((key) => allSteps.find((step: any) => step?.stepKey === key)) + .find((step) => step) + + if (!invocationStep) return false + + // Check for traceId existence + const traceId = + invocationStep?.traceId || + invocationStep?.trace_id || + invocationStep?.trace?.tree?.id || + (Array.isArray(invocationStep?.trace?.trees) && + invocationStep.trace.trees[0]?.tree?.id) || + (Array.isArray(invocationStep?.trace?.nodes) && + invocationStep.trace.nodes[0]?.trace_id) + + return Boolean(traceId) + }), +) + export const invocationTraceSummaryAtomFamily = atomFamily( ({scenarioId, stepKey, runId}: {scenarioId?: string; stepKey?: string; runId?: string}) => atom((get) => { @@ -109,7 +158,10 @@ export const invocationTraceSummaryAtomFamily = atomFamily( ) const stepsQuery = get(scenarioStepsQueryFamily({scenarioId, runId: effectiveRunId})) - if (stepsQuery.isLoading || stepsQuery.isFetching) { + // Stale-while-revalidate: only show loading when there's no data yet + // Don't show loading during background refetches (isFetching with existing data) + const hasStepsData = Boolean(stepsQuery.data) + if (!hasStepsData && (stepsQuery.isLoading || stepsQuery.isPending)) { return {state: "loading"} } diff --git a/web/oss/src/components/EvalRunDetails/atoms/metrics.ts b/web/oss/src/components/EvalRunDetails/atoms/metrics.ts index 172c60d211..cf39fca18a 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/metrics.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/metrics.ts @@ -789,6 +789,8 @@ export const evaluationMetricQueryAtomFamily = atomFamily( gcTime: 5 * 60 * 1000, refetchOnWindowFocus: false, refetchOnReconnect: false, + // Enable structural sharing to prevent unnecessary re-renders when data hasn't changed + structuralSharing: true, queryFn: async () => { if (!batcher) { throw new Error("Metric batcher is not initialised") @@ -844,11 +846,15 @@ export const scenarioMetricMetaAtomFamily = atomFamily( ({scenarioId, runId}: {scenarioId: string; runId?: string | null}) => selectAtom( evaluationMetricQueryAtomFamily({scenarioId, runId}), - (queryState) => ({ - isLoading: queryState.isLoading, - isFetching: queryState.isFetching, - error: queryState.error, - }), + (queryState) => { + // Stale-while-revalidate: only show loading when there's no cached data + const hasData = Boolean(queryState.data) + return { + isLoading: !hasData && queryState.isLoading, + isFetching: queryState.isFetching, + error: queryState.error, + } + }, (a, b) => a.isLoading === b.isLoading && a.isFetching === b.isFetching && a.error === b.error, ), diff --git a/web/oss/src/components/EvalRunDetails/atoms/query.ts 
b/web/oss/src/components/EvalRunDetails/atoms/query.ts index ca041c26cf..9bf88533d7 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/query.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/query.ts @@ -189,11 +189,11 @@ const normalizeQueryRevisionSnapshot = ( const hasLookupValue = (reference: EvaluationQueryReference) => Boolean( reference.queryRevisionId || - reference.queryRevisionSlug || - reference.queryVariantId || - reference.queryVariantSlug || - reference.queryId || - reference.querySlug, + reference.queryRevisionSlug || + reference.queryVariantId || + reference.queryVariantSlug || + reference.queryId || + reference.querySlug, ) type ReferenceDescriptor = diff --git a/web/oss/src/components/EvalRunDetails/atoms/scenarioColumnValues.ts b/web/oss/src/components/EvalRunDetails/atoms/scenarioColumnValues.ts index d234709c82..16d31343f4 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/scenarioColumnValues.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/scenarioColumnValues.ts @@ -19,6 +19,7 @@ import {evaluationAnnotationQueryAtomFamily} from "./annotations" import {scenarioMetricMetaAtomFamily, scenarioMetricValueAtomFamily} from "./metrics" import {activePreviewRunIdAtom} from "./run" import {scenarioStepsQueryFamily} from "./scenarioSteps" +import {scenarioTestcaseMetaAtomFamily, scenarioTestcaseValueAtomFamily} from "./scenarioTestcase" import type {EvaluationTableColumn} from "./table" import { columnValueDescriptorMapAtomFamily, @@ -27,7 +28,6 @@ import { type ColumnValueDescriptor, } from "./table/columnAccess" import {evaluationRunIndexAtomFamily} from "./table/run" -import {testcaseQueryMetaAtomFamily, testcaseValueAtomFamily} from "./table/testcases" import {traceQueryMetaAtomFamily, traceValueAtomFamily} from "./traces" export interface QueryState { @@ -299,11 +299,6 @@ const toTraceId = (step: IStepResponse | undefined) => { ) } -const toTestcaseId = (step: IStepResponse | undefined) => { - if (!step) return undefined - return (step as any)?.testcaseId || (step as any)?.testcase_id || undefined -} - /** * Extract step error if the step has status "failure" and an error object. * This is used to display evaluator errors in the UI. @@ -472,7 +467,11 @@ const scenarioColumnValueBaseAtomFamily = atomFamily( const evalType = get(previewEvalTypeAtom) const stepsQuery = get(scenarioStepsQueryFamily({scenarioId, runId})) - const stepsQueryLoading = stepsQuery.isLoading || stepsQuery.isPending + // Stale-while-revalidate: only show loading when there's no data yet + // Don't show loading during background refetches (isFetching with existing data) + const hasStepsData = Boolean(stepsQuery.data) + const stepsQueryLoading = + !hasStepsData && (stepsQuery.isLoading || stepsQuery.isPending) const baseSteps = stepsQuery.data?.steps ?? [] const runIndex = get(evaluationRunIndexAtomFamily(runId ?? 
null)) const derivedByKind = extractStepsByKind(baseSteps, runIndex) @@ -496,54 +495,103 @@ const scenarioColumnValueBaseAtomFamily = atomFamily( error: undefined, } } + + // Get testcase entity and metadata for this scenario + const testcaseMeta = get(scenarioTestcaseMetaAtomFamily({scenarioId, runId})) + + // Primary source: testcase entity (when testcaseId exists) + if (testcaseMeta.hasTestcase) { + const valueFromTestcase = get( + scenarioTestcaseValueAtomFamily({scenarioId, runId, path: column.path}), + ) + + if (valueFromTestcase !== undefined) { + // Stale-while-revalidate: if we have a value, never show loading + return { + value: valueFromTestcase, + displayValue: valueFromTestcase, + isLoading: false, + isFetching: testcaseMeta.isFetching, + error: testcaseMeta.error, + } + } + + // If testcase exists but value not found at path, only show loading on initial load + // (when isLoading is true and we haven't found any value yet) + if (testcaseMeta.isLoading) { + return { + value: undefined, + displayValue: undefined, + isLoading: true, + isFetching: testcaseMeta.isFetching, + error: undefined, + } + } + } + + // Fallback for online evaluations: step data or trace data const targetStep = pickStep(inputs.length ? inputs : steps, column.stepKey) - const testcaseId = toTestcaseId(targetStep) const pathSegments = descriptor.pathSegments - const traceId = toTraceId(targetStep) - const testcaseMeta = testcaseId - ? get(testcaseQueryMetaAtomFamily({testcaseId, runId})) - : null - const valueFromTestcase = testcaseId - ? get(testcaseValueAtomFamily({testcaseId, path: column.path, runId})) - : undefined - const stepValue = resolveInputStepValueByPath(targetStep, pathSegments) - const traceCandidates: {path: string; valueKey?: string}[] = [ - {path: column.path, valueKey: column.valueKey}, - ] - if (column.path.endsWith(".inputs")) { - traceCandidates.push({ - path: column.path.slice(0, -".inputs".length), - valueKey: column.valueKey, - }) + // Try step's embedded inputs first + const stepValue = resolveInputStepValueByPath(targetStep, pathSegments) + if (stepValue !== undefined) { + return { + value: stepValue, + displayValue: stepValue, + isLoading: false, + isFetching: false, + error: undefined, + } } - let localTraceValue: unknown = undefined + // Try local trace data const localTrace = (targetStep as any)?.trace if (localTrace) { + const traceCandidates: {path: string; valueKey?: string}[] = [ + {path: column.path, valueKey: column.valueKey}, + ] + if (column.path.endsWith(".inputs")) { + traceCandidates.push({ + path: column.path.slice(0, -".inputs".length), + valueKey: column.valueKey, + }) + } + for (const candidate of traceCandidates) { - localTraceValue = resolveInvocationTraceValue( + const localTraceValue = resolveInvocationTraceValue( localTrace, candidate.path, candidate.valueKey, ) - if (localTraceValue !== undefined) break + if (localTraceValue !== undefined) { + return { + value: localTraceValue, + displayValue: localTraceValue, + isLoading: false, + isFetching: false, + error: undefined, + } + } } } - let traceMeta: {isLoading?: boolean; isFetching?: boolean; error?: unknown} | null = - null - let remoteTraceValue: unknown = undefined - const shouldFetchRemoteTrace = - traceId && - valueFromTestcase === undefined && - stepValue === undefined && - localTraceValue === undefined - - if (shouldFetchRemoteTrace && traceId) { - traceMeta = get(traceQueryMetaAtomFamily({traceId, runId})) ?? 
null + // Last resort: fetch from remote trace + const traceId = toTraceId(targetStep) + if (traceId) { + const traceMeta = get(traceQueryMetaAtomFamily({traceId, runId})) + const traceCandidates: {path: string; valueKey?: string}[] = [ + {path: column.path, valueKey: column.valueKey}, + ] + if (column.path.endsWith(".inputs")) { + traceCandidates.push({ + path: column.path.slice(0, -".inputs".length), + valueKey: column.valueKey, + }) + } + for (const candidate of traceCandidates) { - const candidateValue = get( + const remoteTraceValue = get( traceValueAtomFamily({ traceId, path: candidate.path, @@ -551,78 +599,55 @@ const scenarioColumnValueBaseAtomFamily = atomFamily( runId, }), ) - if (candidateValue !== undefined) { - remoteTraceValue = candidateValue - break + if (remoteTraceValue !== undefined) { + return { + value: remoteTraceValue, + displayValue: remoteTraceValue, + isLoading: false, + isFetching: false, + error: undefined, + } } } - } - const value = - valueFromTestcase ?? - stepValue ?? - localTraceValue ?? - remoteTraceValue ?? - undefined + // Still loading trace data + if (traceMeta?.isLoading) { + return { + value: undefined, + displayValue: undefined, + isLoading: true, + isFetching: traceMeta.isFetching ?? false, + error: undefined, + } + } - if ( - (value === undefined || value === null) && - !stepsQueryLoading && - !stepsQuery.isLoading && - !(testcaseMeta?.isLoading ?? false) - ) { + return { + value: undefined, + displayValue: undefined, + isLoading: false, + isFetching: false, + error: traceMeta?.error, + } + } + + // No data source available + if (!stepsQueryLoading && !testcaseMeta.isLoading) { debugScenarioValue("Input column resolved empty value", { scenarioId, runId, columnId: column.id, path: column.path, stepKey: column.stepKey, - hasTargetStep: Boolean(targetStep), - hasTestcaseData: Boolean(valueFromTestcase), + hasTestcase: testcaseMeta.hasTestcase, }) } return { - value, - displayValue: value, - isLoading: - !scenarioId || - stepsQueryLoading || - Boolean(stepsQuery.isLoading) || - Boolean( - testcaseMeta?.isLoading && - valueFromTestcase === undefined && - stepValue === undefined && - localTraceValue === undefined && - remoteTraceValue === undefined, - ) || - Boolean( - traceMeta?.isLoading && - remoteTraceValue === undefined && - valueFromTestcase === undefined && - stepValue === undefined && - localTraceValue === undefined, - ), - isFetching: - Boolean(stepsQuery.isFetching) || - Boolean( - testcaseMeta?.isFetching && - valueFromTestcase === undefined && - stepValue === undefined && - localTraceValue === undefined && - remoteTraceValue === undefined, - ) || - Boolean( - traceMeta?.isFetching && - remoteTraceValue === undefined && - valueFromTestcase === undefined && - stepValue === undefined && - localTraceValue === undefined, - ), - error: - valueFromTestcase !== undefined - ? testcaseMeta?.error - : (traceMeta?.error ?? 
testcaseMeta?.error), + value: undefined, + displayValue: undefined, + isLoading: stepsQueryLoading || testcaseMeta.isLoading, + isFetching: Boolean(stepsQuery.isFetching) || testcaseMeta.isFetching, + error: testcaseMeta.error, } } @@ -766,27 +791,20 @@ const scenarioColumnValueBaseAtomFamily = atomFamily( }) } + // Stale-while-revalidate: only show loading if we have no value yet + const hasValue = + value !== undefined || + scenarioInvocationValue !== undefined || + traceValue !== undefined || + fallbackValue !== undefined return { value, displayValue: value, isLoading: !scenarioId || - stepsQueryLoading || - Boolean(stepsQuery.isLoading) || - Boolean( - traceMeta?.isLoading && - scenarioInvocationValue === undefined && - traceValue === undefined && - fallbackValue === undefined, - ), - isFetching: - Boolean(stepsQuery.isFetching) || - Boolean( - traceMeta?.isFetching && - scenarioInvocationValue === undefined && - traceValue === undefined && - fallbackValue === undefined, - ), + (!hasValue && stepsQueryLoading) || + (!hasValue && Boolean(traceMeta?.isLoading)), + isFetching: Boolean(stepsQuery.isFetching) || Boolean(traceMeta?.isFetching), error: traceMeta?.error, } } @@ -891,10 +909,12 @@ const scenarioColumnValueBaseAtomFamily = atomFamily( // String metrics don't store values, so we need to fall back to annotation data const isPlaceholder = isStringTypePlaceholder(metricValue) + // Stale-while-revalidate: only show loading if we have no metric value yet + const hasMetricValue = metricValue !== undefined && !isPlaceholder metricCandidate = { value: isPlaceholder ? undefined : metricValue, displayValue: isPlaceholder ? undefined : metricDisplayValue, - isLoading: metricMeta.isLoading, + isLoading: !hasMetricValue && metricMeta.isLoading, isFetching: metricMeta.isFetching, error: metricMeta.error, resolvedStepKey: resolvedMetricStepKey, @@ -1121,25 +1141,19 @@ const scenarioColumnValueBaseAtomFamily = atomFamily( }) } + // Stale-while-revalidate: only show loading if we have no value yet + const hasAnnotationValue = + value !== undefined || + valueFromAnnotation !== undefined || + fallbackValue !== undefined return { value, displayValue, isLoading: !scenarioId || - stepsQueryLoading || - Boolean(stepsQuery.isLoading) || - Boolean( - annotationQuery.isLoading && - valueFromAnnotation === undefined && - fallbackValue === undefined, - ), + (!hasAnnotationValue && (stepsQueryLoading || annotationQuery.isLoading)), isFetching: - Boolean(stepsQuery.isFetching) || - Boolean( - annotationQuery.isFetching && - valueFromAnnotation === undefined && - fallbackValue === undefined, - ), + Boolean(stepsQuery.isFetching) || Boolean(annotationQuery.isFetching), error: annotationQuery.error, } } diff --git a/web/oss/src/components/EvalRunDetails/atoms/scenarioSteps.ts b/web/oss/src/components/EvalRunDetails/atoms/scenarioSteps.ts index bae9c475fa..566ddb321d 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/scenarioSteps.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/scenarioSteps.ts @@ -135,6 +135,8 @@ export const scenarioStepsQueryFamily = atomFamily( refetchOnReconnect: false, staleTime: 30_000, gcTime: 5 * 60 * 1000, + // Enable structural sharing to prevent unnecessary re-renders when data hasn't changed + structuralSharing: true, queryFn: async () => { if (!batcher) { throw new Error("Scenario steps batcher is not initialised") diff --git a/web/oss/src/components/EvalRunDetails/atoms/scenarioTestcase.ts b/web/oss/src/components/EvalRunDetails/atoms/scenarioTestcase.ts new file mode 
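`structuralSharing` is a standard TanStack Query option (and its default); these hunks only make it explicit. On refetch it re-uses the previous object references for any subtree that is deep-equal to the new result, so reference-equality checks in selector equality functions and memoized components keep passing when the server returns unchanged data. A small illustration with hypothetical query options (the key, fetcher, and payload are invented for the sketch):

```typescript
import {QueryClient} from "@tanstack/react-query"

const queryClient = new QueryClient()

// Hypothetical options mirroring scenarioStepsQueryFamily above.
const scenarioStepsOptions = {
    queryKey: ["scenario-steps", "scenario-1"],
    staleTime: 30_000,
    gcTime: 5 * 60 * 1000,
    structuralSharing: true,
    queryFn: async () => ({steps: [{stepKey: "input", testcaseId: "tc-1"}]}),
}

const demo = async () => {
    const first = await queryClient.fetchQuery(scenarioStepsOptions)
    const second = await queryClient.fetchQuery(scenarioStepsOptions)
    // With structural sharing, a subtree that is deep-equal after a refetch
    // keeps its previous identity, so checks like this can stay true and
    // downstream selectors skip recomputation:
    return second.steps === first.steps
}

void demo()
```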
100644 index 0000000000..b50b6d2db9 --- /dev/null +++ b/web/oss/src/components/EvalRunDetails/atoms/scenarioTestcase.ts @@ -0,0 +1,193 @@ +/** + * Scenario-level testcase entity atoms + * + * This module provides atoms to fetch and access testcase entities for evaluation scenarios. + * It uses the existing testcase entity system from state/entities/testcase for consistency. + * + * The key insight is that each scenario has at most one testcase (from input steps), + * so we fetch it once per scenario and all input columns read from it. + */ + +import {atom} from "jotai" +import {atomFamily, selectAtom} from "jotai/utils" + +import {testcase} from "@/oss/state/entities/testcase" +import type {FlattenedTestcase} from "@/oss/state/entities/testcase/schema" +import {testcaseQueryAtomFamily} from "@/oss/state/entities/testcase/testcaseEntity" + +import {activePreviewRunIdAtom} from "./run" +import {scenarioStepsQueryFamily} from "./scenarioSteps" + +/** + * Extract testcaseId from scenario steps + * Looks for testcaseId in input steps first, then falls back to any step with testcaseId + */ +const extractTestcaseIdFromSteps = (steps: any[]): string | undefined => { + if (!steps?.length) return undefined + + // First, try to find testcaseId from input steps + const inputStepKeys = new Set(["input", "inputs", "testcase", "data"]) + for (const step of steps) { + const stepKey = step?.stepKey ?? step?.step_key ?? step?.key ?? "" + if (inputStepKeys.has(stepKey.toLowerCase())) { + const testcaseId = step?.testcaseId ?? step?.testcase_id + if (testcaseId) return testcaseId + } + } + + // Fallback: check any step for testcaseId + for (const step of steps) { + const testcaseId = step?.testcaseId ?? step?.testcase_id + if (testcaseId) return testcaseId + } + + return undefined +} + +/** + * Atom family that extracts the testcaseId for a scenario from its steps + */ +export const scenarioTestcaseIdAtomFamily = atomFamily( + ({scenarioId, runId}: {scenarioId: string; runId?: string | null}) => + atom((get): string | undefined => { + const effectiveRunId = runId ?? get(activePreviewRunIdAtom) ?? undefined + const stepsQuery = get(scenarioStepsQueryFamily({scenarioId, runId: effectiveRunId})) + const steps = stepsQuery.data?.steps ?? [] + return extractTestcaseIdFromSteps(steps) + }), +) + +/** + * Atom family that provides the testcase entity for a scenario + * Uses the global testcase entity system for consistency and caching + * + * Returns null if: + * - No testcaseId found in scenario steps + * - Testcase entity not yet loaded + * - Testcase doesn't exist + */ +export const scenarioTestcaseEntityAtomFamily = atomFamily( + ({scenarioId, runId}: {scenarioId: string; runId?: string | null}) => + atom((get): FlattenedTestcase | null => { + const testcaseId = get(scenarioTestcaseIdAtomFamily({scenarioId, runId})) + if (!testcaseId) return null + + // Use the global testcase entity atom for caching and consistency + return get(testcase.selectors.data(testcaseId)) + }), +) + +/** + * Atom family that provides loading/error state for scenario testcase + */ +export const scenarioTestcaseMetaAtomFamily = atomFamily( + ({scenarioId, runId}: {scenarioId: string; runId?: string | null}) => + atom((get) => { + const effectiveRunId = runId ?? get(activePreviewRunIdAtom) ?? 
undefined + + // Check if steps are still loading (stale-while-revalidate: only if no data yet) + const stepsQuery = get(scenarioStepsQueryFamily({scenarioId, runId: effectiveRunId})) + const hasStepsData = Boolean(stepsQuery.data) + if (!hasStepsData && (stepsQuery.isLoading || stepsQuery.isPending)) { + return { + isLoading: true, + isFetching: stepsQuery.isFetching ?? false, + error: undefined, + hasTestcase: false, + } + } + + const testcaseId = get(scenarioTestcaseIdAtomFamily({scenarioId, runId})) + if (!testcaseId) { + return { + isLoading: false, + isFetching: false, + error: undefined, + hasTestcase: false, + } + } + + // Check testcase query state (stale-while-revalidate: only loading if no data) + const testcaseQuery = get(testcaseQueryAtomFamily(testcaseId)) + const hasTestcaseData = Boolean(testcaseQuery.data) + return { + isLoading: !hasTestcaseData && (testcaseQuery.isLoading ?? false), + isFetching: testcaseQuery.isFetching ?? false, + error: testcaseQuery.error, + hasTestcase: true, + } + }), +) + +/** + * Atom family to get a specific value from the scenario's testcase entity + * Uses path-based access similar to testcaseCellAtomFamily + * + * @param scenarioId - The scenario ID + * @param runId - Optional run ID + * @param path - Dot-separated path to the value (e.g., "data.input", "question") + */ +export const scenarioTestcaseValueAtomFamily = atomFamily( + ({scenarioId, runId, path}: {scenarioId: string; runId?: string | null; path: string}) => + selectAtom( + scenarioTestcaseEntityAtomFamily({scenarioId, runId}), + (entity): unknown => { + if (!entity) return undefined + + // Split path and resolve value + const segments = path.split(".").filter(Boolean) + let current: unknown = entity + + for (const segment of segments) { + if (current === null || current === undefined) return undefined + if (typeof current !== "object") return undefined + + // Handle "data" prefix - testcase data is flattened, so skip "data" segment + if (segment === "data" && segments[0] === "data") { + // Data is already flattened into entity, continue to next segment + continue + } + + current = (current as Record)[segment] + } + + return current + }, + // Use deep equality for complex values + (a, b) => { + if (a === b) return true + if (a === undefined || b === undefined) return a === b + if (typeof a !== typeof b) return false + if (typeof a === "object" && a !== null && b !== null) { + try { + return JSON.stringify(a) === JSON.stringify(b) + } catch { + return false + } + } + return a === b + }, + ), +) + +/** + * Check if a scenario has embedded input data in steps (for online evaluations) + * Online evaluations may not have testcaseId but have inputs directly in steps + */ +export const scenarioHasEmbeddedInputsAtomFamily = atomFamily( + ({scenarioId, runId}: {scenarioId: string; runId?: string | null}) => + atom((get): boolean => { + const effectiveRunId = runId ?? get(activePreviewRunIdAtom) ?? undefined + const stepsQuery = get(scenarioStepsQueryFamily({scenarioId, runId: effectiveRunId})) + const steps = stepsQuery.data?.steps ?? 
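For orientation, a table cell could consume these families as in the sketch below; the component and its props are hypothetical, while the two atom families are the ones defined above:

```typescript
import {useMemo} from "react"
import {useAtomValue} from "jotai"

import {
    scenarioTestcaseMetaAtomFamily,
    scenarioTestcaseValueAtomFamily,
} from "./scenarioTestcase"

// Hypothetical cell that renders one flattened testcase field for a scenario row.
const ScenarioInputCell = ({scenarioId, path}: {scenarioId: string; path: string}) => {
    // Memoize the family lookups so every render reads the same atom instances.
    const metaAtom = useMemo(() => scenarioTestcaseMetaAtomFamily({scenarioId}), [scenarioId])
    const valueAtom = useMemo(
        () => scenarioTestcaseValueAtomFamily({scenarioId, path}),
        [scenarioId, path],
    )
    const meta = useAtomValue(metaAtom)
    const value = useAtomValue(valueAtom)

    // isLoading is true only before the first steps/testcase data arrives
    // (stale-while-revalidate), never during background refetches.
    if (meta.isLoading) return <span>Loading...</span>
    if (!meta.hasTestcase || value === undefined) return <span>-</span>

    return <span>{typeof value === "string" ? value : JSON.stringify(value)}</span>
}

export default ScenarioInputCell
```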
[] + + // Check if any step has embedded inputs + for (const step of steps) { + if (step?.inputs && Object.keys(step.inputs).length > 0) { + return true + } + } + + return false + }), +) diff --git a/web/oss/src/components/EvalRunDetails/atoms/table/run.ts b/web/oss/src/components/EvalRunDetails/atoms/table/run.ts index abddb442c3..29e4d7624b 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/table/run.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/table/run.ts @@ -4,10 +4,14 @@ import {atomWithQuery} from "jotai-tanstack-query" import axios from "@/oss/lib/api/assets/axiosConfig" import {buildRunIndex} from "@/oss/lib/evaluations/buildRunIndex" import {snakeToCamelCaseKeys} from "@/oss/lib/helpers/casing" +import { + getPreviewRunBatcher, + invalidatePreviewRunCache, +} from "@/oss/lib/hooks/usePreviewEvaluations/assets/previewRunBatcher" +import {TERMINAL_STATUSES} from "../compare" import {effectiveProjectIdAtom} from "../run" -import {getPreviewRunBatcher} from "@/agenta-oss-common/lib/hooks/usePreviewEvaluations/assets/previewRunBatcher" import type {EvaluationRun} from "@/agenta-oss-common/lib/hooks/usePreviewEvaluations/types" export interface EvaluationRunQueryResult { @@ -16,6 +20,11 @@ export interface EvaluationRunQueryResult { runIndex: ReturnType } +const isTerminalStatus = (status: string | null | undefined) => { + if (!status) return false + return TERMINAL_STATUSES.has(status.toLowerCase()) +} + const patchedRunRevisionSet = new Set() const buildRevisionPayload = (references: Record | undefined) => { @@ -309,6 +318,11 @@ export const evaluationRunQueryAtomFamily = atomFamily((runId: string | null) => gcTime: 5 * 60 * 1000, refetchOnWindowFocus: false, refetchOnReconnect: false, + refetchInterval: (query) => { + const status = + query.state.data?.rawRun?.status ?? query.state.data?.camelRun?.status + return isTerminalStatus(status) ? 
false : 5000 + }, queryFn: async () => { if (!runId) { throw new Error("evaluationRunQueryAtomFamily requires a run id") @@ -317,6 +331,7 @@ export const evaluationRunQueryAtomFamily = atomFamily((runId: string | null) => throw new Error("evaluationRunQueryAtomFamily requires a project id") } + invalidatePreviewRunCache(projectId, runId) const batcher = getPreviewRunBatcher() const rawRun = await batcher({projectId, runId}) if (!rawRun) { diff --git a/web/oss/src/components/EvalRunDetails/atoms/table/testcases.ts b/web/oss/src/components/EvalRunDetails/atoms/table/testcases.ts index 2b5c99960d..59eb685dad 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/table/testcases.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/table/testcases.ts @@ -108,6 +108,7 @@ export const evaluationTestcaseQueryAtomFamily = atomFamily( gcTime: 5 * 60 * 1000, refetchOnWindowFocus: false, refetchOnReconnect: false, + structuralSharing: true, queryFn: async () => { if (!batcher) { throw new Error("Testcase batcher is not initialised") diff --git a/web/oss/src/components/EvalRunDetails/atoms/table/types.ts b/web/oss/src/components/EvalRunDetails/atoms/table/types.ts index bfb772cd22..74d0dda0aa 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/table/types.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/table/types.ts @@ -112,6 +112,8 @@ export interface EvaluationScenarioRow { export interface WindowingState { next: string | null + oldest?: string | null + newest?: string | null stop?: string | null order?: string | null limit?: number | null diff --git a/web/oss/src/components/EvalRunDetails/atoms/traces.ts b/web/oss/src/components/EvalRunDetails/atoms/traces.ts index d08da41011..e2561fcd4b 100644 --- a/web/oss/src/components/EvalRunDetails/atoms/traces.ts +++ b/web/oss/src/components/EvalRunDetails/atoms/traces.ts @@ -1,31 +1,18 @@ -import {atom} from "jotai" import {atomFamily, selectAtom} from "jotai/utils" -import {atomWithQuery} from "jotai-tanstack-query" -import axios from "@/oss/lib/api/assets/axiosConfig" import type {TraceData, TraceNode, TraceTree} from "@/oss/lib/evaluations" import {uuidToTraceId} from "@/oss/lib/traces/helpers" import {transformTracesResponseToTree} from "@/oss/services/tracing/lib/helpers" import type {TraceSpanNode, TracesResponse} from "@/oss/services/tracing/types" -import {getProjectValues} from "@/oss/state/project" -import createBatchFetcher, {BatchFetcher} from "@/oss/state/utils/createBatchFetcher" +import {traceEntityAtomFamily, invalidateTraceEntityCache} from "@/oss/state/entities/trace/store" import {resolveInvocationTraceValue} from "../utils/traceValue" -import {activePreviewRunIdAtom, effectiveProjectIdAtom} from "./run" - -const traceBatcherCache = new Map>() - /** * Invalidate the trace batcher cache. - * Call this after running an invocation to force a fresh fetch of trace data. + * Now delegates to the shared trace entity cache invalidation. */ -export const invalidateTraceBatcherCache = () => { - traceBatcherCache.clear() -} - -const resolveEffectiveRunId = (get: any, runId?: string | null) => - runId ?? get(activePreviewRunIdAtom) ?? 
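The `refetchInterval` callback added to `evaluationRunQueryAtomFamily` is the standard TanStack Query idiom for polling until a job settles: return an interval while the run is live and `false` once its status is terminal. A standalone sketch, with a stubbed fetcher and an assumed terminal-status list standing in for `TERMINAL_STATUSES` from `../compare`:

```typescript
import {useQuery} from "@tanstack/react-query"

// Assumed stand-in for TERMINAL_STATUSES; the real set lives in ../compare.
const TERMINAL = new Set(["success", "failure", "cancelled"])

// Placeholder fetcher for the sketch; the real code batches run fetches
// through getPreviewRunBatcher().
const fetchRunStatus = async (_runId: string): Promise<{status?: string | null}> => ({
    status: "running",
})

// Poll a live run every 5s and stop automatically once it settles.
export const useRunStatus = (runId: string) =>
    useQuery({
        queryKey: ["run-status", runId],
        queryFn: () => fetchRunStatus(runId),
        refetchInterval: (query) => {
            const status = query.state.data?.status?.toLowerCase()
            return status && TERMINAL.has(status) ? false : 5000
        },
    })
```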
undefined +export const invalidateTraceBatcherCache = invalidateTraceEntityCache const _debugTraceValue = (() => { const enabled = process.env.NEXT_PUBLIC_EVAL_RUN_DEBUG === "true" @@ -196,113 +183,51 @@ const buildTraceDataFromEntry = ( return traceData } -export const evaluationTraceBatcherFamily = atomFamily(({runId}: {runId?: string | null} = {}) => - atom((get) => { - const effectiveRunId = resolveEffectiveRunId(get, runId) - const {projectId: globalProjectId} = getProjectValues() - const projectId = globalProjectId ?? get(effectiveProjectIdAtom) - if (!projectId) return null - - const cacheKey = `${projectId}:${effectiveRunId ?? "preview"}` - let batcher = traceBatcherCache.get(cacheKey) - if (!batcher) { - traceBatcherCache.clear() - batcher = createBatchFetcher({ - serializeKey: (key) => key, - batchFn: async (traceIds) => { - const unique = Array.from(new Set(traceIds.filter(Boolean))) - if (!unique.length) { - return {} - } - - const canonicalPairs = unique.map((id) => ({ - original: id, - canonical: uuidToTraceId(id) ?? id.replace(/-/g, ""), - })) - - const response = await axios.post( - `/preview/tracing/spans/query`, - { - focus: "trace", - format: "agenta", - filter: { - conditions: [ - { - field: "trace_id", - operator: "in", - value: canonicalPairs.map((pair) => pair.canonical), - }, - ], - }, - }, - { - params: { - project_id: projectId, - }, - }, - ) - - const traces = response.data?.traces ?? {} - const version = response.data?.version - const result: Record = Object.create(null) - - unique.forEach((originalId) => { - const pair = canonicalPairs.find( - (entry) => entry.original === originalId, - ) ?? { - original: originalId, - canonical: uuidToTraceId(originalId) ?? originalId.replace(/-/g, ""), - } - const entry = - traces?.[pair.canonical] ?? - traces?.[originalId] ?? - traces?.[originalId.replace(/-/g, "")] ?? - undefined - const traceData = buildTraceDataFromEntry( - pair.canonical, - originalId, - entry, - version, - ) - result[originalId] = traceData - }) +/** + * Transforms raw trace entity response to TraceData format used by evaluation components. + * This bridges the gap between traceEntityAtomFamily (raw API response) and + * the TraceData format expected by evaluation atoms. + */ +const transformToTraceData = ( + traceId: string, + response: {traces?: Record}>} | null, +): TraceData | null => { + if (!response?.traces) return null - return result - }, - }) - traceBatcherCache.set(cacheKey, batcher) - } + // Find the trace entry - try with and without dashes + const canonicalId = uuidToTraceId(traceId) ?? traceId.replace(/-/g, "") + const traceEntry = response.traces[canonicalId] ?? response.traces[traceId] - return batcher - }), -) + if (!traceEntry) return null -export const evaluationTraceBatcherAtom = atom((get) => get(evaluationTraceBatcherFamily())) + return buildTraceDataFromEntry(canonicalId, traceId, traceEntry as any, undefined) +} +/** + * Evaluation trace query atom family - uses the shared traceEntityAtomFamily + * and transforms the response to TraceData format for evaluation components. + */ export const evaluationTraceQueryAtomFamily = atomFamily( - ({traceId, runId}: {traceId: string; runId?: string | null}) => - atomWithQuery((get) => { - const batcher = get(evaluationTraceBatcherFamily({runId})) - const {projectId: globalProjectId} = getProjectValues() - const projectId = globalProjectId ?? 
get(effectiveProjectIdAtom) - const effectiveRunId = resolveEffectiveRunId(get, runId) - - return { - queryKey: ["preview", "evaluation-trace", effectiveRunId, projectId, traceId], - enabled: Boolean(projectId && batcher && traceId), - staleTime: 30_000, - gcTime: 5 * 60 * 1000, - refetchOnWindowFocus: false, - refetchOnReconnect: false, - queryFn: async () => { - if (!batcher) { - throw new Error("Trace batcher is not initialised") - } - const value = await batcher(traceId) - return value ?? null - }, - } - }), + ({traceId, runId: _runId}: {traceId: string; runId?: string | null}) => + selectAtom( + traceEntityAtomFamily(traceId), + (queryState) => { + const data = queryState.data + ? transformToTraceData(traceId, queryState.data as any) + : null + return { + data, + isLoading: !queryState.data && queryState.isLoading, + isFetching: queryState.isFetching, + error: queryState.error, + } + }, + (a, b) => + a.data === b.data && + a.isLoading === b.isLoading && + a.isFetching === b.isFetching && + a.error === b.error, + ), ) export const traceValueAtomFamily = atomFamily( @@ -353,11 +278,15 @@ export const traceQueryMetaAtomFamily = atomFamily( ({traceId, runId}: {traceId: string; runId?: string | null}) => selectAtom( evaluationTraceQueryAtomFamily({traceId, runId}), - (queryState) => ({ - isLoading: queryState.isLoading, - isFetching: queryState.isFetching, - error: queryState.error, - }), + (queryState) => { + // Stale-while-revalidate: only show loading when there's no cached data + const hasData = Boolean(queryState.data) + return { + isLoading: !hasData && queryState.isLoading, + isFetching: queryState.isFetching, + error: queryState.error, + } + }, (a, b) => a.isLoading === b.isLoading && a.isFetching === b.isFetching && a.error === b.error, ), diff --git a/web/oss/src/components/EvalRunDetails/components/CompareRunsMenu.tsx b/web/oss/src/components/EvalRunDetails/components/CompareRunsMenu.tsx index 21759f1631..781e18a2c2 100644 --- a/web/oss/src/components/EvalRunDetails/components/CompareRunsMenu.tsx +++ b/web/oss/src/components/EvalRunDetails/components/CompareRunsMenu.tsx @@ -1,9 +1,12 @@ import {memo, useCallback, useEffect, useMemo, useState} from "react" import {Button, Checkbox, Input, List, Popover, Space, Tag, Tooltip, Typography} from "antd" +import clsx from "clsx" import {useAtomValue, useSetAtom} from "jotai" +import Image from "next/image" import {message} from "@/oss/components/AppMessageContext" +import EmptyComponent from "@/oss/components/Placeholders/EmptyComponent" import ReferenceTag from "@/oss/components/References/ReferenceTag" import axios from "@/oss/lib/api/assets/axiosConfig" import dayjs from "@/oss/lib/helpers/dateTimeHelper/dayjs" @@ -104,9 +107,11 @@ const CompareRunsMenu = ({runId}: CompareRunsMenuProps) => { { - const ids = new Set() - candidates.forEach((candidate) => { - candidate.structure.testsetIds.forEach((id) => ids.add(id)) - }) - return Array.from(ids) - }, [candidates]) - const candidateTestsetNameMap = useTestsetNameMap(candidateTestsetIds) - const filteredCandidates = useMemo(() => { const query = searchTerm.trim().toLowerCase() return candidates.filter((candidate) => { @@ -211,6 +207,10 @@ const CompareRunsPopoverContent = memo(({runId, availability}: CompareRunsPopove }) }, [candidates, searchTerm, statusFilter]) + const hasLoadedRuns = Boolean((swrData as any)?.data) + const showLoading = Boolean(swrData.isLoading && !hasLoadedRuns) + const showEmptyState = !showLoading && filteredCandidates.length === 0 + const handleToggle = 
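The trace refactor above follows a reusable shape: keep one shared query atom per entity, then derive feature-specific views with `selectAtom`, running the transform inside the selector and guarding re-renders with an equality function. A generic sketch (the `entityQueryAtomFamily` source and inline transform are illustrative; the concrete instance in this diff is `traceEntityAtomFamily` plus `transformToTraceData`):

```typescript
import {atom} from "jotai"
import {atomFamily, selectAtom} from "jotai/utils"

interface QueryLike<T> {
    data: T | undefined
    isLoading: boolean
    isFetching: boolean
    error: unknown
}

// Illustrative shared source of truth: one query atom per entity id.
const entityQueryAtomFamily = atomFamily((id: string) =>
    atom<QueryLike<{name: string}>>({
        data: undefined,
        isLoading: true,
        isFetching: true,
        error: undefined,
    }),
)

// Feature-shaped view derived from the shared atom. The transform runs inside
// the selector; the equality function stops identical outputs from re-rendering.
export const entityViewAtomFamily = atomFamily((id: string) =>
    selectAtom(
        entityQueryAtomFamily(id),
        (q) => ({
            data: q.data ? {id, label: q.data.name.toUpperCase()} : null,
            // Stale-while-revalidate: loading only until the first data arrives.
            isLoading: !q.data && q.isLoading,
            isFetching: q.isFetching,
            error: q.error,
        }),
        (a, b) =>
            a.data?.label === b.data?.label &&
            a.isLoading === b.isLoading &&
            a.isFetching === b.isFetching &&
            a.error === b.error,
    ),
)
```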
useCallback( (targetId: string) => { setCompareIds((prev) => { @@ -228,167 +228,172 @@ const CompareRunsPopoverContent = memo(({runId, availability}: CompareRunsPopove [setCompareIds], ) - const handleRemove = useCallback( - (targetId: string) => { - setCompareIds((prev) => prev.filter((id) => id !== targetId)) - }, - [setCompareIds], - ) - const handleClearAll = useCallback(() => { setCompareIds([]) }, [setCompareIds]) - const selectedDetails = useMemo(() => { - const map = new Map() - candidates.forEach((candidate) => { - map.set(candidate.id, candidate) - }) - return compareIds.map( - (id) => - map.get(id) ?? { - id, - name: id, - status: undefined, - createdAt: undefined, - testsetNames: [], - structure: {testsetIds: [], hasQueryInput: false, inputStepCount: 0}, - }, - ) - }, [candidates, compareIds]) - return ( - -
- - {availability.testsetIds.length ? ( - - {availability.testsetIds.map((id) => { - const label = matchingTestsetNameMap[id] ?? id - const copyValue = id - const href = buildTestsetHref(id) - - return ( - - ) - })} - - ) : null} - -
- -
- - Selected {compareIds.length}/{MAX_COMPARISON_RUNS} - -
- {selectedDetails.map((run) => ( - { - event.preventDefault() - handleRemove(run.id) - }} - > - {run.name} - - ))} + +
+
+
+ Testset: + {availability.testsetIds.length ? ( +
+ {availability.testsetIds.map((id) => { + const label = matchingTestsetNameMap[id] ?? id + const copyValue = id + const href = buildTestsetHref(id) + + return ( + + ) + })} +
+ ) : ( + + )} +
+ + + Selected: {compareIds.length}/{MAX_COMPARISON_RUNS} + + {compareIds.length ? ( + + ) : null} +
- {compareIds.length ? ( - - ) : null} + + setSearchTerm(event.target.value)} + bordered={false} + /> + +
- setSearchTerm(event.target.value)} - /> - - - - { - const isChecked = compareIds.includes(item.id) - const createdLabel = item.createdAt - ? dayjs(item.createdAt).format("DD MMM YYYY") - : "" - const _resolvedTestsetNames = - item.testsetNames.length > 0 - ? item.testsetNames - : item.structure.testsetIds - .map((id) => candidateTestsetNameMap[id]) - .filter((name): name is string => Boolean(name)) - return ( - handleToggle(item.id)} - className="compare-run-row flex flex-col !items-start justify-start" - > -
- event.stopPropagation()} - onChange={(event) => { - event.stopPropagation() - handleToggle(item.id) - }} - > -
- {item.name} - - {item.description?.trim() - ? item.description - : "No description"} - -
-
- - - {item.status ? : null} - {createdLabel ? ( - - {createdLabel} - - ) : null} - + {showLoading ? ( +
+ Loading evaluations... +
+ ) : showEmptyState ? ( +
+ + } + description={ +
+
+ No evaluations to compare +
+
+ Run another evaluation using the same test set to enable + comparison. +
- - ) - }} - /> + } + /> +
+ ) : ( + { + const isChecked = compareIds.includes(item.id) + const createdLabel = item.createdAt + ? dayjs(item.createdAt).format("DD MMM YYYY") + : "" + + return ( + handleToggle(item.id)} + className={clsx( + "compare-run-row flex flex-col !items-start justify-start", + "!py-1 !px-2", + "border-b border-[#EAEFF5]", + "last:border-b-0", + isChecked && "compare-run-row--selected", + )} + style={{borderBottomStyle: "solid"}} + > +
+ event.stopPropagation()} + onChange={(event) => { + event.stopPropagation() + handleToggle(item.id) + }} + > +
+ {item.name} + + {item.description?.trim() + ? item.description + : "No description"} + +
+
+ + + {item.status ? : null} + {createdLabel ? ( + + {createdLabel} + + ) : null} + +
+
+ ) + }} + /> + )} ) }) @@ -457,8 +462,8 @@ const TestsetReferenceTag = ({ label={label} copyValue={copyValue} href={href} - tone="testset" className="max-w-[200px]" + showIcon={false} /> ) diff --git a/web/oss/src/components/EvalRunDetails/components/EvaluationRunTag.tsx b/web/oss/src/components/EvalRunDetails/components/EvaluationRunTag.tsx new file mode 100644 index 0000000000..4871c2cd10 --- /dev/null +++ b/web/oss/src/components/EvalRunDetails/components/EvaluationRunTag.tsx @@ -0,0 +1,64 @@ +import {ReactNode} from "react" + +import {PushpinFilled} from "@ant-design/icons" +import {Tag} from "antd" +import clsx from "clsx" + +import {getComparisonColor, getComparisonSolidColor} from "../atoms/compare" + +interface EvaluationRunTagProps { + label: string + compareIndex?: number + isBaseRun?: boolean + closable?: boolean + closeIcon?: ReactNode + onClose?: (event: React.MouseEvent) => void + className?: string +} + +const EvaluationRunTag = ({ + label, + compareIndex, + isBaseRun, + closable, + closeIcon, + onClose, + className, +}: EvaluationRunTagProps) => { + const resolvedCompareIndex = compareIndex ?? 0 + const resolvedIsBaseRun = isBaseRun ?? resolvedCompareIndex === 0 + const tagColor = getComparisonSolidColor(resolvedCompareIndex) + const tagBg = getComparisonColor(resolvedCompareIndex) + + return ( + + ) : undefined + } + closable={closable} + closeIcon={closeIcon} + onClose={onClose} + > + + {label} + + + ) +} + +export default EvaluationRunTag diff --git a/web/oss/src/components/EvalRunDetails/components/EvaluatorMetricsChart/index.tsx b/web/oss/src/components/EvalRunDetails/components/EvaluatorMetricsChart/index.tsx index a822f96bc7..bf99328933 100644 --- a/web/oss/src/components/EvalRunDetails/components/EvaluatorMetricsChart/index.tsx +++ b/web/oss/src/components/EvalRunDetails/components/EvaluatorMetricsChart/index.tsx @@ -41,6 +41,38 @@ interface EvaluatorLabelProps { fallbackLabel: string } +type MetricDeltaTone = "positive" | "negative" | "neutral" + +interface MetricStripEntry { + key: string + label: string + color: string + value: number | null + displayValue: string + isMain: boolean + deltaText: string + deltaTone: MetricDeltaTone +} + +const getMainEvaluatorSeries = (entries: MetricStripEntry[]) => + entries.find((entry) => entry.isMain) ?? 
entries[0] + +const computeDeltaPercent = (current: number | null, baseline: number | null) => { + if (typeof current !== "number" || typeof baseline !== "number") return null + if (!Number.isFinite(current) || !Number.isFinite(baseline) || baseline === 0) return null + return ((current - baseline) / baseline) * 100 +} + +const formatDelta = (delta: number | null): {text: string; tone: MetricDeltaTone} => { + if (delta === null || !Number.isFinite(delta)) { + return {text: "-", tone: "neutral"} + } + const rounded = Math.round(delta) + if (rounded > 0) return {text: `+${rounded}%`, tone: "positive"} + if (rounded < 0) return {text: `${rounded}%`, tone: "negative"} + return {text: "0%", tone: "neutral"} +} + const EvaluatorMetricsChartTitle = memo( ({runId, evaluatorRef, fallbackLabel}: EvaluatorLabelProps) => { const evaluatorAtom = useMemo( @@ -109,9 +141,10 @@ const EvaluatorMetricsChart = ({ return {} as BasicStats }, [resolvedStats]) - const {data: numericHistogramData} = useMemo(() => { - return buildHistogramChartData(stats as unknown as Record) - }, [stats]) + const {data: numericHistogramData} = useMemo( + () => buildHistogramChartData(stats as unknown as Record), + [stats], + ) const hasNumericHistogram = numericHistogramData.length > 0 const categoricalFrequencyData = useMemo( @@ -243,23 +276,209 @@ const EvaluatorMetricsChart = ({ (isBooleanMetric && booleanChartData.length > 0) || hasCategoricalFrequency - const summaryValue = useMemo((): string | null => { - if (isBooleanMetric) { - const percentage = booleanHistogram.percentages.true - return Number.isFinite(percentage) ? `${percentage.toFixed(2)}%` : "—" - } - if (hasCategoricalFrequency && categoricalFrequencyData.length) { - return null + const comparisonBooleanPercentMap = useMemo(() => { + const map = new Map() + comparisonBooleanHistograms.forEach((entry) => { + if (Number.isFinite(entry.histogram.percentages.true)) { + map.set(entry.runId, entry.histogram.percentages.true) + } + }) + return map + }, [comparisonBooleanHistograms]) + + const summaryItems = useMemo(() => { + const baseValue = (() => { + if (!resolvedStats) return {value: null, displayValue: "—"} + if (isBooleanMetric) { + const percentage = booleanHistogram.percentages.true + return Number.isFinite(percentage) + ? {value: percentage, displayValue: `${percentage.toFixed(2)}%`} + : {value: null, displayValue: "—"} + } + if (hasCategoricalFrequency) { + return {value: null, displayValue: "—"} + } + if (typeof resolvedStats.mean === "number" && Number.isFinite(resolvedStats.mean)) { + return {value: resolvedStats.mean, displayValue: format3Sig(resolvedStats.mean)} + } + return {value: null, displayValue: "—"} + })() + + const baseEntry: MetricStripEntry = { + key: baseSeriesKey, + label: resolvedRunName, + color: resolvedBaseColor, + value: baseValue.value, + displayValue: baseValue.displayValue, + isMain: true, + deltaText: "-", + deltaTone: "neutral", } - if (typeof stats.mean === "number") return format3Sig(stats.mean) - return "—" + + const comparisonEntries = comparisonSeries.map((entry) => { + const statsValue = entry.stats + if (!statsValue) { + return { + key: entry.runId, + label: entry.runName, + color: entry.color, + value: null, + displayValue: "—", + isMain: false, + deltaText: "-", + deltaTone: "neutral", + } + } + if (isBooleanMetric) { + const percentage = comparisonBooleanPercentMap.get(entry.runId) + return { + key: entry.runId, + label: entry.runName, + color: entry.color, + value: typeof percentage === "number" ? 
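Concretely, the delta strip compares each run's value against the main series as plain relative change, rounded to a whole percent. Worked through with illustrative numbers:

```typescript
// delta% = ((current - baseline) / baseline) * 100, then rounded:
//
// computeDeltaPercent(0.88, 0.8) -> 10    formatDelta -> {text: "+10%", tone: "positive"}
// computeDeltaPercent(0.72, 0.8) -> -10   formatDelta -> {text: "-10%", tone: "negative"}
// computeDeltaPercent(0.8,  0.8) -> 0     formatDelta -> {text: "0%",   tone: "neutral"}
// computeDeltaPercent(0.5,  0)   -> null  (baseline 0 guards divide-by-zero) -> "-"
// computeDeltaPercent(null, 0.8) -> null  (missing value)                    -> "-"
```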
percentage : null, + displayValue: + typeof percentage === "number" && Number.isFinite(percentage) + ? `${percentage.toFixed(2)}%` + : "—", + isMain: false, + deltaText: "-", + deltaTone: "neutral", + } + } + if (hasCategoricalFrequency) { + return { + key: entry.runId, + label: entry.runName, + color: entry.color, + value: null, + displayValue: "—", + isMain: false, + deltaText: "-", + deltaTone: "neutral", + } + } + if (typeof statsValue.mean === "number" && Number.isFinite(statsValue.mean)) { + return { + key: entry.runId, + label: entry.runName, + color: entry.color, + value: statsValue.mean, + displayValue: format3Sig(statsValue.mean), + isMain: false, + deltaText: "-", + deltaTone: "neutral", + } + } + return { + key: entry.runId, + label: entry.runName, + color: entry.color, + value: null, + displayValue: "—", + isMain: false, + deltaText: "-", + deltaTone: "neutral", + } + }) + + const entries = [baseEntry, ...comparisonEntries] + const mainSeries = getMainEvaluatorSeries(entries) + + return entries.map((entry) => { + if (entry.isMain) { + return entry + } + const delta = computeDeltaPercent(entry.value, mainSeries?.value ?? null) + const formatted = formatDelta(delta) + return { + ...entry, + deltaText: formatted.text, + deltaTone: formatted.tone, + } + }) }, [ + baseSeriesKey, booleanHistogram.percentages.true, - categoricalFrequencyData, - effectiveScenarioCount, + comparisonBooleanPercentMap, + comparisonSeries, hasCategoricalFrequency, isBooleanMetric, - stats, + resolvedBaseColor, + resolvedRunName, + resolvedStats, + ]) + + const numericSeries = useMemo( + () => [ + { + key: baseSeriesKey, + name: resolvedRunName, + color: resolvedBaseColor, + barProps: {radius: [8, 8, 0, 0], minPointSize: 2}, + }, + ...comparisonSeries.map((entry) => ({ + key: entry.runId, + name: entry.runName, + color: entry.color, + barProps: {radius: [8, 8, 0, 0], minPointSize: 2}, + })), + ], + [baseSeriesKey, comparisonSeries, resolvedBaseColor, resolvedRunName], + ) + + const numericHistogramRows = useMemo(() => { + if (!numericHistogramAvailable || !hasNumericHistogram) return [] + const rowMap = new Map< + string, + {label: string; order: number; [key: string]: number | string} + >() + + numericHistogramData.forEach((bin, idx) => { + const order = typeof bin.edge === "number" && Number.isFinite(bin.edge) ? bin.edge : idx + const key = + typeof bin.edge === "number" && Number.isFinite(bin.edge) + ? String(bin.edge) + : `${idx}-${bin.x}` + const existing = + rowMap.get(key) ?? + ({ + label: String(bin.x), + order, + } as {label: string; order: number; [key: string]: number | string}) + existing[baseSeriesKey] = Number(bin.y ?? 0) + rowMap.set(key, existing) + }) + + comparisonSeries.forEach((entry) => { + if (!entry.stats) return + const {data} = buildHistogramChartData(entry.stats as Record) + data.forEach((bin, idx) => { + const order = + typeof bin.edge === "number" && Number.isFinite(bin.edge) ? bin.edge : idx + const key = + typeof bin.edge === "number" && Number.isFinite(bin.edge) + ? String(bin.edge) + : `${idx}-${bin.x}` + const existing = + rowMap.get(key) ?? + ({ + label: String(bin.x), + order, + } as {label: string; order: number; [key: string]: number | string}) + existing[entry.runId] = Number(bin.y ?? 
0) + rowMap.set(key, existing) + }) + }) + + return Array.from(rowMap.values()) + .sort((a, b) => a.order - b.order) + .map(({order, ...rest}) => rest) + }, [ + baseSeriesKey, + comparisonSeries, + hasNumericHistogram, + numericHistogramAvailable, + numericHistogramData, ]) const chartContent = () => { @@ -277,13 +496,13 @@ const EvaluatorMetricsChart = ({ key: baseSeriesKey, name: resolvedRunName, color: resolvedBaseColor, - barProps: {radius: [8, 8, 0, 0]}, + barProps: {radius: [8, 8, 0, 0], minPointSize: 2}, }, ...comparisonBooleanHistograms.map((entry) => ({ key: entry.runId, name: entry.runName, color: entry.color, - barProps: {radius: [8, 8, 0, 0]}, + barProps: {radius: [8, 8, 0, 0], minPointSize: 2}, })), ] @@ -297,8 +516,8 @@ const EvaluatorMetricsChart = ({ yDomain={[0, 100]} series={series} barCategoryGap="20%" - showLegend={stableComparisons.length > 0} - reserveLegendSpace={stableComparisons.length > 0} + showLegend={false} + reserveLegendSpace={false} /> ) } @@ -363,13 +582,13 @@ const EvaluatorMetricsChart = ({ key: baseSeriesKey, name: resolvedRunName, color: resolvedBaseColor, - barProps: {radius: [8, 8, 0, 0]}, + barProps: {radius: [8, 8, 0, 0], minPointSize: 2}, }, ...comparisonMaps.map((entry) => ({ key: entry.runId, name: entry.runName, color: entry.color, - barProps: {radius: [8, 8, 0, 0]}, + barProps: {radius: [8, 8, 0, 0], minPointSize: 2}, })), ] @@ -383,8 +602,8 @@ const EvaluatorMetricsChart = ({ yDomain={[0, "auto"]} series={series} barCategoryGap="20%" - showLegend={stableComparisons.length > 0} - reserveLegendSpace={stableComparisons.length > 0} + showLegend={false} + reserveLegendSpace={false} /> ) } @@ -398,35 +617,16 @@ const EvaluatorMetricsChart = ({ } if (numericHistogramAvailable && hasNumericHistogram) { - const referenceLines = [] as {value: number; color?: string; label?: string}[] - if (typeof stats.mean === "number" && Number.isFinite(stats.mean)) { - referenceLines.push({ - value: stats.mean, - color: resolvedBaseColor, - label: `${resolvedRunName} mean ${format3Sig(stats.mean)}`, - }) - } - comparisonSeries.forEach((entry) => { - if (!entry.stats) return - const mean = typeof entry.stats.mean === "number" ? entry.stats.mean : NaN - if (Number.isFinite(mean)) { - referenceLines.push({ - value: mean, - color: entry.color, - label: `${entry.runName} mean ${format3Sig(mean)}`, - }) - } - }) - return ( format3Sig(value)} yDomain={[0, "auto"]} - referenceLines={referenceLines} + series={numericSeries} + barCategoryGap="20%" showLegend={false} reserveLegendSpace={stableComparisons.length > 0} /> @@ -443,10 +643,11 @@ const EvaluatorMetricsChart = ({ return ( + > +
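`numericHistogramRows` merges the per-run histograms into one row per bin so grouped bars share an x-axis: rows are keyed by the numeric bin edge (falling back to index for non-numeric edges), each run writes its count under its own series key, and rows are sorted by edge. A reduced sketch of that merge with invented bin data:

```typescript
interface HistogramBin {
    x: string // bin label
    y: number // count in the bin
    edge?: number // numeric lower edge, used for keying and ordering
}

type Row = {label: string; order: number} & Record<string, number | string>

// Merge several runs' bins into shared rows keyed by bin edge.
const mergeHistograms = (series: {key: string; bins: HistogramBin[]}[]): Row[] => {
    const rows = new Map<string, Row>()
    series.forEach(({key, bins}) => {
        bins.forEach((bin, idx) => {
            const hasEdge = typeof bin.edge === "number" && Number.isFinite(bin.edge)
            const order = hasEdge ? (bin.edge as number) : idx
            const mapKey = hasEdge ? String(bin.edge) : `${idx}-${bin.x}`
            const row = rows.get(mapKey) ?? {label: bin.x, order}
            row[key] = Number(bin.y ?? 0)
            rows.set(mapKey, row)
        })
    })
    return Array.from(rows.values()).sort((a, b) => a.order - b.order)
}

// mergeHistograms([
//     {key: "base",  bins: [{x: "0-0.5", y: 3, edge: 0}, {x: "0.5-1", y: 7, edge: 0.5}]},
//     {key: "run-b", bins: [{x: "0-0.5", y: 5, edge: 0}]},
// ])
// -> [
//      {label: "0-0.5", order: 0,   base: 3, "run-b": 5},
//      {label: "0.5-1", order: 0.5, base: 7},
//    ]
```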
+
- } - > -
- {stableComparisons.length === 0 && ( -
- {summaryValue !== null ? ( - +
+ {summaryItems.map((entry) => ( +
- {summaryValue} - - ) : null} + + {entry.displayValue} + + + {entry.deltaText} + +
+ ))} +
+
+
+
+
+ {isLoading ? ( + + ) : hasError && !resolvedStats ? ( +
+ Unable to load metric data. +
+ ) : ( + chartContent() + )}
- )} -
0 ? "h-[370px]" : "h-[300px]"}> - {isLoading ? ( - - ) : hasError && !resolvedStats ? ( -
- Unable to load metric data. -
- ) : ( - chartContent() - )}
diff --git a/web/oss/src/components/EvalRunDetails/components/EvaluatorMetricsSpiderChart/EvaluatorMetricsSpiderChart.tsx b/web/oss/src/components/EvalRunDetails/components/EvaluatorMetricsSpiderChart/EvaluatorMetricsSpiderChart.tsx index 46e63411ae..9f890284d5 100644 --- a/web/oss/src/components/EvalRunDetails/components/EvaluatorMetricsSpiderChart/EvaluatorMetricsSpiderChart.tsx +++ b/web/oss/src/components/EvalRunDetails/components/EvaluatorMetricsSpiderChart/EvaluatorMetricsSpiderChart.tsx @@ -76,8 +76,8 @@ const EvaluatorMetricsSpiderChart = ({ ) } - const LABEL_OFFSET = 12 - const NUDGE = 5 + const LABEL_OFFSET = 10 + const NUDGE = 0 const RAD = Math.PI / 180 return ( @@ -106,9 +106,28 @@ const EvaluatorMetricsSpiderChart = ({ const nudgeX = cos * NUDGE const nudgeY = sin * NUDGE + const truncateText = (text: string, max = 18) => { + if (text.length <= max) return text + return `${text.slice(0, Math.max(1, max - 1))}…` + } + const clampLines = (s: string, max = 18) => { - const parts = s.includes(" - ") ? s.split(" - ") : [s] - if (parts.length >= 2) return parts.slice(0, 2) + const separator = s.includes(": ") + ? ": " + : s.includes(" - ") + ? " - " + : null + + if (separator) { + const parts = s.split(separator) + const evaluator = truncateText(parts[0]?.trim() ?? "", 16) + const metric = truncateText( + parts.slice(1).join(separator).trim(), + max, + ) + return metric ? [evaluator, metric] : [evaluator] + } + const words = s.split(/\s+/) let line1 = "" let line2 = "" @@ -126,10 +145,12 @@ const EvaluatorMetricsSpiderChart = ({ } const lines = clampLines(label, 18) + const lineHeight = 12 + const blockOffset = -((lines.length - 1) * lineHeight) / 2 return ( {lines.map((ln, i) => ( - + {ln} ))} diff --git a/web/oss/src/components/EvalRunDetails/components/FocusDrawer.tsx b/web/oss/src/components/EvalRunDetails/components/FocusDrawer.tsx index 224f37b6f0..e2c65a5158 100644 --- a/web/oss/src/components/EvalRunDetails/components/FocusDrawer.tsx +++ b/web/oss/src/components/EvalRunDetails/components/FocusDrawer.tsx @@ -1,7 +1,10 @@ -import {memo, useCallback, useMemo} from "react" +import type {KeyboardEvent, ReactNode} from "react" +import {memo, useCallback, useMemo, useRef, useState} from "react" import {isValidElement} from "react" -import {Popover, Skeleton, Tag, Typography} from "antd" +import {DownOutlined} from "@ant-design/icons" +import {Button, Popover, Skeleton, Typography} from "antd" +import clsx from "clsx" import {useAtomValue, useSetAtom} from "jotai" import {AlertCircle} from "lucide-react" import dynamic from "next/dynamic" @@ -9,9 +12,10 @@ import dynamic from "next/dynamic" import {previewRunMetricStatsSelectorFamily} from "@/oss/components/Evaluations/atoms/runMetrics" import MetricDetailsPreviewPopover from "@/oss/components/Evaluations/components/MetricDetailsPreviewPopover" import GenericDrawer from "@/oss/components/GenericDrawer" +import SharedGenerationResultUtils from "@/oss/components/SharedGenerationResultUtils" -import ReadOnlyBox from "../../pages/evaluations/onlineEvaluation/components/ReadOnlyBox" -import {getComparisonSolidColor} from "../atoms/compare" +import {compareRunIdsAtom, MAX_COMPARISON_RUNS} from "../atoms/compare" +import {invocationTraceSummaryAtomFamily} from "../atoms/invocationTraceSummary" import { applicationReferenceQueryAtomFamily, testsetReferenceQueryAtomFamily, @@ -43,16 +47,13 @@ import {clearFocusDrawerQueryParams} from "../state/urlFocusDrawer" import {renderScenarioChatMessages} from "../utils/chatMessages" import 
{formatMetricDisplay, METRIC_EMPTY_PLACEHOLDER} from "../utils/metricFormatter" +import EvaluationRunTag from "./EvaluationRunTag" import FocusDrawerHeader from "./FocusDrawerHeader" import FocusDrawerSidePanel from "./FocusDrawerSidePanel" +import {SectionCard} from "./views/ConfigurationView/components/SectionPrimitives" const JsonEditor = dynamic(() => import("@/oss/components/Editor/Editor"), {ssr: false}) -const SECTION_CARD_CLASS = "rounded-xl border border-[#EAECF0] bg-white" - -// Color palette for category tags (same as MetricCell) -const TAG_COLORS = ["green", "blue", "purple", "orange", "cyan", "magenta", "gold", "lime"] - const toSectionAnchorId = (value: string) => `focus-section-${value .toLowerCase() @@ -81,7 +82,7 @@ const buildStaticMetricColumn = ( } as EvaluationTableColumn & {__source: "runMetric"} } -const {Text, Title} = Typography +const {Text} = Typography type FocusDrawerColumn = EvaluationTableColumn & {__source?: "runMetric"} @@ -125,6 +126,32 @@ const resolveRunMetricScalar = (stats: any): unknown => { return undefined } +const FocusValueCard = ({ + label, + children, + className, +}: { + label: ReactNode + children: ReactNode + className?: string +}) => ( +
+ {label} +
{children}
+
+) + +const MetricValuePill = ({value, muted}: {value: ReactNode; muted?: boolean}) => ( + + {value} + +) + interface FocusDrawerContentProps { runId: string scenarioId: string @@ -170,12 +197,11 @@ const useFocusDrawerSections = (runId: string | null) => { return groups .map((group) => { - if (group.kind === "metric" && group.id === "metrics:human") { + if (group.kind === "metric") { return null } - const sectionLabel = - group.kind === "metric" && group.id === "metrics:auto" ? "Metrics" : group.label + const sectionLabel = group.label const dynamicColumns: SectionColumnEntry[] = group.columnIds .map((columnId) => columnMap.get(columnId)) @@ -285,14 +311,14 @@ const FocusGroupLabel = ({ ) if (group?.kind === "input" && testsetId && testsetQuery.data?.name) { - return <>{`Testset ${testsetQuery.data.name}`} + return "Input" } if (group?.kind === "invocation") { const applicationLabel = appQuery.data?.name ?? appQuery.data?.slug ?? appQuery.data?.id ?? applicationId ?? null - if (applicationLabel) return <>{`Application ${applicationLabel}`} + if (applicationLabel) return "Outputs" } return <>{label} @@ -358,32 +384,23 @@ const RunMetricValue = memo( return (
{column.displayLabel ?? column.label ?? column.id} - - {isLoading ? ( - - ) : ( - - - {formattedValue} - - - )} - + {isLoading ? ( + + ) : ( + + + + )}
) }, @@ -534,52 +551,57 @@ const ScenarioColumnValue = memo( })() // Render array metrics as tags in a vertical stack + const isLongTextMetric = + !arrayTags.length && + typeof formattedValue === "string" && + (formattedValue.length > 80 || formattedValue.includes("\n")) + const renderMetricContent = () => { if (arrayTags.length > 0) { return ( -
+
{arrayTags.map((tag, index) => ( - - {tag} - + ))}
) } - return ( - - {formattedValue} - - ) + if (isLongTextMetric) { + return ( + + {formattedValue} + + ) + } + return + } + + const metricContent = showSkeleton ? ( + + ) : ( + + {renderMetricContent()} + + ) + + if (isLongTextMetric) { + return {metricContent} } return ( -
- {displayLabel} - - {showSkeleton ? ( - - ) : ( - - {renderMetricContent()} - - )} - +
+ {displayLabel} + {metricContent}
) } @@ -707,16 +729,169 @@ const ScenarioColumnValue = memo( } } + return {renderValue()} + }, +) + +ScenarioColumnValue.displayName = "ScenarioColumnValue" + +const EvalOutputMetaRow = memo( + ({ + runId, + scenarioId, + compareIndex, + }: { + runId: string + scenarioId: string + compareIndex?: number + }) => { + const runDisplayNameAtom = useMemo(() => runDisplayNameAtomFamily(runId), [runId]) + const runDisplayName = useAtomValue(runDisplayNameAtom) + const traceSummaryAtom = useMemo( + () => invocationTraceSummaryAtomFamily({scenarioId, runId}), + [runId, scenarioId], + ) + const traceSummary = useAtomValue(traceSummaryAtom) + const resolvedCompareIndex = compareIndex ?? 0 + return ( -
- {displayLabel} - {renderValue()} +
+ +
) }, ) -ScenarioColumnValue.displayName = "ScenarioColumnValue" +EvalOutputMetaRow.displayName = "EvalOutputMetaRow" + +const FocusSectionHeader = ({ + title, + collapsed, + onToggle, +}: { + title: ReactNode + collapsed: boolean + onToggle: () => void +}) => { + const handleKeyDown = (event: KeyboardEvent) => { + if (event.key === "Enter" || event.key === " ") { + event.preventDefault() + onToggle() + } + } + + return ( +
+ {title} +
+ ) +} + +const FocusSectionContent = memo( + ({ + section, + runId, + scenarioId, + }: { + section: FocusDrawerSection + runId: string + scenarioId: string + }) => { + const isInputSection = section.group?.kind === "input" + + return ( +
+ {section.group?.kind === "invocation" ? ( + + ) : null} + + {section.columns.map(({column, descriptor}) => ( + + ))} +
+ ) + }, +) + +FocusSectionContent.displayName = "FocusSectionContent" + +const FocusDrawerSectionCard = memo( + ({ + section, + runId, + scenarioId, + }: { + section: FocusDrawerSection + runId: string + scenarioId: string + }) => { + const [collapsed, setCollapsed] = useState(false) + const sectionLabelNode = useMemo( + () => , + [runId, section.group, section.label], + ) + + return ( +
+ setCollapsed((value) => !value)} + /> + {!collapsed ? ( + + + + ) : null} +
+ ) + }, +) + +FocusDrawerSectionCard.displayName = "FocusDrawerSectionCard" const InvocationMetaChips = memo( ({group, runId}: {group: EvaluationTableColumnGroup | null; runId: string | null}) => { @@ -765,7 +940,7 @@ const InvocationMetaChips = memo( : null return ( -
+
{appLabel ? {appLabel} : null} {variantLabel ? (
@@ -792,65 +967,105 @@ const CompareRunColumnContent = memo( runId, scenarioId, section, - compareIndex, }: { runId: string scenarioId: string section: FocusDrawerSection - compareIndex: number }) => { - const runDisplayNameAtom = useMemo(() => runDisplayNameAtomFamily(runId), [runId]) - const runDisplayName = useAtomValue(runDisplayNameAtom) - return ( -
- {/* Run header with color indicator */} -
-
- - {runDisplayName || - (compareIndex === 0 ? "Base Run" : `Comparison ${compareIndex}`)} - -
+ + + + ) + }, +) - {/* Invocation meta chips if applicable */} - {section.group?.kind === "invocation" ? ( - - ) : null} +CompareRunColumnContent.displayName = "CompareRunColumnContent" - {/* Column values */} -
- {section.columns.map(({column, descriptor}) => ( - - ))} +const CompareMetaRow = memo( + ({ + compareScenarios, + columnMinWidth, + registerScrollContainer, + onScrollSync, + }: { + compareScenarios: { + runId: string | null + scenarioId: string | null + compareIndex: number + }[] + columnMinWidth: number + registerScrollContainer: (node: HTMLDivElement | null) => void + onScrollSync: (node: HTMLDivElement) => void + }) => { + const scrollRef = useRef(null) + const columnsCount = compareScenarios.length + const rowGridStyle = useMemo( + () => ({ + gridTemplateColumns: `repeat(${columnsCount}, 1fr)`, + }), + [columnsCount], + ) + const handleScroll = useCallback(() => { + if (scrollRef.current) { + onScrollSync(scrollRef.current) + } + }, [onScrollSync]) + + return ( + +
{ + scrollRef.current = node + registerScrollContainer(node) + }} + className="overflow-x-auto [scrollbar-width:none] [&::-webkit-scrollbar]:hidden" + onScroll={handleScroll} + > +
+ {compareScenarios.map(({runId, scenarioId, compareIndex}) => { + if (!runId || !scenarioId) { + return ( +
+ +
+ ) + } + + return ( + + ) + })} +
-
+ ) }, ) -CompareRunColumnContent.displayName = "CompareRunColumnContent" +CompareMetaRow.displayName = "CompareMetaRow" /** - * A single section card containing all runs side-by-side + * A single compare section rendered as a collapsible row, aligned to shared columns. */ -const CompareSectionCard = memo( +const CompareSectionRow = memo( ({ sectionId, sectionLabel, sectionGroup, compareScenarios, sectionMapsPerRun, + columnMinWidth, + registerScrollContainer, + onScrollSync, }: { sectionId: string sectionLabel: string @@ -861,60 +1076,87 @@ const CompareSectionCard = memo( compareIndex: number }[] sectionMapsPerRun: Map[] + columnMinWidth: number + registerScrollContainer: (node: HTMLDivElement | null) => void + onScrollSync: (node: HTMLDivElement) => void }) => { - // Get the first available section for the label + const [collapsed, setCollapsed] = useState(false) + const scrollRef = useRef(null) const firstSection = sectionMapsPerRun.find((map) => map.get(sectionId))?.get(sectionId) - + const sectionLabelNode = ( + <> + {sectionGroup && firstSection ? ( + + ) : ( + sectionLabel + )} + + ) + const columnsCount = compareScenarios.length + const rowGridStyle = useMemo( + () => ({ + gridTemplateColumns: `repeat(${columnsCount}, 1fr)`, + }), + [columnsCount], + ) + const handleScroll = useCallback(() => { + if (scrollRef.current) { + onScrollSync(scrollRef.current) + } + }, [onScrollSync]) return ( -
- {/* Section header */} -
- - {sectionGroup && firstSection ? ( - <FocusGroupLabel - group={sectionGroup} - label={sectionLabel} - runId={compareScenarios[0]?.runId ?? ""} - /> - ) : ( - sectionLabel - )} - -
- - {/* Run columns side by side */} -
- {compareScenarios.map(({runId, scenarioId, compareIndex}) => { - const section = sectionMapsPerRun[compareIndex]?.get(sectionId) - - if (!runId || !scenarioId || !section) { - return ( -
- -
- ) - } +
+ setCollapsed((value) => !value)} + /> + {!collapsed ? ( +
{ + scrollRef.current = node + registerScrollContainer(node) + }} + className="overflow-x-auto [scrollbar-width:none] [&::-webkit-scrollbar]:hidden" + onScroll={handleScroll} + > +
+ {compareScenarios.map(({runId, scenarioId, compareIndex}) => { + const section = sectionMapsPerRun[compareIndex]?.get(sectionId) + + if (!runId || !scenarioId || !section) { + return ( +
+ +
+ ) + } - return ( - - ) - })} -
-
+ return ( + + ) + })} +
+
+ ) : null} +
) }, ) -CompareSectionCard.displayName = "CompareSectionCard" +CompareSectionRow.displayName = "CompareSectionRow" /** * Inner component that handles the section data fetching for compare mode @@ -929,29 +1171,28 @@ const FocusDrawerCompareContentInner = ({ compareIndex: number }[] }) => { - // Get sections for base run (index 0) - const baseRunId = compareScenarios[0]?.runId ?? null - const {sections: baseSections} = useFocusDrawerSections(baseRunId) - - // Get sections for comparison run 1 (index 1) - const compare1RunId = compareScenarios[1]?.runId ?? null - const {sections: compare1Sections} = useFocusDrawerSections(compare1RunId) - - // Get sections for comparison run 2 (index 2) - const compare2RunId = compareScenarios[2]?.runId ?? null - const {sections: compare2Sections} = useFocusDrawerSections(compare2RunId) + const expectedColumns = Math.min(compareScenarios.length, MAX_COMPARISON_RUNS + 1) + const runId0 = compareScenarios[0]?.runId ?? null + const runId1 = compareScenarios[1]?.runId ?? null + const runId2 = compareScenarios[2]?.runId ?? null + const runId3 = compareScenarios[3]?.runId ?? null + const runId4 = compareScenarios[4]?.runId ?? null + + const {sections: sections0} = useFocusDrawerSections(runId0) + const {sections: sections1} = useFocusDrawerSections(runId1) + const {sections: sections2} = useFocusDrawerSections(runId2) + const {sections: sections3} = useFocusDrawerSections(runId3) + const {sections: sections4} = useFocusDrawerSections(runId4) // Collect all sections per run const sectionsPerRun = useMemo(() => { - const result: FocusDrawerSection[][] = [baseSections] - if (compareScenarios.length > 1) result.push(compare1Sections) - if (compareScenarios.length > 2) result.push(compare2Sections) - return result - }, [baseSections, compare1Sections, compare2Sections, compareScenarios.length]) + const all = [sections0, sections1, sections2, sections3, sections4] + return all.slice(0, expectedColumns) + }, [expectedColumns, sections0, sections1, sections2, sections3, sections4]) // Normalize section key for matching across runs // Use group.kind for invocation/input sections (which have run-specific IDs) - // Use section.id for metric sections (which have stable IDs like "metrics:auto") + // Use section.id for other stable sections const getNormalizedSectionKey = (section: FocusDrawerSection): string => { const kind = section.group?.kind if (kind === "invocation" || kind === "input") { @@ -992,18 +1233,96 @@ const FocusDrawerCompareContentInner = ({ }) }, [sectionsPerRun]) + const inputSectionEntry = useMemo(() => { + for (let index = 0; index < sectionMapsPerRun.length; index += 1) { + const section = sectionMapsPerRun[index]?.get("input") + const runId = compareScenarios[index]?.runId ?? null + const scenarioId = compareScenarios[index]?.scenarioId ?? null + if (section && runId && scenarioId) { + return {section, runId, scenarioId} + } + } + return null + }, [compareScenarios, sectionMapsPerRun]) + + const compareSections = useMemo( + () => + allSections.filter( + (section) => + section.normalizedKey !== "input" && section.normalizedKey !== "invocation", + ), + [allSections], + ) + const invocationSectionEntry = useMemo( + () => allSections.find((section) => section.normalizedKey === "invocation") ?? 
null,
+        [allSections],
+    )
+
+    const compareColumnMinWidth = 480
+    const scrollContainersRef = useRef<HTMLDivElement[]>([])
+    const isSyncingRef = useRef(false)
+    const registerScrollContainer = useCallback((node: HTMLDivElement | null) => {
+        if (!node) return
+        const list = scrollContainersRef.current
+        if (list.includes(node)) return
+        list.push(node)
+    }, [])
+    const onScrollSync = useCallback((source: HTMLDivElement) => {
+        if (isSyncingRef.current) return
+        isSyncingRef.current = true
+        const left = source.scrollLeft
+        scrollContainersRef.current.forEach((node) => {
+            if (node !== source && node.scrollLeft !== left) {
+                node.scrollLeft = left
+            }
+        })
+        isSyncingRef.current = false
+    }, [])
+
     return (
-
- {allSections.map(({normalizedKey, label, group}) => ( - + {inputSectionEntry ? ( + - ))} + ) : null} +
+ {invocationSectionEntry ? ( + + ) : null} + {invocationSectionEntry ? ( + + ) : null} + {compareSections.map(({normalizedKey, label, group}) => ( + + ))} +
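The `registerScrollContainer`/`onScrollSync` pair above keeps every compare row's horizontal scroll position in lockstep: each row registers its scrollable node, and whichever row the user scrolls pushes its `scrollLeft` to the rest. Pulled out of the component, the wiring looks roughly like this; `useSyncedRows` and `Row` are illustrative names, not exports from this codebase:

```typescript
import {useCallback, useRef} from "react"

// Illustrative extraction of the scroll-sync wiring used by the compare rows.
const useSyncedRows = () => {
    const nodesRef = useRef<HTMLDivElement[]>([])
    const isSyncingRef = useRef(false)

    // Each scrollable row registers its node once on mount
    const register = useCallback((node: HTMLDivElement | null) => {
        if (node && !nodesRef.current.includes(node)) nodesRef.current.push(node)
    }, [])

    const syncFrom = useCallback((source: HTMLDivElement) => {
        if (isSyncingRef.current) return
        isSyncingRef.current = true
        // Mirror the source's scrollLeft; the equality check stops echoed
        // scroll events from ping-ponging between rows.
        nodesRef.current.forEach((node) => {
            if (node !== source && node.scrollLeft !== source.scrollLeft) {
                node.scrollLeft = source.scrollLeft
            }
        })
        isSyncingRef.current = false
    }, [])

    return {register, syncFrom}
}

// A row wires both ends: it registers itself and reports its own scrolls.
const Row = ({register, syncFrom}: ReturnType<typeof useSyncedRows>) => (
    <div
        ref={register}
        className="overflow-x-auto"
        onScroll={(event) => syncFrom(event.currentTarget)}
    />
)
```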
) } @@ -1023,7 +1342,7 @@ const FocusDrawerCompareContent = () => { } return ( -
+
) @@ -1041,6 +1360,12 @@ export const FocusDrawerContent = ({ const runIndex = useAtomValue( useMemo(() => evaluationRunIndexAtomFamily(runId ?? null), [runId]), ) + const compareRunIds = useAtomValue(compareRunIdsAtom) + const compareIndex = useMemo(() => { + if (!runId) return 0 + const idx = compareRunIds.findIndex((id) => id === runId) + return idx === -1 ? 0 : idx + 1 + }, [compareRunIds, runId]) const groups = columnResult.groups ?? [] const columnMap = useMemo(() => { @@ -1057,12 +1382,11 @@ export const FocusDrawerContent = ({ return groups .map((group) => { - if (group.kind === "metric" && group.id === "metrics:human") { + if (group.kind === "metric") { return null } - const sectionLabel = - group.kind === "metric" && group.id === "metrics:auto" ? "Metrics" : group.label + const sectionLabel = group.label const dynamicColumns: SectionColumnEntry[] = group.columnIds .map((columnId) => columnMap.get(columnId)) @@ -1106,41 +1430,40 @@ export const FocusDrawerContent = ({ return (
- {sections.map((section) => ( -
-
- - <FocusGroupLabel - group={section.group} - label={section.label} - runId={runId} - /> - -
- {section.group?.kind === "invocation" ? ( - - ) : null} -
- {section.columns.map(({column, descriptor}) => ( - { + if (section.group?.kind === "invocation") { + return ( +
+ + + + - ))} -
-
- ))} +
+ ) + } + return ( + + ) + })}
) } @@ -1182,7 +1505,7 @@ const FocusDrawer = () => { afterOpenChange={handleAfterOpenChange} closeOnLayoutClick={false} expandable - className="[&_.ant-drawer-body]:p-0 [&_.ant-drawer-body]:bg-[#F8FAFC]" + className="[&_.ant-drawer-body]:p-0 [&_.ant-drawer-header]:p-4" sideContentDefaultSize={240} headerExtra={ shouldRenderContent ? ( diff --git a/web/oss/src/components/EvalRunDetails/components/FocusDrawerHeader.tsx b/web/oss/src/components/EvalRunDetails/components/FocusDrawerHeader.tsx index 401bb10827..7d830b330d 100644 --- a/web/oss/src/components/EvalRunDetails/components/FocusDrawerHeader.tsx +++ b/web/oss/src/components/EvalRunDetails/components/FocusDrawerHeader.tsx @@ -1,9 +1,10 @@ import {memo, useCallback, useEffect, useMemo} from "react" -import {LeftOutlined, RightOutlined} from "@ant-design/icons" +import {CaretDownIcon, CaretUpIcon} from "@phosphor-icons/react" import {Button, Select, SelectProps, Tag, Typography} from "antd" import {useAtomValue} from "jotai" +import TooltipWithCopyAction from "@/oss/components/EnhancedUIs/Tooltip" import {useInfiniteTablePagination} from "@/oss/components/InfiniteVirtualTable" import {evaluationPreviewTableStore} from "../evaluationPreviewTableStore" @@ -127,17 +128,17 @@ const FocusDrawerHeader = ({runId, scenarioId, onScenarioChange}: FocusDrawerHea ) return ( -
+
+ {selectedOption?.description ? ( - - + + {selectedOption.description} - - + + ) : null}
) diff --git a/web/oss/src/components/EvalRunDetails/components/FocusDrawerSidePanel.tsx b/web/oss/src/components/EvalRunDetails/components/FocusDrawerSidePanel.tsx index 73b76e5b8a..fb78d62c5a 100644 --- a/web/oss/src/components/EvalRunDetails/components/FocusDrawerSidePanel.tsx +++ b/web/oss/src/components/EvalRunDetails/components/FocusDrawerSidePanel.tsx @@ -1,10 +1,11 @@ -import {memo, useCallback, useMemo} from "react" -import type {Key} from "react" +import {memo, useCallback, useMemo, useState} from "react" +import type {ReactNode} from "react" import {TreeStructure, Download, Sparkle, Speedometer} from "@phosphor-icons/react" -import {Skeleton, Tree, type TreeDataNode} from "antd" +import {Skeleton} from "antd" import {useAtomValue} from "jotai" +import CustomTreeComponent from "@/oss/components/CustomUIs/CustomTreeComponent" import {useInfiniteTablePagination} from "@/oss/components/InfiniteVirtualTable" import {evaluationPreviewTableStore} from "../evaluationPreviewTableStore" @@ -16,7 +17,14 @@ const toSectionAnchorId = (value: string) => .replace(/[^a-z0-9]+/g, "-") .replace(/^-+|-+$/g, "")}` -type AnchorTreeNode = TreeDataNode & {anchorId?: string} +interface FocusTreeNode { + id: string + title: string + icon?: ReactNode + anchorId?: string + children?: FocusTreeNode[] + expanded?: boolean +} interface FocusDrawerSidePanelProps { runId: string @@ -26,6 +34,7 @@ interface FocusDrawerSidePanelProps { const FocusDrawerSidePanel = ({runId, scenarioId}: FocusDrawerSidePanelProps) => { const {columnResult} = usePreviewTableData({runId}) const evalType = useAtomValue(previewEvalTypeAtom) + const [selectedKey, setSelectedKey] = useState(null) const {rows} = useInfiniteTablePagination({ store: evaluationPreviewTableStore, @@ -57,11 +66,11 @@ const FocusDrawerSidePanel = ({runId, scenarioId}: FocusDrawerSidePanelProps) => return map }, [columnResult?.groups]) - const evaluatorNodes = useMemo(() => { + const evaluatorNodes = useMemo(() => { if (!columnResult?.evaluators?.length) return [] return columnResult.evaluators.map((evaluator) => ({ title: evaluator.name ?? evaluator.slug ?? "Evaluator", - key: `evaluator:${evaluator.id ?? evaluator.slug ?? evaluator.name}`, + id: `evaluator:${evaluator.id ?? evaluator.slug ?? evaluator.name}`, icon: , anchorId: (evaluator.id && groupAnchorMap.get(`annotation:${evaluator.id}`)) ?? @@ -70,25 +79,13 @@ const FocusDrawerSidePanel = ({runId, scenarioId}: FocusDrawerSidePanelProps) => })) }, [columnResult?.evaluators, groupAnchorMap]) - const metricNodes = useMemo(() => { - if (!columnResult?.groups?.length) return [] - return columnResult.groups - .filter((group) => group.kind === "metric" && group.id !== "metrics:human") - .map((group) => ({ - title: group.label, - key: `metric:${group.id}`, - icon: , - anchorId: groupAnchorMap.get(group.id) ?? toSectionAnchorId(group.id), - })) - }, [columnResult?.groups, groupAnchorMap]) - - const treeData = useMemo(() => { - if (!columnResult) return [] - - const children: AnchorTreeNode[] = [ + const treeData = useMemo(() => { + if (!columnResult) return null + + const children: FocusTreeNode[] = [ { title: "Input", - key: "input", + id: "input", icon: , anchorId: groupAnchorMap.get("inputs") ?? @@ -97,7 +94,7 @@ const FocusDrawerSidePanel = ({runId, scenarioId}: FocusDrawerSidePanelProps) => }, { title: "Output", - key: "output", + id: "output", icon: , anchorId: groupAnchorMap.get("outputs") ?? 
@@ -109,7 +106,7 @@ const FocusDrawerSidePanel = ({runId, scenarioId}: FocusDrawerSidePanelProps) => if (evaluatorNodes.length) { children.push({ title: "Evaluator", - key: "evaluator", + id: "evaluator", icon: , children: evaluatorNodes, anchorId: @@ -119,32 +116,17 @@ const FocusDrawerSidePanel = ({runId, scenarioId}: FocusDrawerSidePanelProps) => }) } - if (metricNodes.length) { - children.push({ - title: "Metrics", - key: "metrics", - icon: , - children: metricNodes, - anchorId: - groupAnchorMap.get("metrics:auto") ?? - groupAnchorMap.get("metric") ?? - toSectionAnchorId("metrics-auto"), - }) + return { + title: parentTitle, + id: "evaluation", + icon: , + children, + expanded: true, } + }, [columnResult, evaluatorNodes, groupAnchorMap, parentTitle]) - return [ - { - title: parentTitle, - key: "evaluation", - icon: , - children, - }, - ] - }, [columnResult, parentTitle, metricNodes, evaluatorNodes]) - - const handleSelect = useCallback((_selectedKeys: Key[], info: any) => { + const handleSelect = useCallback((key: string, node: FocusTreeNode) => { if (typeof window === "undefined") return - const node = info?.node as AnchorTreeNode | undefined const anchorId = node?.anchorId if (!anchorId) return const target = document.getElementById(anchorId) @@ -161,20 +143,25 @@ const FocusDrawerSidePanel = ({runId, scenarioId}: FocusDrawerSidePanelProps) => ) } - return ( -
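`handleSelect` above maps a selected tree node to a DOM anchor: each node carries an `anchorId` that matches an `id` rendered on the corresponding section card, so navigation is just a `getElementById` lookup. The hunk cuts off right after that lookup, so the scroll call itself is not shown; a minimal sketch of the likely shape, with the `scrollIntoView` options being an assumption:

```typescript
// Hedged sketch of the anchor-based tree navigation used by the side panel.
interface AnchorNode {
    id: string
    anchorId?: string
}

const scrollToSection = (node: AnchorNode | undefined) => {
    if (typeof window === "undefined") return // SSR guard, as in the diff
    const anchorId = node?.anchorId
    if (!anchorId) return
    const target = document.getElementById(anchorId)
    // The exact scroll options are an assumption; the diff truncates here.
    target?.scrollIntoView({behavior: "smooth", block: "start"})
}
```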
-
- -
-
- ) + return treeData ? ( + node.id} + getChildren={(node) => node.children} + renderLabel={(node) => ( +
+ {node.icon} + {node.title} +
+ )} + selectedKey={selectedKey} + onSelect={(key, node) => { + setSelectedKey(key) + handleSelect(key, node) + }} + defaultExpanded + /> + ) : null } export default memo(FocusDrawerSidePanel) diff --git a/web/oss/src/components/EvalRunDetails/components/Page.tsx b/web/oss/src/components/EvalRunDetails/components/Page.tsx index 2693c8cb91..4827164d24 100644 --- a/web/oss/src/components/EvalRunDetails/components/Page.tsx +++ b/web/oss/src/components/EvalRunDetails/components/Page.tsx @@ -130,6 +130,7 @@ const EvalRunPreviewPage = ({runId, evaluationType, projectId = null}: EvalRunPr return ( setActiveViewParam(v)} /> } - headerClassName="px-2" + headerClassName="px-4 pt-2" > -
+
+
), diff --git a/web/oss/src/components/EvalRunDetails/components/PreviewEvalRunHeader.tsx b/web/oss/src/components/EvalRunDetails/components/PreviewEvalRunHeader.tsx index a3b75e74bb..78ad5c7612 100644 --- a/web/oss/src/components/EvalRunDetails/components/PreviewEvalRunHeader.tsx +++ b/web/oss/src/components/EvalRunDetails/components/PreviewEvalRunHeader.tsx @@ -1,34 +1,26 @@ import {memo, useCallback, useMemo, useState} from "react" -import {Pause, Play} from "@phosphor-icons/react" +import {PauseIcon, PlayIcon, XCircleIcon} from "@phosphor-icons/react" import {useQueryClient} from "@tanstack/react-query" -import {Button, Space, Tabs, Tag, Tooltip} from "antd" +import {Button, Tabs, Tooltip, Typography} from "antd" import clsx from "clsx" -import {useAtomValue} from "jotai" +import {atom, useAtomValue, useSetAtom} from "jotai" import {message} from "@/oss/components/AppMessageContext" -import dayjs from "@/oss/lib/helpers/dateTimeHelper/dayjs" import {invalidatePreviewRunCache} from "@/oss/lib/hooks/usePreviewEvaluations/assets/previewRunBatcher" import {startSimpleEvaluation, stopSimpleEvaluation} from "@/oss/services/onlineEvaluations/api" +import {compareRunIdsAtom, compareRunIdsWriteAtom, getComparisonSolidColor} from "../atoms/compare" import { + runDisplayNameAtomFamily, runInvocationRefsAtomFamily, runTestsetIdsAtomFamily, runFlagsAtomFamily, } from "../atoms/runDerived" -import {evaluationRunQueryAtomFamily} from "../atoms/table" import {previewEvalTypeAtom} from "../state/evalType" import CompareRunsMenu from "./CompareRunsMenu" - -const statusColor = (status?: string | null) => { - if (!status) return "default" - const normalized = status.toLowerCase() - if (normalized.includes("success") || normalized.includes("completed")) return "green" - if (normalized.includes("fail") || normalized.includes("error")) return "red" - if (normalized.includes("running") || normalized.includes("queued")) return "blue" - return "default" -} +import EvaluationRunTag from "./EvaluationRunTag" type ActiveView = "overview" | "focus" | "scenarios" | "configuration" @@ -150,49 +142,77 @@ const PreviewEvalRunMeta = ({ projectId?: string | null className?: string }) => { - const runQueryAtom = useMemo(() => evaluationRunQueryAtomFamily(runId), [runId]) - const runQuery = useAtomValue(runQueryAtom) const _invocationRefs = useAtomValue(useMemo(() => runInvocationRefsAtomFamily(runId), [runId])) const _testsetIds = useAtomValue(useMemo(() => runTestsetIdsAtomFamily(runId), [runId])) const {canStopOnline, handleOnlineAction, onlineAction, showOnlineAction} = useOnlineEvaluationActions(runId, projectId) - - const runData = runQuery.data?.camelRun ?? runQuery.data?.rawRun ?? null - const runStatus = runData?.status ?? null - const updatedTs = - (runData as any)?.updatedAt || - (runData as any)?.updated_at || - (runData as any)?.createdAt || - (runData as any)?.created_at || - null - const updatedMoment = updatedTs ? dayjs(updatedTs) : null - const lastUpdated = updatedMoment?.isValid() ? 
updatedMoment.fromNow() : undefined
+    const compareRunIds = useAtomValue(compareRunIdsAtom)
+    const setCompareRunIds = useSetAtom(compareRunIdsWriteAtom)
+
+    const orderedRunIds = useMemo(() => {
+        const ids = [runId, ...compareRunIds].filter((id): id is string => Boolean(id))
+        const seen = new Set<string>()
+        return ids.filter((id) => {
+            if (seen.has(id)) return false
+            seen.add(id)
+            return true
+        })
+    }, [compareRunIds, runId])
+
+    const runDescriptorsAtom = useMemo(
+        () =>
+            atom((get) =>
+                orderedRunIds.map((id) => ({
+                    id,
+                    name: get(runDisplayNameAtomFamily(id)),
+                })),
+            ),
+        [orderedRunIds],
+    )
+    const runDescriptors = useAtomValue(runDescriptorsAtom)

     return (
-
- - {runStatus ? ( - <> - - {runStatus} - - - ) : null} - {lastUpdated ? ( - - - Updated {lastUpdated} - - - ) : null} - +
+
+ Evaluations: +
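+                    {/* Base run renders first and cannot be removed; comparison runs
+                        get a close icon that filters them out of compareRunIds. */}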
+ {runDescriptors.map((run, index) => { + const isBaseRun = index === 0 + const tagColor = getComparisonSolidColor(index) + return ( + + ) : undefined + } + onClose={ + !isBaseRun + ? (event) => { + event.preventDefault() + setCompareRunIds((prev) => + prev.filter((id) => id !== run.id), + ) + } + : undefined + } + /> + ) + })} +
+
+
{showOnlineAction ? (
- } + content={popoverContent} trigger="hover" mouseEnterDelay={0.5} mouseLeaveDelay={0.1} diff --git a/web/oss/src/components/EvalRunDetails/components/TableCells/InputCell.tsx b/web/oss/src/components/EvalRunDetails/components/TableCells/InputCell.tsx index 058e7d42ca..4feeaaf8f4 100644 --- a/web/oss/src/components/EvalRunDetails/components/TableCells/InputCell.tsx +++ b/web/oss/src/components/EvalRunDetails/components/TableCells/InputCell.tsx @@ -1,14 +1,45 @@ import {memo, useMemo} from "react" -import dynamic from "next/dynamic" +import {useAtomValue} from "jotai" + +import { + CellContentPopover, + ChatMessagesCellContent, + JsonCellContent, + TextCellContent, + extractChatMessages, + normalizeValue, + safeJsonStringify, + tryParseJson, +} from "@/oss/components/CellRenderers" import type {EvaluationTableColumn} from "../../atoms/table" import useScenarioCellValue from "../../hooks/useScenarioCellValue" -import {renderScenarioChatMessages} from "../../utils/chatMessages" +import {scenarioRowHeightAtom, type ScenarioRowHeight} from "../../state/rowHeight" + +// Max lines for JSON/text content (fills most of the cell) +// Small (80px - 16px padding) / ~14px line height ≈ 4 lines +// Medium (160px - 24px padding) / ~14px line height ≈ 9 lines +// Large (280px - 24px padding) / ~14px line height ≈ 18 lines +const MAX_LINES_BY_HEIGHT: Record = { + small: 4, + medium: 9, + large: 18, +} -import CellContentPopover from "./CellContentPopover" +// Max total lines for chat messages (accounting for role labels ~1 line each) +const MAX_CHAT_LINES_BY_HEIGHT: Record = { + small: 3, + medium: 7, + large: 14, +} -const JsonEditor = dynamic(() => import("@/oss/components/Editor/Editor"), {ssr: false}) +// Max lines per individual chat message content +const MAX_LINES_PER_MESSAGE_BY_HEIGHT: Record = { + small: 2, + medium: 5, + large: 8, +} interface PreviewEvaluationInputCellProps { scenarioId?: string @@ -33,78 +64,6 @@ const unwrapInputsWrapper = (value: unknown): unknown => { return value } -/** - * Try to parse a JSON string, returns the parsed value or null if not valid JSON - */ -const tryParseJson = (value: unknown): {parsed: unknown; isJson: boolean} => { - if (value === null || value === undefined) { - return {parsed: value, isJson: false} - } - // Already an object/array - if (typeof value === "object") { - return {parsed: value, isJson: true} - } - // Try to parse string as JSON - if (typeof value === "string") { - const trimmed = value.trim() - if ( - (trimmed.startsWith("{") && trimmed.endsWith("}")) || - (trimmed.startsWith("[") && trimmed.endsWith("]")) - ) { - try { - const parsed = JSON.parse(trimmed) - return {parsed, isJson: true} - } catch { - return {parsed: value, isJson: false} - } - } - } - return {parsed: value, isJson: false} -} - -/** - * Safely stringify a value to JSON - */ -const safeJsonStringify = (value: unknown): string => { - try { - return JSON.stringify(value, null, 2) - } catch { - return String(value) - } -} - -/** - * Render JSON content using the code editor - */ -const JsonContent = memo(({value, height}: {value: unknown; height?: number}) => { - const jsonString = useMemo(() => safeJsonStringify(value), [value]) - return ( -
- -
- ) -}) -JsonContent.displayName = "JsonContent" - -const normalizeValue = (value: unknown): string => { - if (value === null || value === undefined) return "—" - if (typeof value === "string") return value - if (typeof value === "number" || typeof value === "boolean") { - return String(value) - } - return safeJsonStringify(value) -} - const CONTAINER_CLASS = "scenario-table-cell" const PreviewEvaluationInputCell = ({ @@ -112,34 +71,35 @@ const PreviewEvaluationInputCell = ({ runId, column, }: PreviewEvaluationInputCellProps) => { + const rowHeight = useAtomValue(scenarioRowHeightAtom) const {ref, selection, showSkeleton} = useScenarioCellValue({scenarioId, runId, column}) const {value: rawValue} = selection + // Get limits based on row height + const maxLines = MAX_LINES_BY_HEIGHT[rowHeight] + const maxChatTotalLines = MAX_CHAT_LINES_BY_HEIGHT[rowHeight] + const maxLinesPerMessage = MAX_LINES_PER_MESSAGE_BY_HEIGHT[rowHeight] + // Unwrap redundant "inputs" wrapper from online evaluations const value = useMemo(() => unwrapInputsWrapper(rawValue), [rawValue]) // Try to parse JSON strings - must be before any early returns const {parsed: jsonValue, isJson} = useMemo(() => tryParseJson(value), [value]) - const widthStyle = {width: "100%"} - const chatNodes = useMemo( - () => - renderScenarioChatMessages( - value, - `${scenarioId ?? "scenario"}-${column.id ?? column.path ?? "input"}`, - ), - [column.id, column.path, scenarioId, value], - ) + // Check for chat messages + const chatMessages = useMemo(() => extractChatMessages(jsonValue), [jsonValue]) + const isChatMessages = chatMessages !== null && chatMessages.length > 0 - // Generate popover content (full content without truncation) - const popoverChatNodes = useMemo( - () => - renderScenarioChatMessages( - value, - `${scenarioId ?? "scenario"}-${column.id ?? column.path ?? "input"}-popover`, - ), - [column.id, column.path, scenarioId, value], - ) + // Compute display value and copy text before early returns (React hooks rule) + const displayValue = useMemo(() => normalizeValue(value), [value]) + const copyText = useMemo(() => { + if (value === undefined || value === null) return undefined + if (isChatMessages || isJson) return safeJsonStringify(jsonValue) + return displayValue + }, [value, isChatMessages, isJson, jsonValue, displayValue]) + + const keyPrefix = `${scenarioId ?? "scenario"}-${column.id ?? column.path ?? "input"}` + const widthStyle = {width: "100%"} if (showSkeleton) { return ( @@ -157,40 +117,54 @@ const PreviewEvaluationInputCell = ({ ) } - const displayValue = normalizeValue(value) - const popoverContent = popoverChatNodes?.length ? ( -
{popoverChatNodes}
- ) : isJson ? ( - - ) : ( - {displayValue} - ) - - if (chatNodes && chatNodes.length) { + // Render chat messages + if (isChatMessages) { return ( - + + } + copyText={copyText} + >
-
{chatNodes}
+
) } - // Render JSON objects/arrays using the JSON editor + // Render JSON objects/arrays if (isJson) { return ( - + } + copyText={copyText} + >
- +
) } + // Plain text return ( - + } + copyText={copyText} + >
- {displayValue} +
) diff --git a/web/oss/src/components/EvalRunDetails/components/TableCells/InvocationCell.tsx b/web/oss/src/components/EvalRunDetails/components/TableCells/InvocationCell.tsx index 7a69b9792d..5daf016d85 100644 --- a/web/oss/src/components/EvalRunDetails/components/TableCells/InvocationCell.tsx +++ b/web/oss/src/components/EvalRunDetails/components/TableCells/InvocationCell.tsx @@ -1,60 +1,51 @@ import {memo, useMemo} from "react" import clsx from "clsx" +import {useAtomValue} from "jotai" import {AlertCircle} from "lucide-react" -import dynamic from "next/dynamic" + +import { + CellContentPopover, + ChatMessagesCellContent, + JsonCellContent, + TextCellContent, + extractChatMessages, + safeJsonStringify, + tryParseJson, +} from "@/oss/components/CellRenderers" import type {EvaluationTableColumn} from "../../atoms/table" import useScenarioCellValue from "../../hooks/useScenarioCellValue" -import {renderScenarioChatMessages} from "../../utils/chatMessages" +import {scenarioRowHeightAtom, type ScenarioRowHeight} from "../../state/rowHeight" -import CellContentPopover from "./CellContentPopover" import InvocationTraceSummary from "./InvocationTraceSummary" -const JsonEditor = dynamic(() => import("@/oss/components/Editor/Editor"), {ssr: false}) +// Max lines for JSON/text content (fills most of the cell) +const MAX_LINES_BY_HEIGHT: Record = { + small: 4, + medium: 9, + large: 18, +} -const CONTAINER_CLASS = "scenario-table-cell" +// Max total lines for chat messages (accounting for role labels ~1 line each) +const MAX_CHAT_LINES_BY_HEIGHT: Record = { + small: 3, + medium: 7, + large: 14, +} -/** - * Try to parse a JSON string, returns the parsed value or null if not valid JSON - */ -const tryParseJson = (value: unknown): {parsed: unknown; isJson: boolean} => { - if (value === null || value === undefined) { - return {parsed: value, isJson: false} - } - // Already an object/array - if (typeof value === "object") { - return {parsed: value, isJson: true} - } - // Try to parse string as JSON - if (typeof value === "string") { - const trimmed = value.trim() - if ( - (trimmed.startsWith("{") && trimmed.endsWith("}")) || - (trimmed.startsWith("[") && trimmed.endsWith("]")) - ) { - try { - const parsed = JSON.parse(trimmed) - return {parsed, isJson: true} - } catch { - return {parsed: value, isJson: false} - } - } - } - return {parsed: value, isJson: false} +// Max lines per individual chat message content +const MAX_LINES_PER_MESSAGE_BY_HEIGHT: Record = { + small: 2, + medium: 5, + large: 8, } +const CONTAINER_CLASS = "scenario-table-cell" + /** - * Safely stringify a value to JSON + * Extract assistant content from invocation output for display */ -const safeJsonStringify = (value: unknown): string => { - try { - return JSON.stringify(value, null, 2) - } catch { - return String(value) - } -} - const extractAssistantContent = (entry: any): string | undefined => { if (!entry) return undefined if (typeof entry === "string") return entry @@ -81,6 +72,9 @@ const extractAssistantContent = (entry: any): string | undefined => { return undefined } +/** + * Coerce invocation output to a display string + */ const coerceInvocationOutput = (value: unknown): string | undefined => { if (typeof value === "string") return value if (Array.isArray(value)) { @@ -121,7 +115,10 @@ const coerceInvocationOutput = (value: unknown): string | undefined => { return undefined } -const normalizeValue = (value: unknown): string => { +/** + * Normalize invocation output value for display + */ +const normalizeInvocationValue = 
(value: unknown): string => { if (value === null || value === undefined) return "—" const coerced = coerceInvocationOutput(value) if (coerced) return coerced @@ -130,29 +127,6 @@ const normalizeValue = (value: unknown): string => { return safeJsonStringify(value) } -/** - * Render JSON content using the code editor - */ -const JsonContent = memo(({value, height}: {value: unknown; height?: number}) => { - const jsonString = useMemo(() => safeJsonStringify(value), [value]) - return ( -
- -
- ) -}) -JsonContent.displayName = "JsonContent" - const PreviewEvaluationInvocationCell = ({ scenarioId, runId, @@ -162,31 +136,32 @@ const PreviewEvaluationInvocationCell = ({ runId?: string column: EvaluationTableColumn }) => { + const rowHeight = useAtomValue(scenarioRowHeightAtom) const {ref, selection, showSkeleton} = useScenarioCellValue({scenarioId, runId, column}) const {value, stepError} = selection + // Get limits based on row height + const maxLines = MAX_LINES_BY_HEIGHT[rowHeight] + const maxChatTotalLines = MAX_CHAT_LINES_BY_HEIGHT[rowHeight] + const maxLinesPerMessage = MAX_LINES_PER_MESSAGE_BY_HEIGHT[rowHeight] + // Try to parse JSON strings - must be before any early returns const {parsed: jsonValue, isJson} = useMemo(() => tryParseJson(value), [value]) - const widthStyle = {width: "100%"} - const chatNodes = useMemo( - () => - renderScenarioChatMessages( - value, - `${scenarioId ?? "scenario"}-${column.stepKey ?? column.id ?? "invocation"}`, - ), - [column.id, column.stepKey, scenarioId, value], - ) + // Check for chat messages + const chatMessages = useMemo(() => extractChatMessages(jsonValue), [jsonValue]) + const isChatMessages = chatMessages !== null && chatMessages.length > 0 - // Generate popover content (full content without truncation) - const popoverChatNodes = useMemo( - () => - renderScenarioChatMessages( - value, - `${scenarioId ?? "scenario"}-${column.stepKey ?? column.id ?? "invocation"}-popover`, - ), - [column.id, column.stepKey, scenarioId, value], - ) + // Compute display value and copy text before early returns (React hooks rule) + const displayValue = useMemo(() => normalizeInvocationValue(value), [value]) + const copyText = useMemo(() => { + if (value === undefined || value === null) return undefined + if (isChatMessages || isJson) return safeJsonStringify(jsonValue) + return displayValue + }, [value, isChatMessages, isJson, jsonValue, displayValue]) + + const keyPrefix = `${scenarioId ?? "scenario"}-${column.stepKey ?? column.id ?? "invocation"}` + const widthStyle = {width: "100%"} if (showSkeleton) { return ( @@ -215,8 +190,9 @@ const PreviewEvaluationInvocationCell = ({
) + const errorCopyContent = `${stepError?.message}${stepError?.stacktrace ? `\n${stepError?.stacktrace}` : ""}` return ( - +
{popoverChatNodes}
- ) : isJson ? ( - - ) : ( - {displayValue} - ) - - if (chatNodes && chatNodes.length) { + // Render chat messages + if (isChatMessages) { return ( - -
+ + } + copyText={copyText} + > +
-
{chatNodes}
+
+ } + copyText={copyText} + >
- +
+ } + copyText={copyText} + >
- {displayValue} +
null diff --git a/web/oss/src/components/EvalRunDetails/components/TableCells/MetricCell.tsx b/web/oss/src/components/EvalRunDetails/components/TableCells/MetricCell.tsx index cb9a2857da..8f1bb8b4fc 100644 --- a/web/oss/src/components/EvalRunDetails/components/TableCells/MetricCell.tsx +++ b/web/oss/src/components/EvalRunDetails/components/TableCells/MetricCell.tsx @@ -9,7 +9,7 @@ import MetricDetailsPreviewPopover from "@/oss/components/Evaluations/components import EvaluatorMetricBar from "@/oss/components/Evaluations/EvaluatorMetricBar" import type {BasicStats} from "@/oss/lib/metricUtils" -import {invocationTraceSummaryAtomFamily} from "../../atoms/invocationTraceSummary" +import {scenarioHasInvocationAtomFamily} from "../../atoms/invocationTraceSummary" import type {EvaluationTableColumn} from "../../atoms/table" import useScenarioCellValue from "../../hooks/useScenarioCellValue" import {previewEvalTypeAtom} from "../../state/evalType" @@ -75,10 +75,12 @@ const PreviewEvaluationMetricCell = ({ const {value, displayValue, stepError} = selection // Check if invocation has been run for this scenario (for annotation/evaluator metrics) - const invocationSummary = useAtomValue( - useMemo(() => invocationTraceSummaryAtomFamily({scenarioId, runId}), [scenarioId, runId]), + // Use lightweight atom that only checks for invocation existence, not full trace summary + const hasInvocationAtom = useMemo( + () => scenarioHasInvocationAtomFamily({scenarioId, runId}), + [scenarioId, runId], ) - const hasInvocation = invocationSummary.state === "ready" && Boolean(invocationSummary.traceId) + const hasInvocation = useAtomValue(hasInvocationAtom) const isAnnotationColumn = column.stepType === "annotation" // For online evaluations, we don't need an invocation check since data comes from live traces // Also skip invalid state if we already have a valid value @@ -131,9 +133,9 @@ const PreviewEvaluationMetricCell = ({ column.stepType === "annotation" && Boolean( statsValue && - (Array.isArray((statsValue as any)?.frequency) || - Array.isArray((statsValue as any)?.freq) || - Array.isArray((statsValue as any)?.rank)), + (Array.isArray((statsValue as any)?.frequency) || + Array.isArray((statsValue as any)?.freq) || + Array.isArray((statsValue as any)?.rank)), ) // Parse array values into individual tags (handles comma-separated strings) @@ -277,8 +279,9 @@ const PreviewEvaluationMetricCell = ({
) + const errorCopyContent = `${stepError.message}${stepError.stacktrace ? `\n${stepError.stacktrace}` : ""}` return ( - +
{ const projectId = useAtomValue(effectiveProjectIdAtom) const testsetRefsAtom = useMemo(() => runTestsetRefsAtomFamily(runId ?? null), [runId]) @@ -56,6 +61,8 @@ export const TestsetTag = memo( revisionId={revisionId} projectId={projectId} projectURL={href ? undefined : projectURL} + toneOverride={toneOverride} + showIconOverride={showIconOverride} /> ) }, @@ -72,11 +79,15 @@ export const TestsetTagList = memo( projectURL, runId, className, + toneOverride, + showIconOverride, }: { ids: string[] projectURL?: string | null runId?: string | null className?: string + toneOverride?: ReferenceTone | null + showIconOverride?: boolean }) => { const projectId = useAtomValue(effectiveProjectIdAtom) const testsetRefsAtom = useMemo(() => runTestsetRefsAtomFamily(runId ?? null), [runId]) @@ -109,6 +120,8 @@ export const TestsetTagList = memo( projectId={projectId} projectURL={resolvedProjectURL ?? projectURL} className={className} + toneOverride={toneOverride} + showIconOverride={showIconOverride} /> ) }, @@ -123,10 +136,14 @@ export const ApplicationReferenceLabel = memo( runId, applicationId: explicitApplicationId, projectURL: explicitProjectURL, + toneOverride, + showIconOverride, }: { runId?: string | null applicationId?: string | null projectURL?: string | null + toneOverride?: ReferenceTone | null + showIconOverride?: boolean }) => { const projectId = useAtomValue(effectiveProjectIdAtom) const {applicationId: runApplicationId} = useRunIdentifiers(runId) @@ -143,6 +160,8 @@ export const ApplicationReferenceLabel = memo( projectId={projectId} projectURL={explicitProjectURL ?? scopedProjectURL} href={appDetailHref} + toneOverride={toneOverride} + showIconOverride={showIconOverride} /> ) }, @@ -160,6 +179,8 @@ export const VariantReferenceLabel = memo( fallbackLabel, showVersionPill = false, explicitVersion, + toneOverride, + showIconOverride, }: { variantId?: string | null applicationId?: string | null @@ -167,6 +188,8 @@ export const VariantReferenceLabel = memo( fallbackLabel?: string | null showVersionPill?: boolean explicitVersion?: number | string | null + toneOverride?: ReferenceTone | null + showIconOverride?: boolean }) => { const projectId = useAtomValue(effectiveProjectIdAtom) const {variantId: runVariantId, applicationId: runApplicationId} = useRunIdentifiers(runId) @@ -184,6 +207,8 @@ export const VariantReferenceLabel = memo( showVersionPill={showVersionPill} explicitVersion={explicitVersion} href={href} + toneOverride={toneOverride} + showIconOverride={showIconOverride} /> ) }, @@ -203,6 +228,8 @@ export const VariantRevisionLabel = memo( runId, fallbackVariantName, fallbackRevision, + toneOverride, + showIconOverride, }: { variantId?: string | null revisionId?: string | null @@ -210,6 +237,8 @@ export const VariantRevisionLabel = memo( runId?: string | null fallbackVariantName?: string | null fallbackRevision?: number | string | null + toneOverride?: ReferenceTone | null + showIconOverride?: boolean }) => { const projectId = useAtomValue(effectiveProjectIdAtom) const { @@ -266,6 +295,8 @@ export const VariantRevisionLabel = memo( fallbackVariantName={resolvedVariantName} fallbackRevision={resolvedRevision} href={href} + toneOverride={toneOverride} + showIconOverride={showIconOverride} /> ) }, diff --git a/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/components/EvaluatorSection.tsx b/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/components/EvaluatorSection.tsx index 65e50101d9..b19694ec70 100644 --- 
a/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/components/EvaluatorSection.tsx +++ b/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/components/EvaluatorSection.tsx @@ -6,7 +6,6 @@ import {useAtomValue} from "jotai" import dynamic from "next/dynamic" import EvaluatorDetailsPreview from "@/oss/components/pages/evaluations/onlineEvaluation/components/EvaluatorDetailsPreview" -import EvaluatorTypeTag from "@/oss/components/pages/evaluations/onlineEvaluation/components/EvaluatorTypeTag" import {EVALUATOR_CATEGORY_LABEL_MAP} from "@/oss/components/pages/evaluations/onlineEvaluation/constants" import {useEvaluatorDetails} from "@/oss/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorDetails" import {useEvaluatorTypeFromConfigs} from "@/oss/components/pages/evaluations/onlineEvaluation/hooks/useEvaluatorTypeFromConfigs" @@ -61,7 +60,7 @@ const EvaluatorSection = ({runId}: EvaluatorSectionProps) => { } return ( -
+
{evaluators.map((evaluator, index) => ( + ) + return (
- - {rawEvaluator ? ( -
-
-
- - {evaluatorDisplayLabel} - -
- - {finalShowType ? ( - - ) : null} - {evaluator.version ? ( - - V{evaluator.version} - - ) : null} -
+ +
+
+ {titleNode} + {evaluator.version ? ( + + V{evaluator.version} + + ) : null} +
+
+ {rawEvaluator && hasEvaluatorJson ? ( + setView(val as "details" | "json")} + /> + ) : null} +
+
+ {!collapsed ? ( +
+ {rawEvaluator ? ( + <> {evaluator.description ? ( {evaluator.description} ) : null} -
-
- {hasEvaluatorJson ? ( - setView(val as "details" | "json")} - /> - ) : null} -
-
- - {!collapsed ? ( - <> -
- {view === "json" && hasEvaluatorJson ? ( -
- -
- ) : ( - - )} -
- - {metricsFallback.length > 0 ? ( -
- Metrics -
- {metricsFallback.map((metric) => ( - - {metric.displayLabel ?? metric.name} - - ))} -
- ) : null} + ) : ( + + )} - ) : null} -
- ) : ( -
- - Evaluator configuration snapshot is unavailable for this run. - - {metricsFallback.length ? ( -
- {metricsFallback.map((metric) => ( - - {metric.displayLabel ?? metric.name} - - ))} + ) : ( + + Evaluator configuration snapshot is unavailable for this run. + + )} + + {metricsFallback.length > 0 ? ( +
+ Metrics +
+ {metricsFallback.map((metric) => ( + + {metric.displayLabel ?? metric.name} + + ))} +
) : null}
- )} + ) : null} ) diff --git a/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/components/GeneralSection.tsx b/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/components/GeneralSection.tsx index 6a4aa67553..c8df149005 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/components/GeneralSection.tsx +++ b/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/components/GeneralSection.tsx @@ -21,6 +21,7 @@ const {Text} = Typography interface GeneralSectionProps { runId: string showActions?: boolean + showHeader?: boolean } const GeneralSectionHeader = ({runId, index}: {runId: string; index: number}) => { @@ -31,7 +32,7 @@ const GeneralSectionHeader = ({runId, index}: {runId: string; index: number}) => ) } -const GeneralSection = ({runId, showActions = true}: GeneralSectionProps) => { +const GeneralSection = ({runId, showActions = true, showHeader = true}: GeneralSectionProps) => { const [collapsed, setCollapsed] = useState(false) const projectId = useAtomValue(effectiveProjectIdAtom) const invalidateRunsTable = useSetAtom(invalidateEvaluationRunsTableAtom) @@ -116,18 +117,22 @@ const GeneralSection = ({runId, showActions = true}: GeneralSectionProps) => { return (
- } - right={ -
- ) : null} - {!collapsed ? content : null} + {content} ) @@ -403,19 +348,89 @@ const ConfigurationSectionColumn = memo( }, ) +const EvaluationRunTagsRow = memo( + ({ + runIds, + registerScrollContainer, + syncScroll, + }: { + runIds: string[] + registerScrollContainer: (key: string, node: HTMLDivElement | null) => void + syncScroll: (key: string, scrollLeft: number) => void + }) => { + const columnClass = + runIds.length > 1 ? "auto-cols-[minmax(480px,1fr)]" : "auto-cols-[minmax(320px,1fr)]" + const refKey = "section-evaluations" + const handleRef = useCallback( + (node: HTMLDivElement | null) => registerScrollContainer(refKey, node), + [refKey, registerScrollContainer], + ) + const handleScroll = useCallback( + (event: UIEvent) => syncScroll(refKey, event.currentTarget.scrollLeft), + [refKey, syncScroll], + ) + + return ( + +
+ {runIds.map((runId, index) => ( + + ))} +
+
+ ) + }, +) + +const EvaluationRunTagItem = memo(({runId, index}: {runId: string; index: number}) => { + const runDisplayNameAtom = useMemo(() => runDisplayNameAtomFamily(runId), [runId]) + const runDisplayName = useAtomValue(runDisplayNameAtom) + const summaryAtom = useMemo( + () => configurationRunSummaryAtomFamily({runId, compareIndex: index}), + [runId, index], + ) + const summary = useAtomValue(summaryAtom) + const label = resolveLabel( + runDisplayName, + summary.runName !== "—" ? summary.runName : undefined, + summary.runSlug ?? undefined, + summary.runId, + ) + + return ( +
+ {summary.isLoading ? ( +
+ ) : ( + + )} +
+ ) +}) + const ConfigurationSectionRow = memo( ({ section, runIds, runIdsSignature, - runDescriptors, registerScrollContainer, syncScroll, }: { section: SectionDefinition runIds: string[] runIdsSignature: string - runDescriptors: RunDescriptor[] registerScrollContainer: (key: string, node: HTMLDivElement | null) => void syncScroll: (key: string, scrollLeft: number) => void }) => { @@ -455,14 +470,13 @@ const ConfigurationSectionRow = memo( return null } - const showRowHeader = false - // section.key === "general" || section.key === "query" - + const columnClass = + runIds.length > 1 ? "auto-cols-[minmax(480px,1fr)]" : "auto-cols-[minmax(320px,1fr)]" const grid = (
{runIds.map((runId, index) => ( setCollapsed((v) => !v) : undefined} /> ))}
) - return
{grid}
+ return ( +
+
setCollapsed((value) => !value)} + onKeyDown={(event: KeyboardEvent) => { + if (event.key === "Enter" || event.key === " ") { + event.preventDefault() + setCollapsed((value) => !value) + } + }} + > + {section.title} + +
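+                {/* The header doubles as the collapse toggle; Enter and Space mirror
+                    the click handler so the row stays keyboard-accessible. */}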
+ {!collapsed ? grid : null} +
+ ) }, ) const ConfigurationLayout = memo(({runIds}: {runIds: string[]}) => { const runIdsSignature = useMemo(() => runIds.join("|"), [runIds]) const {register, syncScroll} = useScrollSync() - const {runDescriptors} = useRunMetricData(runIds) return ( -
+
+ {sectionDefinitions.map((section) => ( ))} - {/* Render evaluators without a shared wrapper; each run renders its own evaluator cards directly */} -
- {runIds.map((runId) => ( -
- -
- ))} -
) }) @@ -534,10 +581,8 @@ const ConfigurationView = ({runId}: ConfigurationViewProps) => { } return ( -
-
- -
+
+
) } diff --git a/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/utils.ts b/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/utils.ts index d858f34967..1b77d642ed 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/utils.ts +++ b/web/oss/src/components/EvalRunDetails/components/views/ConfigurationView/utils.ts @@ -59,12 +59,12 @@ export const toDisplayable = (value: unknown): string | null => { export const hasQueryReference = (reference: Record): boolean => Boolean( reference && - (reference.queryId || - reference.querySlug || - reference.queryRevisionId || - reference.queryRevisionSlug || - reference.queryVariantId || - reference.queryVariantSlug), + (reference.queryId || + reference.querySlug || + reference.queryRevisionId || + reference.queryRevisionSlug || + reference.queryVariantId || + reference.queryVariantSlug), ) export const formatSamplingRate = (rate: unknown): string => { diff --git a/web/oss/src/components/EvalRunDetails/components/views/OverviewView.tsx b/web/oss/src/components/EvalRunDetails/components/views/OverviewView.tsx index 86a81b7ea0..774bbaea5d 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/OverviewView.tsx +++ b/web/oss/src/components/EvalRunDetails/components/views/OverviewView.tsx @@ -36,9 +36,9 @@ const OverviewView = ({runId}: OverviewViewProps) => { const comparisonRunIds = useMemo(() => runIds.slice(1), [runIds]) return ( -
+
-
+
{baseRunId ? ( } return ( - +
-
-
- +
+
+ + Evaluator Scores Overview + + + Average evaluator score across evaluations +
-
- +
+
+ +
+
+ +
diff --git a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/BaseRunMetricsSection.tsx b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/BaseRunMetricsSection.tsx index 707f24753f..3b96690766 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/BaseRunMetricsSection.tsx +++ b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/BaseRunMetricsSection.tsx @@ -1,6 +1,6 @@ import {memo, useMemo} from "react" -import {Alert, Card, Typography} from "antd" +import {Alert} from "antd" import {isBooleanMetricStats} from "@/oss/components/EvalRunDetails/utils/metricDistributions" import type {TemporalMetricPoint} from "@/oss/components/Evaluations/atoms/runMetrics" @@ -335,18 +335,9 @@ const BaseRunMetricsSection = ({baseRunId, comparisonRunIds}: BaseRunMetricsSect } return ( - - {runDisplayName} -
- } - > -
-
{renderContent()}
-
- +
+
{renderContent()}
+
) } diff --git a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/MetadataSummaryTable.tsx b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/MetadataSummaryTable.tsx index 0ca7cdf8d5..b51216cca8 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/MetadataSummaryTable.tsx +++ b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/MetadataSummaryTable.tsx @@ -10,7 +10,6 @@ import useEvaluatorReference from "@/oss/components/References/hooks/useEvaluato import type {BasicStats} from "@/oss/lib/metricUtils" import {useProjectData} from "@/oss/state/project" -import {getComparisonColor} from "../../../../atoms/compare" import {evaluationQueryRevisionAtomFamily} from "../../../../atoms/query" import { runCreatedAtAtomFamily, @@ -206,14 +205,19 @@ const StatusCell = ({runId}: MetadataCellProps) => { } const ApplicationCell = ({runId, projectURL}: MetadataCellProps) => ( -
- +
+
) const LegacyVariantCell = memo(({runId}: MetadataCellProps) => ( -
- +
+
)) @@ -235,8 +239,10 @@ const MetadataRunNameCell = memo( runId ?? "—" const accent = - accentColor ?? - (typeof runData?.accentColor === "string" ? (runData as any).accentColor : null) + accentColor === null + ? null + : (accentColor ?? + (typeof runData?.accentColor === "string" ? (runData as any).accentColor : null)) return (
@@ -248,7 +254,18 @@ const MetadataRunNameCell = memo( const LegacyTestsetsCell = memo(({runId, projectURL}: MetadataCellProps) => { const testsetAtom = useMemo(() => runTestsetIdsAtomFamily(runId), [runId]) const testsetIds = useAtomValueWithSchedule(testsetAtom, {priority: LOW_PRIORITY}) ?? [] - return + return ( +
+ +
+ ) }) const formatCurrency = (value: number | undefined | null) => { @@ -362,7 +379,14 @@ const InvocationErrorsCell = makeMetricCell("attributes.ag.metrics.errors.cumula }) const METADATA_ROWS: MetadataRowRecord[] = [ - {key: "evaluations", label: "Evaluations", Cell: MetadataRunNameCell}, + { + key: "testsets", + label: "Test set", + Cell: LegacyTestsetsCell, + shouldDisplay: ({snapshots}) => + snapshots.some(({testsetIds}) => (testsetIds?.length ?? 0) > 0), + }, + {key: "evaluation", label: "Evaluation", Cell: MetadataRunNameCell}, {key: "status", label: "Status", Cell: StatusCell}, {key: "created", label: "Created at", Cell: CreatedCell}, {key: "updated", label: "Updated at", Cell: UpdatedCell}, @@ -375,12 +399,12 @@ const METADATA_ROWS: MetadataRowRecord[] = [ const refs = invocationRefs?.rawRefs ?? {} return Boolean( invocationRefs?.applicationId || - refs?.application || - refs?.application_revision || - refs?.applicationRevision || - refs?.agent || - refs?.agent_revision || - refs?.agentRevision, + refs?.application || + refs?.application_revision || + refs?.applicationRevision || + refs?.agent || + refs?.agent_revision || + refs?.agentRevision, ) }), }, @@ -393,20 +417,13 @@ const METADATA_ROWS: MetadataRowRecord[] = [ const refs = invocationRefs?.rawRefs ?? {} return Boolean( invocationRefs?.variantId || - invocationRefs?.applicationVariantId || - refs?.variant || - refs?.applicationVariant || - refs?.application_variant, + invocationRefs?.applicationVariantId || + refs?.variant || + refs?.applicationVariant || + refs?.application_variant, ) }), }, - { - key: "testsets", - label: "Test sets", - Cell: LegacyTestsetsCell, - shouldDisplay: ({snapshots}) => - snapshots.some(({testsetIds}) => (testsetIds?.length ?? 0) > 0), - }, // {key: "scenarios", label: "Scenarios evaluated", Cell: ScenarioCountCell}, {key: "invocation_cost", label: "Cost (Total)", Cell: InvocationCostCell}, {key: "invocation_duration", label: "Duration (Total)", Cell: InvocationDurationCell}, @@ -422,7 +439,7 @@ const EvaluatorNameLabel = ({evaluatorId}: {evaluatorId: string}) => { const MetadataSummaryTable = ({runIds, projectURL}: MetadataSummaryTableProps) => { const orderedRunIds = useMemo(() => runIds.filter((id): id is string => Boolean(id)), [runIds]) - const {metricSelections, runColorMap, runDescriptors} = useRunMetricData(orderedRunIds) + const {metricSelections, runDescriptors} = useRunMetricData(orderedRunIds) const runReferenceSnapshotsAtom = useMemo( () => atom((get) => @@ -605,8 +622,6 @@ const MetadataSummaryTable = ({runIds, projectURL}: MetadataSummaryTableProps) = return rows }, [anyHasQuery, evaluatorMetricRows, rowContext]) - const isComparison = orderedRunIds.length > 1 - const columns = useMemo>(() => { const baseColumn = { title: null, @@ -625,47 +640,44 @@ const MetadataSummaryTable = ({runIds, projectURL}: MetadataSummaryTableProps) = key: runId, width: 160, onCell: (record: MetadataRowRecord) => { - if (!isComparison || record.key === "query_config") { - return {} + if (record.key === "testsets") { + return index === 0 ? {colSpan: orderedRunIds.length} : {colSpan: 0} } - const tone = getComparisonColor(index) - return tone ? 
{style: {backgroundColor: tone}} : {} + return {} + }, + render: (_: unknown, record: MetadataRowRecord) => { + if (record.key === "testsets" && index !== 0) { + return null + } + return ( + + ) }, - render: (_: unknown, record: MetadataRowRecord) => ( - - ), })) return [baseColumn, ...runColumns] - }, [isComparison, orderedRunIds, projectURL, runColorMap, runNameMap]) + }, [orderedRunIds, projectURL, runNameMap]) return ( -
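The `onCell` logic above merges the shared "Test set" row across every run column: the first run's cell spans the full width while the sibling cells collapse. In isolation, the antd `colSpan` convention it relies on looks like this (the column factory and row shape are illustrative, not from this diff):

```typescript
import type {TableColumnType} from "antd"

interface Row {
    key: string
}

// Sketch of antd's cell-merge convention: a cell that returns colSpan = N
// swallows its row, and every other cell in that row must return colSpan = 0
// so the table does not render it.
const makeRunColumn = (index: number, runCount: number): TableColumnType<Row> => ({
    key: `run-${index}`,
    onCell: (record) => {
        if (record.key === "testsets") {
            return index === 0 ? {colSpan: runCount} : {colSpan: 0}
        }
        return {}
    },
})
```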
-
- Evaluator Scores Overview - - Average evaluator score across evaluations - -
-
-
- - className="metadata-summary-table" - rowKey="key" - size="small" - pagination={false} - columns={columns} - dataSource={dataSource} - scroll={{x: "max-content"}} - showHeader={false} - /> -
+
+
+ + className="metadata-summary-table" + rowKey="key" + size="small" + pagination={false} + columns={columns} + dataSource={dataSource} + scroll={{x: "max-content"}} + showHeader={false} + bordered={true} + />
) diff --git a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/MetricComparisonCard.tsx b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/MetricComparisonCard.tsx index 5f554198d2..bdfde0110e 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/MetricComparisonCard.tsx +++ b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/MetricComparisonCard.tsx @@ -12,6 +12,7 @@ import { YAxis, } from "recharts" +import {buildHistogramChartData} from "@/oss/components/EvalRunDetails/components/EvaluatorMetricsChart/utils/chartData" import { buildBooleanHistogram, isBooleanMetricStats, @@ -111,13 +112,7 @@ const buildComparisonChartConfig = ( const histogramEntries = entries.map((entry) => ({ entry, - data: - entry.stats?.distribution ?? - entry.stats?.hist?.map((h: any) => ({ - x: h?.interval?.[0] ?? h?.value ?? h?.bin ?? 0, - y: h?.count ?? h?.value ?? 0, - })) ?? - [], + data: buildHistogramChartData(entry.stats as Record).data, })) if (histogramEntries.some(({data}) => Array.isArray(data) && data.length > 0)) { const rowMap = new Map< diff --git a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/OverviewPlaceholders.tsx b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/OverviewPlaceholders.tsx index 25bdb8062d..e304041fae 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/OverviewPlaceholders.tsx +++ b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/OverviewPlaceholders.tsx @@ -187,6 +187,7 @@ export const OverviewLoadingPlaceholder = ({ className={clsx( "flex w-full items-center justify-center rounded-lg bg-[#F8FAFC]", "border border-dashed border-[#E2E8F0]", + "h-full", )} style={{minHeight}} > @@ -204,6 +205,7 @@ export const OverviewEmptyPlaceholder = ({ className={clsx( "flex w-full flex-col items-center justify-center gap-2 rounded-lg bg-[#F8FAFC] px-6 py-10", "border border-dashed border-[#E2E8F0] text-center", + "h-full", )} style={{minHeight}} > diff --git a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/RunNameTag.tsx b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/RunNameTag.tsx index a30be878b2..4ffa5ff94f 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/RunNameTag.tsx +++ b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/components/RunNameTag.tsx @@ -59,12 +59,6 @@ const formatDateTime = (value: string | number | Date | null | undefined) => { const RunNameTag = ({runId, label, accentColor}: RunNameTagProps) => { const style = useMemo(() => buildAccentStyle(accentColor), [accentColor]) - const tooltip = useMemo(() => { - if (!label) return runId - if (label === runId) return label - return `${label} (${runId})` - }, [label, runId]) - const runQuery = useAtomValueWithSchedule( useMemo(() => evaluationRunQueryAtomFamily(runId), [runId]), {priority: LOW_PRIORITY}, @@ -97,10 +91,12 @@ const RunNameTag = ({runId, label, accentColor}: RunNameTagProps) => { const popoverContent = (
-
- - {label || runId} - +
+
+ + {label || runId} + +
Run details
{isLoading ? ( @@ -162,13 +158,7 @@ const RunNameTag = ({runId, label, accentColor}: RunNameTagProps) => { return ( - + ) } diff --git a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/utils/evaluatorMetrics.ts b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/utils/evaluatorMetrics.ts index 83f85fdc14..c17f436f6a 100644 --- a/web/oss/src/components/EvalRunDetails/components/views/OverviewView/utils/evaluatorMetrics.ts +++ b/web/oss/src/components/EvalRunDetails/components/views/OverviewView/utils/evaluatorMetrics.ts @@ -92,7 +92,9 @@ export const buildEvaluatorMetricEntries = ( if (!rawKey) return const canonicalKey = canonicalizeMetricKey(rawKey) if (hasSchema && !allowedCanonicalKeys.has(canonicalKey)) { - return + if (!rawKey.startsWith("attributes.ag.data.outputs.")) { + return + } } if (!unique.has(canonicalKey)) { const fallbackDefinition = fallbackByCanonicalKey.get(canonicalKey) diff --git a/web/oss/src/components/EvalRunDetails/hooks/useCellVisibility.ts b/web/oss/src/components/EvalRunDetails/hooks/useCellVisibility.ts index 4a5732d4fb..1949906009 100644 --- a/web/oss/src/components/EvalRunDetails/hooks/useCellVisibility.ts +++ b/web/oss/src/components/EvalRunDetails/hooks/useCellVisibility.ts @@ -1,81 +1,32 @@ -import {useCallback, useEffect, useMemo, useState} from "react" +import {useCallback, useEffect, useState} from "react" import {useVirtualTableScrollContainer} from "@/oss/components/InfiniteVirtualTable" -const COLUMN_LOOKAHEAD = 1.6 -const ROW_LOOKAHEAD = 2 -const MIN_HORIZONTAL_BUFFER = 120 -const MAX_HORIZONTAL_BUFFER = 800 -const MIN_VERTICAL_BUFFER = 160 -const MAX_VERTICAL_BUFFER = 400 - +// Fixed buffer values - no need for dynamic calculation per cell +// These provide generous lookahead for smooth scrolling +const HORIZONTAL_BUFFER = 400 +const VERTICAL_BUFFER = 300 +const ROOT_MARGIN = `${VERTICAL_BUFFER}px ${HORIZONTAL_BUFFER}px ${VERTICAL_BUFFER}px ${HORIZONTAL_BUFFER}px` + +/** + * Optimized cell visibility hook. + * Uses a single IntersectionObserver per cell with fixed margins. + * Removed ResizeObserver to reduce overhead - uses fixed buffer instead. + * + * Returns `hasBeenVisible` to track if the cell has ever been visible, + * which prevents showing loading state when scrolling back to already-loaded cells. 
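+ *
+ * A minimal usage sketch (the markup below is illustrative, not part of
+ * this module):
+ *
+ * @example
+ * const {ref, hasBeenVisible} = useCellVisibility()
+ * return <div ref={ref}>{hasBeenVisible ? children : skeletonFallback}</div>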
+ */ export const useCellVisibility = () => { const scrollContainer = useVirtualTableScrollContainer() const [element, setElement] = useState(null) const [isVisible, setIsVisible] = useState(false) - const [dimensions, setDimensions] = useState<{width: number; height: number}>({ - width: 0, - height: 0, - }) + // Track if cell has ever been visible - once true, stays true (use state for re-render) + const [hasBeenVisible, setHasBeenVisible] = useState(false) const ref = useCallback((node: HTMLDivElement | null) => { setElement(node) }, []) - useEffect(() => { - if (!element) { - setDimensions({width: 0, height: 0}) - return undefined - } - - if (typeof ResizeObserver === "undefined") { - const rect = element.getBoundingClientRect() - setDimensions({ - width: rect.width, - height: rect.height, - }) - return undefined - } - - const resizeObserver = new ResizeObserver((entries) => { - entries.forEach((entry) => { - if (entry.target === element) { - const {width, height} = entry.contentRect - setDimensions({ - width: width || element.offsetWidth || 0, - height: height || element.offsetHeight || 0, - }) - } - }) - }) - - resizeObserver.observe(element) - return () => { - resizeObserver.disconnect() - } - }, [element]) - - const horizontalBuffer = useMemo(() => { - const base = dimensions.width ? dimensions.width * COLUMN_LOOKAHEAD : MIN_HORIZONTAL_BUFFER - return Math.max( - MIN_HORIZONTAL_BUFFER, - Math.min(base || MIN_HORIZONTAL_BUFFER, MAX_HORIZONTAL_BUFFER), - ) - }, [dimensions.width]) - - const verticalBuffer = useMemo(() => { - const base = dimensions.height ? dimensions.height * ROW_LOOKAHEAD : MIN_VERTICAL_BUFFER - return Math.max( - MIN_VERTICAL_BUFFER, - Math.min(base || MIN_VERTICAL_BUFFER, MAX_VERTICAL_BUFFER), - ) - }, [dimensions.height]) - - const rootMargin = useMemo( - () => `${verticalBuffer}px ${horizontalBuffer}px ${verticalBuffer}px ${horizontalBuffer}px`, - [horizontalBuffer, verticalBuffer], - ) - useEffect(() => { if (!element) { setIsVisible(false) @@ -90,20 +41,26 @@ export const useCellVisibility = () => { if (!root) { setIsVisible(true) + return undefined } const observer = new IntersectionObserver( (entries) => { - entries.forEach((entry) => { - if (entry.target === element) { - setIsVisible(entry.isIntersecting || !root) + // Only process the first entry since we observe a single element + const entry = entries[0] + if (entry) { + const nowVisible = entry.isIntersecting + setIsVisible(nowVisible) + // Once visible, mark as having been visible (only set, never unset) + if (nowVisible) { + setHasBeenVisible(true) } - }) + } }, { root, threshold: 0, - rootMargin, + rootMargin: ROOT_MARGIN, }, ) @@ -111,9 +68,9 @@ export const useCellVisibility = () => { return () => { observer.disconnect() } - }, [element, scrollContainer, rootMargin]) + }, [element, scrollContainer]) - return {ref, isVisible} + return {ref, isVisible, hasBeenVisible} } export default useCellVisibility diff --git a/web/oss/src/components/EvalRunDetails/hooks/useScenarioCellValue.ts b/web/oss/src/components/EvalRunDetails/hooks/useScenarioCellValue.ts index 90da8e6f08..edcc944faf 100644 --- a/web/oss/src/components/EvalRunDetails/hooks/useScenarioCellValue.ts +++ b/web/oss/src/components/EvalRunDetails/hooks/useScenarioCellValue.ts @@ -1,4 +1,4 @@ -import {useMemo} from "react" +import {useMemo, useRef} from "react" import {LOW_PRIORITY, useAtomValueWithSchedule} from "jotai-scheduler" @@ -29,9 +29,13 @@ const useScenarioCellValue = ({ disableVisibilityTracking = false, }: UseScenarioCellValueArgs) => 
{ const {ref, isVisible} = useCellVisibility() - const enabled = disableVisibilityTracking ? true : isVisible - const columnConfig = useMemo(() => buildColumnValueConfig(column, {enabled}), [column, enabled]) + // Cache the last valid selection to prevent flickering when scrolling + const cachedSelectionRef = useRef(null) + + // Always use enabled: true for the atom key to maintain cache identity + // Visibility-based loading is handled at the component level, not atom level + const columnConfig = useMemo(() => buildColumnValueConfig(column, {enabled: true}), [column]) const selectionAtom = useMemo( () => scenarioColumnValueSelectionAtomFamily({scenarioId, runId, column: columnConfig}), [scenarioId, runId, columnConfig], @@ -39,13 +43,24 @@ const useScenarioCellValue = ({ const selection = useAtomValueWithSchedule(selectionAtom, {priority: LOW_PRIORITY}) - const showSkeleton = disableVisibilityTracking - ? selection.isLoading - : !isVisible || selection.isLoading + // Update cache when we have a valid value + if (selection.value !== undefined && selection.value !== null) { + cachedSelectionRef.current = selection + } + + // Use cached value if available and current selection has no value + const effectiveSelection = + selection.value !== undefined && selection.value !== null + ? selection + : (cachedSelectionRef.current ?? selection) + + // Show skeleton only during initial load when we have no data yet + const hasValue = effectiveSelection.value !== undefined && effectiveSelection.value !== null + const showSkeleton = !hasValue && selection.isLoading return { ref: disableVisibilityTracking ? undefined : ref, - selection, + selection: effectiveSelection, isVisible: disableVisibilityTracking ? true : isVisible, showSkeleton, } diff --git a/web/oss/src/components/EvaluationRunsTablePOC/atoms/view.ts b/web/oss/src/components/EvaluationRunsTablePOC/atoms/view.ts index abb202bed2..4e2440f2d6 100644 --- a/web/oss/src/components/EvaluationRunsTablePOC/atoms/view.ts +++ b/web/oss/src/components/EvaluationRunsTablePOC/atoms/view.ts @@ -600,9 +600,9 @@ export const evaluationRunsFilterOptionsAtom = atom((get) => { : [] const evaluatorLoading = Boolean( isActive && - (evaluatorQueries?.isLoading || - evaluatorQueries?.isPending || - evaluatorQueries?.isFetching), + (evaluatorQueries?.isLoading || + evaluatorQueries?.isPending || + evaluatorQueries?.isFetching), ) const evaluatorOptions = diff --git a/web/oss/src/components/EvaluationRunsTablePOC/components/EvaluationRunsCreateButton.tsx b/web/oss/src/components/EvaluationRunsTablePOC/components/EvaluationRunsCreateButton.tsx index 3d6c012f7e..6eee82b2bb 100644 --- a/web/oss/src/components/EvaluationRunsTablePOC/components/EvaluationRunsCreateButton.tsx +++ b/web/oss/src/components/EvaluationRunsTablePOC/components/EvaluationRunsCreateButton.tsx @@ -1,7 +1,7 @@ import {useCallback, useEffect, useMemo} from "react" -import {CaretDown, Check, Plus} from "@phosphor-icons/react" -import {Button, Dropdown, Space, Tooltip, type MenuProps} from "antd" +import {PlusIcon} from "@phosphor-icons/react" +import {Button, Dropdown, Tooltip, type MenuProps} from "antd" import {useAtom, useAtomValue} from "jotai" import { @@ -37,13 +37,17 @@ const createTypeCopy: Record< }, } -const isSupportedCreateType = (value: string): value is SupportedCreateType => - SUPPORTED_CREATE_TYPES.includes(value as SupportedCreateType) +const isSupportedCreateType = (value: unknown): value is SupportedCreateType => { + return typeof value === "string" && (SUPPORTED_CREATE_TYPES as 
string[]).includes(value) +} + +const FALLBACK_CREATE_TYPE: SupportedCreateType = "auto" const EvaluationRunsCreateButton = () => { const {createEnabled, createTooltip, evaluationKind, defaultCreateType, scope} = useAtomValue( evaluationRunsTableHeaderStateAtom, ) + const isAllTab = evaluationKind === "all" const isAppScoped = scope === "app" const [createOpen, setCreateOpen] = useAtom(evaluationRunsCreateModalOpenAtom) const [selectedCreateType, setSelectedCreateType] = useAtom( @@ -52,40 +56,50 @@ const EvaluationRunsCreateButton = () => { const [createTypePreference, setCreateTypePreference] = useAtom( evaluationRunsCreateTypePreferenceAtom, ) - const isAllTab = evaluationKind === "all" + + const availableTypes = useMemo(() => { + if (!isAllTab) return [] + if (isAppScoped) return SUPPORTED_CREATE_TYPES.filter((t) => t !== "online") + return SUPPORTED_CREATE_TYPES + }, [isAllTab, isAppScoped]) + + const normalizeAllTabType = useCallback( + (value: unknown): SupportedCreateType => { + const candidate = isSupportedCreateType(value) ? value : FALLBACK_CREATE_TYPE + return availableTypes.includes(candidate) + ? candidate + : (availableTypes[0] ?? FALLBACK_CREATE_TYPE) + }, + [availableTypes], + ) useEffect(() => { - if (!createEnabled && createOpen) { - setCreateOpen(false) - } + if (!createEnabled && createOpen) setCreateOpen(false) }, [createEnabled, createOpen, setCreateOpen]) useEffect(() => { - if (!isAllTab && defaultCreateType && selectedCreateType !== defaultCreateType) { - setSelectedCreateType(defaultCreateType) - } + if (isAllTab) return + if (!defaultCreateType) return + if (selectedCreateType !== defaultCreateType) setSelectedCreateType(defaultCreateType) }, [defaultCreateType, isAllTab, selectedCreateType, setSelectedCreateType]) useEffect(() => { if (!isAllTab) return - const normalizedPreference = isSupportedCreateType(createTypePreference) - ? createTypePreference - : "auto" - if (!isSupportedCreateType(createTypePreference)) { - setCreateTypePreference(normalizedPreference) - } - if (selectedCreateType !== normalizedPreference) { - setSelectedCreateType(normalizedPreference) - } + + const normalized = normalizeAllTabType(createTypePreference) + + if (createTypePreference !== normalized) setCreateTypePreference(normalized) + if (selectedCreateType !== normalized) setSelectedCreateType(normalized) }, [ - createTypePreference, isAllTab, + createTypePreference, selectedCreateType, setCreateTypePreference, setSelectedCreateType, + normalizeAllTabType, ]) - const handlePrimaryClick = useCallback(() => { + const openCreateModal = useCallback(() => { if (!createEnabled) return setCreateOpen(true) }, [createEnabled, setCreateOpen]) @@ -93,74 +107,56 @@ const EvaluationRunsCreateButton = () => { const handleMenuClick = useCallback>( ({key}) => { if (!isSupportedCreateType(key)) return - setSelectedCreateType(key) - setCreateTypePreference(key) - if (!createEnabled) return - setCreateOpen(true) + + const normalized = normalizeAllTabType(key) + + setSelectedCreateType(normalized) + setCreateTypePreference(normalized) + openCreateModal() }, - [createEnabled, setCreateOpen, setCreateTypePreference, setSelectedCreateType], + [normalizeAllTabType, openCreateModal, setCreateTypePreference, setSelectedCreateType], ) - const dropdownMenuItems = useMemo(() => { + const menuItems = useMemo(() => { if (!isAllTab) return [] - // Filter out "online" (Live Evaluation) in app-scoped views - const availableTypes = isAppScoped - ? 
SUPPORTED_CREATE_TYPES.filter((type) => type !== "online") - : SUPPORTED_CREATE_TYPES + return availableTypes.map((type) => { const copy = createTypeCopy[type] - const isActive = selectedCreateType === type return { key: type, label: ( -
-                    <Space size={8} align="center">
-                        {isActive ? <Check size={14} /> : null}
-                        <div className="flex flex-col">
-                            <span className="font-medium">{copy.title}</span>
-                            <span className="text-xs text-gray-500">{copy.description}</span>
-                        </div>
-                    </Space>
+                    <div className="flex flex-col">
+                        <span className="font-medium">{copy.title}</span>
+                        <span className="text-xs text-gray-500">{copy.description}</span>
+                    </div>
), } }) - }, [isAllTab, isAppScoped, selectedCreateType]) - - const buttonLabel = useMemo(() => { - if (!isAllTab) return "New Evaluation" - const shortLabel = isSupportedCreateType(selectedCreateType) - ? createTypeCopy[selectedCreateType]?.short - : null - return shortLabel ? `New ${shortLabel} Evaluation` : "New Evaluation" - }, [isAllTab, selectedCreateType]) + }, [availableTypes, isAllTab]) return (
{isAllTab ? ( - + - - diff --git a/web/oss/src/components/EvaluationRunsTablePOC/components/filters/EvaluationRunsFiltersContent.tsx b/web/oss/src/components/EvaluationRunsTablePOC/components/filters/EvaluationRunsFiltersContent.tsx index 9725f1a464..6c06983fed 100644 --- a/web/oss/src/components/EvaluationRunsTablePOC/components/filters/EvaluationRunsFiltersContent.tsx +++ b/web/oss/src/components/EvaluationRunsTablePOC/components/filters/EvaluationRunsFiltersContent.tsx @@ -5,7 +5,7 @@ import {Button, Divider, Select, Tag, Typography} from "antd" import {useAtomValue, useSetAtom} from "jotai" import type {RunFlagsFilter} from "@/oss/lib/hooks/usePreviewEvaluations" -import {useTestsetsData} from "@/oss/state/testset" +import {testsetsListQueryAtomFamily} from "@/oss/state/entities/testset" import {evaluationRunsTableComponentSliceAtom} from "../../atoms/context" import { @@ -172,9 +172,9 @@ const EvaluationRunsFiltersContent = ({isOpen, onClose}: EvaluationRunsFiltersCo const setDraft = useSetAtom(evaluationRunsFiltersDraftAtom) const initializeDraft = useSetAtom(evaluationRunsFiltersDraftInitializeAtom) const clearDraft = useSetAtom(evaluationRunsFiltersDraftClearAtom) - const {testsets, isLoading: testsetsLoading} = useTestsetsData({ - enabled: Boolean(projectId && isOpen), - }) + const testsetsQuery = useAtomValue(testsetsListQueryAtomFamily(null)) + const testsets = testsetsQuery.data?.testsets ?? [] + const testsetsLoading = testsetsQuery.isPending const draftStatusFilters = draft?.statusFilters ?? summary.statusFilters const draftReferences = draft?.referenceFilters ?? createReferenceDraftFromSummary(summary) diff --git a/web/oss/src/components/EvaluationRunsTablePOC/components/filters/EvaluationRunsHeaderFilters.tsx b/web/oss/src/components/EvaluationRunsTablePOC/components/filters/EvaluationRunsHeaderFilters.tsx index 89e9948624..47a27115ae 100644 --- a/web/oss/src/components/EvaluationRunsTablePOC/components/filters/EvaluationRunsHeaderFilters.tsx +++ b/web/oss/src/components/EvaluationRunsTablePOC/components/filters/EvaluationRunsHeaderFilters.tsx @@ -9,9 +9,8 @@ import { getReferenceToneColors, type ReferenceTone, } from "@/oss/components/References/referenceColors" -import {useTestsetsData} from "@/oss/state/testset" +import {testsetsListQueryAtomFamily} from "@/oss/state/entities/testset" -import {evaluationRunsTableComponentSliceAtom} from "../../atoms/context" import { evaluationRunsFilterOptionsAtom, evaluationRunsFiltersSummaryAtom, @@ -100,10 +99,9 @@ const isReferenceChipPending = (payload: {label?: string; value: string; loading const FiltersSummary = () => { const summary = useAtomValue(evaluationRunsFiltersSummaryAtom) - const {projectId} = useAtomValue(evaluationRunsTableComponentSliceAtom) - const {testsets, isLoading: testsetsLoading} = useTestsetsData({ - enabled: Boolean(projectId && summary.testsetFilters.length > 0), - }) + const testsetsQuery = useAtomValue(testsetsListQueryAtomFamily(null)) + const testsets = testsetsQuery.data?.testsets ?? 
[] + const testsetsLoading = testsetsQuery.isPending const hasEvaluatorFilters = summary.evaluatorFilters.length > 0 const hasAppFilters = summary.appFilters.length > 0 const hasVariantFilters = summary.variantFilters.length > 0 diff --git a/web/oss/src/components/EvaluationRunsTablePOC/hooks/usePreviewRunSummary.ts b/web/oss/src/components/EvaluationRunsTablePOC/hooks/usePreviewRunSummary.ts index c6675c855e..1094f62cfb 100644 --- a/web/oss/src/components/EvaluationRunsTablePOC/hooks/usePreviewRunSummary.ts +++ b/web/oss/src/components/EvaluationRunsTablePOC/hooks/usePreviewRunSummary.ts @@ -58,10 +58,10 @@ export const usePreviewRunSummary = ( const hasSummary = Boolean(summary) const isLoading = Boolean( enabled && - projectId && - runId && - !hasSummary && - (summaryQuery?.isLoading || summaryQuery?.isFetching || summaryQuery?.isPending), + projectId && + runId && + !hasSummary && + (summaryQuery?.isLoading || summaryQuery?.isFetching || summaryQuery?.isPending), ) const stepReferences = summary?.stepReferences as Record | undefined diff --git a/web/oss/src/components/Evaluations/EvaluatorMetricBar.tsx b/web/oss/src/components/Evaluations/EvaluatorMetricBar.tsx index 9f6e48350e..ca7675a700 100644 --- a/web/oss/src/components/Evaluations/EvaluatorMetricBar.tsx +++ b/web/oss/src/components/Evaluations/EvaluatorMetricBar.tsx @@ -1,3 +1,5 @@ +import {memo} from "react" + import {Tooltip} from "antd" import clsx from "clsx" @@ -81,111 +83,118 @@ const resolveSegments = (stats?: BasicStats): {label: string; value: number}[] = return [] } -const EvaluatorMetricBar = ({ - stats, - segments: explicitSegments, - width = BASE_BAR_WIDTH, - className, -}: EvaluatorMetricBarProps) => { - const segments = - (explicitSegments && explicitSegments.length ? explicitSegments : undefined) ?? - resolveSegments(stats) - if (!segments.length) { - return null - } - - const total = segments.reduce((sum, entry) => sum + entry.value, 0) - if (!total || !Number.isFinite(total) || total <= 0) { - return null - } - - const normalized = segments - .map((entry) => ({ - label: entry.label, - value: entry.value, - ratio: entry.value / total, - percent: (entry.value / total) * 100, - })) - .sort((a, b) => b.ratio - a.ratio) - - const booleanCandidates = new Set( - [ - ...normalized.map((entry) => entry.label?.toString().toLowerCase()), - ...((Array.isArray((stats as any)?.unique) ? (stats as any).unique : []) as any[]).map( - (val) => val?.toString().toLowerCase(), - ), - ].filter(Boolean), - ) - - // Always enforce deterministic ordering for boolean categories: [true, false] - const isBoolean = booleanCandidates.has("true") || booleanCandidates.has("false") - const displaySegments = (() => { - if (!isBoolean) return [...normalized] - const byKey = new Map( - normalized.map((e) => [e.label?.toString().toLowerCase(), e] as const), +const EvaluatorMetricBar = memo( + ({ + stats, + segments: explicitSegments, + width = BASE_BAR_WIDTH, + className, + }: EvaluatorMetricBarProps) => { + const segments = + (explicitSegments && explicitSegments.length ? explicitSegments : undefined) ?? 
+ resolveSegments(stats) + if (!segments.length) { + return null + } + + const total = segments.reduce((sum, entry) => sum + entry.value, 0) + if (!total || !Number.isFinite(total) || total <= 0) { + return null + } + + const normalized = segments + .map((entry) => ({ + label: entry.label, + value: entry.value, + ratio: entry.value / total, + percent: (entry.value / total) * 100, + })) + .sort((a, b) => b.ratio - a.ratio) + + const booleanCandidates = new Set( + [ + ...normalized.map((entry) => entry.label?.toString().toLowerCase()), + ...( + (Array.isArray((stats as any)?.unique) ? (stats as any).unique : []) as any[] + ).map((val) => val?.toString().toLowerCase()), + ].filter(Boolean), ) - const trueSeg = - byKey.get("true") ?? ({label: "true", value: 0, ratio: 0, percent: 0} as const) - const falseSeg = - byKey.get("false") ?? ({label: "false", value: 0, ratio: 0, percent: 0} as const) - return [trueSeg, falseSeg] - })() - - if (!displaySegments.length) { - return null - } - const legendEntries = displaySegments.slice(0, Math.min(2, displaySegments.length)) - - return ( -
+ // Always enforce deterministic ordering for boolean categories: [true, false] + const isBoolean = booleanCandidates.has("true") || booleanCandidates.has("false") + const displaySegments = (() => { + if (!isBoolean) return [...normalized] + const byKey = new Map( + normalized.map((e) => [e.label?.toString().toLowerCase(), e] as const), + ) + const trueSeg = + byKey.get("true") ?? ({label: "true", value: 0, ratio: 0, percent: 0} as const) + const falseSeg = + byKey.get("false") ?? ({label: "false", value: 0, ratio: 0, percent: 0} as const) + return [trueSeg, falseSeg] + })() + + if (!displaySegments.length) { + return null + } + + const legendEntries = displaySegments.slice(0, Math.min(2, displaySegments.length)) + + return (
- {displaySegments.map((entry, index) => ( - +
+ {displaySegments.map((entry, index) => ( + +
+ + ))} +
+
+ {legendEntries.map((entry, index) => (
- - ))} -
-
- {legendEntries.map((entry, index) => ( -
- - {entry.label} - - {Formatter.format(entry.percent)}% - -
- ))} + key={`${entry.label}-legend-${index}`} + className="flex items-center gap-1.5" + style={{color: getLabelColor(entry.label, index)}} + > + + {entry.label} + + {Formatter.format(entry.percent)}% + +
+ ))} +
-
- ) -} + ) + }, +) export default EvaluatorMetricBar diff --git a/web/oss/src/components/Evaluations/components/MetricDetailsPreviewPopover.tsx b/web/oss/src/components/Evaluations/components/MetricDetailsPreviewPopover.tsx index eb86ce193a..5c2c20b834 100644 --- a/web/oss/src/components/Evaluations/components/MetricDetailsPreviewPopover.tsx +++ b/web/oss/src/components/Evaluations/components/MetricDetailsPreviewPopover.tsx @@ -668,6 +668,7 @@ const MetricDetailsPreviewPopover = memo( prefetchedStats, evaluationType, scenarioTimestamp, + fullWidth = true, children, }: { runId?: string @@ -684,6 +685,8 @@ const MetricDetailsPreviewPopover = memo( evaluationType?: "auto" | "human" | "online" | "custom" /** Timestamp for the scenario row (used for online evaluations to get temporal stats) */ scenarioTimestamp?: string | number | null + /** Controls whether the trigger wrapper stretches to full width */ + fullWidth?: boolean children: React.ReactNode }) => { const [shouldLoad, setShouldLoad] = useState(false) @@ -716,7 +719,7 @@ const MetricDetailsPreviewPopover = memo( /> } > -
-            <div className="w-full">{children}</div>
+            <div className={fullWidth ? "w-full" : undefined}>{children}</div>
             </Popover>
) }, diff --git a/web/oss/src/components/Evaluators/assets/evaluatorFiltering.ts b/web/oss/src/components/Evaluators/assets/evaluatorFiltering.ts index ec01f427c4..ff617479c5 100644 --- a/web/oss/src/components/Evaluators/assets/evaluatorFiltering.ts +++ b/web/oss/src/components/Evaluators/assets/evaluatorFiltering.ts @@ -20,6 +20,7 @@ export const ENABLED_EVALUATORS = [ "auto_semantic_similarity", "auto_regex_test", "field_match_test", + "json_multi_field_match", "auto_json_diff", "auto_ai_critique", "auto_custom_code_run", diff --git a/web/oss/src/components/Evaluators/components/ConfigureEvaluator/index.tsx b/web/oss/src/components/Evaluators/components/ConfigureEvaluator/index.tsx index f21209048a..29d317cc72 100644 --- a/web/oss/src/components/Evaluators/components/ConfigureEvaluator/index.tsx +++ b/web/oss/src/components/Evaluators/components/ConfigureEvaluator/index.tsx @@ -32,7 +32,9 @@ import ConfigureEvaluatorSkeleton from "./assets/ConfigureEvaluatorSkeleton" const ConfigureEvaluator = dynamic( () => - import("@/oss/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator"), + import( + "@/oss/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator" + ), {ssr: false}, ) diff --git a/web/oss/src/components/Evaluators/components/SelectEvaluatorModal/index.tsx b/web/oss/src/components/Evaluators/components/SelectEvaluatorModal/index.tsx index 562868a8a2..b1168e1993 100644 --- a/web/oss/src/components/Evaluators/components/SelectEvaluatorModal/index.tsx +++ b/web/oss/src/components/Evaluators/components/SelectEvaluatorModal/index.tsx @@ -17,7 +17,7 @@ const SelectEvaluatorModal = ({open, onCancel, ...modalProps}: SelectEvaluatorMo footer={null} width={520} className="[&_.ant-modal-container]:!p-0 [&_.ant-modal-content]:!rounded-xl [&_.ant-modal-content]:!p-0" - classNames={{body: "!p-0"}} + classNames={{body: "!p-0 !overflow-hidden"}} {...modalProps} > diff --git a/web/oss/src/components/Filters/Sort.tsx b/web/oss/src/components/Filters/Sort.tsx index 5fc148b58f..b354514d4d 100644 --- a/web/oss/src/components/Filters/Sort.tsx +++ b/web/oss/src/components/Filters/Sort.tsx @@ -22,7 +22,7 @@ const useStyles = createUseStyles((theme: JSSTheme) => ({ flexDirection: "column", }, popover: { - "& .ant-popover-inner": { + "& .ant-popover-container": { transition: "width 0.3s ease", padding: 4, }, @@ -48,8 +48,9 @@ export interface SortResult { type: "custom" | "standard" sorted: string customRange?: {startTime?: string; endTime?: string} + label?: SortTypes } -type SortTypes = +export type SortTypes = | "30 mins" | "1 hour" | "6 hours" @@ -69,9 +70,12 @@ interface CustomTimeRange { interface Props { onSortApply: ({type, sorted, customRange}: SortResult) => void defaultSortValue: SortTypes + type?: "link" | "text" | "default" | "primary" | "dashed" + disabled?: boolean + exclude?: SortTypes[] } -const Sort: React.FC = ({onSortApply, defaultSortValue}) => { +const Sort: React.FC = ({onSortApply, defaultSortValue, type, disabled, exclude}) => { const classes = useStyles() const [sort, setSort] = useState(defaultSortValue) @@ -121,6 +125,7 @@ const Sort: React.FC = ({onSortApply, defaultSortValue}) => { type: sortData == "custom" ? "custom" : "standard", sorted: sortedTime as string, customRange: customRangeTime, + label: sortData, }) } @@ -178,32 +183,42 @@ const Sort: React.FC = ({onSortApply, defaultSortValue}) => {
- {options.map((item) => ( -
onSelectItem(item)} - className={`${classes.popupItems} ${sort === item.value && classes.popupSelectedItem}`} - > - {item.label} -
- ))} + {options + .filter( + (item) => + !exclude?.includes(item.value as SortTypes) && + !exclude?.includes(item.label as SortTypes), + ) + .map((item) => ( +
onSelectItem(item)} + className={`${classes.popupItems} ${sort === item.value && classes.popupSelectedItem}`} + > + {item.label} +
+ ))} -
- -
+ {!exclude?.includes("custom") && ( + <> +
+ +
-
{ - setCustomOptionSelected(true) - setSort("custom") - }} - > - - Define start and end time - - -
+
{ + setCustomOptionSelected(true) + setSort("custom") + }} + > + + Define start and end time + + +
+ + )}
@@ -276,6 +291,8 @@ const Sort: React.FC = ({onSortApply, defaultSortValue}) => { } >
) }, } @@ -300,7 +304,11 @@ function createUserColumn(def: UserColumnDef) render: (value: string | null | undefined, record: T) => { if (record.__isSkeleton) return null const userId = getUserId ? getUserId(record) : value - return + return ( +
+ +
+ ) }, onHeaderCell: () => ({ style: {minWidth: width}, diff --git a/web/oss/src/components/InfiniteVirtualTable/components/ColumnVisibilityHeader.tsx b/web/oss/src/components/InfiniteVirtualTable/components/ColumnVisibilityHeader.tsx index 5f893a0ec4..6bb9d61c6a 100644 --- a/web/oss/src/components/InfiniteVirtualTable/components/ColumnVisibilityHeader.tsx +++ b/web/oss/src/components/InfiniteVirtualTable/components/ColumnVisibilityHeader.tsx @@ -35,7 +35,10 @@ const ColumnVisibilityHeader = forwardRef + {children} ) diff --git a/web/oss/src/components/InfiniteVirtualTable/components/InfiniteVirtualTableInner.tsx b/web/oss/src/components/InfiniteVirtualTable/components/InfiniteVirtualTableInner.tsx index fb92c2d142..7ca296bd19 100644 --- a/web/oss/src/components/InfiniteVirtualTable/components/InfiniteVirtualTableInner.tsx +++ b/web/oss/src/components/InfiniteVirtualTable/components/InfiniteVirtualTableInner.tsx @@ -15,6 +15,7 @@ import clsx from "clsx" import {useSetAtom} from "jotai" import { + deleteColumnViewportVisibilityAtom, setColumnUserVisibilityAtom, setColumnViewportVisibilityAtom, } from "../atoms/columnVisibility" @@ -148,6 +149,7 @@ const InfiniteVirtualTableInnerBase = ({ [resizableProcessedColumns], ) const internalViewportVisibilityHandler = useSetAtom(setColumnViewportVisibilityAtom) + const internalViewportVisibilityDeleteHandler = useSetAtom(deleteColumnViewportVisibilityAtom) const internalUserVisibilityHandler = useSetAtom(setColumnUserVisibilityAtom) const viewportVisibilityHandler = handleViewportVisibilityChange ?? internalViewportVisibilityHandler @@ -216,6 +218,7 @@ const InfiniteVirtualTableInnerBase = ({ scopeId: resolvedScopeId, containerRef: visibilityRootRef, onVisibilityChange: viewportVisibilityHandler, + onColumnUnregister: internalViewportVisibilityDeleteHandler, enabled: visibilityTrackingEnabled, suspendUpdates: isResizing, viewportMargin: columnVisibility?.viewportMargin, @@ -341,18 +344,30 @@ const InfiniteVirtualTableInnerBase = ({ setTableHeaderHeight(null) return } + let frameId: number | null = null const updateHeight = () => { - const nextHeight = headerEl.getBoundingClientRect().height - setTableHeaderHeight((prev) => { - if (prev === nextHeight) return prev - return Number.isFinite(nextHeight) ? nextHeight : prev + if (frameId !== null) { + cancelAnimationFrame(frameId) + } + frameId = requestAnimationFrame(() => { + frameId = null + const nextHeight = headerEl.getBoundingClientRect().height + setTableHeaderHeight((prev) => { + if (prev === nextHeight) return prev + return Number.isFinite(nextHeight) ? nextHeight : prev + }) }) } const observer = new ResizeObserver(() => updateHeight()) observer.observe(headerEl) updateHeight() - return () => observer.disconnect() - }, [columns, dataSource, resolvedTableProps.components]) + return () => { + if (frameId !== null) { + cancelAnimationFrame(frameId) + } + observer.disconnect() + } + }, []) const scrollConfig = useMemo(() => { if (typeof bodyHeight === "number" && Number.isFinite(bodyHeight)) { @@ -383,12 +398,13 @@ const InfiniteVirtualTableInnerBase = ({ if (typeof rawX === "number" || typeof rawX === "string") { return rawX } - // Use the larger of computedScrollX or scrollX (container width) - // This ensures columns fill available space when total < container - // and enables horizontal scroll when total > container const computed = Number.isFinite(computedScrollX) && computedScrollX > 0 ? computedScrollX : 0 const container = scrollX > 0 ? 
scrollX : 0 + + // Always use the larger of computed or container width + // The sum constraint is enforced in computeSmartWidths, + // so computed should always >= container const maxWidth = Math.max(computed, container) return maxWidth > 0 ? maxWidth : undefined })() @@ -428,11 +444,18 @@ const InfiniteVirtualTableInnerBase = ({ tableHeaderHeight, ]) - const {scrollContainer, visibilityRoot} = useScrollContainer(containerRef, { - scrollX: scrollConfig.x, - scrollY: scrollConfig.y, - className: resolvedTableProps.className, - }) + // Memoize dependencies object to prevent unnecessary useEffect runs in useScrollContainer + // Without memoization, a new object is created every render, causing infinite loops during scroll + const scrollContainerDeps = useMemo( + () => ({ + scrollX: scrollConfig.x, + scrollY: scrollConfig.y, + className: resolvedTableProps.className, + }), + [scrollConfig.x, scrollConfig.y, resolvedTableProps.className], + ) + + const {scrollContainer, visibilityRoot} = useScrollContainer(containerRef, scrollContainerDeps) // Sync visibilityRootRef with visibilityRoot from hook useEffect(() => { @@ -572,7 +595,7 @@ const InfiniteVirtualTableInnerBase = ({ x: scrollConfig.x, y: scrollConfig.y, }} - virtual={true} + virtual />
diff --git a/web/oss/src/components/InfiniteVirtualTable/components/TableShell.tsx b/web/oss/src/components/InfiniteVirtualTable/components/TableShell.tsx index cd945ba782..fd42a55df6 100644 --- a/web/oss/src/components/InfiniteVirtualTable/components/TableShell.tsx +++ b/web/oss/src/components/InfiniteVirtualTable/components/TableShell.tsx @@ -34,16 +34,26 @@ const TableShell = ({ children, }: TableShellProps) => { const headerRef = useRef(null) + const lastHeightRef = useRef(0) useLayoutEffect(() => { if (!onHeaderHeightChange) return const element = headerRef.current if (!element) { - onHeaderHeightChange(0) + if (lastHeightRef.current !== 0) { + lastHeightRef.current = 0 + onHeaderHeightChange(0) + } return } const update = () => { - onHeaderHeightChange(element.getBoundingClientRect().height) + const nextHeight = element.getBoundingClientRect().height + // Only call callback if height actually changed + // This prevents infinite loops during horizontal scroll + if (lastHeightRef.current !== nextHeight) { + lastHeightRef.current = nextHeight + onHeaderHeightChange(nextHeight) + } } update() const observer = new ResizeObserver(() => update()) diff --git a/web/oss/src/components/InfiniteVirtualTable/components/columnVisibility/ColumnVisibilityMenuTrigger.tsx b/web/oss/src/components/InfiniteVirtualTable/components/columnVisibility/ColumnVisibilityMenuTrigger.tsx index 793495f0ba..2920d686e5 100644 --- a/web/oss/src/components/InfiniteVirtualTable/components/columnVisibility/ColumnVisibilityMenuTrigger.tsx +++ b/web/oss/src/components/InfiniteVirtualTable/components/columnVisibility/ColumnVisibilityMenuTrigger.tsx @@ -9,10 +9,8 @@ import ColumnVisibilityPopoverContent, { type ColumnVisibilityPopoverContentProps, } from "./ColumnVisibilityPopoverContent" -interface ColumnVisibilityMenuTriggerProps extends Omit< - ColumnVisibilityPopoverContentProps, - "onClose" -> { +interface ColumnVisibilityMenuTriggerProps + extends Omit, "onClose"> { variant?: "icon" | "button" label?: string controls?: ColumnVisibilityState diff --git a/web/oss/src/components/InfiniteVirtualTable/components/columnVisibility/TableSettingsDropdown.tsx b/web/oss/src/components/InfiniteVirtualTable/components/columnVisibility/TableSettingsDropdown.tsx index ae66ac805e..f8fb6e81f3 100644 --- a/web/oss/src/components/InfiniteVirtualTable/components/columnVisibility/TableSettingsDropdown.tsx +++ b/web/oss/src/components/InfiniteVirtualTable/components/columnVisibility/TableSettingsDropdown.tsx @@ -117,10 +117,14 @@ const TableSettingsDropdown = ({ return ( { + if (!open) { + setColumnVisibilityOpen(false) + } + }} content={renderColumnVisibilityContent(controls, handleCloseColumnVisibility)} destroyOnHidden > diff --git a/web/oss/src/components/InfiniteVirtualTable/features/InfiniteVirtualTableFeatureShell.tsx b/web/oss/src/components/InfiniteVirtualTable/features/InfiniteVirtualTableFeatureShell.tsx index 1170767fec..623720b0bc 100644 --- a/web/oss/src/components/InfiniteVirtualTable/features/InfiniteVirtualTableFeatureShell.tsx +++ b/web/oss/src/components/InfiniteVirtualTable/features/InfiniteVirtualTableFeatureShell.tsx @@ -1,7 +1,7 @@ import type {CSSProperties, Key, ReactNode} from "react" import {useCallback, useEffect, useMemo, useState} from "react" -import {Trash} from "@phosphor-icons/react" +import {TrashIcon} from "@phosphor-icons/react" import {Button, Grid, Tabs, Tooltip} from "antd" import type {MenuProps} from "antd" import clsx from "clsx" @@ -274,7 +274,9 @@ function InfiniteVirtualTableFeatureShellBase( 
}, [onRowsChange, pagination.rows]) const handleLoadMore = useCallback(() => { - if (!enableInfiniteScroll) return + if (!enableInfiniteScroll) { + return + } pagination.loadNextPage() }, [enableInfiniteScroll, pagination.loadNextPage]) @@ -302,6 +304,7 @@ function InfiniteVirtualTableFeatureShellBase( beforeExport, resolveValue, resolveColumnLabel, + columnsOverride: exportColumnsOverride, } = exportOptions ?? {} const resolvedExportFilename = exportOptionsFilename ?? exportFilename ?? "table-export.csv" const exportHandler = useCallback(async () => { @@ -319,7 +322,7 @@ function InfiniteVirtualTableFeatureShellBase( }) : pagination.rows await tableExport({ - columns, + columns: exportColumnsOverride ?? columns, rows: rowsToExport, filename: resolvedExportFilename, isColumnExportable, @@ -369,7 +372,7 @@ function InfiniteVirtualTableFeatureShellBase( ) @@ -434,18 +437,21 @@ function InfiniteVirtualTableFeatureShellBase( ) }, [builtInDeleteButton, builtInExportButton, secondaryActions, exportButtonNode]) + // Only show export in settings when enableExport is true AND no custom renderExportButton is provided + const showExportInSettings = enableExport && !renderExportButton + const columnVisibilityRenderer = useMemo( () => resolveColumnVisibilityRenderer(columnVisibilityMenuRenderer, columnVisibility, { scopeId, - onExport: enableExport ? exportHandler : undefined, + onExport: showExportInSettings ? exportHandler : undefined, isExporting, }), [ columnVisibilityMenuRenderer, columnVisibility, scopeId, - enableExport, + showExportInSettings, exportHandler, isExporting, ], @@ -461,7 +467,7 @@ function InfiniteVirtualTableFeatureShellBase( (controls: ColumnVisibilityState) => ( ( renderColumnVisibilityContent={(ctrls, close) => columnVisibilityRenderer(ctrls, close, { scopeId, - onExport: enableExport ? exportHandler : undefined, + onExport: showExportInSettings ? exportHandler : undefined, isExporting, }) } @@ -478,7 +484,7 @@ function InfiniteVirtualTableFeatureShellBase( ), [ columnVisibilityRenderer, - enableExport, + showExportInSettings, exportHandler, isExporting, scopeId, @@ -538,6 +544,8 @@ function InfiniteVirtualTableFeatureShellBase( ) }, [tabs, headerExtra]) + const effectiveDataSource = dataSource ?? pagination.rows + return (
( useIsolatedStore={!store} store={store} columns={columns} - dataSource={dataSource ?? pagination.rows} + dataSource={effectiveDataSource} loadMore={handleLoadMore} rowKey={rowKey} rowSelection={rowSelection} diff --git a/web/oss/src/components/InfiniteVirtualTable/helpers/createSimpleTableStore.ts b/web/oss/src/components/InfiniteVirtualTable/helpers/createSimpleTableStore.ts index fcc99f4c84..3aa5893222 100644 --- a/web/oss/src/components/InfiniteVirtualTable/helpers/createSimpleTableStore.ts +++ b/web/oss/src/components/InfiniteVirtualTable/helpers/createSimpleTableStore.ts @@ -3,7 +3,7 @@ import type {Atom} from "jotai" import {createInfiniteDatasetStore} from "../createInfiniteDatasetStore" import type {InfiniteDatasetStore} from "../createInfiniteDatasetStore" -import type {InfiniteTableFetchResult, InfiniteTableRowBase} from "../types" +import type {InfiniteTableFetchResult, InfiniteTableRowBase, WindowingState} from "../types" import {createTableRowHelpers} from "./createTableRowHelpers" import type {TableRowHelpersConfig} from "./createTableRowHelpers" @@ -47,13 +47,14 @@ export interface SimpleTableStoreConfig< rowHelpers: TableRowHelpersConfig /** * Fetch function that retrieves data from the API. - * Should handle pagination via limit/offset/cursor. + * Should handle pagination via limit/offset/cursor/windowing. */ fetchData: (params: { meta: TMeta limit: number offset: number cursor: string | null + windowing: WindowingState | null }) => Promise> /** * Optional custom isEnabled check. @@ -138,7 +139,7 @@ export function createSimpleTableStore< isEnabled: isEnabled ?? ((meta) => Boolean(meta?.projectId)), clientRowsAtom, excludeRowIdsAtom, - fetchPage: async ({limit, offset, cursor, meta}) => { + fetchPage: async ({limit, offset, cursor, windowing, meta}) => { if (!meta?.projectId) { return { rows: [], @@ -150,7 +151,7 @@ export function createSimpleTableStore< } } - return fetchData({meta, limit, offset, cursor}) + return fetchData({meta, limit, offset, cursor, windowing}) }, }) diff --git a/web/oss/src/components/InfiniteVirtualTable/hooks/useHeaderViewportVisibility.ts b/web/oss/src/components/InfiniteVirtualTable/hooks/useHeaderViewportVisibility.ts index 5dfb792c8a..6cadf8f4d1 100644 --- a/web/oss/src/components/InfiniteVirtualTable/hooks/useHeaderViewportVisibility.ts +++ b/web/oss/src/components/InfiniteVirtualTable/hooks/useHeaderViewportVisibility.ts @@ -13,6 +13,7 @@ const useHeaderViewportVisibility = ({ scopeId, containerRef, onVisibilityChange, + onColumnUnregister, enabled = true, viewportMargin, exitDebounceMs = 150, @@ -23,6 +24,9 @@ const useHeaderViewportVisibility = ({ scopeId: string | null containerRef: RefObject onVisibilityChange: ViewportVisibilityCallback | undefined + onColumnUnregister?: + | ((payload: {scopeId: string | null; columnKey: string}) => void) + | undefined enabled?: boolean viewportMargin?: string exitDebounceMs?: number @@ -174,6 +178,10 @@ const useHeaderViewportVisibility = ({ // Skip processing if updates are suspended (e.g., during resize or vertical scroll) if (suspendUpdatesRef.current) return if (!onVisibilityChange || !scopeId) return + + // Batch process entries to reduce state updates during rapid scrolling + const updates: {columnKey: string; isVisible: boolean}[] = [] + entries.forEach((entry) => { const columnKey = elementToKeyRef.current.get(entry.target as HTMLElement) if (!columnKey) return @@ -210,6 +218,12 @@ const useHeaderViewportVisibility = ({ intersectionWidth > 0 && intersectionHeight > 0 && boundingRect.width > 
0 + + updates.push({columnKey, isVisible}) + }) + + // Process all updates together to minimize re-renders + updates.forEach(({columnKey, isVisible}) => { queueVisibilityUpdate(columnKey, isVisible) }) }, @@ -365,7 +379,7 @@ const useHeaderViewportVisibility = ({ } const wasFixed = fixedKeysRef.current.delete(columnKey) if (wasFixed) { - // emitVisibilityChanges([{columnKey, visible: false}]) + // Fixed columns don't need cleanup return } const previousNode = keyToElementRef.current.get(columnKey) @@ -374,20 +388,27 @@ const useHeaderViewportVisibility = ({ elementToKeyRef.current.delete(previousNode) } keyToElementRef.current.delete(columnKey) - const scheduleHide = () => { + + // Clear visibility state to prevent stale values on re-mount + const scheduleCleanup = () => { visibilityStateRef.current.delete(columnKey) - emitVisibilityChanges([{columnKey, visible: false}]) + // Delete from atom instead of setting to false to prevent stale state + // When column is re-registered, it will default to visible (true) + if (onColumnUnregister && scopeId) { + onColumnUnregister({scopeId, columnKey}) + } } + if (typeof window !== "undefined") { if (!pendingUnregisterTimeoutsRef.current.has(columnKey)) { const timeoutId = window.setTimeout(() => { pendingUnregisterTimeoutsRef.current.delete(columnKey) - scheduleHide() + scheduleCleanup() }, exitDebounceMs ?? 150) pendingUnregisterTimeoutsRef.current.set(columnKey, timeoutId) } } else { - scheduleHide() + scheduleCleanup() } } }, @@ -399,6 +420,7 @@ const useHeaderViewportVisibility = ({ exitDebounceMs, isFixedHeaderNode, onVisibilityChange, + onColumnUnregister, scopeId, ], ) diff --git a/web/oss/src/components/InfiniteVirtualTable/hooks/useInfiniteTablePagination.ts b/web/oss/src/components/InfiniteVirtualTable/hooks/useInfiniteTablePagination.ts index 699beffbe1..27c83ea448 100644 --- a/web/oss/src/components/InfiniteVirtualTable/hooks/useInfiniteTablePagination.ts +++ b/web/oss/src/components/InfiniteVirtualTable/hooks/useInfiniteTablePagination.ts @@ -80,9 +80,13 @@ const useInfiniteTablePagination = ({ const loadedRowCount = useMemo(() => rows.filter((row) => !row.__isSkeleton).length, [rows]) const loadNextPage = useCallback(() => { - if (!paginationInfo.hasMore) return + if (!paginationInfo.hasMore) { + return + } const nextCursor = paginationInfo.nextCursor - if (!nextCursor || paginationInfo.isFetching) return + if (!nextCursor || paginationInfo.isFetching) { + return + } const nextOffset = paginationInfo.nextOffset ?? 
totalRows const nextWindowing = diff --git a/web/oss/src/components/InfiniteVirtualTable/hooks/useSmartResizableColumns.ts b/web/oss/src/components/InfiniteVirtualTable/hooks/useSmartResizableColumns.ts index be911215a5..dc9a2be819 100644 --- a/web/oss/src/components/InfiniteVirtualTable/hooks/useSmartResizableColumns.ts +++ b/web/oss/src/components/InfiniteVirtualTable/hooks/useSmartResizableColumns.ts @@ -7,7 +7,7 @@ import {ResizableTitle} from "@/oss/components/EnhancedUIs/Table/assets/CustomCe import {getColumnWidthsAtom} from "../atoms/columnWidths" -const DEFAULT_MIN_WIDTH = 48 +const DEFAULT_MIN_WIDTH = 150 const DEFAULT_COLUMN_WIDTH = 200 type ColumnEntry = ColumnsType[number] @@ -57,6 +57,8 @@ export interface UseSmartResizableColumnsResult { } | null getTotalWidth: (cols?: ColumnsType) => number isResizing: boolean + /** Whether any column has been manually resized by the user */ + hasUserResizedAny: boolean } /** @@ -116,26 +118,23 @@ export const useSmartResizableColumns = ({ ) // Compute smart widths based on available space + // KEY CONSTRAINT: Total width must always >= containerWidth const computeSmartWidths = useCallback( (columnsMeta: ColumnMeta[]): Record => { const result: Record = {} - // Check if ANY column has been user-resized - const hasAnyUserResize = columnsMeta.some((c) => userResizedWidths[c.key] !== undefined) - // 1. Separate columns by type const fixedPositionCols = columnsMeta.filter((c) => c.isFixed) const constrainedCols = columnsMeta.filter((c) => !c.isFixed && c.hasMaxWidth) const flexibleCols = columnsMeta.filter((c) => !c.isFixed && !c.hasMaxWidth) - // 2. Calculate fixed widths (these never change) + // 2. Calculate fixed widths (these NEVER change) let fixedWidth = selectionColumnWidth - // Fixed position columns use their width (or user-resized width) + // Fixed position columns use their ORIGINAL width (never user-resized) for (const col of fixedPositionCols) { - const width = userResizedWidths[col.key] ?? col.width - result[col.key] = width - fixedWidth += width + result[col.key] = col.width + fixedWidth += col.width } // Constrained columns use their maxWidth @@ -150,47 +149,64 @@ export const useSmartResizableColumns = ({ return result } - // Available space for flexible columns - const availableForFlexible = containerWidth - fixedWidth - - // KEY BEHAVIOR CHANGE: - // Once ANY column has been user-resized, ALL flexible columns should use - // either their user-resized width or their default width (not redistribute). - // This prevents other columns from shrinking when one is expanded. - if (hasAnyUserResize) { - // Use user-resized width if available, otherwise use default width - for (const col of flexibleCols) { - const width = userResizedWidths[col.key] ?? col.width - result[col.key] = Math.max(width, col.minWidth) - } - return result + // Available space for flexible columns (must be filled!) 
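+            // fixedWidth (accumulated above) already covers the selection column,
+            // fixed-position columns, and max-width-constrained columns.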
+ const availableForFlexible = Math.max(0, containerWidth - fixedWidth) + + // Separate user-resized and non-resized flexible columns + const userResizedFlexCols = flexibleCols.filter( + (c) => userResizedWidths[c.key] !== undefined, + ) + const nonResizedFlexCols = flexibleCols.filter( + (c) => userResizedWidths[c.key] === undefined, + ) + + // Calculate space taken by user-resized columns + let userResizedTotal = 0 + for (const col of userResizedFlexCols) { + const width = Math.max(userResizedWidths[col.key]!, col.minWidth) + result[col.key] = width + userResizedTotal += width } - // No user resizes yet - distribute space proportionally to fill container - // This is the initial state before any manual resizing - // - // KEY: Use default widths as the floor, not minWidth. - // This ensures columns don't shrink below their intended default size. - // If total default widths exceed container, allow horizontal scrolling. - const totalDefaultWidth = flexibleCols.reduce((sum, col) => sum + col.width, 0) - - // If there's not enough space for all columns at their default widths, - // use default widths and allow horizontal scrolling - if (availableForFlexible <= totalDefaultWidth) { - for (const col of flexibleCols) { - result[col.key] = col.width + // Remaining space for non-resized columns + const remainingForNonResized = availableForFlexible - userResizedTotal + + if (nonResizedFlexCols.length === 0) { + // All flexible columns have been user-resized + // If total < available, we need to expand the last resized column + // to maintain the sum constraint + if (userResizedTotal < availableForFlexible && userResizedFlexCols.length > 0) { + const lastCol = userResizedFlexCols[userResizedFlexCols.length - 1] + const deficit = availableForFlexible - userResizedTotal + result[lastCol.key] = (result[lastCol.key] ?? 
0) + deficit } return result } - // There's extra space - distribute it proportionally - const totalWeight = flexibleCols.reduce((sum, col) => sum + col.width, 0) + // Distribute remaining space among non-resized columns + // Use default width as floor to ensure readability, allow horizontal scroll if needed + const totalDefaultWeight = nonResizedFlexCols.reduce((sum, col) => sum + col.width, 0) - for (const col of flexibleCols) { - const proportion = col.width / totalWeight - const computedWidth = availableForFlexible * proportion - // Use default width as floor, not minWidth - result[col.key] = Math.max(computedWidth, col.width) + if (remainingForNonResized <= 0) { + // User-resized columns take all space, use default width for others + // This may cause total > container, enabling horizontal scroll + for (const col of nonResizedFlexCols) { + result[col.key] = col.width + } + } else if (remainingForNonResized < totalDefaultWeight) { + // Not enough space for all at default width - use default widths + // and allow horizontal scrolling rather than squeezing columns + for (const col of nonResizedFlexCols) { + result[col.key] = col.width + } + } else { + // Enough space - distribute proportionally + for (const col of nonResizedFlexCols) { + const proportion = col.width / totalDefaultWeight + const computedWidth = remainingForNonResized * proportion + // Use default width as floor, not minWidth + result[col.key] = Math.max(computedWidth, col.width) + } } return result @@ -355,11 +371,18 @@ export const useSmartResizableColumns = ({ [minWidth, resizableColumns], ) + // Check if any column has been user-resized + const hasUserResizedAny = useMemo( + () => Object.keys(userResizedWidths).length > 0, + [userResizedWidths], + ) + return { columns: resizableColumns, headerComponents: enabled ? 
{cell: ResizableTitle} : null, getTotalWidth, isResizing, + hasUserResizedAny, } } diff --git a/web/oss/src/components/InfiniteVirtualTable/hooks/useTableExport.ts b/web/oss/src/components/InfiniteVirtualTable/hooks/useTableExport.ts index cc14508d30..f081598fa3 100644 --- a/web/oss/src/components/InfiniteVirtualTable/hooks/useTableExport.ts +++ b/web/oss/src/components/InfiniteVirtualTable/hooks/useTableExport.ts @@ -162,9 +162,8 @@ export interface TableExportColumnContext { columnIndex: number } -export interface TableExportValueArgs< - Row extends InfiniteTableRowBase, -> extends TableExportColumnContext { +export interface TableExportValueArgs + extends TableExportColumnContext { row: Row } @@ -177,18 +176,17 @@ export interface TableExportOptions { beforeExport?: (rows: Row[]) => void | Row[] | Promise resolveValue?: (args: TableExportResolveArgs) => unknown | Promise resolveColumnLabel?: (context: TableExportColumnContext) => string | undefined + columnsOverride?: ColumnsType } -export interface TableExportParams< - Row extends InfiniteTableRowBase, -> extends TableExportOptions { +export interface TableExportParams + extends TableExportOptions { columns: ColumnsType rows: Row[] } -export interface TableExportResolveArgs< - Row extends InfiniteTableRowBase, -> extends TableExportValueArgs { +export interface TableExportResolveArgs + extends TableExportValueArgs { rowIndex: number columnKey: string columnIdentifier: string diff --git a/web/oss/src/components/InfiniteVirtualTable/types.ts b/web/oss/src/components/InfiniteVirtualTable/types.ts index 91c2cbf457..2676bcebcf 100644 --- a/web/oss/src/components/InfiniteVirtualTable/types.ts +++ b/web/oss/src/components/InfiniteVirtualTable/types.ts @@ -8,6 +8,8 @@ import type {VisibilityRegistrationHandler} from "./components/ColumnVisibilityH export interface WindowingState { next: string | null + oldest?: string | null + newest?: string | null stop?: string | null order?: string | null limit?: number | null diff --git a/web/oss/src/components/PageLayout/PageLayout.tsx b/web/oss/src/components/PageLayout/PageLayout.tsx index dfca3c9610..b74a1b1370 100644 --- a/web/oss/src/components/PageLayout/PageLayout.tsx +++ b/web/oss/src/components/PageLayout/PageLayout.tsx @@ -5,6 +5,7 @@ import classNames from "classnames" interface PageLayoutProps { title?: ReactNode + titleLevel?: 1 | 2 | 3 | 4 | 5 headerTabs?: ReactNode headerTabsProps?: TabsProps children: ReactNode @@ -14,12 +15,14 @@ interface PageLayoutProps { const PageLayout = ({ title, + titleLevel = 5, headerTabs, headerTabsProps, children, className, headerClassName, }: PageLayoutProps) => { + const titleText = typeof title === "string" || typeof title === "number" ? String(title) : "" const headerTabsContent = headerTabsProps ? ( ) : ( @@ -35,9 +38,15 @@ const PageLayout = ({ headerClassName, )} > - - {title} - +
+ + {title} + +
{headerTabsContent ? (
{headerTabsContent} diff --git a/web/oss/src/components/Playground/Components/Drawers/TestsetDrawer/index.tsx b/web/oss/src/components/Playground/Components/Drawers/TestsetDrawer/index.tsx index 6684a30091..2df44cb4b6 100644 --- a/web/oss/src/components/Playground/Components/Drawers/TestsetDrawer/index.tsx +++ b/web/oss/src/components/Playground/Components/Drawers/TestsetDrawer/index.tsx @@ -3,7 +3,6 @@ import {cloneElement, isValidElement, useMemo, useState} from "react" import {Database} from "@phosphor-icons/react" import dynamic from "next/dynamic" -import {TestsetTraceData} from "@/oss/components/SharedDrawers/AddToTestsetDrawer/assets/types" import {getResponseLazy} from "@/oss/lib/hooks/useStatelessVariants/state" import EnhancedButton from "../../../../EnhancedUIs/Button" @@ -26,65 +25,53 @@ const TestsetDrawerButton = ({ }: TestsetDrawerButtonProps) => { const [isTestsetDrawerOpen, setIsTestsetDrawerOpen] = useState(false) - let traces: (Record | null | undefined)[] = [] - const testsetTraceData = useMemo(() => { + // Extract span IDs from results - entity atoms will fetch the actual data + const spanIds = useMemo(() => { if (!isTestsetDrawerOpen) return [] + let traces: (Record | null | undefined)[] = [] if (results) { traces = Array.isArray(results) ? results : [results] } else if (resultHashes) { const traceHashes = Array.isArray(resultHashes) ? resultHashes : [resultHashes] traces = traceHashes - .map((hash) => { - return hash ? getResponseLazy(hash) : undefined - }) + .map((hash) => (hash ? getResponseLazy(hash) : undefined)) .filter((tr) => !!tr) } if (traces.length === 0) return [] - const extractedData = traces - ?.map((result, idx) => { - return { - data: - (result?.response?.tree?.nodes?.[0]?.data as Record) || - result?.response?.data, - key: - (result?.response?.tree?.nodes?.[0]?.node?.id as string) || - result?.response?.span_id, - id: idx + 1, - } - }) - .filter((result) => result.data) - return extractedData + // Extract only span IDs - let entity atoms fetch the data + return traces + .map((result) => { + // Use span_id (hex format) not node.id (UUID format) + const spanId = + (result?.response?.tree?.nodes?.[0]?.span_id as string) || + result?.response?.span_id + // Validate that the span has data (successful generation) + const hasData = result?.response?.tree?.nodes?.[0]?.data || result?.response?.data + return hasData ? spanId : null + }) + .filter((id): id is string => !!id) }, [resultHashes, results, isTestsetDrawerOpen]) - // Count of valid result hashes (may include failed ones; see validResultsCount for success only) - // const isResults = useMemo(() => resultHashes?.filter(Boolean)?.length, [resultHashes]) // Count only successful results (those that have response data) + // We count eagerly (before drawer opens) for button disabled state const validResultsCount = useMemo(() => { - // Direct results prop (rare path) + let traces: (Record | null | undefined)[] = [] + if (results) { - const arr = Array.isArray(results) ? results : [results] - return arr.filter((r: any) => { - const data = - (r?.response?.tree?.nodes?.[0]?.data as Record) || - r?.response?.data - return Boolean(data) - }).length + traces = Array.isArray(results) ? results : [results] + } else if (resultHashes) { + const hashes = Array.isArray(resultHashes) ? resultHashes : [resultHashes] + traces = hashes.map((h) => (h ? getResponseLazy(h) : null)).filter(Boolean) } - // Hash-based results (common path) - const hashes = Array.isArray(resultHashes) ? 
resultHashes : [resultHashes] - return hashes - .map((h) => (h ? getResponseLazy(h) : null)) - .filter(Boolean) - .filter((r: any) => { - const data = - (r?.response?.tree?.nodes?.[0]?.data as Record) || - r?.response?.data - return Boolean(data) - }).length + return traces.filter((r: any) => { + const data = + (r?.response?.tree?.nodes?.[0]?.data as Record) || r?.response?.data + return Boolean(data) + }).length }, [results, resultHashes]) return ( @@ -121,7 +108,7 @@ const TestsetDrawerButton = ({ { setIsTestsetDrawerOpen(false) diff --git a/web/oss/src/components/Playground/Components/Drawers/TestsetDrawer/types.d.ts b/web/oss/src/components/Playground/Components/Drawers/TestsetDrawer/types.d.ts index fde1fc4c38..6d49ada527 100644 --- a/web/oss/src/components/Playground/Components/Drawers/TestsetDrawer/types.d.ts +++ b/web/oss/src/components/Playground/Components/Drawers/TestsetDrawer/types.d.ts @@ -2,9 +2,9 @@ import type {ReactNode} from "react" import {TestResult} from "@/oss/components/Playground/assets/utilities/transformer/types/testRun" -import {TooltipButtonProps} from "../../../../EnhancedUIs/Button" +import {EnhancedButtonProps} from "../../../../EnhancedUIs/Button/types" -export interface TestsetDrawerButtonProps extends TooltipButtonProps { +export interface TestsetDrawerButtonProps extends EnhancedButtonProps { icon?: boolean children?: ReactNode resultHashes?: (TestResult | string | null | undefined)[] diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetButton/index.tsx b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetButton/index.tsx index 3a3f56742e..f0f72f29e6 100644 --- a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetButton/index.tsx +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetButton/index.tsx @@ -1,4 +1,4 @@ -import {cloneElement, isValidElement, SetStateAction, useCallback, useState} from "react" +import {cloneElement, isValidElement, useCallback, useState} from "react" import {Database} from "@phosphor-icons/react" import {Button} from "antd" @@ -8,6 +8,8 @@ import dynamic from "next/dynamic" import {appChatModeAtom} from "@/oss/components/Playground/state/atoms" import {loadTestsetNormalizedMutationAtom} from "@/oss/components/Playground/state/atoms/mutations/testset/loadNormalized" +import {LoadTestsetSelectionPayload} from "../types" + import {LoadTestsetButtonProps} from "./types" const LoadTestsetModal = dynamic(() => import("../.."), {ssr: false}) @@ -23,29 +25,20 @@ const LoadTestsetButton = ({ const isChat = useAtomValue(appChatModeAtom) ?? false const [isTestsetModalOpen, setIsTestsetModalOpen] = useState(false) - const [, setTestsetData] = useState | null>(null) + const [, setTestsetData] = useState(null) const wrappedSetTestsetData = useCallback( - (d: SetStateAction | null>) => { - // Only call the mutation if we have valid testset data - if (d && Array.isArray(d) && d.length > 0) { - // Use the new mutation atom to load testset data - loadTestsetData({ - testsetData: d, - isChatVariant: isChat, - regenerateVariableIds: true, - }) - } else if (d && !Array.isArray(d)) { - // Handle single testset item + (payload: LoadTestsetSelectionPayload | null) => { + const testcases = payload?.testcases ?? 
[] + if (Array.isArray(testcases) && testcases.length > 0) { loadTestsetData({ - testsetData: [d], + testsetData: testcases, isChatVariant: isChat, regenerateVariableIds: true, }) } - // Update local state for the modal - setTestsetData(d) + setTestsetData(payload) }, [loadTestsetData, isChat], ) diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetModalContent/index.tsx b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetModalContent/index.tsx index de66617a38..08bc34b106 100644 --- a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetModalContent/index.tsx +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetModalContent/index.tsx @@ -1,35 +1,19 @@ -import {memo, useCallback, useEffect, useMemo, useState} from "react" +import {memo, useCallback} from "react" -import {RightOutlined} from "@ant-design/icons" -import {Divider, Input, Menu, Popover, Spin, Typography} from "antd" -import {atom, useAtom, useSetAtom} from "jotai" +import {Divider, Spin} from "antd" +import clsx from "clsx" import {useAtomValue} from "jotai" import dynamic from "next/dynamic" import {useRouter} from "next/router" -import {useRowHeight} from "@/oss/components/InfiniteVirtualTable" -import {buildRevisionMenuItems} from "@/oss/components/TestcasesTableNew/components/RevisionMenuItems" -import {TestcasesTableShell} from "@/oss/components/TestcasesTableNew/components/TestcasesTableShell" -import {useTestcasesTable} from "@/oss/components/TestcasesTableNew/hooks/useTestcasesTable" -import { - testcaseRowHeightAtom, - TESTCASE_ROW_HEIGHT_CONFIG, -} from "@/oss/components/TestcasesTableNew/state/rowHeight" -import {Testset} from "@/oss/lib/Types" -import {useEntityList} from "@/oss/state/entities/hooks/useEntityList" -import { - latestRevisionForTestsetAtomFamily, - revisionsListQueryAtomFamily, - testsetStore, -} from "@/oss/state/entities/testset" +import {testset} from "@/oss/state/entities/testset" import {projectIdAtom} from "@/oss/state/project/selectors/project" -import { - selectTestsetAtom, - selectedRevisionIdAtom, - selectedTestsetIdAtom, -} from "@/oss/state/testsetSelection" import {urlAtom} from "@/oss/state/url" +import {isCreatingNewTestsetAtom} from "../../atoms/modalState" +import {CreateTestsetCard} from "../../components/CreateTestsetCard" +import {TestsetListSidebar} from "../../components/TestsetListSidebar" +import {TestsetPreviewPanel} from "../../components/TestsetPreviewPanel" import {LoadTestsetModalContentProps} from "../types" const NoResultsFound = dynamic( @@ -39,246 +23,21 @@ const NoResultsFound = dynamic( }, ) -const TestcasesTablePreview = ({ - revisionId, - selectedRowKeys, - setSelectedRowKeys, -}: { - revisionId: string - selectedRowKeys: React.Key[] - setSelectedRowKeys: React.Dispatch> -}) => { - const table = useTestcasesTable({revisionId, mode: "view"}) - const rowHeight = useRowHeight(testcaseRowHeightAtom, TESTCASE_ROW_HEIGHT_CONFIG) - - const handleRowClick = useCallback( - (record: any) => { - const key = record?.key - if (key === undefined || key === null) return - setSelectedRowKeys((prev) => { - const exists = prev.includes(key) - if (exists) { - return prev.filter((k) => k !== key) - } - return [...prev, key] - }) - }, - [setSelectedRowKeys], - ) - - return ( -
- {}} - searchTerm={table.searchTerm} - onSearchChange={table.setSearchTerm} - header={null} - actions={null} - hideControls={false} - enableSelection - autoHeight - disableDeleteAction - /> -
- ) -} - -const LoadTestsetModalContent = ({ - modalProps, - testsetCsvData, - selectedRowKeys, - setSelectedRowKeys, - isLoadingTestset, -}: Omit< - LoadTestsetModalContentProps, - | "selectedTestset" - | "setSelectedTestset" - | "selectedRevisionId" - | "setSelectedRevisionId" - | "isChat" ->) => { - // Use shared atoms for testset/revision selection - const [selectedTestset, setSelectedTestset] = useAtom(selectedTestsetIdAtom) - const [selectedRevisionId, setSelectedRevisionId] = useAtom(selectedRevisionIdAtom) - const selectTestset = useSetAtom(selectTestsetAtom) +const LoadTestsetModalContent = ({modalProps}: LoadTestsetModalContentProps) => { const projectId = useAtomValue(projectIdAtom) - const listParams = useMemo(() => ({projectId: projectId ?? ""}), [projectId]) - const {data: testsetListResponse, isLoading: isLoadingTestsets} = useEntityList( - testsetStore, - listParams, - ) - const testsets = useMemo(() => testsetListResponse?.testsets ?? [], [testsetListResponse]) + const isCreatingNew = useAtomValue(isCreatingNewTestsetAtom) const router = useRouter() - - const [searchTerm, setSearchTerm] = useState("") const urlState = useAtomValue(urlAtom) - const [revisionPanelTestsetId, setRevisionPanelTestsetId] = useState("") - const emptyQueryAtom = useMemo( - () => - atom({ - data: [] as { - id: string - version: number - created_at?: string | null - message?: string | null - }[], - isFetching: false, - isPending: false, - isLoading: false, - }), - [], - ) - const selectedRevisionsQueryAtom = useMemo( - () => (selectedTestset ? revisionsListQueryAtomFamily(selectedTestset) : emptyQueryAtom), - [selectedTestset, emptyQueryAtom], - ) - const popoverRevisionsQueryAtom = useMemo( - () => - revisionPanelTestsetId - ? revisionsListQueryAtomFamily(revisionPanelTestsetId) - : selectedRevisionsQueryAtom, - [revisionPanelTestsetId, selectedRevisionsQueryAtom], - ) - const selectedRevisionsQuery = useAtomValue(selectedRevisionsQueryAtom) - const popoverRevisionsQuery = useAtomValue(popoverRevisionsQueryAtom) - - const revisions = useMemo( - () => - (selectedRevisionsQuery.data as { - id: string - version: number - created_at?: string | null - message?: string | null - }[]) || [], - [selectedRevisionsQuery.data], - ) - - const popoverRevisions = useMemo( - () => - (popoverRevisionsQuery.data as { - id: string - version: number - created_at?: string | null - message?: string | null - }[]) || [], - [popoverRevisionsQuery.data], - ) - - const filteredRevisions = revisions - const filteredPopoverRevisions = popoverRevisions - const popoverRevisionsLoading = - popoverRevisionsQuery.isPending || - (popoverRevisionsQuery as any).isLoading || - (popoverRevisionsQuery as any).isFetching - - const latestRevisionAtom = useMemo( - () => latestRevisionForTestsetAtomFamily(selectedTestset), - [selectedTestset], - ) - const latestRevision = useAtomValue(latestRevisionAtom) + // Use testset controller API + const testsetsQuery = useAtomValue(testset.queries.list(null)) + const testsets = testsetsQuery.data?.testsets ?? 
[] + const isLoadingTestsets = testsetsQuery.isLoading const handleCreateTestset = useCallback(() => { router.push(`${urlState.projectURL}/testsets`) }, [router, urlState?.projectURL]) - // Auto-select first testset when modal opens (uses shared selectTestsetAtom which also selects latest revision) - useEffect(() => { - if (!modalProps.open || !testsets.length) return - - const prevExists = - selectedTestset && testsets.some((ts: Testset) => ts?.id === selectedTestset) - if (!prevExists && testsets[0]) { - selectTestset({ - testsetId: testsets[0].id, - testsetName: testsets[0].name, - autoSelectLatest: true, - }) - } - }, [modalProps.open, testsets, selectedTestset, selectTestset]) - - // Auto-select latest revision when revisions load and none is selected - useEffect(() => { - if (!selectedTestset || selectedRevisionId) return - const latestId = - filteredRevisions.find((rev) => rev.id === latestRevision?.id)?.id || - filteredRevisions[0]?.id - if (latestId) { - setSelectedRevisionId(latestId) - } - }, [ - filteredRevisions, - latestRevision?.id, - selectedRevisionId, - selectedTestset, - setSelectedRevisionId, - ]) - - const filteredTestset = useMemo(() => { - if (!searchTerm) return testsets - return testsets.filter((item: Testset) => - item.name.toLowerCase().includes(searchTerm.toLowerCase()), - ) - }, [searchTerm, testsets]) - - const testsetMenuItems = useMemo(() => { - if (!filteredTestset.length) return [] - return filteredTestset.map((ts: Testset) => ({ - key: ts.id, - label: ts.name, - hasRevisions: - ts.latest_revision_version === undefined ? true : ts.latest_revision_version > 0, - })) - }, [filteredTestset]) - - const onChangeTestset = useCallback( - ({key}: any) => { - setSelectedRowKeys([]) - const testset = testsets.find((ts: Testset) => ts.id === key) - selectTestset({ - testsetId: key, - testsetName: testset?.name || "", - autoSelectLatest: true, - }) - }, - [setSelectedRowKeys, testsets, selectTestset], - ) - - const onChangeRevision = useCallback( - ({key}: any) => { - setSelectedRowKeys([]) - setSelectedRevisionId(key) - setRevisionPanelTestsetId("") - }, - [setRevisionPanelTestsetId, setSelectedRevisionId, setSelectedRowKeys], - ) - - const popoverMenuItems = useMemo( - () => - buildRevisionMenuItems(filteredPopoverRevisions, (revisionId) => { - setSelectedTestset(revisionPanelTestsetId || selectedTestset || "") - onChangeRevision({key: revisionId}) - setRevisionPanelTestsetId("") - }) ?? [], - [filteredPopoverRevisions, onChangeRevision, revisionPanelTestsetId, selectedTestset], - ) - - const menuSelectedKeys = useMemo( - () => (selectedTestset ? [selectedTestset] : []), - [selectedTestset], - ) - const revisionSelectedKeys = useMemo( - () => (selectedRevisionId ? [selectedRevisionId] : []), - [selectedRevisionId], - ) - if (!projectId) { return (
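
The simplified modal content above reads the testset list straight from the controller's query atom instead of wiring `useEntityList` by hand. A hedged sketch of that read path, assuming `testset.queries.list(null)` exposes a TanStack-style query object (`data`, `isLoading`) through Jotai, as the surrounding usage implies:

```typescript
import {useAtomValue} from "jotai"

import {testset} from "@/oss/state/entities/testset"

// One atom subscription replaces the old useEntityList(testsetStore, params) wiring.
export function useTestsetOptions() {
    const query = useAtomValue(testset.queries.list(null))
    return {
        testsets: query.data?.testsets ?? [],
        isLoading: query.isLoading,
    }
}
```
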
@@ -287,154 +46,41 @@ const LoadTestsetModalContent = ({ ) } - if (!testsets.length && !testsetCsvData.length && !isLoadingTestset && !isLoadingTestsets) + if (!testsets.length && !isLoadingTestsets && !isCreatingNew) { return ( ) + } return (
-
-
- setSearchTerm(e.target.value)} - /> - - - -
- Testsets -
+
+
+ {!isCreatingNew ? ( +
+ +
+ ) : null} - { - // Destructure hasRevisions to prevent it from being passed to DOM - const {hasRevisions: _hasRevisions, ...restTs} = ts as any - const hasRevisions = _hasRevisions ?? true - return { - ...restTs, - label: ( -
- { - e.stopPropagation() - onChangeTestset({key: ts.key}) - setRevisionPanelTestsetId(ts.key as string) - }} - className="cursor-pointer flex-1 min-w-0 truncate text-left" - title={ts.label as string} - > - {ts.label} - - {hasRevisions && ( - { - if (open) { - setRevisionPanelTestsetId(ts.key as string) - } else if (revisionPanelTestsetId === ts.key) { - setRevisionPanelTestsetId("") - } - }} - content={ - popoverRevisionsLoading ? ( -
- -
- ) : popoverMenuItems.length ? ( -
- { - setSelectedTestset( - ts.key as string, - ) - onChangeRevision(info) - setRevisionPanelTestsetId("") - }} - selectedKeys={revisionSelectedKeys} - className="min-w-[220px] !border-none !p-0 !m-0 [&_.ant-menu-item]:h-auto [&_.ant-menu-item]:min-h-[32px] [&_.ant-menu-item]:leading-normal [&_.ant-menu-item]:!py-1 [&_.ant-menu-item]:!px-3 [&_.ant-menu-item]:!my-0 [&_.ant-menu-title-content]:whitespace-normal" - rootClassName="!p-0 !m-0" - /> -
- ) : ( -
- No revisions -
- ) - } - overlayClassName="load-testset-revision-popover" - styles={{body: {padding: 0}}} - > - { - e.stopPropagation() - setRevisionPanelTestsetId(ts.key as string) - }} - onKeyDown={(e) => { - if (e.key === "Enter" || e.key === " ") { - e.preventDefault() - e.stopPropagation() - setRevisionPanelTestsetId( - ts.key as string, - ) - } - }} - onMouseDown={(e) => { - e.preventDefault() - e.stopPropagation() - }} - > - - -
- )} -
- ), - } +
- {isLoadingTestsets && ( -
- -
- )} + > + +
- {selectedRevisionId ? ( - - ) : ( -
- - Select a revision to view testcases. - -
- )} +
diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetModalFooter/index.tsx b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetModalFooter/index.tsx index ca3520fe5b..5ec4552feb 100644 --- a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetModalFooter/index.tsx +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/LoadTestsetModalFooter/index.tsx @@ -1,9 +1,12 @@ -import {memo, useCallback, useMemo} from "react" +import {memo, useCallback, useMemo, useState} from "react" import {Play} from "@phosphor-icons/react" import {Button, Tooltip} from "antd" -import {useAtomValue} from "jotai" +import {useAtomValue, useSetAtom} from "jotai" +import {message} from "@/oss/components/AppMessageContext" +import {saveNewTestsetAtom} from "@/oss/state/entities/testcase" +import {projectIdAtom} from "@/oss/state/project/selectors/project" import {appUriInfoAtom} from "@/oss/state/variant/atoms/fetcher" import {useTestsetInputsAnalysis} from "../../hooks/useTestsetInputsAnalysis" @@ -15,9 +18,15 @@ const LoadTestsetModalFooter = ({ selectedRowKeys, testsetCsvData, setTestsetData, + selectedRevisionId, + isCreatingNew, + newTestsetName, }: LoadTestsetModalFooterProps) => { const appUriInfo = useAtomValue(appUriInfoAtom) const routePath = appUriInfo?.routePath + const projectId = useAtomValue(projectIdAtom) + const saveNewTestset = useSetAtom(saveNewTestsetAtom) + const [isSaving, setIsSaving] = useState(false) // High-level analysis of inputs vs testset columns, including schema + dynamic variables const {expectedInputVariables, hasCompatibilityIssue} = useTestsetInputsAnalysis({ @@ -32,36 +41,108 @@ const LoadTestsetModalFooter = ({ return `The testset has no CSV columns matching the expected variables {{${variantList}}}. Loading may fail unless the variables align.` }, [expectedInputVariables, hasCompatibilityIssue]) - const loadTestset = useCallback(() => { + const loadTestset = useCallback(async () => { + // If creating new, save first then load + if (isCreatingNew) { + if (!newTestsetName.trim()) { + message.error("Please enter a testset name") + return + } + + if (!projectId) { + message.error("Project ID not found") + return + } + + setIsSaving(true) + try { + const result = await saveNewTestset({ + projectId, + testsetName: newTestsetName.trim(), + }) + + if (!result.success) { + message.error(result.error?.message || "Failed to save testset") + setIsSaving(false) + return + } + + message.success("Testset created successfully") + + const newTestcases = (result.testcases ?? []) as Record[] + if (newTestcases.length) { + setTestsetData({ + testcases: newTestcases, + revisionId: result.revisionId, + }) + } else { + message.info("Testset is empty. 
Add rows before loading.") + } + + onClose() + } catch (error) { + console.error("Error creating testset:", error) + message.error("Failed to create testset") + } finally { + setIsSaving(false) + } + return + } + + // Regular load flow for existing testsets // testsetCsvData already contains only the selected testcases (filtered by useSelectedTestcasesData hook) if (!testsetCsvData.length) { console.warn("No testcases selected") return } - setTestsetData(testsetCsvData) + setTestsetData({ + testcases: testsetCsvData as Record[], + revisionId: selectedRevisionId || undefined, + }) onClose() - }, [onClose, setTestsetData, testsetCsvData]) + }, [ + isCreatingNew, + newTestsetName, + projectId, + saveNewTestset, + testsetCsvData, + setTestsetData, + selectedRevisionId, + onClose, + ]) + + const isDisabled = isCreatingNew ? !newTestsetName.trim() : !selectedRowKeys.length + + const buttonText = isCreatingNew ? "Create & Load" : "Load testset" + + const selectionCountText = + !isCreatingNew && selectedRowKeys.length > 0 + ? `${selectedRowKeys.length} testcase${selectedRowKeys.length === 1 ? "" : "s"} selected` + : "" return ( -
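
The footer's `loadTestset` now branches on create mode: persist the draft first, then hand the saved rows to the playground. The sequence condensed into a plain function; the `saveNewTestset` result shape (`success`, `error`, `testcases`, `revisionId`) is assumed from the calls above:

```typescript
interface SaveResult {
    success: boolean
    error?: {message?: string}
    testcases?: Record<string, unknown>[]
    revisionId?: string
}

interface SelectionPayload {
    testcases: Record<string, unknown>[]
    revisionId?: string
}

// Save the draft testset, then load its rows into the playground.
export async function createThenLoad(
    save: () => Promise<SaveResult>,
    setTestsetData: (payload: SelectionPayload) => void,
    onClose: () => void,
): Promise<void> {
    const result = await save()
    if (!result.success) {
        throw new Error(result.error?.message || "Failed to save testset")
    }
    const testcases = result.testcases ?? []
    if (testcases.length) {
        setTestsetData({testcases, revisionId: result.revisionId})
    }
    onClose()
}
```
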
- - - {/* Wrap disabled button with span so tooltip triggers on hover */} - - - - +
+
{selectionCountText}
+
+ + + {/* Wrap disabled button with span so tooltip triggers on hover */} + + + + +
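
The `<span>` wrapper noted in the comment above works around a known antd behaviour: a disabled button does not emit pointer events, so a `Tooltip` wrapping it directly never opens. A minimal sketch of the pattern:

```typescript
import React from "react"

import {Button, Tooltip} from "antd"

export const TooltipOnDisabled = ({
    title,
    label,
    disabled,
}: {
    title: string
    label: string
    disabled?: boolean
}) => (
    <Tooltip title={title}>
        {/* The span keeps receiving hover events while the button is disabled */}
        <span>
            <Button disabled={disabled}>{label}</Button>
        </span>
    </Tooltip>
)
```
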
) } diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/types.ts b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/types.ts index 6089b0ba61..f71c8eb9c1 100644 --- a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/types.ts +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/types.ts @@ -2,16 +2,21 @@ import {ModalProps} from "antd" import {Testset} from "@/oss/lib/Types" +export interface LoadTestsetSelectionPayload { + testcases: Record[] + revisionId?: string +} + export interface LoadTestsetModalProps extends ModalProps { - setTestsetData: React.Dispatch[] | null>> + setTestsetData: (payload: LoadTestsetSelectionPayload | null) => void } +/** + * Simplified props for LoadTestsetModalContent + * All state is now managed via atoms in atoms/modalState.ts + */ export interface LoadTestsetModalContentProps { modalProps: ModalProps - testsetCsvData: Testset["csvdata"] - selectedRowKeys: React.Key[] - setSelectedRowKeys: React.Dispatch> - isLoadingTestset: boolean } export interface LoadTestsetModalFooterProps { @@ -19,5 +24,8 @@ export interface LoadTestsetModalFooterProps { isLoadingTestset: boolean selectedRowKeys: React.Key[] testsetCsvData: Testset["csvdata"] - setTestsetData: React.Dispatch[] | null>> + setTestsetData: (payload: LoadTestsetSelectionPayload | null) => void + selectedRevisionId: string + isCreatingNew: boolean + newTestsetName: string } diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/atoms/modalState.ts b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/atoms/modalState.ts new file mode 100644 index 0000000000..eb7f450f89 --- /dev/null +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/atoms/modalState.ts @@ -0,0 +1,38 @@ +import {atom} from "jotai" + +/** + * Atoms for LoadTestsetModal state management + * These atoms eliminate prop drilling and provide centralized state management + */ + +/** + * Tracks whether the user is in "create new testset" mode + */ +export const isCreatingNewTestsetAtom = atom(false) + +/** + * Stores the name for a new testset being created in the UI + */ +export const newTestsetNameAtom = atom("") + +/** + * Stores the commit message for a new testset being created in the UI + */ +export const newTestsetCommitMessageAtom = atom("") + +/** + * Tracks which table rows are selected for loading into the playground + */ +export const selectedTestcaseRowKeysAtom = atom([]) + +/** + * Resets all modal state to initial values (call when modal closes) + * Note: Does NOT manually cleanup testcase atoms - that's handled by revisionChangeEffectAtom + * when the revision selection changes naturally + */ +export const resetModalStateAtom = atom(null, (_get, set) => { + set(isCreatingNewTestsetAtom, false) + set(newTestsetNameAtom, "") + set(newTestsetCommitMessageAtom, "") + set(selectedTestcaseRowKeysAtom, []) +}) diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/CreateTestsetCard.tsx b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/CreateTestsetCard.tsx new file mode 100644 index 0000000000..af7c1016c9 --- /dev/null +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/CreateTestsetCard.tsx @@ -0,0 +1,312 @@ +import {useCallback, useState} from "react" + +import {InboxOutlined} from "@ant-design/icons" +import {ArrowLeft, Table, UploadSimple} from 
"@phosphor-icons/react" +import {Button, Input, Typography, Upload} from "antd" +import {useAtom, useSetAtom} from "jotai" + +import {message} from "@/oss/components/AppMessageContext" +import {useTestsetFileUpload} from "@/oss/hooks/useTestsetFileUpload" +import {enableRevisionsListQueryAtom} from "@/oss/state/entities/testset" +import {selectedRevisionIdAtom, selectedTestsetIdAtom} from "@/oss/state/testsetSelection" + +import { + isCreatingNewTestsetAtom, + newTestsetCommitMessageAtom, + newTestsetNameAtom, + selectedTestcaseRowKeysAtom, +} from "../atoms/modalState" + +export interface CreateTestsetCardProps { + onTestsetCreated?: () => void +} + +export const CreateTestsetCard: React.FC = ({onTestsetCreated}) => { + const [isCreatingNew, setIsCreatingNew] = useAtom(isCreatingNewTestsetAtom) + const [newTestsetName, setNewTestsetName] = useAtom(newTestsetNameAtom) + const [newTestsetCommitMessage, setNewTestsetCommitMessage] = useAtom( + newTestsetCommitMessageAtom, + ) + const [selectedRevisionId, setSelectedRevisionId] = useAtom(selectedRevisionIdAtom) + const [selectedTestset, setSelectedTestset] = useAtom(selectedTestsetIdAtom) + const setSelectedRowKeys = useSetAtom(selectedTestcaseRowKeysAtom) + const enableRevisionsListQuery = useSetAtom(enableRevisionsListQueryAtom) + + // Track if user has selected a file (but not uploaded yet) + const [hasSelectedFile, setHasSelectedFile] = useState(false) + + // Track previous selection before entering create mode (so we can restore on cancel) + const [previousSelection, setPreviousSelection] = useState<{ + testsetId: string + revisionId: string + } | null>(null) + + // File upload hook for drag & drop functionality + const { + handleFileSelect, + uploadFile, + uploadLoading: isUploadingFile, + testsetName, + selectedFile, + } = useTestsetFileUpload({ + onSuccess: async (response) => { + message.success("Testset uploaded successfully") + + console.log("[CreateTestsetCard] Full upload response:", response.data) + + // Parse response - API returns {count: 1, testset: {...}} + const testsetData = response.data?.testset + const revisionId = testsetData?.revision_id + const testsetId = testsetData?.testset_id || testsetData?.id + + if (revisionId && testsetId) { + console.log("[CreateTestsetCard] Upload success:", {revisionId, testsetId}) + + // Refresh testsets list to show the newly created testset + // Wait for the list to refresh before setting selection + await onTestsetCreated?.() + + // Enable revisions query for the new testset + enableRevisionsListQuery(testsetId) + + // Exit create mode first (prevents auto-cleanup on selection) + setIsCreatingNew(false) + + // Select the newly created testset and revision + setSelectedTestset(testsetId) + setSelectedRevisionId(revisionId) + + // Reset file selection state + setHasSelectedFile(false) + } else { + console.warn("[CreateTestsetCard] Missing IDs in upload response:", response.data) + } + }, + onError: () => { + // Error is already handled by the hook + setHasSelectedFile(false) + }, + }) + + // Handle file selection (don't auto-upload, let user confirm) + const handleFileChange = useCallback( + (info: any) => { + const file = info.fileList[0] + if (file) { + handleFileSelect(file) + setHasSelectedFile(true) + } + }, + [handleFileSelect], + ) + + // Handle upload button click + const handleUploadClick = useCallback(async () => { + await uploadFile() + }, [uploadFile]) + + const handleStartCreatingNew = useCallback(() => { + setIsCreatingNew(true) + setNewTestsetName("") + 
setNewTestsetCommitMessage("") + setSelectedRowKeys([]) + setSelectedRevisionId("") + setSelectedTestset("") + }, [ + setSelectedRowKeys, + setSelectedRevisionId, + setSelectedTestset, + setIsCreatingNew, + setNewTestsetName, + setNewTestsetCommitMessage, + ]) + + const handleCreateFromUI = useCallback(() => { + // Save current selection before entering create mode + setPreviousSelection({ + testsetId: selectedTestset, + revisionId: selectedRevisionId, + }) + + // Set revision to "new" for table editing + // The testcases table hook will automatically initialize empty revision when it sees "new" + setSelectedTestset("") + setSelectedRevisionId("new") + }, [setSelectedRevisionId, setSelectedTestset, selectedTestset, selectedRevisionId]) + + const handleCancelCreateNew = useCallback(() => { + setIsCreatingNew(false) + setNewTestsetName("") + setNewTestsetCommitMessage("") + setSelectedRowKeys([]) + + // Restore previous selection if available + // The revision change will automatically trigger cleanup via revisionChangeEffectAtom + if (previousSelection) { + setSelectedTestset(previousSelection.testsetId) + setSelectedRevisionId(previousSelection.revisionId) + setPreviousSelection(null) + } else { + // No previous selection, just clear + // Setting to empty string will trigger cleanup of "new" revision + setSelectedRevisionId("") + setSelectedTestset("") + } + }, [ + setSelectedRevisionId, + setSelectedRowKeys, + setSelectedTestset, + setIsCreatingNew, + setNewTestsetName, + setNewTestsetCommitMessage, + previousSelection, + ]) + + // If in create mode with a revision selected, show the enhanced creation UI + if (isCreatingNew && selectedRevisionId) { + return ( +
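
`handleCreateFromUI` and `handleCancelCreateNew` above form a snapshot-and-restore pair: remember the current testset/revision before entering create mode, and reinstate it on cancel. The pattern reduced to a sketch; the hook name and `Selection` type are illustrative, not part of the codebase:

```typescript
import {useCallback, useState} from "react"

interface Selection {
    testsetId: string
    revisionId: string
}

export function useRestorableSelection(
    current: Selection,
    setSelection: (next: Selection) => void,
) {
    const [previous, setPrevious] = useState<Selection | null>(null)

    const enterCreateMode = useCallback(() => {
        setPrevious(current) // snapshot before switching away
        // "new" is the sentinel the testcases table treats as an empty draft revision
        setSelection({testsetId: "", revisionId: "new"})
    }, [current, setSelection])

    const cancelCreateMode = useCallback(() => {
        // Restoring (or clearing) the revision lets revisionChangeEffectAtom
        // clean up the draft "new" revision
        setSelection(previous ?? {testsetId: "", revisionId: ""})
        setPrevious(null)
    }, [previous, setSelection])

    return {enterCreateMode, cancelCreateMode}
}
```
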
+ {/* Step 1: Name input */} +
+
+ 1. + + Name new testset + +
+ setNewTestsetName(e.target.value)} + autoFocus + /> +
+ + {/* Step 2: Instructions */} +
+
+ 2. + + Add testcases + +
+ + Use the table to create testcases and custom fields. + +
+ + {/* Step 3: Commit message */} +
+
+ 3. + + Commit message + + + (optional) + +
+ setNewTestsetCommitMessage(e.target.value)} + rows={3} + /> +
+ +
+ +
+
+ ) + } + + // If file is selected, show upload confirmation + if (hasSelectedFile && selectedFile) { + return ( +
+
+ File selected + +
+
+ {selectedFile.name} + + Testset name: {testsetName || "(auto-generated)"} + +
+ +
+ ) + } + + // Otherwise, show the create options card (upload or build in UI) + return ( +
+
+ + Create a new testset + + false} + showUploadList={false} + disabled={isUploadingFile} + className="!bg-white !border-gray-200 !rounded-xl" + onChange={handleFileChange} + > +
+ + Drop CSV/JSON here or click to browse +
+
+
+ +
+ + or + +
+ +
+ +
+
+ ) +} diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/TestsetListSidebar.tsx b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/TestsetListSidebar.tsx new file mode 100644 index 0000000000..f914c7cc9f --- /dev/null +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/TestsetListSidebar.tsx @@ -0,0 +1,289 @@ +import {useCallback, useEffect, useMemo, useRef, useState} from "react" + +import {RightOutlined} from "@ant-design/icons" +import {Divider, Input, Menu, Popover, Spin, Typography} from "antd" +import {atom, useAtom, useAtomValue, useSetAtom} from "jotai" + +import {buildRevisionMenuItems} from "@/oss/components/TestcasesTableNew/components/RevisionMenuItems" +import {Testset} from "@/oss/lib/Types" +import {revision, testset} from "@/oss/state/entities/testset" +import { + selectTestsetAtom, + selectedRevisionIdAtom, + selectedTestsetIdAtom, +} from "@/oss/state/testsetSelection" + +import {selectedTestcaseRowKeysAtom} from "../atoms/modalState" + +type RevisionData = { + id: string + version: number + created_at?: string | null + message?: string | null +}[] + +interface QueryState { + data: RevisionData + isFetching: boolean + isPending: boolean + isLoading: boolean +} + +interface TestsetListSidebarProps { + modalOpen: boolean + isCreatingNew: boolean +} + +export const TestsetListSidebar: React.FC = ({ + modalOpen, + isCreatingNew, +}) => { + const [selectedTestsetId, setSelectedTestset] = useAtom(selectedTestsetIdAtom) + const [selectedRevisionId, setSelectedRevisionId] = useAtom(selectedRevisionIdAtom) + const selectTestsetAction = useSetAtom(selectTestsetAtom) + const setSelectedRowKeys = useSetAtom(selectedTestcaseRowKeysAtom) + const enableRevisionsListQuery = useSetAtom(revision.queries.enableList) + + const [searchTerm, setSearchTerm] = useState("") + const [revisionPanelTestsetId, setRevisionPanelTestsetId] = useState("") + + // Track if we've already auto-selected for the current modal session + const hasAutoSelectedRef = useRef(false) + + // Use testset controller API + const testsetsQuery = useAtomValue(testset.queries.list(null)) + const testsets = useMemo(() => testsetsQuery.data?.testsets ?? [], [testsetsQuery.data]) + const isLoadingTestsets = testsetsQuery.isLoading + + // Create an actual atom for empty query state + const emptyQueryAtom = useMemo( + () => + atom({ + data: [], + isFetching: false, + isPending: false, + isLoading: false, + }), + [], + ) + + const popoverRevisionsQueryAtom = useMemo( + () => + revisionPanelTestsetId + ? revision.queries.list(revisionPanelTestsetId) + : selectedTestsetId + ? 
revision.queries.list(selectedTestsetId) + : emptyQueryAtom, + [revisionPanelTestsetId, selectedTestsetId, emptyQueryAtom], + ) + + const popoverRevisionsQuery = useAtomValue(popoverRevisionsQueryAtom) + + const popoverRevisions = useMemo( + () => (popoverRevisionsQuery.data as RevisionData) || [], + [popoverRevisionsQuery.data], + ) + + const popoverRevisionsLoading = + popoverRevisionsQuery.isPending || + (popoverRevisionsQuery as any).isLoading || + (popoverRevisionsQuery as any).isFetching + + const filteredTestset = useMemo(() => { + if (!searchTerm) return testsets + return testsets.filter((item: Testset) => + item.name.toLowerCase().includes(searchTerm.toLowerCase()), + ) + }, [searchTerm, testsets]) + + const testsetMenuItems = useMemo(() => { + if (!filteredTestset.length) return [] + return filteredTestset.map((ts: Testset) => ({ + key: ts.id, + label: ts.name, + hasRevisions: true, // Always show revision arrow for testsets + })) + }, [filteredTestset]) + + const onChangeTestset = useCallback( + ({key}: any) => { + setSelectedRowKeys([]) + const foundTestset = testsets.find((ts: Testset) => ts.id === key) + // Enable revisions query for this testset + enableRevisionsListQuery(key) + selectTestsetAction({ + testsetId: key, + testsetName: foundTestset?.name || "", + autoSelectLatest: true, + }) + }, + [setSelectedRowKeys, testsets, selectTestsetAction, enableRevisionsListQuery], + ) + + const onChangeRevision = useCallback( + ({key}: any) => { + setSelectedRowKeys([]) + setSelectedRevisionId(key) + setRevisionPanelTestsetId("") + }, + [setRevisionPanelTestsetId, setSelectedRevisionId, setSelectedRowKeys], + ) + + const popoverMenuItems = useMemo( + () => + buildRevisionMenuItems(popoverRevisions, (revisionId) => { + setSelectedTestset(revisionPanelTestsetId || selectedTestsetId || "") + onChangeRevision({key: revisionId}) + setRevisionPanelTestsetId("") + }) ?? [], + [popoverRevisions, onChangeRevision, revisionPanelTestsetId, selectedTestsetId], + ) + + const menuSelectedKeys = useMemo( + () => (selectedTestsetId ? [selectedTestsetId] : []), + [selectedTestsetId], + ) + const revisionSelectedKeys = useMemo( + () => (selectedRevisionId ? [selectedRevisionId] : []), + [selectedRevisionId], + ) + + // Reset auto-selection flag when modal closes + useEffect(() => { + if (!modalOpen) { + hasAutoSelectedRef.current = false + } + }, [modalOpen]) + + // Auto-select first testset when modal opens (but only once per modal open) + useEffect(() => { + if (!modalOpen || !testsets.length || isCreatingNew || hasAutoSelectedRef.current) { + return + } + + const prevExists = + selectedTestsetId && testsets.some((ts: Testset) => ts?.id === selectedTestsetId) + + if (!prevExists && testsets[0]) { + hasAutoSelectedRef.current = true + // Enable revisions query for auto-selected testset + enableRevisionsListQuery(testsets[0].id) + selectTestsetAction({ + testsetId: testsets[0].id, + testsetName: testsets[0].name, + autoSelectLatest: true, + }) + } + }, [modalOpen, testsets.length, isCreatingNew]) + + if (isCreatingNew && selectedRevisionId) { + return null // Hide sidebar content during create flow - CreateTestsetCard handles UI + } + + return ( + <> + setSearchTerm(e.target.value)} + /> + + + +
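
The sidebar fetches revisions lazily: the list atom stays idle until `revision.queries.enableList` is dispatched with a testset id, which happens on selection or when a revision popover opens. A hedged sketch of the enable-then-read flow, assuming the controller atoms behave as the usage above implies:

```typescript
import {useAtomValue, useSetAtom} from "jotai"

import {revision} from "@/oss/state/entities/testset"

export function useLazyRevisions(testsetId: string) {
    const enableList = useSetAtom(revision.queries.enableList)
    const query = useAtomValue(revision.queries.list(testsetId))

    return {
        // Call from an event handler, e.g. when the popover opens
        enable: () => enableList(testsetId),
        revisions: query.data ?? [],
        isLoading: query.isPending,
    }
}
```
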
+ Testsets +
+ + { + const {hasRevisions: _hasRevisions, ...restTs} = ts + const hasRevisions = _hasRevisions ?? true + return { + ...restTs, + label: ( +
+ { + e.stopPropagation() + onChangeTestset({key: ts.key}) + }} + className="cursor-pointer flex-1 min-w-0 truncate text-left" + title={ts.label} + > + {ts.label} + + {hasRevisions && ( + { + if (open) { + setRevisionPanelTestsetId(ts.key) + // Enable lazy query for this testset + enableRevisionsListQuery(ts.key) + } else { + setRevisionPanelTestsetId("") + } + }} + content={ + popoverRevisionsLoading ? ( +
+ +
+ ) : popoverMenuItems.length ? ( +
+ { + setSelectedTestset(ts.key) + onChangeRevision(info) + setRevisionPanelTestsetId("") + }} + classNames={{ + root: "!m-0 !p-0", + }} + selectedKeys={revisionSelectedKeys} + className="min-w-[220px] !border-none !p-0 !m-0 [&_.ant-menu-item]:h-auto [&_.ant-menu-item]:min-h-[32px] [&_.ant-menu-item]:leading-normal [&_.ant-menu-item]:!py-1 [&_.ant-menu-item]:!my-0 [&_.ant-menu-title-content]:whitespace-normal" + rootClassName="!p-0 !m-0" + /> +
+ ) : ( +
+ No revisions +
+ ) + } + classNames={{ + root: "!p-0", + content: "!p-0", + container: "load-testset-revision-popover !p-1", + }} + getPopupContainer={() => + (document.querySelector( + ".ant-modal-body", + ) as HTMLElement) || document.body + } + > + + + +
+ )} +
+ ), + } + }, + )} + selectedKeys={menuSelectedKeys} + className="flex-1 overflow-y-auto !border-none !p-0 [&_.ant-menu-item]:px-2 [&_.ant-menu-item]:py-1.5 [&_.ant-menu-item]:h-auto [&_.ant-menu-item]:min-h-[38px] [&_.ant-menu-title-content]:flex [&_.ant-menu-title-content]:items-center [&_.ant-menu-title-content]:w-full" + /> + {isLoadingTestsets && ( +
+ +
+ )} + + ) +} diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/TestsetPreviewPanel.tsx b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/TestsetPreviewPanel.tsx new file mode 100644 index 0000000000..bb0fb0b185 --- /dev/null +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/components/TestsetPreviewPanel.tsx @@ -0,0 +1,219 @@ +import {useCallback, useEffect, useState} from "react" + +import {PlusOutlined} from "@ant-design/icons" +import {Table} from "@phosphor-icons/react" +import {Button, Input, Modal, Typography} from "antd" +import {useAtom, useAtomValue} from "jotai" + +import {message} from "@/oss/components/AppMessageContext" +import {useRowHeight} from "@/oss/components/InfiniteVirtualTable" +import TestcaseEditDrawer from "@/oss/components/TestcasesTableNew/components/TestcaseEditDrawer" +import {TestcasesTableShell} from "@/oss/components/TestcasesTableNew/components/TestcasesTableShell" +import {useTestcasesTable} from "@/oss/components/TestcasesTableNew/hooks/useTestcasesTable" +import { + testcaseRowHeightAtom, + TESTCASE_ROW_HEIGHT_CONFIG, +} from "@/oss/components/TestcasesTableNew/state/rowHeight" +import {selectedRevisionIdAtom} from "@/oss/state/testsetSelection" + +import {isCreatingNewTestsetAtom, selectedTestcaseRowKeysAtom} from "../atoms/modalState" + +const TestcasesTablePreview = ({ + revisionId, + isCreateMode = false, + showActions = false, +}: { + revisionId: string + isCreateMode?: boolean + showActions?: boolean +}) => { + const [selectedRowKeys, setSelectedRowKeys] = useAtom(selectedTestcaseRowKeysAtom) + const table = useTestcasesTable({revisionId, mode: isCreateMode ? "edit" : "view"}) + const rowHeight = useRowHeight(testcaseRowHeightAtom, TESTCASE_ROW_HEIGHT_CONFIG) + const [isAddColumnModalOpen, setIsAddColumnModalOpen] = useState(false) + const [newColumnName, setNewColumnName] = useState("") + const [editingTestcaseId, setEditingTestcaseId] = useState(null) + + useEffect(() => { + setEditingTestcaseId(null) + }, [revisionId, showActions]) + + const handleRowClick = useCallback( + (record: any) => { + const key = record?.key + if (key === undefined || key === null) return + setSelectedRowKeys((prev) => { + const exists = prev.includes(key) + if (exists) { + return prev.filter((k) => k !== key) + } + return [...prev, key] + }) + if (showActions) { + const recordId = record?.id ?? (typeof key === "string" ? key : String(key)) + setEditingTestcaseId(recordId) + } + }, + [setSelectedRowKeys, showActions], + ) + + const handleAddRow = useCallback(() => { + if (!showActions) return + const newRow = table.addTestcase() + const newRowKey = String(newRow.key ?? newRow.id ?? Date.now()) + setSelectedRowKeys((prev) => (prev.includes(newRowKey) ? prev : [...prev, newRowKey])) + message.success("Row added. Fill in the cells and click Create & Load.") + setEditingTestcaseId(newRowKey) + }, [setSelectedRowKeys, showActions, table]) + + const handleDeleteSelected = useCallback(() => { + if (!showActions || !selectedRowKeys.length) return + table.deleteTestcases(selectedRowKeys.map(String)) + setSelectedRowKeys([]) + message.success("Selected rows removed. 
Click Create & Load to keep the changes.")
+        setEditingTestcaseId(null)
+    }, [selectedRowKeys, setSelectedRowKeys, showActions, table])
+
+    const handleConfirmAddColumn = useCallback(() => {
+        if (!showActions) return
+        const trimmed = newColumnName.trim()
+        if (!trimmed) {
+            message.error("Column name cannot be empty")
+            return
+        }
+        const success = table.addColumn(trimmed)
+        if (!success) {
+            message.error(`Column "${trimmed}" already exists`)
+            return
+        }
+        message.success(`Added column "${trimmed}"`)
+        setIsAddColumnModalOpen(false)
+        setNewColumnName("")
+    }, [newColumnName, showActions, table])
+
+    const actionsNode = showActions ? (
+ + +
+ ) : null + + return ( + <> +
+ setIsAddColumnModalOpen(true) : undefined} + /> +
+ + {showActions && ( + setEditingTestcaseId(null)} + isNewRow={ + !!editingTestcaseId && + (editingTestcaseId.startsWith("new-") || + editingTestcaseId.startsWith("local-")) + } + onSaveTestset={table.saveTestset} + isSavingTestset={table.isSaving} + /> + )} + + { + setIsAddColumnModalOpen(false) + setNewColumnName("") + }} + okButtonProps={{disabled: !newColumnName.trim()}} + destroyOnHidden + > +
+ Column name + setNewColumnName(e.target.value)} + placeholder="e.g. prompt or metadata.notes" + autoFocus + /> + + Tip: Use dot notation like{" "} + meta.correct_answer to + group related columns. + +
+
+ + ) +} + +export const TestsetPreviewPanel: React.FC = () => { + const selectedRevisionId = useAtomValue(selectedRevisionIdAtom) + const isCreatingNew = useAtomValue(isCreatingNewTestsetAtom) + + if (selectedRevisionId) { + return ( + + ) + } + + // Empty state when no revision is selected + if (isCreatingNew) { + return ( +
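
`handleConfirmAddColumn` above relies on `table.addColumn` returning `false` for duplicate names. The validation order (trim, empty check, duplicate check, success toast) extracted as a standalone sketch, under the assumption that `addColumn` reports duplicates via its boolean result, as the usage implies:

```typescript
interface Notifier {
    error: (msg: string) => void
    success: (msg: string) => void
}

// Returns true when the column was added, false when validation failed.
export function confirmAddColumn(
    rawName: string,
    addColumn: (name: string) => boolean,
    notify: Notifier,
): boolean {
    const trimmed = rawName.trim()
    if (!trimmed) {
        notify.error("Column name cannot be empty")
        return false
    }
    if (!addColumn(trimmed)) {
        notify.error(`Column "${trimmed}" already exists`)
        return false
    }
    notify.success(`Added column "${trimmed}"`)
    return true
}
```
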
+
+ + + +
+ + Start building your testset + + + Upload a CSV/JSON or click{" "} + Build in UI to add rows + manually. Your testcases will appear here as soon as you begin. + +
+ + + ) + } + + return ( +
+ + Select a revision to view testcases. + +
+ ) +} diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/hooks/useSelectedTestcasesData.ts b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/hooks/useSelectedTestcasesData.ts index 65d7a6a24c..9b39c0f18e 100644 --- a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/hooks/useSelectedTestcasesData.ts +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/hooks/useSelectedTestcasesData.ts @@ -1,12 +1,56 @@ import {useMemo} from "react" -import {getDefaultStore} from "jotai/vanilla" +import {atom, useAtomValue} from "jotai" -import {testcaseEntityAtomFamily} from "@/oss/state/entities/testcase/testcaseEntity" +import {testcase} from "@/oss/state/entities/testcase" + +/** + * Extract data fields from a testcase entity, removing metadata + */ +function extractTestcaseData(entity: Record): Record | null { + if (!entity) return null + + // Remove metadata fields, keep only data columns + // Note: rename testcase -> testcaseField to avoid shadowing imported controller + const { + id, + key: _key, + testset_id, + set_id, + testcase: testcaseField, + data: dataField, + created_at, + updated_at, + created_by_id, + updated_by_id, + deleted_at, + deleted_by_id, + flags, + tags, + meta, + __isSkeleton, + testcase_dedup_id, + ...rest + } = entity + + // Priority: testcase field > data field > rest of fields + // This handles different API response structures + if (testcaseField && typeof testcaseField === "object") { + return testcaseField as Record + } + if (dataField && typeof dataField === "object") { + return dataField as Record + } + + return rest +} /** * Extract selected testcases from entity atoms and convert to playground format * + * Uses a derived atom for proper reactivity - when any selected entity changes, + * this hook will re-render with updated data. 
+ * * @param revisionId - Current revision ID (for cache coherence) * @param selectedRowKeys - Array of selected testcase IDs * @returns Array of testcase data in playground format (no metadata) @@ -14,57 +58,30 @@ import {testcaseEntityAtomFamily} from "@/oss/state/entities/testcase/testcaseEn export const useSelectedTestcasesData = ( revisionId: string | null, selectedRowKeys: React.Key[], -): Record[] => { - const globalStore = useMemo(() => getDefaultStore(), []) - - return useMemo(() => { - if (!revisionId || !selectedRowKeys.length) return [] - - return selectedRowKeys - .map((key) => { - try { - const testcaseId = String(key) - const entity = globalStore.get(testcaseEntityAtomFamily(testcaseId)) - - if (!entity) return null - - // Remove metadata fields, keep only data columns - const { - id, - key: _key, - testset_id, - set_id, - testcase, - data: dataField, - created_at, - updated_at, - created_by_id, - updated_by_id, - deleted_at, - deleted_by_id, - flags, - tags, - meta, - __isSkeleton, - testcase_dedup_id, - ...rest - } = entity +): Record[] => { + // Create a derived atom that subscribes to all selected entities + // This provides proper reactivity - updates when any entity changes + const selectedDataAtom = useMemo( + () => + atom((get) => { + if (!revisionId || !selectedRowKeys.length) return [] - // Priority: testcase field > data field > rest of fields - // This handles different API response structures - if (testcase && typeof testcase === "object") { - return testcase as Record - } - if (dataField && typeof dataField === "object") { - return dataField as Record - } + return selectedRowKeys + .map((key) => { + try { + const testcaseId = String(key) + // Properly subscribe to entity via selector + const entity = get(testcase.selectors.data(testcaseId)) + return extractTestcaseData(entity as Record) + } catch (error) { + console.error(`Failed to extract testcase ${key}:`, error) + return null + } + }) + .filter((data): data is Record => data !== null) + }), + [revisionId, selectedRowKeys.join(",")], + ) - return rest - } catch (error) { - console.error(`Failed to extract testcase ${key}:`, error) - return null - } - }) - .filter((data): data is Record => data !== null) - }, [revisionId, selectedRowKeys, globalStore]) + return useAtomValue(selectedDataAtom) } diff --git a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/index.tsx b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/index.tsx index 98fc38b053..6bc171fcd0 100644 --- a/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/index.tsx +++ b/web/oss/src/components/Playground/Components/Modals/LoadTestsetModal/index.tsx @@ -1,4 +1,4 @@ -import {useCallback, useEffect, useState} from "react" +import {useCallback, useEffect} from "react" import {useAtomValue, useSetAtom} from "jotai" import dynamic from "next/dynamic" @@ -7,6 +7,12 @@ import EnhancedModal from "@/oss/components/EnhancedUIs/Modal" import {resetSelectionAtom, selectedRevisionIdAtom} from "@/oss/state/testsetSelection" import {LoadTestsetModalProps} from "./assets/types" +import { + isCreatingNewTestsetAtom, + newTestsetNameAtom, + resetModalStateAtom, + selectedTestcaseRowKeysAtom, +} from "./atoms/modalState" import {useSelectedTestcasesData} from "./hooks/useSelectedTestcasesData" const LoadTestsetModalFooter = dynamic(() => import("./assets/LoadTestsetModalFooter"), { @@ -19,45 +25,46 @@ const LoadTestsetModalContent = dynamic(() => import("./assets/LoadTestsetModalC const LoadTestsetModal: 
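
`extractTestcaseData` above resolves a playground row by priority: a nested `testcase` object wins, then `data`, then whatever remains once metadata keys are stripped. The same logic as a standalone sketch; the key list mirrors the destructuring above:

```typescript
const NON_DATA_KEYS = new Set([
    "id", "key", "testset_id", "set_id", "testcase", "data",
    "created_at", "updated_at", "created_by_id", "updated_by_id",
    "deleted_at", "deleted_by_id", "flags", "tags", "meta",
    "__isSkeleton", "testcase_dedup_id",
])

export function toPlaygroundRow(
    entity: Record<string, unknown> | null,
): Record<string, unknown> | null {
    if (!entity) return null
    // Priority: nested testcase payload > data payload > remaining columns
    for (const field of ["testcase", "data"] as const) {
        const nested = entity[field]
        if (nested && typeof nested === "object") {
            return nested as Record<string, unknown>
        }
    }
    return Object.fromEntries(
        Object.entries(entity).filter(([k]) => !NON_DATA_KEYS.has(k)),
    )
}
```
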
React.FC = ({setTestsetData, ...props}) => { const {onCancel, afterClose, ...modalProps} = props - // Use shared atoms for testset/revision selection + // Use atoms for all modal state const selectedRevisionId = useAtomValue(selectedRevisionIdAtom) + const selectedRowKeys = useAtomValue(selectedTestcaseRowKeysAtom) + const isCreatingNew = useAtomValue(isCreatingNewTestsetAtom) + const newTestsetName = useAtomValue(newTestsetNameAtom) const resetSelection = useSetAtom(resetSelectionAtom) - - // Row selection is modal-specific (not shared) - const [selectedRowKeys, setSelectedRowKeys] = useState([]) + const resetModalState = useSetAtom(resetModalStateAtom) // Extract selected testcases from entity atoms in playground format const selectedTestcasesData = useSelectedTestcasesData(selectedRevisionId, selectedRowKeys) const isLoadingTestset = false - // Reset selection state when modal opens + // Reset state when modal opens useEffect(() => { if (modalProps.open) { - // Reset row selection when modal opens - setSelectedRowKeys([]) + resetModalState() } - }, [modalProps.open]) + }, [modalProps.open, resetModalState]) const onClose = useCallback(() => { onCancel?.({} as any) - setSelectedRowKeys([]) - }, [onCancel]) + resetModalState() + }, [onCancel, resetModalState]) return ( { - setSelectedRowKeys([]) + resetModalState() resetSelection() afterClose?.() }} - title="Load testset" + title={isCreatingNew ? "Create testset" : "Load testset"} footer={ = ({setTestsetData, ...p selectedRowKeys={selectedRowKeys} testsetCsvData={selectedTestcasesData} setTestsetData={setTestsetData} + selectedRevisionId={selectedRevisionId} + isCreatingNew={isCreatingNew} + newTestsetName={newTestsetName} /> } onCancel={onClose} classNames={{ - body: "h-[620px] overflow-hidden !flex-0 !flex", + body: "overflow-hidden !flex", }} {...modalProps} > - + ) } diff --git a/web/oss/src/components/Playground/Components/PlaygroundGenerationComparisonView/GenerationComparisonHeader/index.tsx b/web/oss/src/components/Playground/Components/PlaygroundGenerationComparisonView/GenerationComparisonHeader/index.tsx index 31912c542f..85a4850c1a 100644 --- a/web/oss/src/components/Playground/Components/PlaygroundGenerationComparisonView/GenerationComparisonHeader/index.tsx +++ b/web/oss/src/components/Playground/Components/PlaygroundGenerationComparisonView/GenerationComparisonHeader/index.tsx @@ -11,10 +11,10 @@ import {triggerWebWorkerTestAtom} from "@/oss/state/newPlayground/mutations/webW import RunButton from "../../../assets/RunButton" import { appChatModeAtom, - generationHeaderDataAtomFamily, - displayedVariantsAtom, cancelTestsMutationAtom, canRunAllChatComparisonAtom, + displayedVariantsAtom, + generationHeaderDataAtomFamily, } from "../../../state/atoms" import {clearAllRunsMutationAtom} from "../../../state/atoms/utilityMutations" import TestsetDrawerButton from "../../Drawers/TestsetDrawer" diff --git a/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationCompletion/index.tsx b/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationCompletion/index.tsx index 2a84088951..84eff83cae 100644 --- a/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationCompletion/index.tsx +++ b/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationCompletion/index.tsx @@ -35,7 +35,7 @@ const GenerationCompletion = ({ // Ensure is handled at MainLayout level to avoid write-on-render here return ( -
+
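
The SingleView changes just below sync a header-level "collapse all" toggle with per-row local state: a global atom drives bulk changes, while a ref tracks the previous global value so only genuine header toggles override a row's own state. A reduced sketch (the atom here mirrors `GenerationHeader/store.ts`):

```typescript
import {useEffect, useRef, useState} from "react"

import {atom, useAtomValue} from "jotai"

// Mirrors allGenerationsCollapsedAtom from GenerationHeader/store.ts
export const allCollapsedAtom = atom(false)

export function useCollapseSync() {
    const globalCollapsed = useAtomValue(allCollapsedAtom)
    const [isCollapsed, setIsCollapsed] = useState(false)
    const prevGlobal = useRef(globalCollapsed)

    useEffect(() => {
        // Adopt the global value only when it actually changes from the header,
        // so a row expanded by the user stays expanded otherwise
        if (prevGlobal.current !== globalCollapsed) {
            setIsCollapsed(globalCollapsed)
            prevGlobal.current = globalCollapsed
        }
    }, [globalCollapsed])

    return [isCollapsed, setIsCollapsed] as const
}
```
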
{viewType === "comparison" ? ( import("../GenerationVariableOptions"), { - ssr: false, -}) - interface Props { rowId: string variantId: string @@ -53,6 +63,102 @@ const SingleView = ({ ), ) as string[] + const inputRowIds = useAtomValue(generationInputRowIdsAtom) as string[] + const allInputRowIds = useAtomValue(inputRowIdsAtom) as string[] + const testCaseNumber = useMemo(() => { + const index = inputRowIds.indexOf(rowId) + return index >= 0 ? index + 1 : null + }, [inputRowIds, rowId]) + + // Delete and duplicate handlers + const deleteInputRow = useSetAtom(deleteGenerationInputRowMutationAtom) + const duplicateInputRow = useSetAtom(duplicateGenerationInputRowMutationAtom) + const inputRowsLength = allInputRowIds?.length || 0 + + // Check if there are results for the add to testset button + const hasResults = useMemo(() => { + return Boolean(resultHash && result) + }, [resultHash, result]) + + // Global collapse state from header (for "collapse all" / "expand all") + const [isAllGenerationsCollapsed] = useAtom(allGenerationsCollapsedAtom) + + // Local collapse state for this specific test case + const [isCollapsed, setIsCollapsed] = useState(false) + + // Track previous global state to detect changes from header + const prevGlobalCollapsed = useRef(isAllGenerationsCollapsed) + + // Sync local state when global "collapse all" state changes from header + useEffect(() => { + if (prevGlobalCollapsed.current !== isAllGenerationsCollapsed) { + setIsCollapsed(isAllGenerationsCollapsed) + prevGlobalCollapsed.current = isAllGenerationsCollapsed + } + }, [isAllGenerationsCollapsed]) + + // Collapse state for individual input/output components + const [collapsedInputs, setCollapsedInputs] = useState>({}) + const [collapsedOutput] = useState(false) + + const toggleInputCollapse = useCallback((id: string) => { + setCollapsedInputs((prev) => ({...prev, [id]: !prev[id]})) + }, []) + + if (isCollapsed && !inputOnly) { + return ( +
+
+ } + type="text" + onClick={() => setIsCollapsed(false)} + tooltipProps={{title: "Expand"}} + size="small" + /> + {testCaseNumber && ( + Test case {testCaseNumber} + )} +
+
+ } + type="text" + onClick={() => deleteInputRow(rowId)} + size="small" + disabled={inputRowsLength === 1} + tooltipProps={{title: "Remove"}} + /> + } + type="text" + onClick={() => duplicateInputRow(rowId)} + size="small" + tooltipProps={{title: "Duplicate"}} + /> + + } + type="text" + size="small" + disabled={!hasResults} + tooltipProps={{title: "Add to testset"}} + /> + +
+ {!isBusy ? ( + + ) : ( + + )} +
+
+ ) + } + return (
-
+ {!inputOnly && ( +
+ } + type="text" + onClick={() => setIsCollapsed(true)} + tooltipProps={{title: "Collapse"}} + size="small" + /> + {testCaseNumber && ( + Test case {testCaseNumber} + )} +
+
+ } + type="text" + onClick={() => deleteInputRow(rowId)} + size="small" + disabled={inputRowsLength === 1} + tooltipProps={{title: "Remove"}} + /> + } + type="text" + onClick={() => duplicateInputRow(rowId)} + size="small" + tooltipProps={{title: "Duplicate"}} + /> + + } + type="text" + size="small" + disabled={!hasResults} + tooltipProps={{title: "Add to testset"}} + /> + +
+ {!isBusy ? ( + + ) : ( + + )} +
+ )} + +
{variableIds.length > 0 && ( - <> -
- - Variables - -
-
- {variableIds.map((id) => { - return ( -
- + {variableIds.map((id) => { + const isInputCollapsed = collapsedInputs[id] || false + return ( +
+
+
+ +
+ toggleInputCollapse(id)} + size="small" + type="text" + icon={ + isInputCollapsed ? ( + + ) : ( + + ) + } + tooltipProps={{ + title: isInputCollapsed ? "Expand" : "Collapse", + }} />
- ) - })} -
- - )} - {!inputOnly && variableIds.length === 0 ? ( -
- +
+ ) + })}
- ) : null} - - {!inputOnly && ( - )}
{!inputOnly ? ( -
+
- {!isBusy ? ( - - ) : ( - + className={clsx( + "relative w-full transition-all duration-300 ease-linear overflow-hidden", + { + "max-h-[120px]": collapsedOutput, + "h-fit": !collapsedOutput, + }, )} -
- -
+ > {isBusy ? ( ) : !result ? ( diff --git a/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationCompletionRow/index.tsx b/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationCompletionRow/index.tsx index 33970492af..5c529b7c25 100644 --- a/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationCompletionRow/index.tsx +++ b/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationCompletionRow/index.tsx @@ -12,13 +12,12 @@ import { displayedVariantsAtom, } from "../../../../state/atoms" import { - resolvedGenerationResultAtomFamily, generationRunStatusAtomFamily, + resolvedGenerationResultAtomFamily, } from "../../../../state/atoms/generationProperties" import DefaultView from "./DefaultView" import SingleView from "./SingleView" -import {useStyles} from "./styles" import type {GenerationCompletionRowProps} from "./types" // Keep dynamic imports local to presentational components @@ -33,8 +32,6 @@ const GenerationCompletionRow = ({ forceSingle, ...props }: GenerationCompletionRowProps) => { - const classes = useStyles() - const isChat = useAtomValue(appChatModeAtom) // Only subscribe to generation result atoms in completion mode @@ -117,7 +114,7 @@ const GenerationCompletionRow = ({ resultHash={resultHash} runRow={runRow} cancelRow={cancelRow} - containerClassName={classes.container} + containerClassName={"border-0 border-b border-solid border-colorBorderSecondary"} /> ) : ( ({ - container: { - borderBottom: `1px solid ${theme.colorBorderSecondary}`, - borderRight: `1px solid ${theme.colorBorderSecondary}`, - }, -})) diff --git a/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationHeader/index.tsx b/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationHeader/index.tsx index 9687ef87a4..740a15f678 100644 --- a/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationHeader/index.tsx +++ b/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationHeader/index.tsx @@ -1,9 +1,11 @@ import {useCallback, useEffect, useMemo} from "react" +import {ArrowsInLineVerticalIcon, ArrowsOutLineVerticalIcon} from "@phosphor-icons/react" import {Button, Tooltip, Typography} from "antd" import clsx from "clsx" -import {useAtomValue, useSetAtom} from "jotai" +import {useAtom, useAtomValue, useSetAtom} from "jotai" +import EnhancedButton from "@/oss/components/EnhancedUIs/Button" import {appTypeAtom} from "@/oss/components/Playground/state/atoms/app" import {generationInputRowIdsAtom} from "@/oss/components/Playground/state/atoms/generationProperties" import {clearAllRunsMutationAtom} from "@/oss/components/Playground/state/atoms/utilityMutations" @@ -13,10 +15,13 @@ import RunButton from "../../../../assets/RunButton" import {usePlaygroundAtoms} from "../../../../hooks/usePlaygroundAtoms" import {generationHeaderDataAtomFamily, triggerWebWorkerTestAtom} from "../../../../state/atoms" +import {allGenerationsCollapsedAtom} from "./store" import {useStyles} from "./styles" import TestSetMenu from "./TestSetMenu" import type {GenerationHeaderProps} from "./types" +// Global atom to track collapse state for all generations + const GenerationHeader = ({variantId}: GenerationHeaderProps) => { const classes = useStyles() @@ -36,6 +41,7 @@ const GenerationHeader = ({variantId}: GenerationHeaderProps) => { const runAllChat = useSetAtom(runAllChatAtom) const appType = 
useAtomValue(appTypeAtom) const completionRowIds = useAtomValue(generationInputRowIdsAtom) as string[] + const [isAllCollapsed, setIsAllCollapsed] = useAtom(allGenerationsCollapsedAtom) const runTests = useCallback(() => { if (appType === "chat") runAllChat() @@ -69,9 +75,27 @@ const GenerationHeader = ({variantId}: GenerationHeaderProps) => { )} >
- - Generations - + {appType === "chat" ? ( + + Generations + + ) : ( + + ) : ( + + ) + } + type="text" + onClick={() => setIsAllCollapsed(!isAllCollapsed)} + tooltipProps={{ + title: isAllCollapsed ? "Expand all" : "Collapse all", + }} + className="text-[16px] leading-[18px] font-[600] text-nowrap flex items-center" + /> + )}
diff --git a/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationHeader/store.ts b/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationHeader/store.ts new file mode 100644 index 0000000000..61b7113a7a --- /dev/null +++ b/web/oss/src/components/Playground/Components/PlaygroundGenerations/assets/GenerationHeader/store.ts @@ -0,0 +1,4 @@ +import {atom} from "jotai" + +// Atom to track global collapsed state of all generation inputs +export const allGenerationsCollapsedAtom = atom(false) diff --git a/web/oss/src/components/Playground/Components/PlaygroundVariantConfigPrompt/assets/ActionsOutputRenderer.tsx b/web/oss/src/components/Playground/Components/PlaygroundVariantConfigPrompt/assets/ActionsOutputRenderer.tsx index bd1563a0b2..edaa62776f 100644 --- a/web/oss/src/components/Playground/Components/PlaygroundVariantConfigPrompt/assets/ActionsOutputRenderer.tsx +++ b/web/oss/src/components/Playground/Components/PlaygroundVariantConfigPrompt/assets/ActionsOutputRenderer.tsx @@ -168,6 +168,7 @@ const ActionsOutputRenderer: React.FC = ({variantId, compoundKey, viewOnl }) } > + {toolLabel} ))} diff --git a/web/oss/src/components/Playground/Components/SharedEditor/index.tsx b/web/oss/src/components/Playground/Components/SharedEditor/index.tsx index 4d29516845..6fc50a6021 100644 --- a/web/oss/src/components/Playground/Components/SharedEditor/index.tsx +++ b/web/oss/src/components/Playground/Components/SharedEditor/index.tsx @@ -35,6 +35,7 @@ const SharedEditor = ({ syncWithInitialValueChanges = false, disableDebounce = false, antdInputProps, + onPropertyClick, ...props }: SharedEditorProps) => { const normalizedInitialValue = initialValue ?? "" @@ -163,6 +164,7 @@ const SharedEditor = ({ id={editorId} noProvider={noProvider} {...editorProps} + onPropertyClick={onPropertyClick} /> )} diff --git a/web/oss/src/components/Playground/Components/SharedEditor/types.d.ts b/web/oss/src/components/Playground/Components/SharedEditor/types.d.ts index 0753c94468..70739db7ea 100644 --- a/web/oss/src/components/Playground/Components/SharedEditor/types.d.ts +++ b/web/oss/src/components/Playground/Components/SharedEditor/types.d.ts @@ -35,4 +35,6 @@ export interface SharedEditorProps extends BaseContainerProps { syncWithInitialValueChanges?: boolean /** Disable debouncing for immediate updates (useful for undo/redo with history tracking) */ disableDebounce?: boolean + /** Callback when a JSON property key is Cmd/Meta+clicked (for drill-in navigation) */ + onPropertyClick?: (path: string) => void } diff --git a/web/oss/src/components/Playground/hooks/chat/useHasAssistantContent.ts b/web/oss/src/components/Playground/hooks/chat/useHasAssistantContent.ts index dc257d30dc..532f5e88ac 100644 --- a/web/oss/src/components/Playground/hooks/chat/useHasAssistantContent.ts +++ b/web/oss/src/components/Playground/hooks/chat/useHasAssistantContent.ts @@ -15,9 +15,9 @@ const useHasAssistantContent = ( const txt = (displayAssistantValue || "").trim() const hasTools = Boolean( (assistant as any)?.function_call || - (assistant as any)?.tool_call || - (Array.isArray((assistant as any)?.tool_calls) && - (((assistant as any)?.tool_calls as any[])?.length || 0) > 0), + (assistant as any)?.tool_call || + (Array.isArray((assistant as any)?.tool_calls) && + (((assistant as any)?.tool_calls as any[])?.length || 0) > 0), ) return Boolean(txt) || hasTools || Boolean(hasToolCallsOverride) diff --git a/web/oss/src/components/Playground/hooks/usePlayground/types.d.ts 
b/web/oss/src/components/Playground/hooks/usePlayground/types.d.ts index 3c775f2b8a..41cef71512 100644 --- a/web/oss/src/components/Playground/hooks/usePlayground/types.d.ts +++ b/web/oss/src/components/Playground/hooks/usePlayground/types.d.ts @@ -22,10 +22,8 @@ export interface ChatMessage { } /** Base hook configuration types */ -interface BaseHookConfig extends Omit< - SWRConfiguration, - "compare" | "fetcher" -> { +interface BaseHookConfig + extends Omit, "compare" | "fetcher"> { hookId?: string projectId?: string cache?: Map @@ -55,17 +53,16 @@ export interface PlaygroundStateData extends InitialStateType { // Playground specific config export interface PlaygroundSWRConfig - extends BaseHookConfig, SelectorConfig { + extends BaseHookConfig, + SelectorConfig { variantId?: string propertyId?: string skipBackgroundLoading?: boolean } // Each middleware extends this to add its own properties -export interface PlaygroundResponse< - T = PlaygroundStateData, - Selected = unknown, -> extends SWRResponse { +export interface PlaygroundResponse + extends SWRResponse { isDirty?: boolean mutate: CustomKeyedMutator selectedData?: Selected @@ -94,9 +91,8 @@ export type VariantUpdateFunction = ) => Partial | undefined // Single variant middleware extensions -export interface PlaygroundVariantResponse< - _T extends PlaygroundStateData = PlaygroundStateData, -> extends PlaygroundVariantsResponse { +export interface PlaygroundVariantResponse<_T extends PlaygroundStateData = PlaygroundStateData> + extends PlaygroundVariantsResponse { variant?: EnhancedVariant displayedVariants?: string[] deleteVariant?: () => Promise @@ -208,10 +204,8 @@ export interface EnhancedProperty { export type ViewType = "single" | "comparison" -export interface UIState< - Data extends PlaygroundStateData = PlaygroundStateData, - Selected = unknown, -> extends PlaygroundResponse { +export interface UIState + extends PlaygroundResponse { displayedVariants?: string[] viewType?: ViewType setSelectedVariant?: (variantId: string) => void diff --git a/web/oss/src/components/References/ReferenceLabels.tsx b/web/oss/src/components/References/ReferenceLabels.tsx index 6603118cba..1fdc442eae 100644 --- a/web/oss/src/components/References/ReferenceLabels.tsx +++ b/web/oss/src/components/References/ReferenceLabels.tsx @@ -4,10 +4,7 @@ import {Skeleton, Typography} from "antd" import clsx from "clsx" import {useAtomValue} from "jotai" -import { - latestRevisionForTestsetAtomFamily, - revisionEntityAtomFamily, -} from "@/oss/state/entities/testset" +import {latestRevisionForTestsetAtomFamily, revision} from "@/oss/state/entities/testset" import { appReferenceAtomFamily, @@ -17,6 +14,7 @@ import { queryReferenceAtomFamily, variantConfigAtomFamily, } from "./atoms/entityReferences" +import type {ReferenceTone} from "./referenceColors" import ReferenceTag from "./ReferenceTag" const {Text} = Typography @@ -32,12 +30,16 @@ export const TestsetTag = memo( revisionId, projectId, projectURL, + toneOverride, + showIconOverride, openExternally = false, }: { testsetId: string revisionId?: string | null projectId: string | null projectURL?: string | null + toneOverride?: ReferenceTone | null + showIconOverride?: boolean openExternally?: boolean }) => { const queryAtom = useMemo( @@ -47,7 +49,11 @@ export const TestsetTag = memo( const query = useAtomValue(queryAtom) // Fetch revision entity to get version number (must be called before any early returns) - const revisionEntity = useAtomValue(revisionEntityAtomFamily(revisionId ?? 
"")) + const revisionDataAtom = useMemo( + () => revision.selectors.data(revisionId ?? ""), + [revisionId], + ) + const revisionEntity = useAtomValue(revisionDataAtom) const revisionVersion = revisionId ? revisionEntity?.version : null // Get latest revision for testset (used when revisionId is not provided) @@ -84,7 +90,8 @@ export const TestsetTag = memo( tooltip={isDeleted ? `Testset ${testsetId} was deleted` : label} copyValue={testsetId} className="max-w-[220px] w-fit" - tone="testset" + tone={toneOverride === null ? undefined : (toneOverride ?? "testset")} + showIcon={showIconOverride ?? true} openExternally={openExternally} /> ) @@ -147,7 +154,7 @@ export const EnvironmentReferenceLabel = memo( const ref = query.data const isDeleted = Boolean( query.isError || - ((environmentId || environmentSlug) && !ref?.name && !ref?.slug && !ref?.id), + ((environmentId || environmentSlug) && !ref?.name && !ref?.slug && !ref?.id), ) const label = isDeleted ? "Deleted" @@ -199,6 +206,8 @@ export const TestsetTagList = memo( projectId, projectURL, className, + toneOverride, + showIconOverride, openExternally = false, }: { ids: string[] @@ -206,6 +215,8 @@ export const TestsetTagList = memo( projectId: string | null projectURL?: string | null className?: string + toneOverride?: ReferenceTone | null + showIconOverride?: boolean openExternally?: boolean }) => { if (!ids.length) { @@ -221,6 +232,8 @@ export const TestsetTagList = memo( revisionId={revisionMap?.get(id)} projectId={projectId} projectURL={projectURL} + toneOverride={toneOverride} + showIconOverride={showIconOverride} openExternally={openExternally} /> ))} @@ -241,6 +254,8 @@ export const ApplicationReferenceLabel = memo( href: explicitHref, openExternally = false, label: customLabel, + toneOverride, + showIconOverride, }: { applicationId: string | null projectId: string | null @@ -248,6 +263,8 @@ export const ApplicationReferenceLabel = memo( href?: string | null openExternally?: boolean label?: string + toneOverride?: ReferenceTone | null + showIconOverride?: boolean }) => { const queryAtom = useMemo( () => appReferenceAtomFamily({projectId, appId: applicationId}), @@ -287,7 +304,8 @@ export const ApplicationReferenceLabel = memo( tooltip={isDeleted ? `Application ${applicationId} was deleted` : label} copyValue={applicationId ?? undefined} className="max-w-[220px] w-fit" - tone="app" + tone={toneOverride === null ? undefined : (toneOverride ?? "app")} + showIcon={showIconOverride ?? true} openExternally={openExternally} /> ) @@ -309,6 +327,8 @@ export const VariantReferenceLabel = memo( href: explicitHref, openExternally = false, label: customLabel, + toneOverride, + showIconOverride, }: { revisionId?: string | null projectId: string | null @@ -318,6 +338,8 @@ export const VariantReferenceLabel = memo( href?: string | null openExternally?: boolean label?: string + toneOverride?: ReferenceTone | null + showIconOverride?: boolean }) => { const queryAtom = useMemo( () => variantConfigAtomFamily({projectId, revisionId}), @@ -361,7 +383,8 @@ export const VariantReferenceLabel = memo( tooltip={isDeleted ? `Variant ${revisionId} was deleted` : label} copyValue={revisionId ?? undefined} className="max-w-[220px]" - tone="variant" + tone={toneOverride === null ? undefined : (toneOverride ?? "variant")} + showIcon={showIconOverride ?? true} openExternally={openExternally} /> {showVersionPill && resolvedVersion ? 
( @@ -387,6 +410,8 @@ export const VariantRevisionLabel = memo( fallbackVariantName, fallbackRevision, href: explicitHref, + toneOverride, + showIconOverride, }: { variantId?: string | null revisionId?: string | null @@ -394,6 +419,8 @@ export const VariantRevisionLabel = memo( fallbackVariantName?: string | null fallbackRevision?: number | string | null href?: string | null + toneOverride?: ReferenceTone | null + showIconOverride?: boolean }) => { // Fetch variant config using revisionId to get revision number const configQueryAtom = useMemo( @@ -443,7 +470,8 @@ export const VariantRevisionLabel = memo( tooltip={isDeleted ? `Variant ${revisionId ?? variantId} was deleted` : label} copyValue={revisionId ?? variantId ?? undefined} className="max-w-[220px]" - tone="variant" + tone={toneOverride === null ? undefined : (toneOverride ?? "variant")} + showIcon={showIconOverride ?? true} /> ) }, @@ -502,6 +530,8 @@ export const EvaluatorReferenceLabel = memo( href: explicitHref, openExternally = false, label: customLabel, + toneOverride, + className, }: { evaluatorId?: string | null evaluatorSlug?: string | null @@ -509,6 +539,8 @@ export const EvaluatorReferenceLabel = memo( href?: string | null openExternally?: boolean label?: string + toneOverride?: ReferenceTone | null + className?: string }) => { const queryAtom = useMemo( () => evaluatorReferenceAtomFamily({projectId, slug: evaluatorSlug, id: evaluatorId}), @@ -522,7 +554,7 @@ export const EvaluatorReferenceLabel = memo( ) } @@ -555,8 +587,8 @@ export const EvaluatorReferenceLabel = memo( href={href ?? undefined} tooltip={isDeleted ? `Evaluator ${displayId} was deleted` : label} copyValue={displayId} - className="max-w-[220px] w-fit" - tone="evaluator" + className={clsx("max-w-[220px] w-fit", className)} + tone={toneOverride === null ? undefined : (toneOverride ?? "evaluator")} openExternally={openExternally} /> ) diff --git a/web/oss/src/components/References/ReferenceTag.tsx b/web/oss/src/components/References/ReferenceTag.tsx index 4de9929967..0025ae0992 100644 --- a/web/oss/src/components/References/ReferenceTag.tsx +++ b/web/oss/src/components/References/ReferenceTag.tsx @@ -76,7 +76,7 @@ const ReferenceTag = ({ aria-label="Open link" size={14} className="transition-transform duration-200 group-hover:translate-x-0.5 group-hover:-translate-y-0.5 cursor-pointer" - style={{color: toneColors?.text ?? "#2563eb"}} + style={{color: toneColors?.text ?? 
"currentColor"}} onClick={(e) => { e.preventDefault() e.stopPropagation() diff --git a/web/oss/src/components/References/cells/CreatedByCells.tsx b/web/oss/src/components/References/cells/CreatedByCells.tsx index 8c30252691..22b95ea805 100644 --- a/web/oss/src/components/References/cells/CreatedByCells.tsx +++ b/web/oss/src/components/References/cells/CreatedByCells.tsx @@ -130,9 +130,9 @@ const PreviewCreatedByCellContent = ({ const isCurrentUser = Boolean( currentUser && - ((currentUser.id && candidateIds.includes(currentUser.id)) || - (currentUsername && candidateNames.includes(currentUsername)) || - (currentEmail && candidateNames.includes(currentEmail))), + ((currentUser.id && candidateIds.includes(currentUser.id)) || + (currentUsername && candidateNames.includes(currentUsername)) || + (currentEmail && candidateNames.includes(currentEmail))), ) if (!createdBy) { diff --git a/web/oss/src/components/References/cells/TestsetCells.tsx b/web/oss/src/components/References/cells/TestsetCells.tsx index 88f27dc913..51ff1375b5 100644 --- a/web/oss/src/components/References/cells/TestsetCells.tsx +++ b/web/oss/src/components/References/cells/TestsetCells.tsx @@ -1,3 +1,5 @@ +import {useMemo} from "react" + import {Tag} from "antd" import {useAtomValue} from "jotai" @@ -9,7 +11,7 @@ import type {EvaluationRunTableRow} from "@/oss/components/EvaluationRunsTablePO import type {ReferenceColumnDescriptor} from "@/oss/components/EvaluationRunsTablePOC/utils/referenceSchema" import {getSlotByRoleOrdinal} from "@/oss/components/EvaluationRunsTablePOC/utils/referenceSchema" import SkeletonLine from "@/oss/components/InfiniteVirtualTable/components/common/SkeletonLine" -import {revisionEntityAtomFamily} from "@/oss/state/entities/testset" +import {revision} from "@/oss/state/entities/testset" import usePreviewTestsetReference from "../hooks/usePreviewTestsetReference" @@ -57,7 +59,8 @@ const PreviewTestsetCellContent = ({ // Fetch revision entity if we have a revisionId const revisionId = reference?.revisionId ?? null - const revisionEntity = useAtomValue(revisionEntityAtomFamily(revisionId ?? "")) + const revisionDataAtom = useMemo(() => revision.selectors.data(revisionId ?? ""), [revisionId]) + const revisionEntity = useAtomValue(revisionDataAtom) const revisionVersion = revisionId ? 
revisionEntity?.version : null const primaryName = normalize(reference?.name) diff --git a/web/oss/src/components/References/hooks/useAppReference.ts b/web/oss/src/components/References/hooks/useAppReference.ts index a34af55bee..ea3f4edd1c 100644 --- a/web/oss/src/components/References/hooks/useAppReference.ts +++ b/web/oss/src/components/References/hooks/useAppReference.ts @@ -56,10 +56,10 @@ export const useAppReference = ( const hasReference = Boolean(reference) const isLoading = Boolean( enabled && - projectId && - appId && - !hasReference && - (query?.isLoading || query?.isFetching || query?.isPending), + projectId && + appId && + !hasReference && + (query?.isLoading || query?.isFetching || query?.isPending), ) return {reference, isLoading} diff --git a/web/oss/src/components/References/hooks/useEvaluatorReference.ts b/web/oss/src/components/References/hooks/useEvaluatorReference.ts index 5f56830f69..5f34e67fd4 100644 --- a/web/oss/src/components/References/hooks/useEvaluatorReference.ts +++ b/web/oss/src/components/References/hooks/useEvaluatorReference.ts @@ -65,10 +65,10 @@ const useEvaluatorReference = ( const isLoading = Boolean( enabled && - projectId && - (evaluatorSlug || evaluatorId) && - !reference && - (queryResult?.isLoading || queryResult?.isFetching || queryResult?.isPending), + projectId && + (evaluatorSlug || evaluatorId) && + !reference && + (queryResult?.isLoading || queryResult?.isFetching || queryResult?.isPending), ) return {reference, isLoading} diff --git a/web/oss/src/components/References/hooks/usePreviewTestsetReference.ts b/web/oss/src/components/References/hooks/usePreviewTestsetReference.ts index bc89410b1f..1f5fa80277 100644 --- a/web/oss/src/components/References/hooks/usePreviewTestsetReference.ts +++ b/web/oss/src/components/References/hooks/usePreviewTestsetReference.ts @@ -121,10 +121,10 @@ export const usePreviewTestsetReference = ( const hasReference = Boolean(reference) const isLoading = Boolean( enabled && - effectiveProjectId && - testsetId && - !hasReference && - (query?.isLoading || query?.isFetching || query?.isPending), + effectiveProjectId && + testsetId && + !hasReference && + (query?.isLoading || query?.isFetching || query?.isPending), ) return {reference, isLoading} diff --git a/web/oss/src/components/References/hooks/usePreviewVariantConfig.ts b/web/oss/src/components/References/hooks/usePreviewVariantConfig.ts index 96fc572e9f..9f7527c0ef 100644 --- a/web/oss/src/components/References/hooks/usePreviewVariantConfig.ts +++ b/web/oss/src/components/References/hooks/usePreviewVariantConfig.ts @@ -69,10 +69,10 @@ const usePreviewVariantConfig = ( const isLoading = Boolean( enabled && - effectiveProjectId && - revisionId && - !hasConfig && - (query?.isLoading || query?.isFetching || query?.isPending), + effectiveProjectId && + revisionId && + !hasConfig && + (query?.isLoading || query?.isFetching || query?.isPending), ) return {config, isLoading} diff --git a/web/oss/src/components/ResultTag/ResultTag.tsx b/web/oss/src/components/ResultTag/ResultTag.tsx index 0819500f81..dc928daf05 100644 --- a/web/oss/src/components/ResultTag/ResultTag.tsx +++ b/web/oss/src/components/ResultTag/ResultTag.tsx @@ -6,46 +6,54 @@ import clsx from "clsx" import {useStyles} from "./assets/styles" import type {ResultTagProps} from "./types" -const ResultTag = memo(({value1, value2, className, popoverContent, ...props}: ResultTagProps) => { - const classes = useStyles() - - const content = - value2 !== undefined ? ( - <> - {value1} - - {value2} - - - ) : ( -
- {value1} -
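The rewrite that follows replaces `ResultTag`'s boolean `bordered` prop with a `variant` prop, presumably tracking antd's move away from `bordered`. Legacy callers that pass `bordered={false}` keep working because the component derives a single resolved value; a minimal sketch of that mapping (the variant names are taken from the diff, the standalone helper is illustrative):

```typescript
type TagVariant = "outlined" | "filled"

// variant wins when given; otherwise bordered={false} maps to "filled",
// and everything else falls through to the library default.
const resolveVariant = (variant?: TagVariant, bordered?: boolean): TagVariant | undefined =>
    variant ?? (bordered === false ? "filled" : undefined)

resolveVariant(undefined, false)  // "filled"   — legacy caller keeps its look
resolveVariant("outlined", false) // "outlined" — explicit variant wins
resolveVariant(undefined, true)   // undefined  — library default applies
```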
+const ResultTag = memo( + ({value1, value2, className, popoverContent, bordered, variant, ...props}: ResultTagProps) => { + const classes = useStyles() + + const resolvedVariant = variant ?? (bordered === false ? "filled" : undefined) + + const content = + value2 !== undefined ? ( + <> + {value1} + + {value2} + + + ) : ( +
+ {value1} +
+ ) + + const tag = ( + + {content} + ) - const tag = ( - - {content} - - ) - - return popoverContent ? ( - - {tag} - - ) : ( - tag - ) -}) + return popoverContent ? ( + + {tag} + + ) : ( + tag + ) + }, +) export default ResultTag diff --git a/web/oss/src/components/SelectLLMProvider/index.tsx b/web/oss/src/components/SelectLLMProvider/index.tsx index fea0e50330..57dd30c710 100644 --- a/web/oss/src/components/SelectLLMProvider/index.tsx +++ b/web/oss/src/components/SelectLLMProvider/index.tsx @@ -1,13 +1,13 @@ import {useMemo, useRef, useState} from "react" import {CaretRight, Plus, X} from "@phosphor-icons/react" -import {Select, Input, Button, Divider, InputRef, Popover} from "antd" +import {Button, Divider, Input, InputRef, Popover, Select, Tooltip, Typography} from "antd" import clsx from "clsx" import useLazyEffect from "@/oss/hooks/useLazyEffect" import {useVaultSecret} from "@/oss/hooks/useVaultSecret" import {capitalize} from "@/oss/lib/helpers/utils" -import {SecretDTOProvider, PROVIDER_LABELS} from "@/oss/lib/Types" +import {PROVIDER_LABELS, SecretDTOProvider} from "@/oss/lib/Types" import LLMIcons from "../LLMIcons" import Anthropic from "../LLMIcons/assets/Anthropic" @@ -25,6 +25,7 @@ interface ProviderOption { label: string value: string key?: string + metadata?: Record } interface ProviderGroup { @@ -169,6 +170,7 @@ const SelectLLMProvider = ({ label: resolvedLabel, value: resolvedValue, key: option?.key ?? resolvedValue, + metadata: option?.metadata, } }) .filter(Boolean) as ProviderOption[]) ?? [], @@ -208,6 +210,68 @@ const SelectLLMProvider = ({ setTimeout(() => setOpen(false), 0) } + const formatCost = (cost: number) => { + const value = Number(cost) + if (isNaN(value)) return "N/A" + return value < 0.01 ? value.toFixed(4) : value.toFixed(2) + } + + const renderTooltipContent = (metadata: Record) => ( +
+ {(metadata.input !== undefined || metadata.output !== undefined) && ( + <> +
+ + Input: + + + ${formatCost(metadata.input)} / 1M + +
+
+ + Output:{" "} + + + ${formatCost(metadata.output)} / 1M + +
+ + )} +
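`formatCost` (defined earlier in this hunk) and the conditional just above keep the pricing tooltip both safe and legible: the block renders only when the option actually carries `input`/`output` pricing metadata, and sub-cent prices get four decimals so they do not all collapse to "$0.00". A quick sanity check of the formatting rule, with illustrative values:

```typescript
const formatCost = (cost: number) => {
    const value = Number(cost)
    if (isNaN(value)) return "N/A"
    // Four decimals below one cent, two above.
    return value < 0.01 ? value.toFixed(4) : value.toFixed(2)
}

formatCost(0.0004) // "0.0004" → rendered as "$0.0004 / 1M"
formatCost(2.5)    // "2.50"   → rendered as "$2.50 / 1M"
formatCost(NaN)    // "N/A"
```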
+ ) + + const renderOptionContent = (option: ProviderOption) => { + const Icon = getProviderIcon(option.value) || LLMIcons[option.label] + return ( +
+
+ {Icon && } + {option.label} +
+
+ ) + } + + const renderOption = (option: ProviderOption) => { + const content = renderOptionContent(option) + + if (option.metadata) { + return ( + + {content} + + ) + } + + return content + } + return ( <> + + + + + { + setRenameModalOpen(false) + setOrgToRename(null) + renameForm.resetFields() + }} + onOk={() => renameForm.submit()} + confirmLoading={renameMutation.isPending} + destroyOnHidden + centered + > +
{ + if (!orgToRename) return + renameMutation.mutate({ + organizationId: orgToRename, + name: values.name, + }) + }} + > + + + + +
+ + { + setTransferModalOpen(false) + setOrgToTransfer(null) + setNewOwnerId(null) + }} + onOk={() => { + console.log("🎯 Transfer modal OK clicked:", { + orgToTransfer, + newOwnerId, + newOwnerIdType: typeof newOwnerId, + }) + if (!orgToTransfer || !newOwnerId) { + console.warn("⚠️ Missing orgToTransfer or newOwnerId") + return + } + transferMutation.mutate({ + organizationId: orgToTransfer, + newOwnerId, + }) + }} + confirmLoading={transferMutation.isPending} + destroyOnHidden + centered + > +
+ + setNewPropertyName(e.target.value)} - placeholder="Property name" - size="middle" - className="flex-1" - autoFocus - onKeyDown={(e) => { - if (e.key === "Enter" && newPropertyName.trim()) { - addObjectProperty( - newPropertyName.trim(), - newPropertyType, - ) - setNewPropertyName("") - setNewPropertyType("string") - setShowAddProperty(false) - } else if (e.key === "Escape") { - setNewPropertyName("") - setNewPropertyType("string") - setShowAddProperty(false) - } - }} - /> - ({ - value: idx, - label: `Item ${idx + 1}: ${ - typeof arrItem === - "string" - ? arrItem.substring( - 0, - 50, - ) + - (arrItem.length > - 50 - ? "..." - : "") - : typeof arrItem === - "object" - ? JSON.stringify( - arrItem, - ).substring( - 0, - 50, - ) + "..." - : String( - arrItem, - ) - }`, - }), - )} - onSelect={(idx: number) => { - // Navigate into the field first, then into the array index - setCurrentPath([ - ...fullPath, - String(idx), - ]) - }} - dropdownRender={(menu) => ( -
-
- Click an item to - drill in -
- {menu} -
- )} - /> -
- {arrayItems.length} items • Use - "Drill In" or select - an item above to edit -
-
- ) - })() - ) : dataType === "messages" ? ( - - updateValueAtPath( - fullPath, - JSON.stringify(messages), - ) - } - showControls={isMessagesArray(item.value)} - /> - ) : dataType === "json-object" ? ( - - updateValueAtPath(fullPath, value) - } - /> - ) : ( - (() => { - const editorId = `drill-field-${fullPath.join("-")}` - const textValue = getTextModeValue( - item.value, - ) - return ( - - { - const storageValue = - textModeToStorageValue( - newValue, - item.value, - ) - updateValueAtPath( - fullPath, - storageValue, - ) - }} - placeholder={`Enter ${item.name}...`} - editorType="border" - className="overflow-hidden" - disableDebounce - noProvider - header={ - - } - /> - - ) - })() - )} -
- )} -
- ) - })} -
-
- ) : ( - // JSON mode - single JSON editor using derived value from formValues -
- -
- )} + ) : null + } + />
) diff --git a/web/oss/src/components/TestcasesTableNew/components/TestcaseEditDrawerContent.tsx b/web/oss/src/components/TestcasesTableNew/components/TestcaseEditDrawerContent.tsx index 351db11cd5..e92fb42f46 100644 --- a/web/oss/src/components/TestcasesTableNew/components/TestcaseEditDrawerContent.tsx +++ b/web/oss/src/components/TestcasesTableNew/components/TestcaseEditDrawerContent.tsx @@ -2,4 +2,4 @@ * Re-export TestcaseEditDrawerContent from the new modular location * This file is kept for backwards compatibility */ -export {default, type TestcaseEditDrawerContentRef} from "./TestcaseEditDrawer" +export {default, type TestcaseEditDrawerContentRef} from "./TestcaseEditDrawer/index" diff --git a/web/oss/src/components/TestcasesTableNew/components/TestcaseHeader.tsx b/web/oss/src/components/TestcasesTableNew/components/TestcaseHeader.tsx index 8b5b8ace7b..95022e5ea2 100644 --- a/web/oss/src/components/TestcasesTableNew/components/TestcaseHeader.tsx +++ b/web/oss/src/components/TestcasesTableNew/components/TestcaseHeader.tsx @@ -1,12 +1,15 @@ -import {useMemo} from "react" +import {useEffect, useMemo, useState, type CSSProperties} from "react" import {DownOutlined, MoreOutlined} from "@ant-design/icons" -import {Link, PencilSimple, Trash} from "@phosphor-icons/react" -import {Button, Dropdown, Popover, Tag, Tooltip, Typography} from "antd" +import {Export, Link, PencilSimple, Trash} from "@phosphor-icons/react" +import {Button, Dropdown, Popover, Space, Typography} from "antd" +import {useSetAtom} from "jotai" import {useRouter} from "next/router" import {TableDescription} from "@/oss/components/InfiniteVirtualTable" import {UserReference} from "@/oss/components/References/UserReference" +import type {ExportFileType} from "@/oss/services/testsets/api" +import {enableRevisionsListQueryAtom} from "@/oss/state/entities/testset" import type {RevisionListItem, TestsetMetadata} from "../hooks/types" @@ -22,10 +25,17 @@ export interface TestcaseHeaderProps { availableRevisions: RevisionListItem[] loadingRevisions: boolean isIdCopied: boolean + isRevisionSlugCopied: boolean revisionIdParam: string | undefined + /** Whether this is a new testset (not yet saved) - disables server-dependent features */ + isNewTestset?: boolean + /** Whether an export is currently in progress */ + isExporting?: boolean onCopyId: () => void + onCopyRevisionSlug: () => void onOpenRenameModal: () => void onDeleteRevision: () => void + onExport: (fileType: ExportFileType) => void projectURL: string } @@ -40,6 +50,24 @@ export interface TestcaseHeaderProps { * * @component */ +type CopyAction = "copy-id" | "copy-revision-slug" + +const COPY_ACTION_STORAGE_KEY = "testcase-header-last-copy-action" + +const dropdownTriggerStyle: CSSProperties = { + boxSizing: "border-box", + borderWidth: 1, + borderStyle: "solid", + borderColor: "var(--ant-color-border)", + borderInlineStartWidth: 0, + borderStartStartRadius: 0, + borderEndStartRadius: 0, + borderStartEndRadius: 6, + borderEndEndRadius: 6, + paddingInline: 8, + paddingBlock: 4, +} + export function TestcaseHeader(props: TestcaseHeaderProps) { const { testsetName, @@ -48,29 +76,109 @@ export function TestcaseHeader(props: TestcaseHeaderProps) { availableRevisions, loadingRevisions, isIdCopied, + isRevisionSlugCopied, + isNewTestset = false, + isExporting = false, onCopyId, + onCopyRevisionSlug, onOpenRenameModal, onDeleteRevision, + onExport, projectURL, } = props const router = useRouter() + const enableRevisionsListQuery = useSetAtom(enableRevisionsListQueryAtom) + + // 
Remember last selected copy action + const [lastCopyAction, setLastCopyAction] = useState("copy-id") + + // Track whether revisions have been requested (to distinguish "not loaded" from "loaded but empty") + const [revisionsRequested, setRevisionsRequested] = useState(false) + + // Enable revisions list query when dropdown is opened + const handleRevisionDropdownOpenChange = (open: boolean) => { + if (open && metadata?.testsetId && !revisionsRequested) { + enableRevisionsListQuery(metadata.testsetId) + setRevisionsRequested(true) + } + } + + // Enable revisions list query when actions dropdown is opened (needed for delete/redirect) + const handleActionsDropdownOpenChange = (open: boolean) => { + if (open && metadata?.testsetId && !revisionsRequested) { + enableRevisionsListQuery(metadata.testsetId) + setRevisionsRequested(true) + } + } + + // Load last copy action from localStorage + useEffect(() => { + const saved = localStorage.getItem(COPY_ACTION_STORAGE_KEY) as CopyAction | null + if (saved === "copy-id" || saved === "copy-revision-slug") { + setLastCopyAction(saved) + } + }, []) // Revision dropdown menu items - const revisionMenuItems = useMemo( - () => - buildRevisionMenuItems(availableRevisions, (revisionId) => - router.push(`${projectURL}/testsets/${revisionId}`, undefined, { - shallow: true, - }), - ) ?? [], - [availableRevisions, router, projectURL], - ) + const revisionMenuItems = useMemo(() => { + // If revisions haven't been requested yet, show a placeholder to keep dropdown enabled + if (!revisionsRequested && availableRevisions.length === 0) { + return [ + { + key: "loading-placeholder", + label: "Loading revisions...", + disabled: true, + }, + ] + } + + // If requested but still loading, show loading indicator + if (loadingRevisions && availableRevisions.length === 0) { + return [ + { + key: "loading", + label: "Loading...", + disabled: true, + }, + ] + } + + // Build menu items from available revisions + const items = buildRevisionMenuItems(availableRevisions, (revisionId) => + router.push(`${projectURL}/testsets/${revisionId}`, undefined, { + shallow: true, + }), + ) + + // If requested, loaded, but no revisions found, show empty state + if (revisionsRequested && !loadingRevisions && (!items || items.length === 0)) { + return [ + { + key: "no-revisions", + label: "No revisions found", + disabled: true, + }, + ] + } + + return items ?? [] + }, [availableRevisions, router, projectURL, revisionsRequested, loadingRevisions]) // Check if this is the only revision (disable delete if so) // v0 is not a valid revision, so we filter it out when counting const validRevisions = availableRevisions.filter((r) => r.version > 0) - const isOnlyRevision = validRevisions.length <= 1 + // Disable delete if: revisions not loaded yet, still loading, or only one revision + const isDeleteDisabled = !revisionsRequested || loadingRevisions || validRevisions.length <= 1 + + // Tooltip explaining why delete is disabled + const deleteDisabledReason = !revisionsRequested + ? "Loading revisions..." + : loadingRevisions + ? "Loading revisions..." + : validRevisions.length <= 1 + ? "Cannot delete the only revision" + : undefined // Header actions dropdown menu items const headerActionsMenuItems = useMemo( @@ -81,20 +189,100 @@ export function TestcaseHeader(props: TestcaseHeaderProps) { icon: , onClick: onOpenRenameModal, }, + { + type: "divider" as const, + }, + { + key: "export-csv", + label: isExporting ? "Exporting..." 
: "Export as CSV", + icon: , + onClick: () => onExport("csv"), + disabled: isExporting, + }, + { + key: "export-json", + label: isExporting ? "Exporting..." : "Export as JSON", + icon: , + onClick: () => onExport("json"), + disabled: isExporting, + }, + { + type: "divider" as const, + }, { key: "delete-revision", - label: "Delete revision", + label: loadingRevisions ? "Delete revision..." : "Delete revision", icon: , danger: true, - disabled: isOnlyRevision, + disabled: isDeleteDisabled, + title: deleteDisabledReason, onClick: onDeleteRevision, }, ], - [onOpenRenameModal, onDeleteRevision, isOnlyRevision], + [ + onOpenRenameModal, + onDeleteRevision, + isDeleteDisabled, + deleteDisabledReason, + onExport, + loadingRevisions, + isExporting, + ], ) - // Tooltip for ID copy - const tooltipTitle = isIdCopied ? "Copied!" : "Click to copy ID" + // Handler to execute copy action and remember it + const handleCopyAction = useMemo( + () => ({ + "copy-id": () => { + onCopyId() + setLastCopyAction("copy-id") + localStorage.setItem(COPY_ACTION_STORAGE_KEY, "copy-id") + }, + "copy-revision-slug": () => { + onCopyRevisionSlug() + setLastCopyAction("copy-revision-slug") + localStorage.setItem(COPY_ACTION_STORAGE_KEY, "copy-revision-slug") + }, + }), + [onCopyId, onCopyRevisionSlug], + ) + + // Copy dropdown menu items + const copyMenuItems = useMemo( + () => [ + { + key: "copy-id", + label: isIdCopied ? "Copied!" : "Copy ID", + onClick: handleCopyAction["copy-id"], + }, + { + key: "copy-revision-slug", + label: isRevisionSlugCopied ? "Copied!" : "Copy Revision Slug", + onClick: handleCopyAction["copy-revision-slug"], + disabled: !metadata?.revisionSlug, + }, + ], + [isIdCopied, isRevisionSlugCopied, handleCopyAction, metadata?.revisionSlug], + ) + + // Main button click executes last selected action + const handleMainButtonClick = () => { + // If last action was revision slug but it's not available, default to copy ID + if (lastCopyAction === "copy-revision-slug" && !metadata?.revisionSlug) { + handleCopyAction["copy-id"]() + } else { + handleCopyAction[lastCopyAction]() + } + } + + // Get label for main button based on last action + const mainButtonLabel = useMemo(() => { + // If last action was revision slug but it's not available, show ID + if (lastCopyAction === "copy-revision-slug" && metadata?.revisionSlug) { + return isRevisionSlugCopied ? "Copied!" : "Slug" + } + return isIdCopied ? "Copied!" : "ID" + }, [lastCopyAction, isIdCopied, isRevisionSlugCopied, metadata?.revisionSlug]) return (
@@ -108,74 +296,118 @@ export function TestcaseHeader(props: TestcaseHeaderProps) { style: {maxHeight: 400, overflowY: "auto"}, }} trigger={["click"]} - disabled={loadingRevisions || revisionMenuItems.length === 0} + onOpenChange={handleRevisionDropdownOpenChange} > - - + + +
{menu}
} + > + + + +
+
+ +
- - {metadata?.commitMessage && ( -
- - Commit Message - - {metadata.commitMessage} -
- )} - {metadata?.author && ( -
- - Author - - -
- )} - {metadata?.createdAt && ( -
- - Created - - - {new Date(metadata.createdAt).toLocaleString()} - -
- )} - {metadata?.updatedAt && ( -
- - Updated - - - {new Date(metadata.updatedAt).toLocaleString()} - -
- )} -
- } - > - - - {description || - "Specify column names similar to the Input parameters. A column with 'correct_answer' name will be treated as a ground truth column."} - - - + {/* Metadata popover - disabled for new testsets since server data doesn't exist yet */} + {isNewTestset ? ( + + {description || + "Specify column names similar to the Input parameters. A column with 'correct_answer' name will be treated as a ground truth column."} + + ) : ( + + {metadata?.testsetSlug && ( +
+ + Testset Slug + + {metadata.testsetSlug} +
+ )} + {metadata?.revisionSlug && ( +
+ + Revision Slug + + {metadata.revisionSlug} +
+ )} + {metadata?.commitMessage && ( +
+ + Commit Message + + {metadata.commitMessage} +
+ )} + {metadata?.author && ( +
+ + Author + + +
+ )} + {metadata?.createdAt && ( +
+ + Created + + + {new Date(metadata.createdAt).toLocaleString()} + +
+ )} + {metadata?.updatedAt && ( +
+ + Updated + + + {new Date(metadata.updatedAt).toLocaleString()} + +
+ )} +
+ } + > + + + {description || + "Specify column names similar to the Input parameters. A column with 'correct_answer' name will be treated as a ground truth column."} + + + + )}
) } diff --git a/web/oss/src/components/TestcasesTableNew/components/TestcaseModals.tsx b/web/oss/src/components/TestcasesTableNew/components/TestcaseModals.tsx index 8e20f381cc..0f2317ce84 100644 --- a/web/oss/src/components/TestcasesTableNew/components/TestcaseModals.tsx +++ b/web/oss/src/components/TestcasesTableNew/components/TestcaseModals.tsx @@ -1,4 +1,4 @@ -import {useState} from "react" +import {useEffect, useState} from "react" import {Input, Modal, Typography} from "antd" @@ -68,11 +68,13 @@ export function TestcaseModals(props: TestcaseModalsProps) { // Local state for add column modal const [newColumnName, setNewColumnName] = useState("") - // Sync initial values when modal opens - if (isRenameModalOpen && editModalName !== initialTestsetName) { - setEditModalName(initialTestsetName) - setEditModalDescription(initialDescription) - } + // Sync initial values when modal opens (only on open, not on every render) + useEffect(() => { + if (isRenameModalOpen) { + setEditModalName(initialTestsetName) + setEditModalDescription(initialDescription) + } + }, [isRenameModalOpen, initialTestsetName, initialDescription]) const handleRenameConfirm = () => { onRenameConfirm(editModalName, editModalDescription) @@ -155,6 +157,12 @@ export function TestcaseModals(props: TestcaseModalsProps) { onPressEnter={handleAddColumn} autoFocus /> + + Tip: Use dot notation to create nested columns. For example,{" "} + parent.child creates a{" "} + child column under the{" "} + parent group. +
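The `TestcaseModals` hunk above fixes a subtle React bug: the old code re-seeded the rename fields by calling `setState` during render, which both violates render purity and clobbered in-progress edits whenever the parent re-rendered while the modal was open. The replacement runs only when the effect's dependencies change — this is the exact effect from the diff, shown with the reasoning spelled out:

```typescript
import {useEffect} from "react"

// Re-seed the draft fields when the modal (re)opens or the source values
// change — the dependency list below is exactly that. While the modal
// stays open and the sources are stable, user edits are left alone.
useEffect(() => {
    if (isRenameModalOpen) {
        setEditModalName(initialTestsetName)
        setEditModalDescription(initialDescription)
    }
}, [isRenameModalOpen, initialTestsetName, initialDescription])
```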
diff --git a/web/oss/src/components/TestcasesTableNew/components/TestcaseSelectionCell.tsx b/web/oss/src/components/TestcasesTableNew/components/TestcaseSelectionCell.tsx index f10f6d37d7..7a15b9b4a2 100644 --- a/web/oss/src/components/TestcasesTableNew/components/TestcaseSelectionCell.tsx +++ b/web/oss/src/components/TestcasesTableNew/components/TestcaseSelectionCell.tsx @@ -1,34 +1,57 @@ -import {memo} from "react" +import {memo, useMemo} from "react" import {useAtomValue} from "jotai" -import {testcaseIsDirtyAtom} from "@/oss/state/entities/testcase/dirtyState" +import {testcase} from "@/oss/state/entities/testcase" interface TestcaseSelectionCellProps { testcaseId: string | undefined rowIndex: number originNode: React.ReactNode + mode?: "edit" | "view" } /** * Custom selection cell that shows row index on hover via title attribute - * Also shows dirty indicator for rows with unsaved changes + * Also shows dirty indicator for rows with unsaved changes via background color + * + * Uses testcaseIsDirtyAtomFamily for reactive dirty state detection: + * - Checks draft state vs server state + * - Accounts for pending column changes (renames/deletes/adds) + * - New rows are always considered dirty + * * Uses native title instead of Tooltip for better scroll performance */ const TestcaseSelectionCell = memo(function TestcaseSelectionCell({ testcaseId, rowIndex, originNode, + mode = "edit", }: TestcaseSelectionCellProps) { - // Check if testcase has unsaved changes - const isDirty = useAtomValue(testcaseIsDirtyAtom(testcaseId || "")) + // Check if testcase has unsaved changes using controller selector + // This includes both draft edits AND pending column changes + const isDirtyAtom = useMemo(() => testcase.selectors.isDirty(testcaseId || ""), [testcaseId]) + // Always call useAtomValue (hooks must be unconditional), then check mode + const isDirtyValue = useAtomValue(isDirtyAtom) + const isDirty = mode === "edit" ? isDirtyValue : false + + // New rows (not yet saved) are always dirty + const isNewRow = testcaseId?.startsWith("new-") || testcaseId?.startsWith("local-") || false + + const showDirtyIndicator = mode === "edit" && (isDirty || isNewRow) // Build tooltip title - always show row number, add dirty indicator if needed - const tooltipTitle = isDirty ? `Row ${rowIndex + 1} (unsaved changes)` : `Row ${rowIndex + 1}` + const tooltipTitle = showDirtyIndicator + ? `Row ${rowIndex + 1} (unsaved changes)` + : `Row ${rowIndex + 1}` return ( -
- {originNode} +
+
{originNode}
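`TestcaseSelectionCell` above follows the "subscribe first, gate later" rule for hooks: wrapping `useAtomValue` in `if (mode === "edit")` would change the hook order between renders and crash, so the atom is always read and the result is masked afterwards. The relevant excerpt from the diff, with the reasoning as comments:

```typescript
import {useMemo} from "react"
import {useAtomValue} from "jotai"
import {testcase} from "@/oss/state/entities/testcase"

// Hooks must run unconditionally on every render, so the subscription is
// always established; `mode` only decides whether the value is used.
const isDirtyAtom = useMemo(() => testcase.selectors.isDirty(testcaseId || ""), [testcaseId])
const isDirtyValue = useAtomValue(isDirtyAtom)
const isDirty = mode === "edit" ? isDirtyValue : false
```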
) }) diff --git a/web/oss/src/components/TestcasesTableNew/components/TestcasesTableShell.tsx b/web/oss/src/components/TestcasesTableNew/components/TestcasesTableShell.tsx index 241a1136f5..f4d580eb8d 100644 --- a/web/oss/src/components/TestcasesTableNew/components/TestcasesTableShell.tsx +++ b/web/oss/src/components/TestcasesTableNew/components/TestcasesTableShell.tsx @@ -1,20 +1,20 @@ import {useCallback, useMemo, useState} from "react" -import {CaretDown, CaretRight} from "@phosphor-icons/react" -import {PencilSimple, Trash} from "@phosphor-icons/react" -import {Input, Skeleton, Typography} from "antd" +import {MoreOutlined, PlusOutlined} from "@ant-design/icons" +import {CaretDown, CaretRight, Copy, PencilSimple, Trash} from "@phosphor-icons/react" +import {Button, Dropdown, Input, Skeleton, Tooltip} from "antd" +import type {MenuProps} from "antd" import type {ColumnType, ColumnsType} from "antd/es/table" import clsx from "clsx" import {getDefaultStore} from "jotai/vanilla" import { - ColumnVisibilityHeader, - createStandardColumns, + ColumnVisibilityMenuTrigger, InfiniteVirtualTableFeatureShell, type TableScopeConfig, } from "@/oss/components/InfiniteVirtualTable" +import {copyToClipboard} from "@/oss/lib/helpers/copyToClipboard" import type {Column} from "@/oss/state/entities/testcase/columnState" -import {testcaseIsDirtyAtom} from "@/oss/state/entities/testcase/dirtyState" import {message} from "../../AppMessageContext" import {testcasesDatasetStore, type TestcaseTableRow} from "../atoms/tableStore" @@ -37,6 +37,7 @@ export interface TestcasesTableShellProps { size: "small" | "medium" | "large" heightPx: number maxLines: number + menuItems: MenuProps["items"] } selectedRowKeys: React.Key[] onSelectedRowKeysChange: (keys: React.Key[]) => void @@ -56,6 +57,8 @@ export interface TestcasesTableShellProps { scopeIdPrefix?: string /** Maximum number of rows to display (for preview mode) */ maxRows?: number + /** Callback when add column button is clicked (shown in actions column header) */ + onAddColumn?: () => void } /** @@ -93,6 +96,7 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { showRowIndex = false, scopeIdPrefix = "testcases", maxRows, + onAddColumn, } = props // Collapsed groups state (using useState for simplicity - persists only during session) @@ -116,8 +120,7 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { pageSize: maxRows ?? 50, // Use maxRows if provided, otherwise default to 50 enableInfiniteScroll: !maxRows, // Disable infinite scroll when maxRows is set columnVisibilityStorageKey: "testcases:columns", - // Increase exit debounce to prevent infinite loop on scroll-stop-scroll pattern - viewportExitDebounceMs: 300, + viewportTrackingEnabled: true, }), [scopeIdPrefix, revisionIdParam, maxRows], ) @@ -137,23 +140,7 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { columnTitle: showRowIndex ? 
( # ) : undefined, - onCell: (record: TestcaseTableRow) => { - // Check if testcase has unsaved changes (for dirty indicator) - const recordKey = String(record.key || record.id) - const isNewRow = - recordKey.startsWith("new-") || recordKey.startsWith("local-") - if (record.id) { - const isDirty = - isNewRow || globalStore.get(testcaseIsDirtyAtom(record.id)) - if (isDirty) { - return { - // Use inline style to override hover styles - style: {backgroundColor: "rgb(255 251 235)"}, // amber-50 - } - } - } - return {} - }, + // Dirty indicator background is now handled reactively in TestcaseSelectionCell renderCell: ( _value: boolean, record: TestcaseTableRow, @@ -164,6 +151,7 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { {index + 1} } @@ -172,12 +160,20 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { ), } : undefined, - [enableSelection, selectedRowKeys, onSelectedRowKeysChange, globalStore, showRowIndex], + [ + enableSelection, + selectedRowKeys, + onSelectedRowKeysChange, + globalStore, + showRowIndex, + mode, + ], ) // Max lines from row height config (already computed by useRowHeight) @@ -193,15 +189,69 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { [], ) + // Empty state columns - shown when user has no data (not loading) + const emptyStateColumns = useMemo(() => [{key: "__empty", name: "No columns yet"}], []) + + // Handle group column rename - renames all nested columns + const handleGroupRename = useCallback( + (groupPath: string, newName: string): boolean => { + // Get all columns that belong to this group + const columnsInGroup = table.columns.filter((col) => + col.key.startsWith(groupPath + "."), + ) + + if (columnsInGroup.length === 0) return false + + // Rename each nested column + let allSucceeded = true + columnsInGroup.forEach((col) => { + // Replace the group prefix with the new group name + const relativePath = col.key.substring(groupPath.length + 1) + const newColumnName = `${newName}.${relativePath}` + const success = table.renameColumn(col.key, newColumnName) + if (!success) allSucceeded = false + }) + + return allSucceeded + }, + [table], + ) + + // Handle group column delete - deletes all nested columns + const handleGroupDelete = useCallback( + (groupPath: string) => { + // Get all columns that belong to this group + const columnsInGroup = table.columns.filter((col) => + col.key.startsWith(groupPath + "."), + ) + + // Delete each nested column + columnsInGroup.forEach((col) => { + table.deleteColumn(col.key) + }) + }, + [table], + ) + // Columns definition // Use TestcaseCell for entity-aware rendering (reads from entity atoms in global store) // Supports grouped columns (e.g., "group.column" renders under "group" header) const columns = useMemo>(() => { const isEditable = mode === "edit" - // Use skeleton columns if actual columns are empty (loading state) - const columnsToRender = table.columns.length > 0 ? table.columns : skeletonColumns - const isShowingSkeleton = table.columns.length === 0 + // Differentiate between loading state and empty state + const hasNoColumns = table.columns.length === 0 + const isActuallyLoading = table.isLoading + + // Use skeleton columns only when loading, empty state columns when truly empty + const columnsToRender = hasNoColumns + ? isActuallyLoading + ? 
skeletonColumns + : emptyStateColumns + : table.columns + + const isShowingSkeleton = hasNoColumns && isActuallyLoading + const isShowingEmpty = hasNoColumns && !isActuallyLoading // Create column definition for a single column // Wrap title with ColumnVisibilityHeader to enable viewport tracking @@ -211,22 +261,19 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { ): ColumnType => ({ key: col.key, dataIndex: col.key, - title: ( - - {isEditable && !isShowingSkeleton ? ( - - ) : ( - - {displayName} - - )} - - ), + title: + isEditable && !isShowingSkeleton && !isShowingEmpty ? ( + + ) : ( + + {displayName} + + ), width: 200, render: (value: unknown, record: TestcaseTableRow) => { // Show skeleton for skeleton rows or when showing skeleton columns @@ -242,12 +289,21 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { /> ) } + // Show empty state message when no columns exist (not loading) + if (isShowingEmpty) { + return ( +
+ Add a column to get started +
+ ) + } // For rows with id (both new and server rows), use entity-aware cell // This ensures column renames are reflected correctly const rowId = record.id || String(record.key) if (rowId) { return ( => ({ - key: groupName, - dataIndex: groupName, - title: ( - -
toggleGroupCollapse(groupName)} - > - - {groupName} + ): ColumnType => { + const displayName = groupPath.includes(".") + ? groupPath.substring(groupPath.lastIndexOf(".") + 1) + : groupPath + + return { + key: groupPath, + dataIndex: groupPath, + title: ( +
+ { + e.stopPropagation() + e.preventDefault() + toggleGroupCollapse(groupPath) + }} + > + + +
+ handleGroupRename(groupPath, newName)} // prettier-ignore + onDelete={() => handleGroupDelete(groupPath)} + disabled={!isEditable} + inlineActionsMinWidth={80} + /> +
- - ), - width: 200, - render: (_value: unknown, record: TestcaseTableRow) => { - if (record.__isSkeleton || isShowingSkeleton) { - const skeletonHeight = Math.max(24, rowHeight.heightPx - 32) - return ( - - ) - } - const rowId = record.id || String(record.key) - if (rowId) { - // Show the parent column (full JSON object) - return ( - - ) - } - return null - }, - }) + ), + width: 200, + render: (_value: unknown, record: TestcaseTableRow) => { + if (record.__isSkeleton || isShowingSkeleton) { + const skeletonHeight = Math.max(24, rowHeight.heightPx - 32) + return ( + + ) + } + if (isShowingEmpty) { + return ( +
+ Add a column to get started +
+ ) + } + const rowId = record.id || String(record.key) + if (rowId) { + // Show the parent column (full JSON object) + return ( + + ) + } + return null + }, + } + } - // Render group header with collapse/expand icon + // Render group header with collapse/expand icon and editable controls // groupPath is the full path (e.g., "current_rfp.event"), display only the last segment // Wrapped with ColumnVisibilityHeader for viewport tracking const renderGroupHeader = (groupPath: string, isCollapsed: boolean, childCount: number) => { @@ -315,9 +399,9 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { : groupPath return ( - -
+ { e.stopPropagation() e.preventDefault() @@ -331,15 +415,21 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { toggleGroupCollapse(groupPath) } }} - title={groupPath} > - - {isCollapsed ? : } - - {displayName} - ({childCount}) + {isCollapsed ? : } + +
+ handleGroupRename(groupPath, newName)} // prettier-ignore + onDelete={() => handleGroupDelete(groupPath)} + disabled={!isEditable} + inlineActionsMinWidth={80} + />
- + ({childCount}) +
) } @@ -359,40 +449,97 @@ export function TestcasesTableShell(props: TestcasesTableShellProps) { return [...dataColumns] } - const actionsColumn = createStandardColumns([ + // Custom actions column with Add Column button in header + const actionsColumn: ColumnsType = [ { - type: "actions", + title: ( +
+ {onAddColumn && mode === "edit" && ( + +
+ ), + key: "__ui_actions__", // Use reserved key to avoid conflict with user data columns width: 56, - showCopyId: true, - items: [ - { - key: "edit", - label: "Edit", - icon: , - onClick: (record) => { - if (record.id) onRowClick(record) + fixed: "right", + align: "center", + columnVisibilityLocked: true as any, + exportEnabled: false as any, // Exclude from client-side CSV export + render: (_, record) => { + if (record.__isSkeleton || isShowingSkeleton) return null + + const menuItems: any[] = [ + { + key: "edit", + label: "Edit", + icon: , + onClick: (e: any) => { + e.domEvent.stopPropagation() + if (record.id) onRowClick(record) + }, }, - }, - {type: "divider"}, - { - key: "delete", - label: "Delete", - icon: , - danger: true, - onClick: (record) => { - if (record.key) { - table.deleteTestcases([String(record.key)]) - message.success("Deleted testcase. Save to apply changes.") - } + {type: "divider"}, + { + key: "delete", + label: "Delete", + icon: , + danger: true, + onClick: (e: any) => { + e.domEvent.stopPropagation() + if (record.key) { + table.deleteTestcases([String(record.key)]) + message.success("Deleted testcase. Save to apply changes.") + } + }, }, - }, - ], + ] + + // Add copy ID + const recordId = (record as any).id || (record as any).key + if (recordId) { + menuItems.push({type: "divider"}) + menuItems.push({ + key: "copy-id", + label: "Copy ID", + icon: , + onClick: (e: any) => { + e.domEvent.stopPropagation() + copyToClipboard(String(recordId)) + }, + }) + } + + return ( + + +
) } diff --git a/web/oss/src/components/TestcasesTableNew/utils/groupColumns.ts b/web/oss/src/components/TestcasesTableNew/utils/groupColumns.ts index c8883ad063..c92737292b 100644 --- a/web/oss/src/components/TestcasesTableNew/utils/groupColumns.ts +++ b/web/oss/src/components/TestcasesTableNew/utils/groupColumns.ts @@ -72,10 +72,21 @@ export function getLeafColumnName(key: string): string { return key.substring(lastDotIndex + 1) } +/** + * Check if a column is an expanded column (came from object expansion) + * Expanded columns have parentKey property set + */ +function isExpandedColumn(col: Column): boolean { + return "parentKey" in col && typeof (col as any).parentKey === "string" +} + /** * Recursively group columns into nested structure * This handles deeply nested paths like "a.b.c.d" by creating nested group headers * Respects maxDepth to limit nesting for performance + * + * IMPORTANT: Only groups columns that came from object expansion (have parentKey). + * Columns with dots in their flat key names (e.g., "agents.md") are NOT grouped. */ function groupColumnsRecursive( columns: Column[], @@ -98,6 +109,17 @@ function groupColumnsRecursive( // First pass: categorize columns into groups or standalone (leaf columns) columns.forEach((col) => { + // Only group columns that came from object expansion (have parentKey) + // Flat columns with dots in their names (e.g., "agents.md") should NOT be grouped + if (!isExpandedColumn(col) && currentDepth === 0) { + // Top-level flat column - render as-is, even if it has dots + result.push({ + ...createColumnDef(col, col.name), + __order: orderCounter++, + } as ColumnType & {__order: number}) + return + } + // Get the relative key (remove parent path prefix if present) const relativeKey = parentPath ? col.key.substring(parentPath.length + 1) : col.key const parsed = parseGroupedColumnKey(relativeKey) @@ -115,7 +137,8 @@ function groupColumnsRecursive( } } else { // Leaf column - no more dots in relative key - const displayName = getLeafColumnName(col.key) + // Use relativeKey as display name (it's already the name relative to parent group) + const displayName = relativeKey result.push({ ...createColumnDef(col, displayName), __order: orderCounter++, diff --git a/web/oss/src/components/TestsetsTable/TestsetsTable.tsx b/web/oss/src/components/TestsetsTable/TestsetsTable.tsx index 163aa09eee..4658cd2b89 100644 --- a/web/oss/src/components/TestsetsTable/TestsetsTable.tsx +++ b/web/oss/src/components/TestsetsTable/TestsetsTable.tsx @@ -6,10 +6,18 @@ import { PlusOutlined, LoadingOutlined, } from "@ant-design/icons" -import {Copy, Eye, Note, PencilSimple, Trash} from "@phosphor-icons/react" -import {Button, Modal, Tag, Typography} from "antd" +import { + CaretDown, + Copy, + DownloadSimple, + Eye, + Note, + PencilSimple, + Trash, +} from "@phosphor-icons/react" +import {Button, Dropdown, Modal, Space, Tag, Typography} from "antd" import clsx from "clsx" -import {useSetAtom} from "jotai" +import {useAtom, useAtomValue, useSetAtom} from "jotai" import dynamic from "next/dynamic" import { @@ -19,16 +27,18 @@ import { createStandardColumns, TableDescription, } from "@/oss/components/InfiniteVirtualTable" -import {fetchTestsetRevisions} from "@/oss/components/TestsetsTable/atoms/fetchTestsetRevisions" -import { - testsetsDatasetStore, - testsetsRefreshTriggerAtom, - type TestsetTableRow, -} from "@/oss/components/TestsetsTable/atoms/tableStore" +import CommitMessageCell from "@/oss/components/TestsetsTable/components/CommitMessageCell" import 
TestsetsHeaderFilters from "@/oss/components/TestsetsTable/components/TestsetsHeaderFilters" import useURL from "@/oss/hooks/useURL" import type {TestsetCreationMode} from "@/oss/lib/Types" -import {archiveTestsetRevision} from "@/oss/services/testsets/api" +import { + archiveTestsetRevision, + downloadTestset, + downloadRevision, + type ExportFileType, +} from "@/oss/services/testsets/api" +import {fetchRevisionsList, testset, type TestsetTableRow} from "@/oss/state/entities/testset" +import {projectIdAtom} from "@/oss/state/project" import {message} from "../AppMessageContext" @@ -78,9 +88,10 @@ const TestsetsTable = ({ selectedRevisionId, }: TestsetsTableProps) => { const {projectURL} = useURL() + const projectId = useAtomValue(projectIdAtom) // Refresh trigger for the table - const setRefreshTrigger = useSetAtom(testsetsRefreshTriggerAtom) + const setRefreshTrigger = useSetAtom(testset.paginated.refreshAtom) // Modal state const [isCreateTestsetModalOpen, setIsCreateTestsetModalOpen] = useState(false) @@ -92,7 +103,7 @@ const TestsetsTable = ({ // Refresh table data const mutate = useCallback(() => { - setRefreshTrigger((prev) => prev + 1) + setRefreshTrigger() }, [setRefreshTrigger]) // Track expanded rows and their loaded children @@ -154,7 +165,15 @@ const TestsetsTable = ({ // Fallback: fetch revisions to find latest try { - const revisions = await fetchTestsetRevisions({testsetId: record.id}) + if (!projectId) return + const response = await fetchRevisionsList({ + projectId, + testsetId: record.id, + }) + // Filter out v0 revisions - they are placeholders + const revisions = response.testset_revisions.filter( + (r: any) => r.version !== 0 && r.version !== "0", + ) if (revisions.length > 0) { const latestRevision = revisions[0] const numericVersion = @@ -189,7 +208,12 @@ const TestsetsTable = ({ // Otherwise, fetch revisions to get the latest one try { - const revisions = await fetchTestsetRevisions({testsetId: record.id}) + if (!projectId) return + const response = await fetchRevisionsList({projectId, testsetId: record.id}) + // Filter out v0 revisions - they are placeholders + const revisions = response.testset_revisions.filter( + (r: any) => r.version !== 0 && r.version !== "0", + ) if (revisions.length > 0) { // Navigate to the first revision (latest) window.location.href = `${projectURL}/testsets/${revisions[0].id}` @@ -273,9 +297,66 @@ const TestsetsTable = ({ [childrenCache], ) + // State for tracking which row is being exported + const [exportingRowKey, setExportingRowKey] = useState(null) + + // Export format preference (persisted in localStorage) + const [exportFormat, setExportFormat] = useAtom(testset.filters.exportFormat) + + // Handler to export a testset or revision using the backend endpoint + const handleExportTestset = useCallback( + async (record: TestsetTableRow, format: ExportFileType) => { + const isRevision = (record as any).__isRevision + const version = (record as any).__version + const sanitizedName = record.name.replace(/[^a-zA-Z0-9-_]/g, "-") + const exportKey = `export-${record.key}` + + setExportingRowKey(record.key) + // Show immediate feedback that action was triggered + message.info(`Starting ${format.toUpperCase()} export for "${record.name}"...`) + // Show persistent loading message + message.loading({ + content: "Preparing export. 
This may take a moment for large testsets...", + key: exportKey, + duration: 0, // Don't auto-dismiss + }) + + try { + if (isRevision) { + // For revision rows, download the specific revision + const filename = `${sanitizedName}-v${version}.${format}` + await downloadRevision(record.id, format, filename) + message.success({ + content: `Revision v${version} exported as ${format.toUpperCase()}`, + key: exportKey, + }) + } else { + // For testset rows, download the latest revision + const filename = `${sanitizedName}.${format}` + await downloadTestset(record.id, format, filename) + message.success({ + content: `Testset exported as ${format.toUpperCase()}`, + key: exportKey, + }) + } + // Update format preference when user explicitly chooses a format + setExportFormat(format) + } catch (error) { + console.error("[TestsetsTable] Failed to export:", error) + message.error({ + content: "Failed to export", + key: exportKey, + }) + } finally { + setExportingRowKey(null) + } + }, + [setExportFormat], + ) + // Table manager - consolidates pagination, selection, row handlers, export, delete buttons const table = useTableManager({ - datasetStore: testsetsDatasetStore, + datasetStore: testset.paginated.store, scopeId, pageSize: 50, rowHeight: 48, @@ -307,6 +388,30 @@ const TestsetsTable = ({ return table.rows.map(addChildren) }, [table.rows, childrenCache]) + // Custom getSelectedRecords that includes both testsets and revisions from childrenCache + const getSelectedRecords = useCallback(() => { + const selectedKeys = table.selectedRowKeys + const records: TestsetTableRow[] = [] + + // Check main rows (testsets) + for (const row of table.rows) { + if (selectedKeys.includes(row.key)) { + records.push(row) + } + } + + // Check children (revisions) from cache + for (const children of childrenCache.values()) { + for (const child of children) { + if (selectedKeys.includes(child.key)) { + records.push(child) + } + } + } + + return records + }, [table.selectedRowKeys, table.rows, childrenCache]) + // Tree expand handler - fetch revisions as children const handleExpand = useCallback( async (expanded: boolean, record: TestsetTableRow) => { @@ -326,7 +431,12 @@ const TestsetsTable = ({ setLoadingRows((prev) => new Set(prev).add(rowKey)) try { // Fetch revisions directly for this testset (skip variants) - const revisions = await fetchTestsetRevisions({testsetId: record.id}) + if (!projectId) return + const response = await fetchRevisionsList({projectId, testsetId: record.id}) + // Filter out v0 revisions - they are placeholders and should not be displayed + const revisions = response.testset_revisions.filter( + (r: any) => r.version !== 0 && r.version !== "0", + ) const childRows: TestsetTableRow[] = revisions.map((revision: any) => ({ key: `${record.id}-${revision.id}`, id: revision.id, @@ -368,7 +478,7 @@ const TestsetsTable = ({ key: "name", title: "Name", width: 300, - fixed: "left", + columnVisibilityLocked: true, render: (_value, record) => { const isRevision = (record as any).__isRevision const isExpanded = expandedRowKeys.includes(record.key) @@ -379,10 +489,15 @@ const TestsetsTable = ({ if (isRevision) { const version = (record as any).__version return ( -
- {record.name} +
+ + {record.name} + {version !== null && version !== undefined && ( - + v{version} )} @@ -392,10 +507,10 @@ const TestsetsTable = ({ // Testset rows (parent) - show expand icon return ( -
+
{!isSkeleton && ( { e.stopPropagation() handleExpand(!isExpanded, record) @@ -410,7 +525,9 @@ const TestsetsTable = ({ )} )} - {record.name} + + {record.name} +
) }, @@ -422,26 +539,41 @@ const TestsetsTable = ({ width: 250, render: (_value, record) => { const isRevision = (record as any).__isRevision + + // For testset rows: use CommitMessageCell to read from atom family + if (!isRevision) { + return + } + + // For revision rows: use __commitMessage from the fetched child data const commitMessage = (record as any).__commitMessage - // Only show commit message for revisions with user-provided messages - // Filter out auto-generated messages that start with "Updated testset:" + // Filter out auto-generated messages const isAutoGenerated = commitMessage?.startsWith("Updated testset:") || - commitMessage?.startsWith("Patched testset") - if (!isRevision || !commitMessage || isAutoGenerated) { + commitMessage?.startsWith("Patched testset") || + commitMessage?.startsWith("Initial commit") + if (!commitMessage || isAutoGenerated) { return } return ( - + {commitMessage} ) }, }, - {type: "date", key: "created_at", title: "Date Created"}, - {type: "user", key: "created_by_id", title: "Created by"}, + { + type: "date", + key: "created_at", + title: "Date Created", + }, + { + type: "user", + key: "created_by_id", + title: "Created by", + }, { type: "actions", width: 48, @@ -502,20 +634,38 @@ const TestsetsTable = ({ onClick: handleDeleteRevision, hidden: (record) => isSelectMode || !(record as any).__isRevision, }, + // Export actions (available for both testsets and revisions in manage mode) + { + type: "divider", + hidden: () => !isManageMode, + }, + { + key: "export-csv", + label: "Export as CSV", + icon: , + onClick: (record) => handleExportTestset(record, "csv"), + hidden: () => !isManageMode || Boolean(exportingRowKey), + }, + { + key: "export-json", + label: "Export as JSON", + icon: , + onClick: (record) => handleExportTestset(record, "json"), + hidden: () => !isManageMode || Boolean(exportingRowKey), + }, ], - onExportRow: isManageMode ? table.handleExportRow : undefined, - isExporting: isManageMode ? Boolean(table.rowExportingKey) : false, getRecordId: (record) => record.id, }, ]), [ actions, - table.handleExportRow, - table.rowExportingKey, + handleExportTestset, + exportingRowKey, expandedRowKeys, loadingRows, handleExpand, handleDeleteRevision, + isManageMode, ], ) @@ -551,6 +701,57 @@ const TestsetsTable = ({ [actions.handleCreate], ) + // Smart export button with dropdown - remembers last used format + const renderExportButton = useCallback( + ({onExport, loading}: {onExport: () => void; loading: boolean}) => { + // Use custom getSelectedRecords that includes both testsets and revisions + const selectedRecords = getSelectedRecords() + const disabled = !selectedRecords.length + + const handleExport = async (format: ExportFileType) => { + // Export all selected records (testsets and/or revisions) + for (const record of selectedRecords) { + await handleExportTestset(record, format) + } + // Update preference + setExportFormat(format) + } + + const menuItems = [ + { + key: "csv", + label: "Export as CSV", + icon: , + onClick: () => handleExport("csv"), + }, + { + key: "json", + label: "Export as JSON", + icon: , + onClick: () => handleExport("json"), + }, + ] + + // Smart button: clicking the main button uses the last format, dropdown allows choosing + return ( + + + + + {message.type === "error" && ( + + )} + +
+ ) +} + +export default EmailFirst diff --git a/web/oss/src/components/pages/auth/EmailPasswordAuth/index.tsx b/web/oss/src/components/pages/auth/EmailPasswordAuth/index.tsx index 00a26c4c3d..a38e5e9589 100644 --- a/web/oss/src/components/pages/auth/EmailPasswordAuth/index.tsx +++ b/web/oss/src/components/pages/auth/EmailPasswordAuth/index.tsx @@ -13,6 +13,7 @@ const EmailPasswordAuth = ({ setMessage, authErrorMsg, initialEmail, + lockEmail = false, }: EmailPasswordAuthProps) => { const {handleAuthSuccess} = usePostAuthRedirect() const [form, setForm] = useState({email: initialEmail || "", password: ""}) @@ -70,7 +71,7 @@ const EmailPasswordAuth = ({ > @@ -80,12 +81,14 @@ const EmailPasswordAuth = ({ value={form.email} placeholder="Enter valid email address" status={message.type === "error" ? "error" : ""} + disabled={lockEmail} + className={lockEmail ? "auth-locked-input" : undefined} onChange={(e) => setForm({...form, email: e.target.value})} /> @@ -106,7 +109,7 @@ const EmailPasswordAuth = ({ className="w-full" loading={isLoading} > - Sign in + Continue with password {message.type == "error" && ( diff --git a/web/oss/src/components/pages/auth/EmailPasswordSignIn/index.tsx b/web/oss/src/components/pages/auth/EmailPasswordSignIn/index.tsx new file mode 100644 index 0000000000..72a781a20a --- /dev/null +++ b/web/oss/src/components/pages/auth/EmailPasswordSignIn/index.tsx @@ -0,0 +1,146 @@ +import {useState} from "react" + +import {Button, Form, FormProps, Input} from "antd" +import {signIn, signUp} from "supertokens-auth-react/recipe/emailpassword" + +import usePostAuthRedirect from "@/oss/hooks/usePostAuthRedirect" + +import ShowErrorMessage from "../assets/ShowErrorMessage" +import {EmailPasswordAuthProps} from "../assets/types" + +const EmailPasswordSignIn = ({ + message, + setMessage, + authErrorMsg, + initialEmail, + lockEmail = false, +}: EmailPasswordAuthProps) => { + const {handleAuthSuccess} = usePostAuthRedirect() + const [form, setForm] = useState({email: initialEmail || "", password: ""}) + const [isLoading, setIsLoading] = useState(false) + + const signInClicked: FormProps<{email: string; password: string}>["onFinish"] = async ( + values, + ) => { + try { + setIsLoading(true) + const response = await signIn({ + formFields: [ + {id: "email", value: values.email}, + {id: "password", value: values.password}, + ], + }) + + if (response.status === "FIELD_ERROR") { + response.formFields.forEach((res) => { + setMessage({message: res.error, type: "error"}) + }) + } else if (response.status === "WRONG_CREDENTIALS_ERROR") { + try { + const signUpResponse = await signUp({ + formFields: [ + {id: "email", value: values.email}, + {id: "password", value: values.password}, + ], + }) + if (signUpResponse.status === "FIELD_ERROR") { + const emailExists = signUpResponse.formFields.some((res) => + res.error.toLowerCase().includes("already exists"), + ) + setMessage({ + message: emailExists + ? 
"Invalid email or password" + : signUpResponse.formFields[0]?.error || "Unable to sign up", + type: "error", + }) + return + } + if (signUpResponse.status === "SIGN_UP_NOT_ALLOWED") { + setMessage({ + message: + "You need to be invited by the organization owner to gain access.", + type: "error", + }) + return + } + setMessage({message: "Verification successful", type: "success"}) + const {createdNewRecipeUser, user} = signUpResponse as { + createdNewRecipeUser?: boolean + user?: {loginMethods?: unknown[]} + } + await handleAuthSuccess({createdNewRecipeUser, user}) + } catch (signUpError) { + authErrorMsg(signUpError) + } + } else { + setMessage({message: "Verification successful", type: "success"}) + const {createdNewRecipeUser, user} = response as { + createdNewRecipeUser?: boolean + user?: {loginMethods?: unknown[]} + } + await handleAuthSuccess({createdNewRecipeUser, user}) + } + } catch (error) { + authErrorMsg(error) + } finally { + setIsLoading(false) + } + } + + return ( +
+
+ + setForm({...form, email: e.target.value})} + /> + + + setForm({...form, password: e.target.value})} + /> + + + + {message.type == "error" && ( + + )} + +
+ ) +} + +export default EmailPasswordSignIn diff --git a/web/oss/src/components/pages/auth/PasswordlessAuth/index.tsx b/web/oss/src/components/pages/auth/PasswordlessAuth/index.tsx index a461a6137b..1a48e81e0f 100644 --- a/web/oss/src/components/pages/auth/PasswordlessAuth/index.tsx +++ b/web/oss/src/components/pages/auth/PasswordlessAuth/index.tsx @@ -15,6 +15,7 @@ const PasswordlessAuth = ({ authErrorMsg, setIsLoginCodeVisible, disabled, + lockEmail = false, }: PasswordlessAuthProps) => { const sendOTP: FormProps<{email: string}>["onFinish"] = async (values) => { try { @@ -51,6 +52,8 @@ const PasswordlessAuth = ({ type="email" value={email} placeholder="Enter valid email address" + disabled={lockEmail} + className={lockEmail ? "auth-locked-input" : undefined} onChange={(e) => setEmail(e.target.value)} />
@@ -63,7 +66,7 @@ const PasswordlessAuth = ({ loading={isLoading} disabled={disabled} > - Continue with email + Continue with OTP ) diff --git a/web/oss/src/components/pages/auth/SendOTP/index.tsx b/web/oss/src/components/pages/auth/SendOTP/index.tsx index b66c99bb12..fea279b6e4 100644 --- a/web/oss/src/components/pages/auth/SendOTP/index.tsx +++ b/web/oss/src/components/pages/auth/SendOTP/index.tsx @@ -16,7 +16,7 @@ import usePostAuthRedirect from "@/oss/hooks/usePostAuthRedirect" import {useStyles} from "../assets/style" import {SendOTPProps} from "../assets/types" -const {Text, Title} = Typography +const {Text} = Typography const SendOTP = ({ message, @@ -118,26 +118,34 @@ const SendOTP = ({ } return ( -
-
- - Verify your email - - - A 6 digit code has been sent to{" "} - {email} The code is valid for next 15 - minutes. - -
- -
+
+ {message.type == "error" && } + + + + - Next + Continue with OTP -
- {isResendDisabled ? ( -
- - Check your email for the new code - - - Please wait to request new code (60s) - -
- ) : ( - - Didn’t receive the code?{" "} - - - )} - +
+ + {isResendDisabled && ( + + Please wait before requesting a new code (60s) + + )}
) diff --git a/web/oss/src/components/pages/auth/SocialAuth/index.tsx b/web/oss/src/components/pages/auth/SocialAuth/index.tsx index 94f0838b43..e0893b8acc 100644 --- a/web/oss/src/components/pages/auth/SocialAuth/index.tsx +++ b/web/oss/src/components/pages/auth/SocialAuth/index.tsx @@ -1,7 +1,6 @@ import {useRef} from "react" -import {GithubOutlined, GoogleOutlined} from "@ant-design/icons" -import {Button, Divider} from "antd" +import {Button} from "antd" import {useRouter} from "next/router" import {getAuthorisationURLWithQueryParamsAndSetState} from "supertokens-auth-react/recipe/thirdparty" @@ -9,21 +8,27 @@ import {getEnv} from "@/oss/lib/helpers/dynamicEnv" import {SocialAuthProps} from "../assets/types" -const SocialAuth = ({authErrorMsg, isLoading, setIsLoading, disabled}: SocialAuthProps) => { +const SocialAuth = ({ + authErrorMsg, + isLoading, + setIsLoading, + disabled, + providers, +}: SocialAuthProps) => { const router = useRouter() const inFlight = useRef(false) - const googleSignInClicked = async () => { + const providerSignInClicked = async (providerId: string) => { try { if (disabled || isLoading || inFlight.current) return inFlight.current = true setIsLoading(true) const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ - thirdPartyId: "google", + thirdPartyId: providerId, frontendRedirectURI: `${ getEnv("NEXT_PUBLIC_AGENTA_WEB_URL") || getEnv("NEXT_PUBLIC_AGENTA_API_URL") - }/auth/callback/google`, + }/auth/callback/${providerId}`, }) await router.push(authUrl) } catch (err) { @@ -33,53 +38,30 @@ const SocialAuth = ({authErrorMsg, isLoading, setIsLoading, disabled}: SocialAut } } - const githubSignInClicked = async () => { - try { - if (disabled || isLoading || inFlight.current) return - inFlight.current = true - setIsLoading(true) + const hasAnyProvider = providers.length > 0 - const authUrl = await getAuthorisationURLWithQueryParamsAndSetState({ - thirdPartyId: "github", - frontendRedirectURI: `${ - getEnv("NEXT_PUBLIC_AGENTA_WEB_URL") || getEnv("NEXT_PUBLIC_AGENTA_API_URL") - }/auth/callback/github`, - }) - await router.push(authUrl) - } catch (err) { - authErrorMsg(err) - setIsLoading(false) - inFlight.current = false - } + if (!hasAnyProvider) { + return null } return ( <>
- - - + {providers.map((provider) => ( + + ))}
- or ) } diff --git a/web/oss/src/components/pages/auth/assets/types.d.ts b/web/oss/src/components/pages/auth/assets/types.d.ts index 100a5a5a9f..44c7bc896d 100644 --- a/web/oss/src/components/pages/auth/assets/types.d.ts +++ b/web/oss/src/components/pages/auth/assets/types.d.ts @@ -5,6 +5,7 @@ export interface EmailPasswordAuthProps { setMessage: React.Dispatch> authErrorMsg: (error: any) => void initialEmail?: string + lockEmail?: boolean } export interface SendOTPProps { @@ -26,6 +27,7 @@ export interface PasswordlessAuthProps { setIsLoginCodeVisible: React.Dispatch> setIsLoading: React.Dispatch> disabled: boolean + lockEmail?: boolean } export interface SocialAuthProps { @@ -33,4 +35,9 @@ export interface SocialAuthProps { authErrorMsg: (error: any) => void setIsLoading: React.Dispatch> disabled: boolean + providers: { + id: string + label: string + icon?: React.ReactNode + }[] } diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/CreateEvaluatorDrawer/index.tsx b/web/oss/src/components/pages/evaluations/NewEvaluation/Components/CreateEvaluatorDrawer/index.tsx index 108305c2a8..46ae9730c4 100644 --- a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/CreateEvaluatorDrawer/index.tsx +++ b/web/oss/src/components/pages/evaluations/NewEvaluation/Components/CreateEvaluatorDrawer/index.tsx @@ -27,7 +27,9 @@ import { const ConfigureEvaluator = dynamic( () => - import("@/oss/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator"), + import( + "@/oss/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator" + ), {ssr: false}, ) diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalInner.tsx b/web/oss/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalInner.tsx index 7220303a9d..cac33be7b2 100644 --- a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalInner.tsx +++ b/web/oss/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalInner.tsx @@ -14,7 +14,7 @@ import usePreviewEvaluations from "@/oss/lib/hooks/usePreviewEvaluations" import {createEvaluation} from "@/oss/services/evaluations/api" import {useAppsData} from "@/oss/state/app/hooks" import {appIdentifiersAtom} from "@/oss/state/appState" -import {useTestsetsData} from "@/oss/state/testset" +import {testsetsListQueryAtomFamily} from "@/oss/state/entities/testset" import {buildEvaluationNavigationUrl} from "../../utils" import {DEFAULT_ADVANCE_SETTINGS} from "../assets/constants" @@ -165,7 +165,9 @@ const NewEvaluationModalInner = ({ appId: selectedAppId || appId, skip: false, }) - const {testsets, isLoading: testsetsLoading} = useTestsetsData() + const testsetsQuery = useAtomValue(testsetsListQueryAtomFamily(null)) + const testsets = testsetsQuery.data?.testsets ?? 
[] + const testsetsLoading = testsetsQuery.isPending const {secrets} = useVaultSecret() diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/EvaluatorTemplateDropdown.tsx b/web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/EvaluatorTemplateDropdown.tsx index 546722ae07..309b451bd0 100644 --- a/web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/EvaluatorTemplateDropdown.tsx +++ b/web/oss/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/EvaluatorTemplateDropdown.tsx @@ -154,7 +154,11 @@ const EvaluatorTemplateDropdown = ({ content={popoverContent} placement="bottomRight" arrow={false} - overlayInnerStyle={{padding: 0}} + styles={{ + container: { + padding: 0, + }, + }} > {trigger || defaultTrigger} diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/assets/styles.ts b/web/oss/src/components/pages/evaluations/NewEvaluation/assets/styles.ts index 24e40c0cd4..52f85faa80 100644 --- a/web/oss/src/components/pages/evaluations/NewEvaluation/assets/styles.ts +++ b/web/oss/src/components/pages/evaluations/NewEvaluation/assets/styles.ts @@ -4,7 +4,7 @@ import {JSSTheme} from "@/oss/lib/Types" export const useStyles = createUseStyles((theme: JSSTheme) => ({ modalContainer: { - height: 800, + // height: 800, overflowY: "hidden", "& > div": { height: "100%", @@ -49,7 +49,7 @@ export const useStyles = createUseStyles((theme: JSSTheme) => ({ "& .ant-popover-title": { marginBottom: theme.margin, }, - "& .ant-popover-inner": { + "& .ant-popover-container": { padding: `${theme.paddingSM}px ${theme.padding}px`, }, }, diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/index.tsx b/web/oss/src/components/pages/evaluations/NewEvaluation/index.tsx index 9b9173db28..3c384afd13 100644 --- a/web/oss/src/components/pages/evaluations/NewEvaluation/index.tsx +++ b/web/oss/src/components/pages/evaluations/NewEvaluation/index.tsx @@ -51,7 +51,7 @@ const NewEvaluationModal = ({ confirmLoading={submitLoading} styles={{ container: { - height: 800, + height: 700, }, }} {...props} diff --git a/web/oss/src/components/pages/evaluations/NewEvaluation/types.ts b/web/oss/src/components/pages/evaluations/NewEvaluation/types.ts index a068971bc4..898bbed7fa 100644 --- a/web/oss/src/components/pages/evaluations/NewEvaluation/types.ts +++ b/web/oss/src/components/pages/evaluations/NewEvaluation/types.ts @@ -114,10 +114,8 @@ export interface AdvancedSettingsProps { preview?: boolean } -export interface NewEvaluationModalGenericProps extends Omit< - NewEvaluationModalProps, - "preview" -> { +export interface NewEvaluationModalGenericProps + extends Omit { preview?: Preview } diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx index bf544d0943..6957d3438a 100644 --- a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx +++ b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx @@ -83,7 +83,7 @@ const AdvancedSettings: React.FC = ({settings, selectedTe ) : (field.type === "string" || field.type === "regex") && selectedTestcase.testcase ? 
( option!.value .toUpperCase() diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx index 821a29aee6..f75d653c1c 100644 --- a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx +++ b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx @@ -1,11 +1,4 @@ /** - * DebugSection - Test evaluator configuration - * - * This component handles testing evaluators by: - * 1. Loading testcases from testsets - * 2. Running a variant to generate output - * 3. Running the evaluator on the output - * * State is managed via atoms (see ./state/atoms.ts): * - playgroundSelectedTestcaseAtom: Selected testcase data * - playgroundSelectedVariantAtom: Selected variant for testing @@ -15,10 +8,14 @@ * - playgroundFormRefAtom: Form instance for reading settings * * Data fetching: - * - Testsets: fetched internally via useTestsetsData() * - Variants: fetched internally via useAppVariantRevisions() + * - Apps: fetched internally via useAppsData() + * + * Data fetching: + * - Variants: fetched internally via useAppVariantRevisions() + * - Apps: fetched internally via useAppsData() */ -import {useEffect, useMemo, useRef, useState} from "react" +import {useCallback, useEffect, useMemo, useRef, useState} from "react" import { CheckCircleOutlined, @@ -31,9 +28,11 @@ import {Button, Dropdown, Flex, Space, Tabs, Tooltip, Typography} from "antd" import clsx from "clsx" import {atom, useAtom, useAtomValue, useSetAtom} from "jotai" import yaml from "js-yaml" +import dynamic from "next/dynamic" import {createUseStyles} from "react-jss" import {message} from "@/oss/components/AppMessageContext" +import type {LoadTestsetSelectionPayload} from "@/oss/components/Playground/Components/Modals/LoadTestsetModal/assets/types" import SharedEditor from "@/oss/components/Playground/Components/SharedEditor" import {useAppId} from "@/oss/hooks/useAppId" import {transformTraceKeysInSettings, mapTestcaseAndEvalValues} from "@/oss/lib/evaluations/legacy" @@ -65,16 +64,15 @@ import { } from "@/oss/services/evaluations/api_ee" import {AgentaNodeDTO} from "@/oss/services/observability/types" import {useAppsData} from "@/oss/state/app/hooks" +import {revision} from "@/oss/state/entities/testset" import {customPropertiesByRevisionAtomFamily} from "@/oss/state/newPlayground/core/customProperties" import { stablePromptVariablesAtomFamily, transformedPromptsAtomFamily, } from "@/oss/state/newPlayground/core/prompts" import {variantFlagsAtomFamily} from "@/oss/state/newPlayground/core/variantFlags" -import {useTestsetsData} from "@/oss/state/testset" import {appSchemaAtom, appUriInfoAtom} from "@/oss/state/variant/atoms/fetcher" -import EvaluatorTestcaseModal from "./EvaluatorTestcaseModal" import EvaluatorVariantModal from "./EvaluatorVariantModal" import { playgroundEvaluatorAtom, @@ -82,7 +80,7 @@ import { playgroundLastAppIdAtom, playgroundLastVariantIdAtom, playgroundSelectedTestcaseAtom, - playgroundSelectedTestsetIdAtom, + playgroundSelectedRevisionIdAtom, playgroundSelectedVariantAtom, playgroundTraceTreeAtom, } from "./state/atoms" @@ -137,6 +135,11 @@ const useStyles = createUseStyles((theme: JSSTheme) => ({ }, })) +const LoadTestsetModal = dynamic( + () => import("@/oss/components/Playground/Components/Modals/LoadTestsetModal"), + {ssr: false}, +) + const 
DebugSection = () => { const appId = useAppId() const classes = useStyles() @@ -144,10 +147,6 @@ const DebugSection = () => { const appSchema = useAtomValue(appSchemaAtom) const {apps: availableApps = []} = useAppsData() - // Fetch testsets internally - const {testsets: fetchedTestsets} = useTestsetsData() - const testsets = fetchedTestsets ?? [] - // ================================================================ // ATOMS - Read/write state from playground atoms // ================================================================ @@ -155,8 +154,8 @@ const DebugSection = () => { const setSelectedTestcase = useSetAtom(playgroundSelectedTestcaseAtom) const _selectedVariant = useAtomValue(playgroundSelectedVariantAtom) const setSelectedVariant = useSetAtom(playgroundSelectedVariantAtom) - const selectedTestset = useAtomValue(playgroundSelectedTestsetIdAtom) - const setSelectedTestset = useSetAtom(playgroundSelectedTestsetIdAtom) + const selectedRevisionId = useAtomValue(playgroundSelectedRevisionIdAtom) + const setSelectedRevisionId = useSetAtom(playgroundSelectedRevisionIdAtom) const traceTree = useAtomValue(playgroundTraceTreeAtom) const setTraceTree = useSetAtom(playgroundTraceTreeAtom) const selectedEvaluator = useAtomValue(playgroundEvaluatorAtom) @@ -184,6 +183,31 @@ const DebugSection = () => { error: false, }) + const handleEvaluatorTestsetData = useCallback( + (payload: LoadTestsetSelectionPayload | null) => { + const testcase = payload?.testcases?.[0] + if (!testcase) { + setSelectedRevisionId("") + setSelectedTestcase({testcase: null}) + return + } + + if (payload?.revisionId) { + setSelectedRevisionId(payload.revisionId) + } + + const sanitized = + typeof testcase === "object" + ? Object.fromEntries( + Object.entries(testcase).filter(([key]) => !key.startsWith("__")), + ) + : testcase + + setSelectedTestcase({testcase: sanitized || null}) + }, + [setSelectedRevisionId, setSelectedTestcase], + ) + const defaultAppId = useMemo(() => { if (_selectedVariant?.appId) return _selectedVariant.appId if (appId) return appId @@ -266,14 +290,6 @@ const DebugSection = () => { if (v.variantId) setLastVariantId(v.variantId) }, [_selectedVariant, setLastAppId, setLastVariantId]) - // Initialize testset selection when testsets are available - useEffect(() => { - if (selectedTestset) return // Already have a selection - if (testsets?.length) { - setSelectedTestset(testsets[0]._id) - } - }, [testsets, selectedTestset, setSelectedTestset]) - // Variant flags (custom/chat) from global atoms for the selected revision const flags = useAtomValue( useMemo( @@ -321,9 +337,27 @@ const DebugSection = () => { ), ) as any - const activeTestset = useMemo(() => { - return testsets?.find((item) => item.id === selectedTestset) - }, [selectedTestset, testsets]) + const activeRevision = useAtomValue( + useMemo( + () => + (selectedRevisionId + ? (revision.selectors.data(selectedRevisionId) as any) + : (atom(null) as any)) as any, + [selectedRevisionId], + ), + ) as any + + const activeTestsetLabel = useMemo(() => { + if (!activeRevision) return null + const version = + typeof activeRevision.version === "number" + ? activeRevision.version + : parseInt(activeRevision.version || "0", 10) + return { + name: activeRevision.name || activeRevision.testset_id || null, + version: Number.isFinite(version) ? 
version : null, + } + }, [activeRevision]) const isPlainObject = (value: unknown): value is Record => Boolean(value) && typeof value === "object" && !Array.isArray(value) @@ -756,8 +790,8 @@ const DebugSection = () => { } const testcaseEditorKey = useMemo( - () => `testcase-${selectedTestset}-${JSON.stringify(selectedTestcase.testcase ?? {})}`, - [selectedTestset, selectedTestcase.testcase], + () => `testcase-${selectedRevisionId}-${JSON.stringify(selectedTestcase.testcase ?? {})}`, + [selectedRevisionId, selectedTestcase.testcase], ) const _variantOutputEditorKey = useMemo( @@ -802,22 +836,28 @@ const DebugSection = () => { Testcase - {activeTestset && selectedTestcase.testcase && ( + {activeTestsetLabel && selectedTestcase.testcase && ( <> - loaded from {activeTestset.name} + + {activeTestsetLabel.name} + {typeof activeTestsetLabel.version === "number" && ( + + v{activeTestsetLabel.version} + + )} + )} - +
-
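The Testcase header above renders the active revision as its name plus a `v{version}` tag via `activeTestsetLabel`. A minimal standalone sketch of the normalization it performs, since revision versions may arrive as numbers or numeric strings; the helper name is illustrative:

```typescript
// Mirrors the activeTestsetLabel derivation above: coerce to a number, else null.
const toVersionNumber = (version: number | string | undefined): number | null => {
    const n = typeof version === "number" ? version : parseInt(version || "0", 10)
    return Number.isFinite(n) ? n : null
}

// toVersionNumber(3) -> 3; toVersionNumber("3") -> 3; toVersionNumber("draft") -> null
```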
@@ -1004,7 +1043,6 @@ const DebugSection = () => { ]} />
-
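The hunk below swaps `EvaluatorTestcaseModal` for the shared `LoadTestsetModal`, wiring its selection payload into `handleEvaluatorTestsetData` (defined earlier in this file). A minimal sketch of that handler's key-stripping step; the helper name is illustrative:

```typescript
// Bookkeeping keys such as __isRevision are prefixed with "__" and removed
// before the testcase is stored in playgroundSelectedTestcaseAtom.
const stripInternalKeys = (testcase: Record<string, unknown>) =>
    Object.fromEntries(Object.entries(testcase).filter(([key]) => !key.startsWith("__")))

// stripInternalKeys({input: "hello", __isRevision: true}) -> {input: "hello"}
```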
@@ -1092,19 +1130,13 @@ const DebugSection = () => { if ((v as any)?.variantId) setLastVariantId((v as any).variantId) }} selectedVariant={selectedVariant} - selectedTestsetId={selectedTestset} + selectedRevisionId={selectedRevisionId} + /> + setOpenTestcaseModal(false)} + setTestsetData={handleEvaluatorTestsetData} /> - - {testsets && testsets.length > 0 && ( - setOpenTestcaseModal(false)} - testsets={testsets} - setSelectedTestcase={setSelectedTestcase} - selectedTestset={selectedTestset} - setSelectedTestset={setSelectedTestset} - /> - )}
) } diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx index 9666e67af1..a8128c43e7 100644 --- a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx +++ b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx @@ -11,6 +11,7 @@ import {isValidRegex} from "@/oss/lib/helpers/validators" import {generatePaths} from "@/oss/lib/transformers" import {EvaluationSettingsTemplate, JSSTheme} from "@/oss/lib/Types" +import {FieldsTagsEditor} from "./FieldsTagsEditor" import {JSONSchemaEditor} from "./JSONSchema" import {Messages} from "./Messages" @@ -221,6 +222,8 @@ export const DynamicFormField: React.FC = ({ : JSON.stringify(savedValue ?? {}, null, 2) } /> + ) : type === "fields_tags_editor" ? ( + ) : null} )} diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorTestcaseModal.tsx b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorTestcaseModal.tsx deleted file mode 100644 index 239ebdcf04..0000000000 --- a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorTestcaseModal.tsx +++ /dev/null @@ -1,199 +0,0 @@ -import {useCallback, useEffect, useMemo, useState} from "react" - -import {Button, Modal, Select, Space, Table, Typography} from "antd" -import {useAtomValue} from "jotai" - -import type {testset} from "@/oss/lib/Types" -import {fetchTestcasesPage} from "@/oss/state/entities/testcase/queries" -import {projectIdAtom} from "@/oss/state/project" - -interface EvaluatorTestcaseModalProps { - open: boolean - onCancel: () => void - testsets: testset[] - setSelectedTestcase: (data: {testcase: Record | null}) => void - selectedTestset: string - setSelectedTestset: (id: string) => void -} - -/** - * Modal for selecting a testcase from a testset for evaluator testing - * Uses fetchTestcasesPage for data fetching - */ -const EvaluatorTestcaseModal = ({ - open, - onCancel, - testsets, - setSelectedTestcase, - selectedTestset, - setSelectedTestset, -}: EvaluatorTestcaseModalProps) => { - const projectId = useAtomValue(projectIdAtom) - const [selectedRowKey, setSelectedRowKey] = useState(null) - const [testcases, setTestcases] = useState([]) - const [isLoading, setIsLoading] = useState(false) - - // Get the selected testset's latest revision ID - const activeTestset = useMemo( - () => testsets.find((ts) => ts._id === selectedTestset), - [testsets, selectedTestset], - ) - - // Fetch testcases when testset changes - useEffect(() => { - if (!projectId || !activeTestset?._id || !open) { - setTestcases([]) - return - } - - const fetchData = async () => { - setIsLoading(true) - try { - const result = await fetchTestcasesPage(projectId, activeTestset._id, null) - setTestcases(result.testcases) - } catch (error) { - console.error("Failed to fetch testcases:", error) - setTestcases([]) - } finally { - setIsLoading(false) - } - } - - fetchData() - }, [projectId, activeTestset?._id, open]) - - // Build table columns from testcase data - const columns = useMemo(() => { - if (!testcases.length) return [] - - // Get all unique keys from testcases (excluding metadata) - const allKeys = new Set() - testcases.forEach((tc: any) => { - const data = tc.testcase || tc.data 
|| tc - if (data && typeof data === "object") { - Object.keys(data).forEach((key) => { - // Skip metadata fields - if ( - ![ - "id", - "testset_id", - "set_id", - "created_at", - "updated_at", - "deleted_at", - "created_by_id", - "updated_by_id", - "deleted_by_id", - ].includes(key) - ) { - allKeys.add(key) - } - }) - } - }) - - return Array.from(allKeys).map((key) => ({ - title: key, - dataIndex: key, - key, - ellipsis: true, - width: 150, - render: (value: any) => { - if (value === null || value === undefined) return "-" - if (typeof value === "object") return JSON.stringify(value) - return String(value) - }, - })) - }, [testcases]) - - // Transform testcases to table data - const tableData = useMemo(() => { - return testcases.map((tc: any, index: number) => { - const data = tc.testcase || tc.data || tc - return { - key: tc.id || `row-${index}`, - ...data, - __original: tc, - } - }) - }, [testcases]) - - const handleSelect = useCallback(() => { - if (!selectedRowKey) return - - const selected = tableData.find((row) => row.key === selectedRowKey) - if (selected) { - // Extract just the testcase data (without metadata) - const {key, __original, ...testcaseData} = selected - setSelectedTestcase({testcase: testcaseData}) - } - onCancel() - }, [selectedRowKey, tableData, setSelectedTestcase, onCancel]) - - const testsetOptions = useMemo( - () => - testsets.map((ts) => ({ - label: ts.name, - value: ts._id, - })), - [testsets], - ) - - return ( - - - - - } - > -
-
- Testset: -
setSelectedRowKey(keys[0] as string), - }} - onRow={(record) => ({ - onClick: () => setSelectedRowKey(record.key as string), - style: {cursor: "pointer"}, - })} - locale={{ - emptyText: selectedTestset - ? "No testcases found" - : "Select a testset to view testcases", - }} - /> - - - ) -} - -export default EvaluatorTestcaseModal diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx index 94f4c3bf06..e4c7f400ae 100644 --- a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx +++ b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx @@ -10,12 +10,11 @@ import { type SetStateAction, } from "react" -import {testsetCsvDataQueryAtomFamily} from "@agenta/oss/src/components/Playground/Components/Modals/LoadTestsetModal/assets/testsetCsvData" import {CloseCircleOutlined, CloseOutlined} from "@ant-design/icons" import {Play} from "@phosphor-icons/react" import {Button, Input, Modal, Tabs, Tag, Tooltip, Typography} from "antd" import clsx from "clsx" -import {useAtomValue} from "jotai" +import {atom, useAtomValue} from "jotai" import dynamic from "next/dynamic" import {createUseStyles} from "react-jss" @@ -26,8 +25,8 @@ import useAppVariantRevisions from "@/oss/lib/hooks/useAppVariantRevisions" import type {EnhancedVariant} from "@/oss/lib/shared/variant/transformer/types" import type {JSSTheme, ListAppsItem, Variant} from "@/oss/lib/Types" import {useAppsData} from "@/oss/state/app/hooks" +import {revision} from "@/oss/state/entities/testset" import {stablePromptVariablesAtomFamily} from "@/oss/state/newPlayground/core/prompts" -import {useTestsetsData} from "@/oss/state/testset" import TabLabel from "../../../NewEvaluation/assets/TabLabel" import SelectAppSection from "../../../NewEvaluation/Components/SelectAppSection" @@ -39,7 +38,7 @@ type EvaluatorVariantModalProps = { variants: Variant[] | null setSelectedVariant: Dispatch> selectedVariant: Variant | null - selectedTestsetId?: string + selectedRevisionId?: string } & ComponentProps interface VariantDiagnostics { @@ -115,7 +114,7 @@ const EvaluatorVariantModal = ({ variants: _variants, setSelectedVariant, selectedVariant, - selectedTestsetId, + selectedRevisionId, ...props }: EvaluatorVariantModalProps) => { const classes = useStyles() @@ -167,69 +166,16 @@ const EvaluatorVariantModal = ({ ) }, [appOptions, appSearchTerm]) - const {columnsByTestsetId} = useTestsetsData({enabled: Boolean(props.open)}) - - const testsetCsvQuery = useAtomValue( - useMemo( - () => - testsetCsvDataQueryAtomFamily({ - testsetId: selectedTestsetId || "", - enabled: Boolean(selectedTestsetId && props.open), - }), - [selectedTestsetId, props.open], - ), - ) as any - const testsetCsvData = useMemo( - () => (Array.isArray(testsetCsvQuery?.data) ? (testsetCsvQuery.data as any[]) : []), - [testsetCsvQuery], - ) - - const derivedTestsetColumns = useMemo(() => { - const fromColumns = - selectedTestsetId && columnsByTestsetId?.[selectedTestsetId]?.length - ? (columnsByTestsetId[selectedTestsetId] as string[]) - : [] - - const firstRow = - Array.isArray(testsetCsvData) && testsetCsvData.length > 0 - ? 
(testsetCsvData[0] as Record) - : undefined - - let normalizedSource: Record | undefined - if (firstRow && typeof firstRow === "object") { - const candidate = - "data" in firstRow && firstRow.data && typeof firstRow.data === "object" - ? (firstRow.data as Record) - : firstRow - normalizedSource = candidate - } - - const fromCsv = normalizedSource ? Object.keys(normalizedSource) : [] - - const merged = new Map() - - const addValue = (value?: string) => { - if (!value) return - const trimmed = value.trim() - if (!trimmed) return - if (!merged.has(trimmed.toLowerCase())) { - merged.set(trimmed.toLowerCase(), trimmed) - } - } - - fromColumns.forEach((col) => addValue(typeof col === "string" ? col : String(col))) - fromCsv.forEach((col) => addValue(typeof col === "string" ? col : String(col))) - - return Array.from(merged.values()) - }, [columnsByTestsetId, selectedTestsetId, testsetCsvData]) - - const normalizedTestsetColumns = useMemo( + // Use revision controller to get normalized testcase columns + // This fetches the revision with testcases included and derives column names + const testcaseColumnsAtom = useMemo( () => - derivedTestsetColumns - .map((col) => (typeof col === "string" ? col.trim().toLowerCase() : "")) - .filter(Boolean), - [derivedTestsetColumns], + selectedRevisionId + ? revision.selectors.testcaseColumnsNormalized(selectedRevisionId) + : atom([]), + [selectedRevisionId], ) + const normalizedTestsetColumns = useAtomValue(testcaseColumnsAtom) const {variants: appVariantRevisions, isLoading: variantsLoading} = useAppVariantRevisions( selectedAppId || null, @@ -476,7 +422,7 @@ const EvaluatorVariantModal = ({ [variables], ) - const columnsKnown = Boolean(selectedTestsetId) && normalizedTestsetColumns.length > 0 + const columnsKnown = normalizedTestsetColumns.length > 0 const missingVariables = useMemo( () => @@ -489,20 +435,17 @@ const EvaluatorVariantModal = ({ ) const hasWarning = - Boolean(selectedTestsetId) && - columnsKnown && - expectedVariables.length > 0 && - missingVariables.length > 0 + columnsKnown && expectedVariables.length > 0 && missingVariables.length > 0 const message = useMemo(() => { - if (!selectedTestsetId || !expectedVariables.length) return undefined + if (!expectedVariables.length) return undefined if (!columnsKnown) return "Analyzing testset columns..." if (missingVariables.length > 0) { const missingList = missingVariables.join(", ") return `The selected testset is missing required inputs for this variant: {{${missingList}}}` } return undefined - }, [columnsKnown, expectedVariables.length, missingVariables, selectedTestsetId]) + }, [columnsKnown, expectedVariables.length, missingVariables]) useEffect(() => { if (!revisionId) return diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/FieldsTagsEditor.tsx b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/FieldsTagsEditor.tsx new file mode 100644 index 0000000000..a96a07a37f --- /dev/null +++ b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/FieldsTagsEditor.tsx @@ -0,0 +1,228 @@ +/** + * FieldsTagsEditor - Tag-based editor for JSON field paths + * + * This component provides an add/remove interface for managing JSON field paths. 
+ * Users can: + * - Add fields manually using an input field (supports dot notation for nested paths) + * - Remove fields by clicking the X button on tags + * - Detect fields from the selected testcase using a dedicated button + * + * The component also displays a non-removable "overall" field representing + * the aggregate result across all fields. + * + * Auto-detection behavior: + * - When a testcase is loaded and no fields are configured, fields are auto-detected + */ + +import {useCallback, useEffect, useMemo, useRef, useState} from "react" + +import {PlusOutlined, SearchOutlined} from "@ant-design/icons" +import {Button, Form, Input, Tag, Tooltip, Typography} from "antd" +import type {FormInstance} from "antd/es/form" +import {useAtomValue} from "jotai" + +import {extractJsonPaths, safeParseJson} from "@/oss/lib/helpers/extractJsonPaths" + +import {playgroundSelectedTestcaseAtom} from "./state/atoms" + +const {Text} = Typography + +interface FieldsTagsEditorProps { + value?: string[] + onChange?: (value: string[]) => void + form?: FormInstance + name?: string | string[] + correctAnswerKey?: string +} + +/** + * Tag-based editor for managing JSON field paths with add/remove functionality. + * Includes "Detect from testcase" feature to auto-populate fields. + */ +export const FieldsTagsEditor: React.FC = ({ + value = [], + onChange, + form, + correctAnswerKey = "correct_answer", +}) => { + const [inputValue, setInputValue] = useState("") + // Track if we've already auto-detected to avoid re-triggering + const hasAutoDetectedRef = useRef(false) + + // Read the selected testcase from the playground atom + const testcaseSelection = useAtomValue(playgroundSelectedTestcaseAtom) + const testcase = testcaseSelection?.testcase + + // Watch the correct_answer_key from form to react to changes + // Using Form.useWatch instead of form.getFieldValue for reactivity + const formCorrectAnswerKey = Form.useWatch(["settings_values", "correct_answer_key"], form) + const effectiveKey = formCorrectAnswerKey || correctAnswerKey + + // Check if we can detect fields from testcase + const canDetectFields = useMemo(() => { + if (!testcase) return false + const groundTruthValue = testcase[effectiveKey] + if (!groundTruthValue) return false + const parsed = safeParseJson(groundTruthValue) + return parsed !== null + }, [testcase, effectiveKey]) + + // Extract available fields from the testcase + const detectableFields = useMemo(() => { + if (!testcase) return [] + const groundTruthValue = testcase[effectiveKey] + if (!groundTruthValue) return [] + const parsed = safeParseJson(groundTruthValue) + if (!parsed) return [] + return extractJsonPaths(parsed) + }, [testcase, effectiveKey]) + + // Auto-detect fields when testcase is loaded and no fields are configured + useEffect(() => { + // Only auto-detect if: + // 1. We haven't already auto-detected + // 2. There are no user-defined fields + // 3. 
We can detect fields from the testcase + if (!hasAutoDetectedRef.current && value.length === 0 && detectableFields.length > 0) { + hasAutoDetectedRef.current = true + onChange?.(detectableFields) + } + }, [detectableFields, value.length, onChange]) + + // Handle adding a new field + const handleAddField = useCallback(() => { + const trimmed = inputValue.trim() + if (!trimmed) return + + // Don't add duplicates + if (value.includes(trimmed)) { + setInputValue("") + return + } + + // Don't allow reserved field names + if (trimmed === "aggregate_score") { + setInputValue("") + return + } + + onChange?.([...value, trimmed]) + setInputValue("") + }, [inputValue, value, onChange]) + + // Handle removing a field + const handleRemoveField = useCallback( + (fieldToRemove: string) => { + onChange?.(value.filter((f) => f !== fieldToRemove)) + }, + [value, onChange], + ) + + // Handle detecting fields from testcase (replaces existing fields) + const handleDetectFields = useCallback(() => { + if (detectableFields.length > 0) { + onChange?.(detectableFields) + } + }, [detectableFields, onChange]) + + // Handle Enter key in input + const handleInputKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + e.preventDefault() + handleAddField() + } + }, + [handleAddField], + ) + + // Generate tooltip for disabled detect button + const detectButtonTooltip = useMemo(() => { + if (!testcase) { + return "Select a testcase first to detect fields" + } + if (!canDetectFields) { + return `No JSON object found in the "${effectiveKey}" column` + } + return `Detect ${detectableFields.length} field(s) from testcase (replaces current fields)` + }, [testcase, canDetectFields, effectiveKey, detectableFields.length]) + + return ( +
+ {/* Field Tags Display */} +
+ {/* Non-removable aggregate_score tag */} + + + aggregate_score + + + + {/* User-defined field tags */} + {value.map((field) => ( + handleRemoveField(field)} + className="flex items-center font-mono text-[13px] !m-0" + > + {field} + + ))} + + {/* Empty state message */} + {value.length === 0 && ( + + Add fields to compare or detect them from a testcase + + )} +
+ + {/* Add Field Input */} +
+ setInputValue(e.target.value)} + onKeyDown={handleInputKeyDown} + suffix={ + + + ? + + + } + /> + +
+ + {/* Actions Row */} +
+ + Each field creates a column with value 0 (no match) or 1 (match) + + + + + +
+
+ ) +} + +export default FieldsTagsEditor diff --git a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx index 67043ea2aa..0cae1a3715 100644 --- a/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx +++ b/web/oss/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx @@ -32,17 +32,13 @@ import { const LoadEvaluatorPreset = dynamic( () => - import( - "@/agenta-oss-common/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset" - ), + import("@/agenta-oss-common/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset"), {ssr: false}, ) const DebugSection: any = dynamic( () => - import( - "@/oss/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection" - ), + import("@/oss/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection"), {ssr: false}, ) @@ -385,8 +381,23 @@ const ConfigureEvaluator = ({ settings_values: editEvalEditValues.settings_values || {}, name: "", }) + } else if (selectedEvaluator?.settings_template) { + // Create mode: apply default values from the evaluator template + // This is needed because form.resetFields() clears the form but Form.Item initialValue + // only works on first mount, not after resetFields() + const defaultSettings: Record = {} + for (const [key, field] of Object.entries(selectedEvaluator.settings_template)) { + if (field && typeof field === "object" && "default" in field) { + defaultSettings[key] = field.default + } + } + if (Object.keys(defaultSettings).length > 0) { + form.setFieldsValue({ + settings_values: defaultSettings, + }) + } } - }, [editMode, cloneConfig, editEvalEditValues, form]) + }, [editMode, cloneConfig, editEvalEditValues, form, selectedEvaluator]) // Guard: if no evaluator selected, show nothing (shouldn't happen in normal flow) if (!selectedEvaluator) { @@ -573,7 +584,7 @@ const ConfigureEvaluator = ({ {/* Evaluator Name & Actions */}
- {formName || "New evaluator"} + {headerName || "New evaluator"} - +
diff --git a/web/oss/src/components/pages/observability/components/SessionsTable/index.tsx b/web/oss/src/components/pages/observability/components/SessionsTable/index.tsx index 4b42aa8623..79471f62c8 100644 --- a/web/oss/src/components/pages/observability/components/SessionsTable/index.tsx +++ b/web/oss/src/components/pages/observability/components/SessionsTable/index.tsx @@ -10,6 +10,7 @@ import {useSessions} from "@/oss/state/newObservability/hooks/useSessions" import {openSessionDrawerWithUrlAtom} from "@/oss/state/url/session" import {AUTO_REFRESH_INTERVAL} from "../../constants" + import EmptySessions from "./assets/EmptySessions" import {getSessionColumns, SessionRow} from "./assets/getSessionColumns" diff --git a/web/oss/src/components/pages/observability/components/StatusRenderer.tsx b/web/oss/src/components/pages/observability/components/StatusRenderer.tsx index e27505b390..491e217373 100644 --- a/web/oss/src/components/pages/observability/components/StatusRenderer.tsx +++ b/web/oss/src/components/pages/observability/components/StatusRenderer.tsx @@ -34,13 +34,17 @@ const StatusRenderer = ({ const {label, color, icon} = statusMapper(status || StatusCode.STATUS_CODE_UNSET) const errorMsg = status === StatusCode.STATUS_CODE_ERROR ? message : null + const {bordered, variant, ...restTagProps} = tagProps || {} + const resolvedVariant = variant ?? (bordered === false ? "filled" : undefined) + return ( {label} diff --git a/web/oss/src/components/pages/observability/dashboard/AnalyticsDashboard.tsx b/web/oss/src/components/pages/observability/dashboard/AnalyticsDashboard.tsx index ad48f34ac8..58913b7975 100644 --- a/web/oss/src/components/pages/observability/dashboard/AnalyticsDashboard.tsx +++ b/web/oss/src/components/pages/observability/dashboard/AnalyticsDashboard.tsx @@ -1,14 +1,17 @@ import {useMemo, type ComponentProps} from "react" -import {ChartLine} from "@phosphor-icons/react" -import {AreaChart} from "@tremor/react" +import {ChartLineIcon} from "@phosphor-icons/react" import {Spin} from "antd" +import {useAtom} from "jotai" import {createUseStyles} from "react-jss" +import Sort from "@/oss/components/Filters/Sort" import {formatCompactNumber, formatCurrency, formatNumber} from "@/oss/lib/helpers/formatters" import {JSSTheme} from "@/oss/lib/Types" import {useObservabilityDashboard} from "@/oss/state/observability" +import {observabilityDashboardTimeRangeAtom} from "@/oss/state/observability/dashboard" +import CustomAreaChart from "./CustomAreaChart" import WidgetCard from "./widgetCard" const useStyles = createUseStyles((theme: JSSTheme) => ({ @@ -63,7 +66,7 @@ const useStyles = createUseStyles((theme: JSSTheme) => ({ const EmptyChart = ({className}: {className: string}) => (
- + No data
) @@ -75,26 +78,20 @@ interface AnalyticsDashboardProps { const AnalyticsDashboard = ({layout = "grid-2"}: AnalyticsDashboardProps) => { const classes = useStyles() const {data, loading, isFetching} = useObservabilityDashboard() + const [timeRange, setTimeRange] = useAtom(observabilityDashboardTimeRangeAtom) const chartData = useMemo(() => (data?.data?.length ? data.data : []), [data]) const hasData = (data?.total_count ?? 0) > 0 - const defaultGraphProps = useMemo>( + const defaultGraphProps = useMemo>( () => ({ className: "h-[140px]", colors: ["cyan-600", "rose"], - connectNulls: true, - tickGap: 20, - curveType: "monotone", - showGridLines: true, - showLegend: false, + tickCount: 5, index: "timestamp", data: chartData, categories: [], valueFormatter: (value) => formatCompactNumber(value), - yAxisWidth: 48, - showXAxis: true, - showYAxis: true, }), [chartData], ) @@ -102,129 +99,142 @@ const AnalyticsDashboard = ({layout = "grid-2"}: AnalyticsDashboardProps) => { const gridClassName = layout === "grid-4" ? classes.gridLayout4 : classes.gridLayout2 return ( - -
- - Total: - - {data?.total_count ? formatNumber(data?.total_count) : "-"} - -
- } - rightSubHeading={ - (data?.failure_rate ?? 0) > 0 && ( -
- Failed: +
+
+ +
+ +
+ + Total: - {data?.failure_rate - ? `${formatNumber(data?.failure_rate)}%` - : "-"} + {data?.total_count ? formatNumber(data?.total_count) : "-"}
- ) - } - > - {hasData ? ( - 0 - ? ["success_count", "failure_count"] - : ["success_count"] - } - /> - ) : ( - - )} - + } + rightSubHeading={ + (data?.failure_rate ?? 0) > 0 && ( +
+ Failed: + + {data?.failure_rate + ? `${formatNumber(data?.failure_rate)}%` + : "-"} + +
+ ) + } + > + {hasData ? ( + 0 + ? ["success_count", "failure_count"] + : ["success_count"] + } + /> + ) : ( + + )} + - - Avg: - - {data?.avg_latency ? `${formatNumber(data.avg_latency)}ms` : "-"} - -
- } - > - {hasData ? ( - `${formatCompactNumber(value)}ms`} - /> - ) : ( - - )} - + + Avg: + + {data?.avg_latency + ? `${formatNumber(data.avg_latency)}ms` + : "-"} + +
+ } + > + {hasData ? ( + `${formatCompactNumber(value)}ms`} + /> + ) : ( + + )} + - - Total: - - {data?.total_cost ? formatCurrency(data.total_cost) : "-"} - - - } - rightSubHeading={ -
- Avg: - - {data?.total_cost ? formatCurrency(data.avg_cost) : "-"} - -
- } - > - {hasData ? ( - formatCurrency(value)} - /> - ) : ( - - )} -
+ + Total: + + {data?.total_cost ? formatCurrency(data.total_cost) : "-"} + + + } + rightSubHeading={ +
+ Avg: + + {data?.total_cost ? formatCurrency(data.avg_cost) : "-"} + +
+ } + > + {hasData ? ( + formatCurrency(value)} + /> + ) : ( + + )} +
- - Total: - - {data?.total_tokens ? formatNumber(data?.total_tokens) : "-"} - - - } - rightSubHeading={ -
- Avg: - - {data?.avg_tokens ? formatNumber(data?.avg_tokens) : "-"} - -
- } - > - {hasData ? ( - - ) : ( - - )} -
- -
+ + Total: + + {data?.total_tokens ? formatNumber(data?.total_tokens) : "-"} + + + } + rightSubHeading={ +
+ Avg: + + {data?.avg_tokens ? formatNumber(data?.avg_tokens) : "-"} + +
+ } + > + {hasData ? ( + + ) : ( + + )} +
+ + + ) } diff --git a/web/oss/src/components/pages/observability/dashboard/CustomAreaChart.tsx b/web/oss/src/components/pages/observability/dashboard/CustomAreaChart.tsx new file mode 100644 index 0000000000..c7869bdfa9 --- /dev/null +++ b/web/oss/src/components/pages/observability/dashboard/CustomAreaChart.tsx @@ -0,0 +1,126 @@ +import React from "react" + +import {theme} from "antd" +import { + Area, + CartesianGrid, + AreaChart as ReAreaChart, + ResponsiveContainer, + Tooltip, + XAxis, + YAxis, +} from "recharts" + +import {formatCompactNumber} from "@/oss/lib/helpers/formatters" + +interface CustomAreaChartProps { + data: any[] + categories: string[] + index: string + colors?: string[] + valueFormatter?: (value: number) => string + tickCount?: number + allowDecimals?: boolean + className?: string +} + +// Map Tremor-like color names to hex values (simplified for this specific use case) +// You might want to expand this or import from a central theme file if available +const colorMap: Record = { + "cyan-600": "#0891b2", + rose: "#e11d48", + gray: "#6b7280", +} + +const CustomAreaChart: React.FC = ({ + data, + categories, + index, + colors = ["cyan-600"], + valueFormatter = (value: number) => formatCompactNumber(value), + tickCount = 5, + allowDecimals = false, + className, +}) => { + const {token} = theme.useToken() + + return ( +
+ + + + {categories.map((category, idx) => { + const colorKey = colors[idx % colors.length] + const color = colorMap[colorKey] || colorKey + return ( + + + + + ) + })} + + + + + [valueFormatter(value), ""]} + /> + {categories.map((category, idx) => { + const colorKey = colors[idx % colors.length] + const color = colorMap[colorKey] || colorKey + return ( + + ) + })} + + +
+ ) +} + +export default CustomAreaChart diff --git a/web/oss/src/components/pages/settings/APIKeys/APIKeys.tsx b/web/oss/src/components/pages/settings/APIKeys/APIKeys.tsx index 50e4189aee..4f3998011f 100644 --- a/web/oss/src/components/pages/settings/APIKeys/APIKeys.tsx +++ b/web/oss/src/components/pages/settings/APIKeys/APIKeys.tsx @@ -1,7 +1,8 @@ import {useCallback, useEffect, useState} from "react" -import {CopyOutlined, DeleteOutlined, PlusOutlined} from "@ant-design/icons" -import {Alert, Button, Modal, Table, Tooltip, Typography, theme} from "antd" +import {CopyOutlined, DeleteOutlined} from "@ant-design/icons" +import {Plus} from "@phosphor-icons/react" +import {Button, Modal, Table, Tooltip, Typography, theme} from "antd" import AlertPopup from "@/oss/components/AlertPopup/AlertPopup" import {useLoading} from "@/oss/hooks/useLoading" @@ -12,7 +13,7 @@ import {useOrgData} from "@/oss/state/org" import {Loading} from "./assets/constants" -const {Title, Text} = Typography +const {Text} = Typography const APIKeys: React.FC = () => { const [keys, setKeys] = useState([]) @@ -101,38 +102,15 @@ const APIKeys: React.FC = () => { }, []) return ( -
- - API Keys - - - An API key can be used to access Agenta APIs securely. You can manage your - API Keys from here. -
-
- Your API key should be passed in as an{" "} - Authorization header in the - requests. You can find examples of how to consume our APIs on the endpoints - page of an app or visit our{" "} - docs. - - } - type="info" - /> - +
diff --git a/web/oss/src/components/pages/settings/Organization/index.tsx b/web/oss/src/components/pages/settings/Organization/index.tsx
new file mode 100644
index 0000000000..d1ebf97d4f
--- /dev/null
+++ b/web/oss/src/components/pages/settings/Organization/index.tsx
@@ -0,0 +1,1239 @@
+import {type FC, useState, useCallback, useMemo} from "react"
+
+import {
+    PlusOutlined,
+    CheckCircleOutlined,
+    ClockCircleOutlined,
+    DeleteOutlined,
+    EditOutlined,
+    InfoCircleOutlined,
+    ReloadOutlined,
+} from "@ant-design/icons"
+import {useQueryClient, useQuery, useMutation} from "@tanstack/react-query"
+import {
+    Card,
+    Descriptions,
+    Input,
+    Modal,
+    Radio,
+    Space,
+    Typography,
+    message,
+    Table,
+    Button,
+    Form,
+    Tag,
+    Popconfirm,
+    Alert,
+    Tooltip,
+} from "antd"
+
+import TooltipWithCopyAction from "@/oss/components/EnhancedUIs/Tooltip"
+import {getAgentaWebUrl} from "@/oss/lib/helpers/api"
+import {
+    updateOrganization,
+    fetchOrganizationDomains,
+    createOrganizationDomain,
+    verifyOrganizationDomain,
+    refreshOrganizationDomainToken,
+    deleteOrganizationDomain,
+    type OrganizationDomain,
+    fetchOrganizationProviders,
+    createOrganizationProvider,
+    updateOrganizationProvider,
+    testOrganizationProvider,
+    deleteOrganizationProvider,
+    type OrganizationProvider,
+} from "@/oss/services/organization/api"
+import {useOrgData} from "@/oss/state/org"
+
+const {Title, Text} = Typography
+
+const Organization: FC = () => {
+    const {selectedOrg, loading, refetch} = useOrgData()
+    const queryClient = useQueryClient()
+    const [slugValue, setSlugValue] = useState("")
+    const [slugModalVisible, setSlugModalVisible] = useState(false)
+    const [updating, setUpdating] = useState(false)
+    const [domainModalVisible, setDomainModalVisible] = useState(false)
+    const [domainForm] = Form.useForm()
+    const [providerModalVisible, setProviderModalVisible] = useState(false)
+    const [providerForm] = Form.useForm()
+    const [editingProvider, setEditingProvider] = useState(null)
+
+    const handleUpdateOrganization = useCallback(
+        async (
+            payload: {slug?: string; name?: string; description?: string; flags?: any},
+            options?: {ignoreAxiosError?: boolean},
+        ) => {
+            if (!selectedOrg?.id) return
+
+            setUpdating(true)
+            try {
+                const updated = await updateOrganization(
+                    selectedOrg.id,
+                    payload,
+                    options?.ignoreAxiosError ?? false,
+                )
+                if (updated) {
+                    queryClient.setQueryData(["selectedOrg", selectedOrg.id], updated)
+                    queryClient.setQueriesData(["orgs"], (old: any) => {
+                        if (!Array.isArray(old)) return old
+                        return old.map((org) =>
+                            org.id === updated.id ? {...org, ...updated} : org,
+                        )
+                    })
+                }
+                message.success("Organization updated successfully")
+                // Invalidate and refetch organization data
+                await queryClient.invalidateQueries({queryKey: ["organizations"]})
+                await refetch()
+            } catch (error: any) {
+                message.error(error?.response?.data?.detail || "Failed to update organization")
+                console.error("Failed to update organization:", error)
+            } finally {
+                setUpdating(false)
+            }
+        },
+        [selectedOrg?.id, queryClient, refetch],
+    )
+
+    // Domain Verification queries and mutations
+    const {data: domains = [], refetch: refetchDomains} = useQuery({
+        queryKey: ["organization-domains", selectedOrg?.id],
+        queryFn: fetchOrganizationDomains,
+        enabled: !!selectedOrg?.id,
+    })
+    const hasVerifiedDomain = useMemo(
+        () => domains.some((domain) => domain.flags?.is_verified),
+        [domains],
+    )
+
+    const handleSlugSave = useCallback(() => {
+        if (!slugValue.trim()) return
+        handleUpdateOrganization({slug: slugValue.trim()}, {ignoreAxiosError: true})
+        setSlugModalVisible(false)
+    }, [slugValue, handleUpdateOrganization])
+
+    const createDomainMutation = useMutation({
+        mutationFn: createOrganizationDomain,
+        onSuccess: () => {
+            message.success("Domain added successfully. Token is available in the table.")
+            refetchDomains()
+            setDomainModalVisible(false)
+            domainForm.resetFields()
+        },
+        onError: (error: any) => {
+            message.error(error?.response?.data?.detail || "Failed to add domain")
+        },
+    })
+
+    const verifyDomainMutation = useMutation({
+        mutationFn: verifyOrganizationDomain,
+        onSuccess: () => {
+            message.success("Domain verified successfully")
+            refetchDomains()
+        },
+        onError: (error: any) => {
+            const errorMessage =
+                error?.response?.data?.detail || error?.message || "Failed to verify domain"
+            message.error(errorMessage)
+        },
+    })
+
+    const refreshDomainTokenMutation = useMutation({
+        mutationFn: refreshOrganizationDomainToken,
+        onSuccess: () => {
+            message.success("Token refreshed successfully")
+            refetchDomains()
+        },
+        onError: (error: any) => {
+            message.error(error?.response?.data?.detail || "Failed to refresh token")
+        },
+    })
+
+    const deleteDomainMutation = useMutation({
+        mutationFn: deleteOrganizationDomain,
+        onSuccess: () => {
+            message.success("Domain deleted successfully")
+            refetchDomains()
+        },
+        onError: (error: any) => {
+            message.error(error?.response?.data?.detail || "Failed to delete domain")
+        },
+    })
+
+    const handleAddDomain = useCallback(() => {
+        domainForm.validateFields().then((values) => {
+            createDomainMutation.mutate({
+                domain: values.domain,
+            })
+        })
+    }, [domainForm, createDomainMutation])
+
+    const domainColumns = [
+        {
+            title: "Domain",
+            dataIndex: "slug",
+            key: "slug",
+            ellipsis: true,
+        },
+        {
+            title: "Expiration",
+            key: "expires_at",
+            render: (_: any, record: OrganizationDomain) => {
+                if (record.flags?.is_verified) {
+                    return -
+                }
+                // Calculate expiration: created_at + 48 hours
+                const createdAt = new Date(record.created_at)
+                const expiresAt = new Date(createdAt.getTime() + 48 * 60 * 60 * 1000)
+                const now = new Date()
+                const isExpired = now > expiresAt
+
+                return (
+
+                        {expiresAt.toLocaleString()}
+                        {isExpired && " (Expired)"}
+
+                )
+            },
+        },
+        {
+            title: "Status",
+            dataIndex: ["flags", "is_verified"],
+            key: "is_verified",
+            render: (_: any, record: OrganizationDomain) => {
+                const isVerified = record.flags?.is_verified || false
+                return isVerified ? (
+                    } color="success">
+                        Verified
+
+                ) : (
+                    } color="warning">
+                        Pending
+
+                )
+            },
+        },
+        {
+            title: "Actions",
+            key: "actions",
+            render: (_: any, record: OrganizationDomain) => (
+
+                {!record.flags?.is_verified && (
+
+                )}
+
+                )}
+
+                deleteProviderMutation.mutate(record.id)}
+                okText="Delete"
+                okType="danger"
+                cancelText="Cancel"
+                >
+
+
+
+
+        )
+    }
+
+    return (
+
+
+
+
+                    Access Controls
+
+
+
+
+                            handleFlagChange("allow_email", e.target.value === "yes")
+                        }
+                        disabled={updating}
+                    >
+                        Allow
+                        Deny
+
+
+
+
+                            handleFlagChange("allow_social", e.target.value === "yes")
+                        }
+                        disabled={updating}
+                    >
+                        Allow
+                        Deny
+
+
+
+
+                            handleFlagChange("allow_sso", e.target.value === "yes")
+                        }
+                        disabled={updating}
+                    >
+
+
+
+                        Allow
+
+
+
+                        Deny
+
+
+
+
+                            handleFlagChange("allow_root", e.target.value === "yes")
+                        }
+                        disabled={updating}
+                    >
+                        Allow
+
+
+
+                        Deny
+
+
+
+
+
+
+                            handleFlagChange("domains_only", e.target.value === "no")
+                        }
+                        disabled={updating}
+                    >
+                        Allow
+
+
+
+                        Deny
+
+
+
+
+
+
+                            handleFlagChange("auto_join", e.target.value === "yes")
+                        }
+                        disabled={updating}
+                    >
+
+
+
+                        Allow
+
+
+
+                        Deny
+
+
+
+
+
+
+                    Verified Domains
+
+
+
+
+
+                    {
+                            // Only show DNS instructions for unverified domains with a token
+                            if (record.flags?.is_verified || !record.token) {
+                                return null
+                            }
+
+                            const txtRecordName = `_agenta-verification.${record.slug}`
+                            const txtRecordValue = `_agenta-verification=${record.token}`
+
+                            return (
+
+                                            Verification Instructions
+
+                                    }
+                                    description={
+
+
+                                                1. Add the following DNS TXT record:
+
+
+
+                                                            Type
+
+                                                    }
+                                                >
+
+                                                            TXT
+
+
+
+                                                            Host
+
+                                                    }
+                                                >
+
+
+                                                            {txtRecordName}
+
+
+
+                                                            Value
+
+                                                    }
+                                                >
+
+
+                                                            {txtRecordValue}
+
+
+
+
+                                                2. Wait a few minutes for DNS propagation.
+
+
+                                                3. Click the "Verify" button.
+
+
+                                    }
+                                    type="info"
+                                    icon={}
+                                    showIcon
+                                />
+                            )
+                        },
+                        rowExpandable: (record: OrganizationDomain) =>
+                            !record.flags?.is_verified && !!record.token,
+                        expandIcon: () => null,
+                    }}
+                />
+
+
+            {
+                    setDomainModalVisible(false)
+                    domainForm.resetFields()
+                }}
+                confirmLoading={createDomainMutation.isPending}
+                okText="Add"
+            >
+
+
+
+
+                    After adding the domain, please follow the verification instructions.
+
+
+
+
+
+
+
+
+
+                    SSO Providers
+
+
+
+
+
+
+                    {selectedOrg.slug ? (
+                        {selectedOrg.slug}
+                    ) : (
+                        Please set slug to enable SSO
+                    )}
+
+
+
+            setSlugModalVisible(false)}
+                confirmLoading={updating}
+            >
+
+                    The slug is used in SSO callbacks and cannot be unset or edited once
+                    saved.
+
+                setSlugValue(e.target.value)}
+                    placeholder="organization-slug"
+                />
+
+                {!selectedOrg?.slug && (
+
+                )}
+
+                    {
+                            // Only show configuration instructions for providers that are not valid
+                            if (record.flags?.is_valid !== false) {
+                                return null
+                            }
+
+                            if (!selectedOrg?.slug) {
+                                return null
+                            }
+
+                            const callbackUrl = `${getAgentaWebUrl()}/auth/callback/sso:${selectedOrg.slug}:${record.slug}`
+                            const expectedScopes = "openid email profile"
+
+                            return (
+
+                                            Configuration Instructions
+
+                                    }
+                                    description={
+
+
+                                                1. Edit your IdP with the following details:
+
+
+
+                                                            Callback URL
+
+                                                    }
+                                                >
+
+
+                                                            {callbackUrl}
+
+
+
+                                                            Scopes
+
+                                                    }
+                                                >
+
+
+                                                            {expectedScopes}
+
+
+
+
+                                                2. Ensure your SSO provider's OIDC discovery
+                                                endpoint is accessible.
+
+
+                                                3. Click the "Enable" button.
+
+
+                                    }
+                                    type="info"
+                                    icon={}
+                                    showIcon
+                                />
+                            )
+                        },
+                        rowExpandable: (record: OrganizationProvider) =>
+                            record.flags?.is_valid === false,
+                        expandIcon: () => null,
+                    }}
+                />
+
+
+            {
+                    setProviderModalVisible(false)
+                    setEditingProvider(null)
+                    providerForm.resetFields()
+                }}
+                confirmLoading={
+                    createProviderMutation.isPending || updateProviderMutation.isPending
+                }
+                okText={editingProvider ? "Update" : "Add"}
+                width={600}
+            >
+
+
+
+
+                    prev.slug !== next.slug}
+                >
+                    {() => {
+                        const slug = providerForm.getFieldValue("slug")
+                        const callbackUrl =
+                            selectedOrg?.slug && slug
+                                ? `${getAgentaWebUrl()}/auth/callback/sso:${selectedOrg.slug}:${slug}`
+                                : ""
+                        return (
+
+                        )
+                    }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+                    After adding the provider, use the "Test" button to verify the
+                    connection.
+
+
+
+
+    )
+}
+
+export default Organization
diff --git a/web/oss/src/components/pages/settings/WorkspaceManage/Modals/InviteUsersModal.tsx b/web/oss/src/components/pages/settings/WorkspaceManage/Modals/InviteUsersModal.tsx
index b9e4d6697f..420ab3e5d4 100644
--- a/web/oss/src/components/pages/settings/WorkspaceManage/Modals/InviteUsersModal.tsx
+++ b/web/oss/src/components/pages/settings/WorkspaceManage/Modals/InviteUsersModal.tsx
@@ -8,9 +8,9 @@ import Link from "next/link"
 import {message} from "@/oss/components/AppMessageContext"
 import useLazyEffect from "@/oss/hooks/useLazyEffect"
 import {workspaceRolesAtom} from "@/oss/lib/atoms/organization"
+import {isEmailInvitationsEnabled} from "@/oss/lib/helpers/isEE"
 import {useSubscriptionDataWrapper} from "@/oss/lib/helpers/useSubscriptionDataWrapper"
 import {isDemo, snakeToTitle} from "@/oss/lib/helpers/utils"
-import {isEmailInvitationsEnabled} from "@/oss/lib/helpers/isEE"
 import {Plan} from "@/oss/lib/Types"
 import {inviteToWorkspace} from "@/oss/services/workspace/api"
 import {useOrgData} from "@/oss/state/org"
@@ -47,7 +47,7 @@ const InviteForm: FC = ({onSuccess, workspaceId, form, setLoadi
                 })),
                 organizationId,
                 workspaceId,
-            })
+            }, true)
                 .then((responses) => {
                     if (!isEmailInvitationsEnabled() && typeof responses.url === "string") {
                         onSuccess?.({
@@ -62,7 +62,14 @@
                     form.resetFields()
                 })
-                .catch(console.error)
+                .catch((error: any) => {
+                    const detail = error?.response?.data?.detail
+                    const detailMessage =
+                        typeof detail === "string"
+                            ? detail
+                            : detail?.message || "Failed to send invitations"
+                    message.error(detailMessage)
+                })
                 .finally(() => setLoading(false))
         },
         [organizationId],
diff --git a/web/oss/src/components/pages/settings/WorkspaceManage/WorkspaceManage.tsx b/web/oss/src/components/pages/settings/WorkspaceManage/WorkspaceManage.tsx
index fd79f17599..a79ab8e1dd 100644
--- a/web/oss/src/components/pages/settings/WorkspaceManage/WorkspaceManage.tsx
+++ b/web/oss/src/components/pages/settings/WorkspaceManage/WorkspaceManage.tsx
@@ -1,18 +1,19 @@
-import {useEffect, useMemo, useState, type FC} from "react"
+import {useMemo, useState, type FC} from "react"
 
-import {GearSix, PencilSimple, Plus} from "@phosphor-icons/react"
+import {PlusOutlined} from "@ant-design/icons"
+import {GearSix, Plus} from "@phosphor-icons/react"
 import {Button, Input, Space, Spin, Table, Tag, Typography} from "antd"
 import {ColumnsType} from "antd/es/table"
 import dynamic from "next/dynamic"
 
 import {useQueryParam} from "@/oss/hooks/useQuery"
 import {formatDay} from "@/oss/lib/helpers/dateTimeHelper"
+import {isEmailInvitationsEnabled, isEE} from "@/oss/lib/helpers/isEE"
 import {getUsernameFromEmail, isDemo} from "@/oss/lib/helpers/utils"
-import {isEmailInvitationsEnabled} from "@/oss/lib/helpers/isEE"
 import {WorkspaceMember} from "@/oss/lib/Types"
-import {useOrgData} from "@/oss/state/org"
+import {useOrgData, isPersonalOrg} from "@/oss/state/org"
 import {useProfileData} from "@/oss/state/profile"
-import {useUpdateWorkspaceName, useWorkspaceMembers} from "@/oss/state/workspace"
+import {useWorkspaceMembers} from "@/oss/state/workspace"
 
 import AvatarWithLabel from "./assets/AvatarWithLabel"
 import {Actions, Roles} from "./cellRenderers"
@@ -23,7 +24,6 @@ const InviteUsersModal = dynamic(() => import("./Modals/InviteUsersModal"), {ssr
 const WorkspaceManage: FC = () => {
     const {user: signedInUser} = useProfileData()
     const {selectedOrg, loading, refetch} = useOrgData()
-    const {updateWorkspaceName} = useUpdateWorkspaceName()
     const {filteredMembers, searchTerm, setSearchTerm} = useWorkspaceMembers()
     const [isInviteModalOpen, setIsInviteModalOpen] = useState(false)
     const [isInvitedUserLinkModalOpen, setIsInvitedUserLinkModalOpen] = useState(false)
@@ -35,14 +35,6 @@ const WorkspaceManage: FC = () => {
     const organizationId = selectedOrg?.id
     const workspaceId = selectedOrg?.default_workspace?.id
-    const workspace = selectedOrg?.default_workspace
-
-    const [isEditingName, setIsEditingName] = useState(false)
-    const [workspaceNameInput, setWorkspaceNameInput] = useState(workspace?.name || "")
-
-    useEffect(() => {
-        setWorkspaceNameInput(workspace?.name || "")
-    }, [workspace?.name])
 
     const columns = useMemo(
         () =>
@@ -72,6 +64,9 @@
             dataIndex: ["user", "email"],
             key: "email",
             title: "Email",
+            render: (_, member) => (
+                {member.user?.email}
+            ),
         },
         isDemo() ? {
@@ -122,13 +117,15 @@
             fixed: "right",
             align: "center",
             render: (_, member) => {
+                const isSelf =
+                    member.user?.id === signedInUser?.id ||
+                    member.user?.email === signedInUser?.email
+                const isOwner = member.user?.id === selectedOrg?.owner_id
                 return (