diff --git a/.all-contributorsrc b/.all-contributorsrc
index 0bbf050227..df1ad81797 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -512,6 +512,15 @@
"doc",
"example"
]
+ },
+ {
+ "login": "adityadewan22-hub",
+ "name": "adityadewan22-hub",
+ "avatar_url": "https://avatars.githubusercontent.com/u/225586510?v=4",
+ "profile": "https://github.com/adityadewan22-hub",
+ "contributions": [
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
diff --git a/AGENTS.md b/AGENTS.md
index e76a9c540d..63dd92b381 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -377,6 +377,119 @@ export const createItemAtom = atom(
---
+### Entity Controller Pattern
+
+For entities requiring CRUD operations with draft state, loading indicators, and cache management, use the **Entity Controller Pattern**. This provides a unified API that abstracts multiple atoms into a single cohesive interface.
+
+**Full documentation:** `web/oss/src/state/entities/shared/README.md`
+
+**Quick Decision - Which API to Use:**
+
+| Need | API | Returns |
+|------|-----|---------|
+| Full state + actions | `entity.controller(id)` | `[state, dispatch]` |
+| Data only | `entity.selectors.data(id)` | `T \| null` |
+| Loading/error | `entity.selectors.query(id)` | `QueryState` |
+| Dirty indicator | `entity.selectors.isDirty(id)` | `boolean` |
+| Single cell (tables) | `entity.selectors.cell({id, col})` | `unknown` |
+| Dispatch in atoms | `entity.actions.update/discard` | Write atom |
+
+**Basic Usage:**
+
+```typescript
+import {useAtom, useAtomValue} from "jotai"
+
+import {testcase} from "@/oss/state/entities/testcase"
+
+// Full controller - state + dispatch
+function TestcaseEditor({testcaseId}: {testcaseId: string}) {
+    const [state, dispatch] = useAtom(testcase.controller(testcaseId))
+
+    if (state.isPending) return null // render a loading state here if desired
+    if (!state.data) return null
+
+    return (
+        <input
+            value={state.data.input}
+            onChange={(e) =>
+                dispatch({
+                    type: "update",
+                    changes: {input: e.target.value},
+                })
+            }
+        />
+    )
+}
+
+// Fine-grained selector - only re-renders on data change
+function TestcaseDisplay({testcaseId}: {testcaseId: string}) {
+    const data = useAtomValue(testcase.selectors.data(testcaseId))
+    if (!data) return null
+    return <span>{data.input}</span>
+}
+```
+
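+**Single-Cell Subscription (tables):**
+
+The selectors table above also lists a per-cell selector. A minimal sketch of using it from a table cell component; the `{id, col}` payload mirrors that table, so confirm the exact shape in the shared README before relying on it:
+
+```typescript
+import {useAtomValue} from "jotai"
+
+import {testcase} from "@/oss/state/entities/testcase"
+
+// Subscribe to a single cell so only that cell re-renders when its value changes.
+// The {id, col} payload follows the selectors table above — confirm the exact
+// shape in web/oss/src/state/entities/shared/README.md.
+function TestcaseCellValue({id, col}: {id: string; col: string}) {
+    const value = useAtomValue(testcase.selectors.cell({id, col}))
+    return <span>{String(value ?? "")}</span>
+}
+```
+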
+**Reading Multiple Entities:**
+
+```typescript
+// Create a derived atom that subscribes to all selected entities
+const useMultipleTestcases = (ids: string[]) => {
+    const dataAtom = useMemo(
+        () => atom((get) => ids.map((id) => get(testcase.selectors.data(id))).filter(Boolean)),
+        [ids.join(",")],
+    )
+    return useAtomValue(dataAtom)
+}
+```
+
+**Anti-Patterns to Avoid:**
+
+```typescript
+// BAD - No reactivity, snapshot read
+const globalStore = getDefaultStore()
+const data = globalStore.get(testcase.selectors.data(id))
+
+// GOOD - Proper subscription
+const data = useAtomValue(testcase.selectors.data(id))
+```
+
+```typescript
+// BAD - Variable shadowing
+import {testcase} from "@/oss/state/entities/testcase"
+const {testcase, ...rest} = entity // Shadows import!
+
+// GOOD - Rename destructured variable
+const {testcase: testcaseField, ...rest} = entity
+```
+
+**Available Controllers:**
+
+| Entity | Import | Description |
+|--------|--------|-------------|
+| Testcase | `testcase` from `@/oss/state/entities/testcase` | Testcase with cell subscriptions + drill-in |
+| Trace Span | `traceSpan` from `@/oss/state/entities/trace` | Trace span with attribute drill-in |
+| Revision | `revision` from `@/oss/state/entities/testset` | Revision with column management |
+| Testset | `testset` from `@/oss/state/entities/testset` | Testset with list/detail queries |
+
+**Architecture:**
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│ Controller │
+│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
+│ │ Query │ │ Draft │ │ isDirty │ │
+│ │ (server) │→ │ (local) │→ │ (derived) │ │
+│ └─────────────┘ └─────────────┘ └─────────────┘ │
+│ ↓ ↓ │
+│ ┌─────────────────────────────────────────────────────────────┐│
+│ │ Entity Atom (merged) ││
+│ └─────────────────────────────────────────────────────────────┘│
+└─────────────────────────────────────────────────────────────────┘
+```
+
+- **Query atoms** are the single source of truth for server data
+- **Draft atoms** store local changes only
+- **Entity atoms** merge: `query.data + draft → merged entity`
+- **Dirty detection** compares draft to server data
+
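+For the "Dispatch in atoms" row of the quick-decision table above, a hedged sketch of driving an update from another write atom; the payload shape is an assumption and the shared README is authoritative:
+
+```typescript
+import {atom} from "jotai"
+
+import {testcase} from "@/oss/state/entities/testcase"
+
+// Hypothetical write atom that clears a testcase's draft input from outside React.
+// Assumes actions.update accepts an {id, changes} payload — confirm in
+// web/oss/src/state/entities/shared/README.md.
+export const clearTestcaseInputAtom = atom(null, (_get, set, testcaseId: string) => {
+    set(testcase.actions.update, {id: testcaseId, changes: {input: ""}})
+})
+```
+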
+---
+
**Legacy: SWR Pattern (avoid for new code)**
We previously used SWR with Axios for data fetching. This pattern is still present in older code but should not be used for new features.
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000000..f6097ac718
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,7 @@
+# Instructions for Claude
+
+Please read and follow all instructions in:
+
+@AGENTS.md
+
+Project conventions, guidelines, and best practices are documented there.
diff --git a/README.md b/README.md
index d87e7616aa..26322bb662 100644
--- a/README.md
+++ b/README.md
@@ -191,7 +191,7 @@ We welcome contributions of all kinds — from filing issues and sharing ideas t
## Contributors ✨
-[](#contributors-)
+[](#contributors-)
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
@@ -270,6 +270,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
 Vahant Sharma 📖 |
 Muhammad Muzammil 💻 |
 Sirous Namjoo 📖 💡 |
+  adityadewan22-hub 💻 |
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py b/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py
index ac96ece37d..3734374db4 100644
--- a/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py
@@ -38,8 +38,8 @@
)
# Redis client and TracingWorker for publishing spans to Redis Streams
-if env.REDIS_URI_DURABLE:
- redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False)
+if env.redis.uri_durable:
+ redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False)
tracing_worker = TracingWorker(
service=tracing_service,
redis_client=redis_client,
@@ -81,9 +81,10 @@ async def _fetch_project_owner(
WorkspaceMemberDBE.role == "owner",
ProjectDBE.id == project_id,
)
+ .order_by(WorkspaceMemberDBE.created_at.asc())
)
result = await connection.execute(workspace_owner_query)
- owner = result.scalar_one_or_none()
+ owner = result.scalars().first()
return owner
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py b/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py
index 90dca62adf..a3fbfa164c 100644
--- a/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py
@@ -69,9 +69,10 @@ async def _fetch_project_owner(
WorkspaceMemberDBE.role == "owner",
ProjectDBE.id == project_id,
)
+ .order_by(WorkspaceMemberDBE.created_at.asc())
)
result = await connection.execute(workspace_owner_query)
- owner = result.scalar_one_or_none()
+ owner = result.scalars().first()
return owner
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
index cca11d88e0..22c34367a4 100644
--- a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
@@ -28,7 +28,7 @@ def get_or_create_workspace_default_project(
if project is None:
statement = insert(ProjectDB).values(
- project_name="Default Project",
+ project_name="Default",
is_default=True,
workspace_id=workspace.id,
organization_id=workspace.organization_id,
diff --git a/api/ee/databases/postgres/migrations/core/utils.py b/api/ee/databases/postgres/migrations/core/utils.py
index 58e4b75fb8..11d92b2114 100644
--- a/api/ee/databases/postgres/migrations/core/utils.py
+++ b/api/ee/databases/postgres/migrations/core/utils.py
@@ -118,7 +118,9 @@ async def get_pending_migration_head():
def run_alembic_migration():
"""
- Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users.
+ Applies migration for first-time users and also checks the environment variable
+ "ALEMBIC_AUTO_MIGRATIONS" (legacy: "AGENTA_AUTO_MIGRATIONS") to determine whether
+ to apply migrations for returning users.
"""
try:
diff --git a/api/ee/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py b/api/ee/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py
new file mode 100644
index 0000000000..a22123f1e7
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py
@@ -0,0 +1,51 @@
+"""add slug to organizations
+
+Revision ID: 12d23a8f7dde
+Revises: 59b85eb7516c
+Create Date: 2025-12-25 00:00:00.000000+00:00
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "12d23a8f7dde"
+down_revision: Union[str, None] = "59b85eb7516c"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # Add slug column to organizations table
+ op.add_column(
+ "organizations",
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+
+ # Add unique constraint on slug
+ op.create_unique_constraint(
+ "uq_organizations_slug",
+ "organizations",
+ ["slug"],
+ )
+
+ # Add index for faster lookups
+ op.create_index(
+ "ix_organizations_slug",
+ "organizations",
+ ["slug"],
+ )
+
+
+def downgrade() -> None:
+ # Drop in reverse order
+ op.drop_index("ix_organizations_slug", table_name="organizations")
+ op.drop_constraint("uq_organizations_slug", "organizations", type_="unique")
+ op.drop_column("organizations", "slug")
diff --git a/api/ee/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py b/api/ee/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py
new file mode 100644
index 0000000000..5a9743f1d3
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py
@@ -0,0 +1,361 @@
+"""add sso oidc tables
+
+Revision ID: 59b85eb7516c
+Revises: 80910d2fa9a4
+Create Date: 2025-12-10 08:53:56.000000+00:00
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "59b85eb7516c"
+down_revision: Union[str, None] = "80910d2fa9a4"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # 1. user_identities table
+ op.create_table(
+ "user_identities",
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "user_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "method",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "subject",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "domain",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ondelete="CASCADE",
+ ),
+ sa.UniqueConstraint(
+ "method",
+ "subject",
+ name="uq_user_identities_method_subject",
+ ),
+ sa.Index(
+ "ix_user_identities_user_method",
+ "user_id",
+ "method",
+ ),
+ sa.Index(
+ "ix_user_identities_domain",
+ "domain",
+ ),
+ )
+
+ # 2. organization_domains table
+ op.create_table(
+ "organization_domains",
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "organization_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "token",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(
+ ["organization_id"],
+ ["organizations.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_organization_domains_org",
+ "organization_id",
+ ),
+ sa.Index(
+ "ix_organization_domains_flags",
+ "flags",
+ postgresql_using="gin",
+ ),
+ )
+ op.create_index(
+ "uq_organization_domains_slug_verified",
+ "organization_domains",
+ ["slug"],
+ unique=True,
+ postgresql_where=sa.text("(flags->>'is_verified') = 'true'"),
+ )
+
+ # 3. organization_providers table
+ op.create_table(
+ "organization_providers",
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "organization_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "secret_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(
+ ["secret_id"],
+ ["secrets.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["organization_id"],
+ ["organizations.id"],
+ ondelete="CASCADE",
+ ),
+ sa.UniqueConstraint(
+ "organization_id",
+ "slug",
+ name="uq_organization_providers_org_slug",
+ ),
+ sa.Index(
+ "ix_organization_providers_org",
+ "organization_id",
+ ),
+ sa.Index(
+ "ix_organization_providers_flags",
+ "flags",
+ postgresql_using="gin",
+ ),
+ )
+
+ # 4. Add is_active to users table
+ op.add_column(
+ "users",
+ sa.Column(
+ "is_active",
+ sa.Boolean(),
+ nullable=False,
+ server_default="true",
+ ),
+ )
+
+
+def downgrade() -> None:
+ # Drop in reverse order
+ op.drop_column("users", "is_active")
+
+ op.drop_index(
+ "ix_organization_providers_flags",
+ table_name="organization_providers",
+ )
+ op.drop_index(
+ "ix_organization_providers_org",
+ table_name="organization_providers",
+ )
+ op.drop_table("organization_providers")
+
+ op.drop_index(
+ "uq_organization_domains_slug_verified",
+ table_name="organization_domains",
+ )
+ op.drop_index(
+ "ix_organization_domains_flags",
+ table_name="organization_domains",
+ )
+ op.drop_index(
+ "ix_organization_domains_org",
+ table_name="organization_domains",
+ )
+ op.drop_table("organization_domains")
+
+ op.drop_index(
+ "ix_user_identities_domain",
+ table_name="user_identities",
+ )
+ op.drop_index(
+ "ix_user_identities_user_method",
+ table_name="user_identities",
+ )
+ op.drop_table("user_identities")
diff --git a/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py b/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py
index 1edfdda8cc..57582fe92b 100644
--- a/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py
+++ b/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py
@@ -14,6 +14,7 @@
from alembic import context
from sqlalchemy import Connection, func, insert, select, update
+from sqlalchemy.orm import load_only
import stripe
@@ -21,10 +22,10 @@
from oss.src.utils.env import env
from oss.src.models.db_models import UserDB
from oss.src.models.db_models import AppDB
-from oss.src.models.db_models import OrganizationDB
from ee.src.models.db_models import OrganizationMemberDB
from oss.src.models.db_models import ProjectDB
from ee.src.models.db_models import ProjectMemberDB
+from ee.src.models.extended.deprecated_models import DeprecatedOrganizationDB
from ee.src.dbs.postgres.subscriptions.dbes import SubscriptionDBE
from ee.src.dbs.postgres.meters.dbes import MeterDBE
from ee.src.core.subscriptions.types import FREE_PLAN
@@ -48,7 +49,7 @@ def upgrade() -> None:
now = datetime.now(timezone.utc)
# --> GET ORGANIZATION COUNT
- query = select(func.count()).select_from(OrganizationDB)
+ query = select(func.count()).select_from(DeprecatedOrganizationDB)
nof_organizations = session.execute(query).scalar()
# <-- GET ORGANIZATION COUNT
@@ -60,7 +61,12 @@ def upgrade() -> None:
while True:
# --> GET ORGANIZATION BATCH
query = (
- select(OrganizationDB)
+ select(DeprecatedOrganizationDB)
+ .options(
+ load_only(
+ DeprecatedOrganizationDB.id, DeprecatedOrganizationDB.owner
+ )
+ )
.limit(organization_batch_size)
.offset(organization_batch_index * organization_batch_size)
)
diff --git a/api/ee/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py b/api/ee/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py
new file mode 100644
index 0000000000..727e3b362e
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py
@@ -0,0 +1,620 @@
+"""clean up organizations
+
+Revision ID: a9f3e8b7c5d1
+Revises: 12d23a8f7dde
+Create Date: 2025-12-26 00:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy import text
+from oss.src.utils.env import env
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "a9f3e8b7c5d1"
+down_revision: Union[str, None] = "12d23a8f7dde"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ """
+ Clean up organizations table and introduce new schema.
+
+ Changes:
+ - Add flags (JSONB, nullable) with is_personal and is_demo fields
+ - Migrate type='view-only' to flags.is_demo=true
+ - Drop type column
+ - Convert owner (String) to owner_id (UUID, NOT NULL)
+ - Add created_by_id (UUID, NOT NULL)
+ - Ensure created_at is NOT NULL, remove default from updated_at
+ - Add updated_by_id (UUID, nullable)
+ - Add deleted_at (DateTime, nullable)
+ - Add deleted_by_id (UUID, nullable)
+ - Add role field to organization_members (String, default="member")
+ - Populate role='owner' for organization owners
+ - Add LegacyLifecycle fields to organization_members (created_at, updated_at, updated_by_id - all nullable)
+ - Add updated_by_id to workspace_members (nullable)
+ - Add updated_by_id to project_members (nullable)
+ - Drop user_organizations table (replaced by organization_members)
+ - Drop invitations table (obsolete)
+
+ EE Mode:
+ - Organizations with >1 member → is_personal=false
+ - Organizations with =1 member and user owns it → is_personal=true
+ - Create missing personal orgs for users without one
+ - Normalize names: personal orgs → "Personal", slug → NULL
+ """
+ conn = op.get_bind()
+
+ # Step 1: Add JSONB columns (flags, tags, meta - all nullable)
+ op.add_column(
+ "organizations",
+ sa.Column(
+ "flags",
+ postgresql.JSONB(astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column(
+ "meta",
+ postgresql.JSONB(astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+
+ # Step 2: Add new UUID columns (all nullable initially for migration)
+ op.add_column(
+ "organizations",
+ sa.Column("owner_id", postgresql.UUID(as_uuid=True), nullable=True),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column("created_by_id", postgresql.UUID(as_uuid=True), nullable=True),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column("updated_by_id", postgresql.UUID(as_uuid=True), nullable=True),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column("deleted_by_id", postgresql.UUID(as_uuid=True), nullable=True),
+ )
+
+ # Step 3: Get member counts for all organizations
+ conn.execute(
+ text("""
+ CREATE TEMP TABLE org_member_counts AS
+ SELECT
+ o.id as org_id,
+ COUNT(om.id) as member_count,
+ o.owner as owner_str
+ FROM organizations o
+ LEFT JOIN organization_members om ON om.organization_id = o.id
+ GROUP BY o.id, o.owner
+ """)
+ )
+
+ # Step 4: Migrate type='view-only' to is_demo=true for all orgs
+ # and mark multi-member orgs as is_personal=false
+ conn.execute(
+ text("""
+ UPDATE organizations o
+ SET flags = jsonb_build_object(
+ 'is_demo', CASE WHEN o.type = 'view-only' THEN true ELSE false END,
+ 'is_personal', false
+ )
+ FROM org_member_counts omc
+ WHERE o.id = omc.org_id
+ AND omc.member_count > 1
+ """)
+ )
+
+ # Step 5: Mark single-member orgs owned by that member as personal
+ # NOTE: owner is String type, needs casting for comparison
+ conn.execute(
+ text("""
+ UPDATE organizations o
+ SET flags = jsonb_build_object(
+ 'is_demo', CASE WHEN o.type = 'view-only' THEN true ELSE false END,
+ 'is_personal', true
+ )
+ FROM org_member_counts omc
+ WHERE o.id = omc.org_id
+ AND omc.member_count = 1
+ AND EXISTS (
+ SELECT 1 FROM organization_members om
+ WHERE om.organization_id = o.id
+ AND om.user_id::text = o.owner
+ )
+ """)
+ )
+
+ # Step 6: Mark remaining single-member orgs as collaborative (is_personal=false)
+ conn.execute(
+ text("""
+ UPDATE organizations o
+ SET flags = jsonb_build_object(
+ 'is_demo', CASE WHEN o.type = 'view-only' THEN true ELSE false END,
+ 'is_personal', false
+ )
+ FROM org_member_counts omc
+ WHERE o.id = omc.org_id
+ AND omc.member_count = 1
+ AND (o.flags IS NULL OR o.flags = '{}'::jsonb)
+ """)
+ )
+
+ # Step 7: Migrate owner (String) to owner_id (UUID)
+ # Set owner_id = owner::uuid for existing orgs
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET owner_id = owner::uuid
+ WHERE owner IS NOT NULL
+ """)
+ )
+
+ # Step 8: Set created_by_id = owner_id for existing orgs
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET created_by_id = owner_id
+ WHERE owner_id IS NOT NULL
+ """)
+ )
+
+ # Step 9: Set updated_by_id = owner_id for existing orgs
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET updated_by_id = owner_id
+ WHERE owner_id IS NOT NULL
+ """)
+ )
+
+ # Step 10: Create missing personal organizations for users without one
+ conn.execute(
+ text("""
+ INSERT INTO organizations (
+ id,
+ name,
+ slug,
+ description,
+ owner,
+ owner_id,
+ created_at,
+ created_by_id,
+ updated_at,
+ updated_by_id,
+ flags
+ )
+ SELECT
+ gen_random_uuid(),
+ 'Personal',
+ NULL,
+ NULL,
+ u.id::text,
+ u.id,
+ NOW(),
+ u.id,
+ NOW(),
+ u.id,
+ '{"is_demo": false, "is_personal": true}'::jsonb
+ FROM users u
+ WHERE NOT EXISTS (
+ SELECT 1 FROM organizations o
+ WHERE o.owner_id = u.id
+ AND o.flags->>'is_personal' = 'true'
+ )
+ """)
+ )
+
+ # Step 10b: Add role column to organization_members
+ op.add_column(
+ "organization_members",
+ sa.Column(
+ "role",
+ sa.String(),
+ nullable=False,
+ server_default="member",
+ ),
+ )
+
+ # Step 10c: Set role='owner' for organization owners based on owner_id
+ conn.execute(
+ text("""
+ UPDATE organization_members om
+ SET role = 'owner'
+ FROM organizations o
+ WHERE om.organization_id = o.id
+ AND om.user_id = o.owner_id
+ """)
+ )
+
+ # Step 10d: Add LegacyLifecycle fields to organization_members
+ op.add_column(
+ "organization_members",
+ sa.Column("created_at", sa.TIMESTAMP(timezone=True), nullable=True),
+ )
+ op.add_column(
+ "organization_members",
+ sa.Column("updated_at", sa.TIMESTAMP(timezone=True), nullable=True),
+ )
+ op.add_column(
+ "organization_members",
+ sa.Column("updated_by_id", sa.UUID(), nullable=True),
+ )
+
+ # Step 10e: Add updated_by_id to workspace_members
+ op.add_column(
+ "workspace_members",
+ sa.Column("updated_by_id", sa.UUID(), nullable=True),
+ )
+
+ # Step 10f: Add updated_by_id to project_members
+ op.add_column(
+ "project_members",
+ sa.Column("updated_by_id", sa.UUID(), nullable=True),
+ )
+
+ # Step 11: Add users as members to their new personal orgs
+ conn.execute(
+ text("""
+ INSERT INTO organization_members (id, user_id, organization_id, role)
+ SELECT
+ gen_random_uuid(),
+ o.owner_id,
+ o.id,
+ 'owner'
+ FROM organizations o
+ WHERE o.flags->>'is_personal' = 'true'
+ AND NOT EXISTS (
+ SELECT 1 FROM organization_members om
+ WHERE om.organization_id = o.id
+ AND om.user_id = o.owner_id
+ )
+ """)
+ )
+
+ # Step 12: Normalize personal organizations
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET
+ name = 'Personal',
+ slug = NULL
+ WHERE flags->>'is_personal' = 'true'
+ """)
+ )
+
+ # Step 13: Ensure any remaining orgs have flags set
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET flags = jsonb_build_object(
+ 'is_demo', CASE WHEN type = 'view-only' THEN true ELSE false END,
+ 'is_personal', false
+ )
+ WHERE flags IS NULL OR flags = '{}'::jsonb
+ """)
+ )
+
+ # Step 13b: Ensure all organizations have complete flag defaults
+ # This ensures all auth and access control flags are set with defaults
+ allow_email_default = "true" if env.auth.email_enabled else "false"
+ allow_social_default = "true" if env.auth.oidc_enabled else "false"
+ allow_sso_default = "false"
+ allow_root_default = "false"
+
+ conn.execute(
+ text(f"""
+ UPDATE organizations
+ SET flags = flags ||
+ jsonb_build_object(
+ 'allow_email', COALESCE((flags->>'allow_email')::boolean, {allow_email_default}),
+ 'allow_social', COALESCE((flags->>'allow_social')::boolean, {allow_social_default}),
+ 'allow_sso', COALESCE((flags->>'allow_sso')::boolean, {allow_sso_default}),
+ 'allow_root', COALESCE((flags->>'allow_root')::boolean, {allow_root_default}),
+ 'domains_only', COALESCE((flags->>'domains_only')::boolean, false),
+ 'auto_join', COALESCE((flags->>'auto_join')::boolean, false)
+ )
+ WHERE flags IS NOT NULL
+ """)
+ )
+
+ # Step 13c: Add unique constraint: one personal org per owner
+ op.create_index(
+ "uq_organizations_owner_personal",
+ "organizations",
+ ["owner_id"],
+ unique=True,
+ postgresql_where=sa.text("(flags->>'is_personal') = 'true'"),
+ )
+
+ # Clean up temp table
+ conn.execute(text("DROP TABLE IF EXISTS org_member_counts"))
+
+ # Step 14: Ensure created_at has a value for all existing records
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET created_at = COALESCE(created_at, NOW())
+ WHERE created_at IS NULL
+ """)
+ )
+
+ # Step 15: Make owner_id, created_by_id, and created_at NOT NULL; remove updated_at default
+ op.alter_column("organizations", "owner_id", nullable=False)
+ op.alter_column("organizations", "created_by_id", nullable=False)
+ op.alter_column("organizations", "created_at", nullable=False)
+ op.alter_column("organizations", "updated_at", server_default=None)
+
+ # Step 16: Add foreign key constraints
+ op.create_foreign_key(
+ "fk_organizations_owner_id_users",
+ "organizations",
+ "users",
+ ["owner_id"],
+ ["id"],
+ ondelete="RESTRICT",
+ )
+ op.create_foreign_key(
+ "fk_organizations_created_by_id_users",
+ "organizations",
+ "users",
+ ["created_by_id"],
+ ["id"],
+ ondelete="RESTRICT",
+ )
+ op.create_foreign_key(
+ "fk_organizations_updated_by_id_users",
+ "organizations",
+ "users",
+ ["updated_by_id"],
+ ["id"],
+ ondelete="SET NULL",
+ )
+ op.create_foreign_key(
+ "fk_organizations_deleted_by_id_users",
+ "organizations",
+ "users",
+ ["deleted_by_id"],
+ ["id"],
+ ondelete="SET NULL",
+ )
+
+ # Step 16b: Ensure organization_members cascade on organization delete
+ op.drop_constraint(
+ "organization_members_organization_id_fkey",
+ "organization_members",
+ type_="foreignkey",
+ )
+ op.create_foreign_key(
+ "organization_members_organization_id_fkey",
+ "organization_members",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ # Step 16c: Ensure workspaces cascade on organization delete
+ try:
+ op.drop_constraint(
+ "workspaces_organization_id_fkey",
+ "workspaces",
+ type_="foreignkey",
+ )
+ except Exception:
+ pass # Constraint might not exist yet
+ op.create_foreign_key(
+ "workspaces_organization_id_fkey",
+ "workspaces",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ # Step 16c2: Ensure workspace_members cascade on workspace delete
+ try:
+ op.drop_constraint(
+ "workspace_members_workspace_id_fkey",
+ "workspace_members",
+ type_="foreignkey",
+ )
+ except Exception:
+ pass # Constraint might not exist yet
+ op.create_foreign_key(
+ "workspace_members_workspace_id_fkey",
+ "workspace_members",
+ "workspaces",
+ ["workspace_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ # Step 16d: Ensure projects cascade on organization delete
+ try:
+ op.drop_constraint(
+ "projects_organization_id_fkey",
+ "projects",
+ type_="foreignkey",
+ )
+ except Exception:
+ pass # Constraint might not exist yet
+ op.create_foreign_key(
+ "projects_organization_id_fkey",
+ "projects",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ # Note: Other tables (testsets, evaluations, scenarios, etc.) are linked to
+ # organizations via projects, so they will cascade delete through projects.
+ # They should keep SET NULL on organization_id for direct references.
+
+ # Step 17: Drop type and owner columns
+ op.drop_column("organizations", "type")
+ op.drop_column("organizations", "owner")
+
+ # Step 18: Drop obsolete tables
+ conn.execute(text("DROP TABLE IF EXISTS user_organizations CASCADE"))
+ conn.execute(text("DROP TABLE IF EXISTS invitations CASCADE"))
+
+
+def downgrade() -> None:
+ """Restore organizations type and owner columns and revert schema changes."""
+ conn = op.get_bind()
+
+ # Drop foreign key constraints
+ op.drop_constraint(
+ "fk_organizations_deleted_by_id_users", "organizations", type_="foreignkey"
+ )
+ op.drop_constraint(
+ "fk_organizations_updated_by_id_users", "organizations", type_="foreignkey"
+ )
+ op.drop_constraint(
+ "fk_organizations_created_by_id_users", "organizations", type_="foreignkey"
+ )
+ op.drop_constraint(
+ "fk_organizations_owner_id_users", "organizations", type_="foreignkey"
+ )
+ op.drop_constraint(
+ "organization_members_organization_id_fkey",
+ "organization_members",
+ type_="foreignkey",
+ )
+
+ # Recreate type column
+ op.add_column("organizations", sa.Column("type", sa.String(), nullable=True))
+
+ # Migrate flags back to type
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET type = CASE
+ WHEN flags->>'is_demo' = 'true' THEN 'view-only'
+ ELSE 'default'
+ END
+ """)
+ )
+
+ op.alter_column("organizations", "type", nullable=False)
+
+ # Recreate owner column
+ op.add_column("organizations", sa.Column("owner", sa.String(), nullable=True))
+
+ # Migrate owner_id back to owner (UUID to String)
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET owner = owner_id::text
+ WHERE owner_id IS NOT NULL
+ """)
+ )
+
+ # Restore updated_at default
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET updated_at = COALESCE(updated_at, NOW())
+ WHERE updated_at IS NULL
+ """)
+ )
+ op.alter_column(
+ "organizations",
+ "updated_at",
+ server_default=sa.text("NOW()"),
+ nullable=False,
+ )
+
+ # Restore organization_members FK without cascade
+ op.create_foreign_key(
+ "organization_members_organization_id_fkey",
+ "organization_members",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ )
+
+ # Restore workspaces FK without cascade
+ op.drop_constraint(
+ "workspaces_organization_id_fkey",
+ "workspaces",
+ type_="foreignkey",
+ )
+ op.create_foreign_key(
+ "workspaces_organization_id_fkey",
+ "workspaces",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ )
+
+ # Restore projects FK without cascade
+ op.drop_constraint(
+ "projects_organization_id_fkey",
+ "projects",
+ type_="foreignkey",
+ )
+ op.create_foreign_key(
+ "projects_organization_id_fkey",
+ "projects",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ )
+
+ # Drop unique constraint for personal orgs
+ op.drop_index(
+ "uq_organizations_owner_personal",
+ table_name="organizations",
+ )
+
+ # Drop role column from organization_members
+ op.drop_column("organization_members", "role")
+
+ # Drop LegacyLifecycle columns from organization_members
+ op.drop_column("organization_members", "updated_by_id")
+ op.drop_column("organization_members", "updated_at")
+ op.drop_column("organization_members", "created_at")
+
+ # Drop updated_by_id from workspace_members
+ op.drop_column("workspace_members", "updated_by_id")
+
+ # Drop updated_by_id from project_members
+ op.drop_column("project_members", "updated_by_id")
+
+ # Drop new columns
+ op.drop_column("organizations", "deleted_by_id")
+ op.drop_column("organizations", "deleted_at")
+ op.drop_column("organizations", "updated_by_id")
+ op.drop_column("organizations", "created_by_id")
+ op.drop_column("organizations", "owner_id")
+ op.drop_column("organizations", "meta")
+ op.drop_column("organizations", "tags")
+ op.drop_column("organizations", "flags")
+
+ # Note: We don't recreate user_organizations and invitations tables
+ # as they contain no data at this point
diff --git a/api/ee/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py b/api/ee/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py
new file mode 100644
index 0000000000..3a97162a1d
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py
@@ -0,0 +1,161 @@
+"""add organization scope to secrets and link sso providers
+
+Revision ID: c3b2a1d4e5f6
+Revises: a9f3e8b7c5d1
+Create Date: 2025-01-10 00:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+import json
+
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy import text
+import uuid_utils.compat as uuid
+
+from oss.src.utils.env import env
+
+
+# revision identifiers, used by Alembic.
+revision: str = "c3b2a1d4e5f6"
+down_revision: Union[str, None] = "a9f3e8b7c5d1"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ connection = op.get_bind()
+
+ op.execute("ALTER TYPE secretkind_enum ADD VALUE IF NOT EXISTS 'SSO_PROVIDER'")
+
+ inspector = sa.inspect(connection)
+ secrets_columns = {col["name"] for col in inspector.get_columns("secrets")}
+
+ if "organization_id" not in secrets_columns:
+ op.add_column("secrets", sa.Column("organization_id", sa.UUID(), nullable=True))
+
+ op.alter_column("secrets", "project_id", nullable=True)
+
+ secrets_fks = {fk["name"] for fk in inspector.get_foreign_keys("secrets")}
+ if "secrets_organization_id_fkey" not in secrets_fks:
+ op.create_foreign_key(
+ "secrets_organization_id_fkey",
+ "secrets",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ org_providers_columns = {
+ col["name"] for col in inspector.get_columns("organization_providers")
+ }
+ if "secret_id" not in org_providers_columns:
+ op.add_column(
+ "organization_providers", sa.Column("secret_id", sa.UUID(), nullable=True)
+ )
+
+ org_providers_fks = {
+ fk["name"] for fk in inspector.get_foreign_keys("organization_providers")
+ }
+ if "organization_providers_secret_id_fkey" not in org_providers_fks:
+ op.create_foreign_key(
+ "organization_providers_secret_id_fkey",
+ "organization_providers",
+ "secrets",
+ ["secret_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ if "settings" in org_providers_columns:
+ encryption_key = env.agenta.crypt_key
+ if not encryption_key:
+ raise RuntimeError(
+ "Encryption key not found. Cannot migrate organization provider secrets."
+ )
+
+ providers = connection.execute(
+ sa.text(
+ """
+ SELECT id, organization_id, slug, name, description, settings, created_at, updated_at
+ FROM organization_providers
+ WHERE secret_id IS NULL
+ """
+ )
+ ).fetchall()
+
+ for provider in providers:
+ settings = provider.settings or {}
+ settings.setdefault("client_id", "")
+ settings.setdefault("client_secret", "")
+ settings.setdefault("issuer_url", "")
+ settings.setdefault("scopes", [])
+ settings.setdefault("extra", {})
+
+ secret_data = json.dumps({"provider": settings})
+ secret_id = uuid.uuid7()
+
+ connection.execute(
+ text(
+ """
+ INSERT INTO secrets (
+ id, kind, data, organization_id, project_id, created_at, updated_at, name, description
+ )
+ VALUES (
+ :id,
+ 'SSO_PROVIDER',
+ pgp_sym_encrypt(:data, :key),
+ :organization_id,
+ NULL,
+ :created_at,
+ :updated_at,
+ :name,
+ :description
+ )
+ """
+ ),
+ {
+ "id": secret_id,
+ "data": secret_data,
+ "key": encryption_key,
+ "organization_id": provider.organization_id,
+ "created_at": provider.created_at,
+ "updated_at": provider.updated_at,
+ "name": provider.slug,
+ "description": provider.description,
+ },
+ )
+
+ connection.execute(
+ sa.text(
+ "UPDATE organization_providers SET secret_id = :secret_id WHERE id = :provider_id"
+ ),
+ {"secret_id": secret_id, "provider_id": provider.id},
+ )
+
+ op.drop_column("organization_providers", "settings")
+
+ op.alter_column("organization_providers", "secret_id", nullable=False)
+
+
+def downgrade() -> None:
+ op.drop_constraint(
+ "organization_providers_secret_id_fkey",
+ "organization_providers",
+ type_="foreignkey",
+ )
+ op.add_column(
+ "organization_providers",
+ sa.Column(
+ "settings",
+ sa.JSON(),
+ nullable=True,
+ ),
+ )
+ op.drop_column("organization_providers", "secret_id")
+
+ op.drop_constraint("secrets_organization_id_fkey", "secrets", type_="foreignkey")
+ op.drop_column("secrets", "organization_id")
+ op.alter_column("secrets", "project_id", nullable=False)
diff --git a/api/ee/databases/postgres/migrations/tracing/utils.py b/api/ee/databases/postgres/migrations/tracing/utils.py
index 00f55a7315..13a32a0679 100644
--- a/api/ee/databases/postgres/migrations/tracing/utils.py
+++ b/api/ee/databases/postgres/migrations/tracing/utils.py
@@ -110,7 +110,9 @@ async def get_pending_migration_head():
def run_alembic_migration():
"""
- Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users.
+ Applies migration for first-time users and also checks the environment variable
+ "ALEMBIC_AUTO_MIGRATIONS" (legacy: "AGENTA_AUTO_MIGRATIONS") to determine whether
+ to apply migrations for returning users.
"""
try:
diff --git a/api/ee/docs/ORGANIZATION_FLAGS.md b/api/ee/docs/ORGANIZATION_FLAGS.md
new file mode 100644
index 0000000000..34234deae4
--- /dev/null
+++ b/api/ee/docs/ORGANIZATION_FLAGS.md
@@ -0,0 +1,76 @@
+# Organization Flags Reference
+
+This document defines the canonical default values for all organization flags in the system.
+
+## Flag Definitions
+
+### Identity Flags
+- **`is_demo`**: `false` - Marks the organization as a demo organization
+- **`is_personal`**: `false` - Marks the organization as a personal organization (single-user)
+
+### Authentication Method Flags
+- **`allow_email`**: defaults to `env.auth.email_enabled` - Allow email/password or email/OTP authentication
+- **`allow_social`**: defaults to `env.auth.oidc_enabled` - Allow social authentication (Google, GitHub, etc.)
+- **`allow_sso`**: `false` - Allow SSO/OIDC authentication
+
+### Access Control Flags
+- **`allow_root`**: `false` - Allow organization owner to bypass authentication restrictions
+- **`domains_only`**: `false` - Restrict access to verified email domains only
+- **`auto_join`**: `false` - When `true`, users whose email matches a verified domain can join the organization automatically
+
+## Default Behavior
+
+### When flags is `null` or missing
+All flags default to their specified default values above.
+
+### When flags is partially populated
+- Flags explicitly set to `null` use the default value
+- Flags with non-null values use those values
+- Missing flags use the default value
+
+### Example
+```json
+{
+ "flags": {
+ "is_demo": true,
+ "is_personal": false
+ // All other flags default as specified above
+ }
+}
+```
+This would result in:
+- `is_demo`: `true` (explicit)
+- `is_personal`: `false` (explicit)
+- `allow_email`: defaults to `env.auth.email_enabled`
+- `allow_social`: defaults to `env.auth.oidc_enabled`
+- `allow_sso`: `false` (default)
+- `allow_root`: `false` (default)
+- `domains_only`: `false` (default)
+- `auto_join`: `false` (default)
+
+## Implementation Notes
+
+### Backend
+- Auth service uses `.get(key, default_value)` pattern for all flags
+- See: `api/oss/src/core/auth/service.py`
+
+### Frontend
+- UI components use `?? default_value` pattern for all flags
+- See: `web/oss/src/components/pages/settings/Organization/index.tsx`
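+
+A minimal sketch of that fallback pattern (types and names are illustrative, not the actual component code):
+
+```typescript
+// Resolve effective flag values with nullish coalescing so null or missing
+// flags fall back to the documented defaults.
+type OrgFlags = Record<string, boolean | null | undefined> | null | undefined
+
+const resolveFlags = (flags: OrgFlags, defaults: {allowEmail: boolean; allowSocial: boolean}) => ({
+    allowEmail: flags?.allow_email ?? defaults.allowEmail,
+    allowSocial: flags?.allow_social ?? defaults.allowSocial,
+    allowSso: flags?.allow_sso ?? false,
+    allowRoot: flags?.allow_root ?? false,
+    domainsOnly: flags?.domains_only ?? false,
+    autoJoin: flags?.auto_join ?? false,
+})
+```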
+
+### Safety Mechanisms
+- If all authentication methods are disabled (`allow_email`, `allow_social`, `allow_sso` all `false`), the system automatically enables `allow_root` to prevent complete lockout
+- A confirmation dialog warns users when attempting to disable all auth methods
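+
+A minimal sketch of that lockout guard, assuming flags are normalized before saving (names are illustrative; the real check lives in the auth service and settings UI listed below):
+
+```typescript
+// If every auth method is disabled, force allow_root on so owners keep access.
+const preventLockout = (flags: Record<string, boolean>): Record<string, boolean> => {
+    const anyAuthEnabled = Boolean(flags.allow_email || flags.allow_social || flags.allow_sso)
+    return anyAuthEnabled ? flags : {...flags, allow_root: true}
+}
+```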
+
+## Related Files
+
+### Backend
+- `api/ee/src/models/api/organization_models.py` - API models
+- `api/oss/src/core/auth/service.py` - Authentication service with flag logic
+- `api/ee/src/services/db_manager_ee.py` - Organization update logic with validation
+- `api/ee/src/routers/organization_router.py` - Organization API endpoints
+
+### Frontend
+- `web/oss/src/components/pages/settings/Organization/index.tsx` - Organization settings UI
+- `web/oss/src/services/organization/api/index.ts` - API client functions
+- `web/oss/src/lib/Types.ts` - TypeScript type definitions
diff --git a/api/ee/src/apis/fastapi/billing/router.py b/api/ee/src/apis/fastapi/billing/router.py
index 6917870490..0c1f2ff64c 100644
--- a/api/ee/src/apis/fastapi/billing/router.py
+++ b/api/ee/src/apis/fastapi/billing/router.py
@@ -263,7 +263,7 @@ async def handle_events(
organization_id = metadata.get("organization_id")
log.info(
- "Stripe event: %s | %s | %s",
+ "[billing] [stripe] %s | %s | %s",
organization_id,
stripe_event.type,
target,
@@ -446,7 +446,7 @@ async def create_checkout(
)
user = await get_user_with_id(
- user_id=organization.owner,
+ user_id=str(organization.owner_id),
)
if not user:
diff --git a/api/oss/src/services/security/__init__.py b/api/ee/src/apis/fastapi/organizations/__init__.py
similarity index 100%
rename from api/oss/src/services/security/__init__.py
rename to api/ee/src/apis/fastapi/organizations/__init__.py
diff --git a/api/ee/src/apis/fastapi/organizations/models.py b/api/ee/src/apis/fastapi/organizations/models.py
new file mode 100644
index 0000000000..d113d0cbb7
--- /dev/null
+++ b/api/ee/src/apis/fastapi/organizations/models.py
@@ -0,0 +1,84 @@
+"""API models for organization security features (domains and SSO providers)."""
+
+from typing import Optional
+from datetime import datetime
+from pydantic import BaseModel, Field
+
+
+# Domain Verification Models
+class OrganizationDomainCreate(BaseModel):
+ """Request model for creating a domain."""
+
+ domain: str = Field(..., description="Domain name to verify (e.g., 'company.com')")
+ name: Optional[str] = Field(None, description="Friendly name for the domain")
+ description: Optional[str] = Field(None, description="Optional description")
+
+
+class OrganizationDomainVerify(BaseModel):
+ """Request model for verifying a domain."""
+
+ domain_id: str = Field(..., description="ID of the domain to verify")
+
+
+class OrganizationDomainResponse(BaseModel):
+ """Response model for a domain."""
+
+ id: str
+ organization_id: str
+ slug: str # The actual domain (e.g., "company.com")
+ name: Optional[str]
+ description: Optional[str]
+ token: Optional[str] # Verification token
+ flags: dict # Contains is_verified flag
+ created_at: datetime
+ updated_at: Optional[datetime]
+
+ class Config:
+ from_attributes = True
+
+
+# SSO Provider Models
+class OrganizationProviderCreate(BaseModel):
+ """Request model for creating an SSO provider."""
+
+ slug: str = Field(
+ ...,
+ description="Provider slug (lowercase letters and hyphens only)",
+ pattern="^[a-z-]+$",
+ )
+ name: Optional[str] = Field(None, description="Friendly name for the provider")
+ description: Optional[str] = Field(None, description="Optional description")
+ settings: dict = Field(
+ ...,
+ description="Provider settings (client_id, client_secret, issuer_url, scopes)",
+ )
+ flags: Optional[dict] = Field(
+ default=None, description="Provider flags (is_active, is_valid)"
+ )
+
+
+class OrganizationProviderUpdate(BaseModel):
+ """Request model for updating an SSO provider."""
+
+ slug: Optional[str] = Field(None, description="Provider slug", pattern="^[a-z-]+$")
+ name: Optional[str] = None
+ description: Optional[str] = None
+ settings: Optional[dict] = None
+ flags: Optional[dict] = None
+
+
+class OrganizationProviderResponse(BaseModel):
+ """Response model for an SSO provider."""
+
+ id: str
+ organization_id: str
+ slug: str # Provider identifier
+ name: Optional[str]
+ description: Optional[str]
+ settings: dict # Contains client_id, client_secret, issuer_url, scopes
+ flags: dict # Contains is_valid, is_active
+ created_at: datetime
+ updated_at: Optional[datetime]
+
+ class Config:
+ from_attributes = True
diff --git a/api/ee/src/apis/fastapi/organizations/router.py b/api/ee/src/apis/fastapi/organizations/router.py
new file mode 100644
index 0000000000..458e2abe4b
--- /dev/null
+++ b/api/ee/src/apis/fastapi/organizations/router.py
@@ -0,0 +1,283 @@
+"""FastAPI router for organization security features."""
+
+from typing import List
+from fastapi import APIRouter, Request, HTTPException
+from fastapi.responses import JSONResponse, Response
+
+from ee.src.apis.fastapi.organizations.models import (
+ OrganizationDomainCreate,
+ OrganizationDomainVerify,
+ OrganizationDomainResponse,
+ OrganizationProviderCreate,
+ OrganizationProviderUpdate,
+ OrganizationProviderResponse,
+)
+from ee.src.services.organization_security_service import (
+ DomainVerificationService,
+ SSOProviderService,
+)
+from ee.src.services import db_manager_ee
+from ee.src.utils.permissions import check_user_org_access
+from ee.src.services.selectors import get_user_org_and_workspace_id
+
+
+router = APIRouter()
+domain_service = DomainVerificationService()
+provider_service = SSOProviderService()
+
+
+async def verify_user_org_access(user_id: str, organization_id: str) -> None:
+ """Helper to verify user has access to organization."""
+ user_org_data = await get_user_org_and_workspace_id(user_id)
+ has_access = await check_user_org_access(user_org_data, organization_id)
+ if not has_access:
+ raise HTTPException(
+ status_code=403, detail="You do not have access to this organization"
+ )
+
+
+async def require_email_or_social_or_root_enabled(organization_id: str) -> None:
+ """Block domain/provider changes when SSO is the only allowed method."""
+ organization = await db_manager_ee.get_organization(organization_id)
+ flags = organization.flags or {}
+ allow_email = flags.get("allow_email", False)
+ allow_social = flags.get("allow_social", False)
+ allow_root = flags.get("allow_root", False)
+ if not (allow_email or allow_social or allow_root):
+ raise HTTPException(
+ status_code=400,
+ detail=(
+ "To modify domains or SSO providers, enable email or social authentication "
+ "for this organization, or enable root access for owners."
+ ),
+ )
+
+
+async def require_domains_and_auto_join_disabled(organization_id: str) -> None:
+ """Block edits to verified domains when domains-only or auto-join is enabled."""
+ organization = await db_manager_ee.get_organization(organization_id)
+ flags = organization.flags or {}
+ if flags.get("domains_only") or flags.get("auto_join"):
+ raise HTTPException(
+ status_code=400,
+ detail=(
+ "Disable domains-only and auto-join before modifying verified domains."
+ ),
+ )
+
+
+# Domain Verification Endpoints
+
+
+@router.post("/domains", response_model=OrganizationDomainResponse, status_code=201)
+async def create_domain(
+ payload: OrganizationDomainCreate,
+ request: Request,
+):
+ """
+ Create a new domain for verification.
+
+ This endpoint initiates the domain verification process by:
+ 1. Creating a domain record
+ 2. Generating a unique verification token
+ 3. Returning DNS configuration instructions
+
+ The user must add a DNS TXT record to verify ownership.
+ """
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+
+ domain = await domain_service.create_domain(organization_id, payload, user_id)
+
+ return JSONResponse(
+ status_code=201,
+ content=domain.model_dump(mode="json"),
+ )
+
+
+@router.post("/domains/verify", response_model=OrganizationDomainResponse)
+async def verify_domain(
+ payload: OrganizationDomainVerify,
+ request: Request,
+):
+ """
+ Verify domain ownership via DNS TXT record.
+
+ This endpoint checks for the presence of the verification TXT record
+ and marks the domain as verified if found.
+ """
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+ await require_domains_and_auto_join_disabled(organization_id)
+
+ return await domain_service.verify_domain(
+ organization_id, payload.domain_id, user_id
+ )
+
+
+@router.get("/domains", response_model=List[OrganizationDomainResponse])
+async def list_domains(
+ request: Request,
+):
+ """List all domains for the organization."""
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+
+ return await domain_service.list_domains(organization_id)
+
+
+@router.post("/domains/{domain_id}/refresh", response_model=OrganizationDomainResponse)
+async def refresh_domain_token(
+ domain_id: str,
+ request: Request,
+):
+ """
+ Refresh the verification token for an unverified domain.
+
+ Generates a new token and resets the 48-hour expiry window.
+ This is useful when the original token has expired.
+ """
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+ await require_domains_and_auto_join_disabled(organization_id)
+
+ return await domain_service.refresh_token(organization_id, domain_id, user_id)
+
+
+@router.post("/domains/{domain_id}/reset", response_model=OrganizationDomainResponse)
+async def reset_domain(
+ domain_id: str,
+ request: Request,
+):
+ """
+ Reset a verified domain to unverified state for re-verification.
+
+ Generates a new token and marks the domain as unverified.
+ This allows re-verification of already verified domains.
+ """
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+ await require_domains_and_auto_join_disabled(organization_id)
+
+ return await domain_service.reset_domain(organization_id, domain_id, user_id)
+
+
+@router.delete("/domains/{domain_id}", status_code=204)
+async def delete_domain(
+ domain_id: str,
+ request: Request,
+):
+ """Delete a domain."""
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+ await require_domains_and_auto_join_disabled(organization_id)
+
+ await domain_service.delete_domain(organization_id, domain_id, user_id)
+ return Response(status_code=204)
+
+
+# SSO Provider Endpoints
+
+
+@router.post("/providers", response_model=OrganizationProviderResponse, status_code=201)
+async def create_provider(
+ payload: OrganizationProviderCreate,
+ request: Request,
+):
+ """
+ Create a new SSO provider configuration.
+
+ Supported provider types:
+ - oidc: OpenID Connect
+ - saml: SAML 2.0 (coming soon)
+ """
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+ await require_email_or_social_or_root_enabled(organization_id)
+
+ return await provider_service.create_provider(organization_id, payload, user_id)
+
+
+@router.patch("/providers/{provider_id}", response_model=OrganizationProviderResponse)
+async def update_provider(
+ provider_id: str,
+ payload: OrganizationProviderUpdate,
+ request: Request,
+):
+ """Update an SSO provider configuration."""
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+ await require_email_or_social_or_root_enabled(organization_id)
+
+ return await provider_service.update_provider(
+ organization_id, provider_id, payload, user_id
+ )
+
+
+@router.get("/providers", response_model=List[OrganizationProviderResponse])
+async def list_providers(
+ request: Request,
+):
+ """List all SSO providers for the organization."""
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+
+ return await provider_service.list_providers(organization_id)
+
+
+@router.post(
+ "/providers/{provider_id}/test", response_model=OrganizationProviderResponse
+)
+async def test_provider(
+ provider_id: str,
+ request: Request,
+):
+ """
+ Test SSO provider connection.
+
+ This endpoint tests the OIDC provider configuration by fetching the
+ discovery document and validating required endpoints exist.
+ If successful, marks the provider as valid (is_valid=true).
+ If failed, marks as invalid and deactivates (is_valid=false, is_active=false).
+ """
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+ await require_email_or_social_or_root_enabled(organization_id)
+
+ return await provider_service.test_provider(organization_id, provider_id, user_id)
+
+
+@router.delete("/providers/{provider_id}", status_code=204)
+async def delete_provider(
+ provider_id: str,
+ request: Request,
+):
+ """Delete an SSO provider configuration."""
+ organization_id = request.state.organization_id
+ user_id = request.state.user_id
+
+ await verify_user_org_access(user_id, organization_id)
+ await require_email_or_social_or_root_enabled(organization_id)
+
+ await provider_service.delete_provider(organization_id, provider_id, user_id)
+ return Response(status_code=204)
diff --git a/api/ee/src/core/organizations/__init__.py b/api/ee/src/core/organizations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/core/organizations/types.py b/api/ee/src/core/organizations/types.py
new file mode 100644
index 0000000000..639ba5065f
--- /dev/null
+++ b/api/ee/src/core/organizations/types.py
@@ -0,0 +1,79 @@
+from datetime import datetime
+from uuid import UUID
+from pydantic import BaseModel
+from typing import Optional, Dict, Any
+
+
+# ============================================================================
+# ORGANIZATION DOMAINS
+# ============================================================================
+
+
+class OrganizationDomain(BaseModel):
+ id: UUID
+ organization_id: UUID
+ slug: str
+ name: Optional[str] = None
+ description: Optional[str] = None
+ token: Optional[str] = None
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
+ created_at: datetime
+ updated_at: Optional[datetime] = None
+
+ class Config:
+ from_attributes = True
+
+
+class OrganizationDomainCreate(BaseModel):
+ organization_id: UUID
+ slug: str
+ name: Optional[str] = None
+ description: Optional[str] = None
+ token: Optional[str] = None
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
+
+
+# ============================================================================
+# ORGANIZATION PROVIDERS
+# ============================================================================
+
+
+class OrganizationProvider(BaseModel):
+ id: UUID
+ organization_id: UUID
+ slug: str
+ name: Optional[str] = None
+ description: Optional[str] = None
+ settings: Dict[str, Any]
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
+ created_at: datetime
+ updated_at: Optional[datetime] = None
+
+ class Config:
+ from_attributes = True
+
+
+class OrganizationProviderCreate(BaseModel):
+ organization_id: UUID
+ slug: str
+ name: Optional[str] = None
+ description: Optional[str] = None
+ settings: Dict[str, Any]
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
+
+
+class OrganizationProviderUpdate(BaseModel):
+ name: Optional[str] = None
+ description: Optional[str] = None
+ settings: Optional[Dict[str, Any]] = None
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
diff --git a/api/ee/src/core/subscriptions/service.py b/api/ee/src/core/subscriptions/service.py
index 5247cc283f..ffdc35c3c6 100644
--- a/api/ee/src/core/subscriptions/service.py
+++ b/api/ee/src/core/subscriptions/service.py
@@ -153,6 +153,39 @@ async def start_reverse_trial(
return subscription
+ async def start_free_plan(
+ self,
+ *,
+ organization_id: str,
+ ) -> Optional[SubscriptionDTO]:
+ """Start a free/hobby plan for an organization without trial.
+
+ Args:
+ organization_id: The organization ID
+
+ Returns:
+ SubscriptionDTO: The created subscription or None if already exists
+ """
+ now = datetime.now(tz=timezone.utc)
+
+ subscription = await self.read(organization_id=organization_id)
+
+ if subscription:
+ return None
+
+ subscription = await self.create(
+ subscription=SubscriptionDTO(
+ organization_id=organization_id,
+ plan=FREE_PLAN,
+ active=True,
+ anchor=now.day,
+ )
+ )
+
+ log.info("✓ Free plan started for organization %s", organization_id)
+
+ return subscription
+
async def process_event(
self,
*,
@@ -165,7 +198,7 @@ async def process_event(
**kwargs,
) -> SubscriptionDTO:
log.info(
- "Billing event: %s | %s | %s",
+ "[billing] [internal] %s | %s | %s",
organization_id,
event,
plan,
diff --git a/api/ee/src/crons/meters.sh b/api/ee/src/crons/meters.sh
index e5144d6947..f04812643d 100644
--- a/api/ee/src/crons/meters.sh
+++ b/api/ee/src/crons/meters.sh
@@ -1,7 +1,8 @@
#!/bin/sh
set -eu
-AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2-)
+AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2- || true)
+AGENTA_AUTH_KEY="${AGENTA_AUTH_KEY:-replace-me}"
echo "--------------------------------------------------------"
echo "[$(date)] meters.sh running from cron" >> /proc/1/fd/1
@@ -36,4 +37,4 @@ else
fi
fi
-echo "[$(date)] meters.sh done" >> /proc/1/fd/1
\ No newline at end of file
+echo "[$(date)] meters.sh done" >> /proc/1/fd/1
diff --git a/api/ee/src/dbs/postgres/organizations/__init__.py b/api/ee/src/dbs/postgres/organizations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/dbs/postgres/organizations/dao.py b/api/ee/src/dbs/postgres/organizations/dao.py
new file mode 100644
index 0000000000..faf8acad6d
--- /dev/null
+++ b/api/ee/src/dbs/postgres/organizations/dao.py
@@ -0,0 +1,399 @@
+"""Data Access Objects for organization domains and SSO providers."""
+
+from typing import Optional, List
+from sqlalchemy import select, and_
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from oss.src.dbs.postgres.shared.engine import engine
+from ee.src.dbs.postgres.organizations.dbes import (
+ OrganizationDomainDBE,
+ OrganizationProviderDBE,
+)
+
+
+class OrganizationDomainsDAO:
+ """DAO for organization_domains table.
+
+ Can be used in two ways:
+ 1. With a session (for service layer): OrganizationDomainsDAO(session)
+ 2. Without a session (creates own sessions): OrganizationDomainsDAO()
+ """
+
+ def __init__(self, session: Optional[AsyncSession] = None):
+ self.session = session
+
+ async def create(
+ self,
+ organization_id: str,
+ slug: str,
+ name: Optional[str],
+ description: Optional[str],
+ token: str,
+ created_by_id: str,
+ ) -> OrganizationDomainDBE:
+ """Create a new domain for an organization."""
+ if self.session:
+ domain = OrganizationDomainDBE(
+ organization_id=organization_id,
+ slug=slug,
+ name=name,
+ description=description,
+ token=token,
+ flags={"is_verified": False},
+ created_by_id=created_by_id,
+ )
+ self.session.add(domain)
+ await self.session.flush()
+ await self.session.refresh(domain)
+ return domain
+ else:
+ async with engine.core_session() as session:
+ domain = OrganizationDomainDBE(
+ organization_id=organization_id,
+ slug=slug,
+ name=name,
+ description=description,
+ token=token,
+ flags={"is_verified": False},
+ created_by_id=created_by_id,
+ )
+ session.add(domain)
+ await session.commit()
+ await session.refresh(domain)
+ return domain
+
+ async def get_by_id(
+ self, domain_id: str, organization_id: str
+ ) -> Optional[OrganizationDomainDBE]:
+ """Get a domain by ID."""
+ if self.session:
+ result = await self.session.execute(
+ select(OrganizationDomainDBE).where(
+ and_(
+ OrganizationDomainDBE.id == domain_id,
+ OrganizationDomainDBE.organization_id == organization_id,
+ )
+ )
+ )
+ return result.scalars().first()
+ else:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDomainDBE).where(
+ and_(
+ OrganizationDomainDBE.id == domain_id,
+ OrganizationDomainDBE.organization_id == organization_id,
+ )
+ )
+ )
+ return result.scalars().first()
+
+ async def get_by_slug(
+ self, slug: str, organization_id: str
+ ) -> Optional[OrganizationDomainDBE]:
+ """Get a domain by slug (domain name)."""
+ if self.session:
+ result = await self.session.execute(
+ select(OrganizationDomainDBE).where(
+ and_(
+ OrganizationDomainDBE.slug == slug,
+ OrganizationDomainDBE.organization_id == organization_id,
+ )
+ )
+ )
+ return result.scalars().first()
+ else:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDomainDBE).where(
+ and_(
+ OrganizationDomainDBE.slug == slug,
+ OrganizationDomainDBE.organization_id == organization_id,
+ )
+ )
+ )
+ return result.scalars().first()
+
+ async def get_verified_by_slug(self, slug: str) -> Optional[OrganizationDomainDBE]:
+ """Get a verified domain by slug (domain name), across organizations."""
+ is_verified = OrganizationDomainDBE.flags["is_verified"].astext == "true"
+ if self.session:
+ result = await self.session.execute(
+ select(OrganizationDomainDBE).where(
+ and_(
+ OrganizationDomainDBE.slug == slug,
+ is_verified,
+ )
+ )
+ )
+ return result.scalars().first()
+ else:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDomainDBE).where(
+ and_(
+ OrganizationDomainDBE.slug == slug,
+ is_verified,
+ )
+ )
+ )
+ return result.scalars().first()
+
+ async def list_by_organization(
+ self, organization_id: str
+ ) -> List[OrganizationDomainDBE]:
+ """List all domains for an organization."""
+ if self.session:
+ result = await self.session.execute(
+ select(OrganizationDomainDBE).where(
+ OrganizationDomainDBE.organization_id == organization_id
+ )
+ )
+ return list(result.scalars().all())
+ else:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDomainDBE).where(
+ OrganizationDomainDBE.organization_id == organization_id
+ )
+ )
+ return list(result.scalars().all())
+
+ async def update_flags(
+ self, domain_id: str, flags: dict, updated_by_id: str
+ ) -> Optional[OrganizationDomainDBE]:
+ """Update domain flags (e.g., mark as verified)."""
+        if self.session:
+            domain = await self.session.get(OrganizationDomainDBE, domain_id)
+            if domain:
+                domain.flags = flags
+                domain.updated_by_id = updated_by_id
+                await self.session.flush()
+                await self.session.refresh(domain)
+            return domain
+        else:
+            async with engine.core_session() as session:
+                domain = await session.get(OrganizationDomainDBE, domain_id)
+                if domain:
+                    domain.flags = flags
+                    domain.updated_by_id = updated_by_id
+                    await session.commit()
+                    await session.refresh(domain)
+                return domain
+
+ async def delete(self, domain_id: str, deleted_by_id: str) -> bool:
+ """Hard delete a domain."""
+ if self.session:
+ domain = await self.session.get(OrganizationDomainDBE, domain_id)
+ if domain:
+ await self.session.delete(domain)
+ await self.session.flush()
+ return True
+ return False
+ else:
+ async with engine.core_session() as session:
+ domain = await session.get(OrganizationDomainDBE, domain_id)
+ if domain:
+ await session.delete(domain)
+ await session.commit()
+ return True
+ return False
+
+
+class OrganizationProvidersDAO:
+ """DAO for organization_providers table.
+
+ Can be used in two ways:
+ 1. With a session (for service layer): OrganizationProvidersDAO(session)
+ 2. Without a session (creates own sessions): OrganizationProvidersDAO()
+ """
+
+ def __init__(self, session: Optional[AsyncSession] = None):
+ self.session = session
+
+ async def create(
+ self,
+ organization_id: str,
+ slug: str,
+ secret_id: str,
+ created_by_id: str,
+ name: Optional[str],
+ description: Optional[str] = None,
+ flags: Optional[dict] = None,
+ ) -> OrganizationProviderDBE:
+ """Create a new SSO provider for an organization."""
+ if self.session:
+ provider = OrganizationProviderDBE(
+ organization_id=organization_id,
+ slug=slug,
+ name=name,
+ description=description,
+ secret_id=secret_id,
+ flags=flags or {"is_active": True, "is_valid": False},
+ created_by_id=created_by_id,
+ )
+ self.session.add(provider)
+ await self.session.flush()
+ await self.session.refresh(provider)
+ return provider
+ else:
+ async with engine.core_session() as session:
+ provider = OrganizationProviderDBE(
+ organization_id=organization_id,
+ slug=slug,
+ name=name,
+ description=description,
+ secret_id=secret_id,
+ flags=flags or {"is_active": True, "is_valid": False},
+ created_by_id=created_by_id,
+ )
+ session.add(provider)
+ await session.commit()
+ await session.refresh(provider)
+ return provider
+
+ async def get_by_id(
+ self, provider_id: str, organization_id: str
+ ) -> Optional[OrganizationProviderDBE]:
+ """Get a provider by ID."""
+ if self.session:
+ result = await self.session.execute(
+ select(OrganizationProviderDBE).where(
+ and_(
+ OrganizationProviderDBE.id == provider_id,
+ OrganizationProviderDBE.organization_id == organization_id,
+ )
+ )
+ )
+ return result.scalars().first()
+ else:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationProviderDBE).where(
+ and_(
+ OrganizationProviderDBE.id == provider_id,
+ OrganizationProviderDBE.organization_id == organization_id,
+ )
+ )
+ )
+ return result.scalars().first()
+
+ async def get_by_id_any(
+ self, provider_id: str
+ ) -> Optional[OrganizationProviderDBE]:
+ """Get a provider by ID without organization scoping."""
+ if self.session:
+ result = await self.session.execute(
+ select(OrganizationProviderDBE).where(
+ OrganizationProviderDBE.id == provider_id
+ )
+ )
+ return result.scalars().first()
+ else:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationProviderDBE).where(
+ OrganizationProviderDBE.id == provider_id
+ )
+ )
+ return result.scalars().first()
+
+ async def get_by_slug(
+ self, slug: str, organization_id: str
+ ) -> Optional[OrganizationProviderDBE]:
+ """Get a provider by slug."""
+ if self.session:
+ result = await self.session.execute(
+ select(OrganizationProviderDBE).where(
+ and_(
+ OrganizationProviderDBE.slug == slug,
+ OrganizationProviderDBE.organization_id == organization_id,
+ )
+ )
+ )
+ return result.scalars().first()
+ else:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationProviderDBE).where(
+ and_(
+ OrganizationProviderDBE.slug == slug,
+ OrganizationProviderDBE.organization_id == organization_id,
+ )
+ )
+ )
+ return result.scalars().first()
+
+ async def list_by_organization(
+ self, organization_id: str
+ ) -> List[OrganizationProviderDBE]:
+ """List all SSO providers for an organization."""
+ if self.session:
+ result = await self.session.execute(
+ select(OrganizationProviderDBE).where(
+ OrganizationProviderDBE.organization_id == organization_id
+ )
+ )
+ return list(result.scalars().all())
+ else:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationProviderDBE).where(
+ OrganizationProviderDBE.organization_id == organization_id
+ )
+ )
+ return list(result.scalars().all())
+
+ async def update(
+ self,
+ provider_id: str,
+ secret_id: Optional[str] = None,
+ flags: Optional[dict] = None,
+ updated_by_id: Optional[str] = None,
+ ) -> Optional[OrganizationProviderDBE]:
+ """Update a provider's secret reference or flags."""
+ if self.session:
+ provider = await self.session.get(OrganizationProviderDBE, provider_id)
+ if provider:
+ if secret_id is not None:
+ provider.secret_id = secret_id
+ if flags is not None:
+ provider.flags = flags
+ if updated_by_id:
+ provider.updated_by_id = updated_by_id
+ await self.session.flush()
+ await self.session.refresh(provider)
+ return provider
+ else:
+ async with engine.core_session() as session:
+ provider = await session.get(OrganizationProviderDBE, provider_id)
+ if provider:
+ if secret_id is not None:
+ provider.secret_id = secret_id
+ if flags is not None:
+ provider.flags = flags
+ if updated_by_id:
+ provider.updated_by_id = updated_by_id
+ await session.commit()
+ await session.refresh(provider)
+ return provider
+
+ async def delete(self, provider_id: str, deleted_by_id: str) -> bool:
+ """Hard delete a provider."""
+ if self.session:
+ provider = await self.session.get(OrganizationProviderDBE, provider_id)
+ if provider:
+ await self.session.delete(provider)
+ await self.session.flush()
+ return True
+ return False
+ else:
+ async with engine.core_session() as session:
+ provider = await session.get(OrganizationProviderDBE, provider_id)
+ if provider:
+ await session.delete(provider)
+ await session.commit()
+ return True
+ return False
diff --git a/api/ee/src/dbs/postgres/organizations/dbas.py b/api/ee/src/dbs/postgres/organizations/dbas.py
new file mode 100644
index 0000000000..31fcc1cd5f
--- /dev/null
+++ b/api/ee/src/dbs/postgres/organizations/dbas.py
@@ -0,0 +1,81 @@
+import uuid_utils.compat as uuid
+from sqlalchemy import Column, String, UUID
+from sqlalchemy.dialects.postgresql import JSONB
+
+from oss.src.dbs.postgres.shared.dbas import (
+ LifecycleDBA,
+ HeaderDBA,
+ OrganizationScopeDBA,
+)
+
+
+class OrganizationDomainDBA(OrganizationScopeDBA, LifecycleDBA):
+ __abstract__ = True
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ slug = Column(
+ String,
+ nullable=False,
+ )
+ name = Column(
+ String,
+ nullable=False,
+ )
+ description = Column(
+ String,
+ nullable=True,
+ )
+ token = Column(
+ String,
+ nullable=True,
+ )
+ flags = Column(
+ JSONB(none_as_null=True),
+ nullable=True,
+ )
+ tags = Column(
+ JSONB(none_as_null=True),
+ nullable=True,
+ )
+ meta = Column(
+ JSONB(none_as_null=True),
+ nullable=True,
+ )
+
+
+class OrganizationProviderDBA(OrganizationScopeDBA, HeaderDBA, LifecycleDBA):
+ __abstract__ = True
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ slug = Column(
+ String,
+ nullable=False,
+ )
+ secret_id = Column(
+ UUID(as_uuid=True),
+ nullable=False,
+ )
+ flags = Column(
+ JSONB(none_as_null=True),
+ nullable=True,
+ )
+ tags = Column(
+ JSONB(none_as_null=True),
+ nullable=True,
+ )
+ meta = Column(
+ JSONB(none_as_null=True),
+ nullable=True,
+ )
diff --git a/api/ee/src/dbs/postgres/organizations/dbes.py b/api/ee/src/dbs/postgres/organizations/dbes.py
new file mode 100644
index 0000000000..84abab7703
--- /dev/null
+++ b/api/ee/src/dbs/postgres/organizations/dbes.py
@@ -0,0 +1,70 @@
+from sqlalchemy import (
+ ForeignKeyConstraint,
+ UniqueConstraint,
+ Index,
+ text,
+)
+
+from oss.src.dbs.postgres.shared.base import Base
+from ee.src.dbs.postgres.organizations.dbas import (
+ OrganizationDomainDBA,
+ OrganizationProviderDBA,
+)
+
+
+class OrganizationDomainDBE(Base, OrganizationDomainDBA):
+ __tablename__ = "organization_domains"
+
+ __table_args__ = (
+ ForeignKeyConstraint(
+ ["organization_id"],
+ ["organizations.id"],
+ ondelete="CASCADE",
+ ),
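+        # Partial unique index: a slug may exist unverified in several organizations,
+        # but only one organization can hold the verified claim for a given domain.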
+ Index(
+ "uq_organization_domains_slug_verified",
+ "slug",
+ unique=True,
+ postgresql_where=text("(flags->>'is_verified') = 'true'"),
+ ),
+ Index(
+ "ix_organization_domains_org",
+ "organization_id",
+ ),
+ Index(
+ "ix_organization_domains_flags",
+ "flags",
+ postgresql_using="gin",
+ ),
+ )
+
+
+class OrganizationProviderDBE(Base, OrganizationProviderDBA):
+ __tablename__ = "organization_providers"
+
+ __table_args__ = (
+ ForeignKeyConstraint(
+ ["organization_id"],
+ ["organizations.id"],
+ ondelete="CASCADE",
+ ),
+ ForeignKeyConstraint(
+ ["secret_id"],
+ ["secrets.id"],
+ ondelete="CASCADE",
+ ),
+ UniqueConstraint(
+ "organization_id",
+ "slug",
+ name="uq_organization_providers_org_slug",
+ ),
+ Index(
+ "ix_organization_providers_org",
+ "organization_id",
+ ),
+ Index(
+ "ix_organization_providers_flags",
+ "flags",
+ postgresql_using="gin",
+ ),
+ )
diff --git a/api/ee/src/main.py b/api/ee/src/main.py
index 036bda6f0f..499ef9137d 100644
--- a/api/ee/src/main.py
+++ b/api/ee/src/main.py
@@ -11,6 +11,10 @@
from ee.src.core.subscriptions.service import SubscriptionsService
from ee.src.apis.fastapi.billing.router import SubscriptionsRouter
+from ee.src.apis.fastapi.organizations.router import (
+ router as organization_security_router,
+)
+from oss.src.apis.fastapi.auth.router import auth_router
# DBS --------------------------------------------------------------------------
@@ -56,6 +60,14 @@ def extend_main(app: FastAPI):
# ROUTES (more) ------------------------------------------------------------
+ # Register security router BEFORE organization router to avoid route conflicts
+ # (specific routes must come before catch-all /{organization_id} route)
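+    # e.g. GET /organizations/providers would otherwise be captured by the catch-all
+    # route with organization_id="providers"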
+ app.include_router(
+ organization_security_router,
+ prefix="/organizations",
+ tags=["Organizations", "Security"],
+ )
+
app.include_router(
organization_router.router,
prefix="/organizations",
@@ -66,6 +78,14 @@ def extend_main(app: FastAPI):
prefix="/workspaces",
)
+ # Auth router at root level (no /api prefix) for OAuth callbacks
+ app.include_router(
+ auth_router,
+ prefix="/auth",
+ tags=["Auth"],
+ include_in_schema=False,
+ )
+
# --------------------------------------------------------------------------
return app
diff --git a/api/ee/src/models/api/organization_models.py b/api/ee/src/models/api/organization_models.py
index 1002d81b5a..7d1a60735f 100644
--- a/api/ee/src/models/api/organization_models.py
+++ b/api/ee/src/models/api/organization_models.py
@@ -1,32 +1,51 @@
-from typing import Optional, List
+from typing import Optional, List, Dict, Any
+from uuid import UUID
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, ConfigDict
class Organization(BaseModel):
- id: str
- name: str
- description: str
- type: Optional[str] = None
- owner: str
- workspaces: List[str] = Field(default_factory=list)
+ model_config = ConfigDict(from_attributes=True)
+ id: UUID
+ slug: Optional[str] = None
+ #
+ name: Optional[str] = None
+ description: Optional[str] = None
+ #
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
+ #
+ owner_id: UUID
+ #
members: List[str] = Field(default_factory=list)
invitations: List = Field(default_factory=list)
+ workspaces: List[str] = Field(default_factory=list)
class CreateOrganization(BaseModel):
- name: str
- owner: str
+ name: Optional[str] = None
description: Optional[str] = None
- type: Optional[str] = None
+ #
+ is_demo: bool = False
+ is_personal: bool = False
+ #
+ owner_id: UUID
class OrganizationUpdate(BaseModel):
+ slug: Optional[str] = None
name: Optional[str] = None
description: Optional[str] = None
+ flags: Optional[Dict[str, Any]] = None
updated_at: Optional[str] = None
class OrganizationOutput(BaseModel):
id: str
- name: str
+ name: Optional[str] = None
+
+
+class CreateCollaborativeOrganization(BaseModel):
+ name: Optional[str] = None
+ description: Optional[str] = None
diff --git a/api/ee/src/models/api/workspace_models.py b/api/ee/src/models/api/workspace_models.py
index 56218eb38a..56f5768681 100644
--- a/api/ee/src/models/api/workspace_models.py
+++ b/api/ee/src/models/api/workspace_models.py
@@ -25,7 +25,7 @@ class WorkspaceMemberResponse(BaseModel):
class Workspace(BaseModel):
id: Optional[str] = None
- name: str
+ name: Optional[str] = None
description: Optional[str] = None
type: Optional[str]
members: Optional[List[WorkspaceMember]] = None
@@ -33,7 +33,7 @@ class Workspace(BaseModel):
class WorkspaceResponse(TimestampModel):
id: str
- name: str
+ name: Optional[str] = None
description: Optional[str] = None
type: Optional[str]
organization: str
@@ -41,7 +41,7 @@ class WorkspaceResponse(TimestampModel):
class CreateWorkspace(BaseModel):
- name: str
+ name: Optional[str] = None
description: Optional[str] = None
type: Optional[str] = None
diff --git a/api/ee/src/models/db_models.py b/api/ee/src/models/db_models.py
index fb57520c92..bc78201a61 100644
--- a/api/ee/src/models/db_models.py
+++ b/api/ee/src/models/db_models.py
@@ -28,6 +28,11 @@ class OrganizationMemberDB(Base):
UUID(as_uuid=True),
ForeignKey("organizations.id", ondelete="CASCADE"),
)
+ role = Column(
+ String,
+ nullable=False,
+ server_default="member",
+ )
user = relationship(
"oss.src.models.db_models.UserDB",
diff --git a/api/ee/src/models/extended/deprecated_models.py b/api/ee/src/models/extended/deprecated_models.py
index c68a07e851..79993c6089 100644
--- a/api/ee/src/models/extended/deprecated_models.py
+++ b/api/ee/src/models/extended/deprecated_models.py
@@ -78,6 +78,32 @@ class UserOrganizationDB(DeprecatedBase):
organization_id = Column(UUID(as_uuid=True), ForeignKey("organizations.id"))
+class DeprecatedOrganizationDB(DeprecatedBase):
+ """
+ Deprecated OrganizationDB model with 'owner' field.
+ Used by migrations that ran before the schema was changed to use 'owner_id'.
+ """
+
+ __tablename__ = "organizations"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ name = Column(String)
+ owner = Column(String) # Deprecated: replaced by owner_id (UUID)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+
+
class OldInvitationDB(DeprecatedBase):
__tablename__ = "invitations"
__table_args__ = {"extend_existing": True}
diff --git a/api/ee/src/routers/organization_router.py b/api/ee/src/routers/organization_router.py
index 91302c9b7e..2f205dd04d 100644
--- a/api/ee/src/routers/organization_router.py
+++ b/api/ee/src/routers/organization_router.py
@@ -21,12 +21,24 @@
check_rbac_permission,
)
from ee.src.models.api.organization_models import (
+ Organization,
OrganizationUpdate,
OrganizationOutput,
+ CreateCollaborativeOrganization,
)
from ee.src.services.organization_service import (
update_an_organization,
get_organization_details,
+ transfer_organization_ownership as transfer_ownership_service,
+)
+from ee.src.services.organization_security_service import SSOProviderService
+from ee.src.dbs.postgres.organizations.dao import (
+ OrganizationDomainsDAO,
+)
+from ee.src.core.organizations.types import (
+ OrganizationDomainCreate,
+ OrganizationProviderCreate,
+ OrganizationProviderUpdate,
)
@@ -68,30 +80,46 @@ async def fetch_organization_details(
"""
try:
- workspace_id = await db_manager_ee.get_default_workspace_id_from_organization(
- organization_id=organization_id
- )
+ # Get workspace and project IDs for permission checking
+ workspace_id = None
+ project_id = None
+ try:
+ workspace_id = (
+ await db_manager_ee.get_default_workspace_id_from_organization(
+ organization_id=organization_id
+ )
+ )
+ project_id = await db_manager.get_default_project_id_from_workspace(
+ workspace_id=workspace_id
+ )
+ except Exception:
+ # Organization has no workspace or project - check org-level permission directly
+ log.warning(
+ f"Organization {organization_id} has no workspace or project, checking org-level access",
+ exc_info=True,
+ )
- project_id = await db_manager.get_default_project_id_from_workspace(
- workspace_id=workspace_id
- )
+ # If we have a project, check project membership
+ if project_id:
+ project_memberships = (
+ await db_manager_ee.fetch_project_memberships_by_user_id(
+ user_id=str(request.state.user_id)
+ )
+ )
- project_memberships = await db_manager_ee.fetch_project_memberships_by_user_id(
- user_id=str(request.state.user_id)
- )
+ membership = None
+ for project_membership in project_memberships:
+ if str(project_membership.project_id) == project_id:
+ membership = project_membership
+ break
- membership = None
- for project_membership in project_memberships:
- if str(project_membership.project_id) == project_id:
- membership = project_membership
- break
-
- if not membership:
- return JSONResponse(
- status_code=403,
- content={"detail": "You do not have access to this organization"},
- )
+ if not membership:
+ return JSONResponse(
+ status_code=403,
+ content={"detail": "You do not have access to this organization"},
+ )
+ # Check org-level access
user_org_workspace_data = await get_user_org_and_workspace_id(
request.state.user_id
)
@@ -118,15 +146,29 @@ async def fetch_organization_details(
)
-@router.put("/{organization_id}/", operation_id="update_organization")
+@router.put(
+ "/{organization_id}/",
+ operation_id="update_organization",
+ response_model=Organization,
+)
+@router.patch(
+ "/{organization_id}/",
+ operation_id="patch_organization",
+ response_model=Organization,
+)
async def update_organization(
organization_id: str,
payload: OrganizationUpdate,
request: Request,
):
- if not payload.name and not payload.description:
+ if (
+ not payload.slug
+ and not payload.name
+ and not payload.description
+ and not payload.flags
+ ):
return JSONResponse(
- {"detail": "Please provide a name or description to update"},
+ {"detail": "Please provide a field to update"},
status_code=400,
)
@@ -147,7 +189,23 @@ async def update_organization(
return organization
+ except ValueError as e:
+ # Slug validation errors (format, immutability, personal org, etc.)
+ return JSONResponse(
+ {"detail": str(e)},
+ status_code=400,
+ )
except Exception as e:
+ # Check for unique constraint violation (duplicate slug)
+ from sqlalchemy.exc import IntegrityError
+
+ if isinstance(e, IntegrityError) and "uq_organizations_slug" in str(e):
+ return JSONResponse(
+ {
+ "detail": "Slug already in use. Please select another slug or contact your administrator."
+ },
+ status_code=409,
+ )
raise HTTPException(
status_code=500,
detail=str(e),
@@ -234,3 +292,614 @@ async def update_workspace(
status_code=500,
detail=str(e),
)
+
+
+@router.post(
+ "/{organization_id}/transfer/{new_owner_id}",
+ operation_id="transfer_organization_ownership",
+)
+async def transfer_organization_ownership(
+ organization_id: str,
+ new_owner_id: str,
+ request: Request,
+):
+ """Transfer organization ownership to another member."""
+ try:
+ user_id = request.state.user_id
+
+ # Check if current user is the owner of the organization
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(user_id)
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "Only the organization owner can transfer ownership"},
+ status_code=403,
+ )
+
+ # Transfer ownership via service layer
+ organization = await transfer_ownership_service(
+ organization_id=organization_id,
+ new_owner_id=new_owner_id,
+ current_user_id=str(user_id),
+ )
+
+ return JSONResponse(
+ {
+ "organization_id": str(organization.id),
+ "owner_id": str(organization.owner_id),
+ },
+ status_code=200,
+ )
+
+ except ValueError as e:
+ # New owner not a member or organization not found
+ return JSONResponse(
+ {"detail": str(e)},
+ status_code=400,
+ )
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
+
+
+@router.post("/", operation_id="create_collaborative_organization")
+async def create_collaborative_organization(
+ payload: CreateCollaborativeOrganization,
+ request: Request,
+):
+ """Create a new collaborative organization."""
+ try:
+ from uuid import UUID
+ from ee.src.services.commoners import create_organization_with_subscription
+
+ user = await db_manager.get_user(request.state.user_id)
+ if not user:
+ return JSONResponse(
+ {"detail": "User not found"},
+ status_code=404,
+ )
+
+ organization = await create_organization_with_subscription(
+ user_id=UUID(str(user.id)),
+ organization_email=user.email,
+ organization_name=payload.name,
+ organization_description=payload.description,
+ is_personal=False, # Collaborative organization
+ use_reverse_trial=False, # Use hobby plan instead
+ )
+
+ log.info(
+ "[organization] collaborative organization created",
+ organization_id=organization.id,
+ user_id=user.id,
+ )
+
+ return JSONResponse(
+ {
+ "id": str(organization.id),
+ "name": organization.name,
+ "description": organization.description,
+ },
+ status_code=201,
+ )
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
+
+
+@router.delete("/{organization_id}/", operation_id="delete_organization")
+async def delete_organization(
+ organization_id: str,
+ request: Request,
+):
+ """Delete an organization (owner only)."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "You do not have permission to perform this action"},
+ status_code=403,
+ )
+
+ await db_manager_ee.delete_organization(organization_id)
+
+ log.info(
+ "[organization] organization deleted",
+ organization_id=organization_id,
+ user_id=request.state.user_id,
+ )
+
+ return JSONResponse(
+ {"detail": "Organization deleted successfully"},
+ status_code=200,
+ )
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
+
+
+# ============================================================================
+# Domain Verification Endpoints
+# ============================================================================
+
+
+@router.get("/{organization_id}/domains/", operation_id="list_organization_domains")
+async def list_organization_domains(
+ organization_id: str,
+ request: Request,
+):
+ """List all domains for an organization."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "You do not have access to this organization"},
+ status_code=403,
+ )
+
+ from uuid import UUID
+
+ domains_dao = OrganizationDomainsDAO()
+ domains = await domains_dao.list_by_organization(UUID(organization_id))
+
+ return [
+ {
+ "id": str(domain.id),
+ "slug": domain.slug,
+ "organization_id": str(domain.organization_id),
+ "flags": domain.flags,
+ "created_at": domain.created_at.isoformat()
+ if domain.created_at
+ else None,
+ "updated_at": domain.updated_at.isoformat()
+ if domain.updated_at
+ else None,
+ }
+ for domain in domains
+ ]
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.post("/{organization_id}/domains/", operation_id="create_organization_domain")
+async def create_organization_domain(
+ organization_id: str,
+ request: Request,
+ domain: str,
+):
+ """Add a new domain to an organization."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "Only organization owners can add domains"},
+ status_code=403,
+ )
+
+        from secrets import token_urlsafe
+
+        domains_dao = OrganizationDomainsDAO()
+        created_domain = await domains_dao.create(
+            organization_id=organization_id,
+            slug=domain,
+            name=domain,
+            description=None,
+            token=token_urlsafe(32),
+            created_by_id=str(request.state.user_id),
+        )
+
+ return {
+ "id": str(created_domain.id),
+ "slug": created_domain.slug,
+ "organization_id": str(created_domain.organization_id),
+ "flags": created_domain.flags,
+ "created_at": created_domain.created_at.isoformat()
+ if created_domain.created_at
+ else None,
+ "updated_at": created_domain.updated_at.isoformat()
+ if created_domain.updated_at
+ else None,
+ }
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.get(
+ "/{organization_id}/domains/{domain_id}", operation_id="get_organization_domain"
+)
+async def get_organization_domain(
+ organization_id: str,
+ domain_id: str,
+ request: Request,
+):
+ """Get a single domain by ID."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "You do not have access to this organization"},
+ status_code=403,
+ )
+
+        domains_dao = OrganizationDomainsDAO()
+        domain = await domains_dao.get_by_id(domain_id, organization_id)
+
+ if not domain:
+ return JSONResponse(
+ {"detail": "Domain not found"},
+ status_code=404,
+ )
+
+ return {
+ "id": str(domain.id),
+ "slug": domain.slug,
+ "organization_id": str(domain.organization_id),
+ "flags": domain.flags,
+ "created_at": domain.created_at.isoformat() if domain.created_at else None,
+ "updated_at": domain.updated_at.isoformat() if domain.updated_at else None,
+ }
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.delete(
+ "/{organization_id}/domains/{domain_id}", operation_id="delete_organization_domain"
+)
+async def delete_organization_domain(
+ organization_id: str,
+ domain_id: str,
+ request: Request,
+):
+ """Delete a domain from an organization."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "Only organization owners can delete domains"},
+ status_code=403,
+ )
+
+        domains_dao = OrganizationDomainsDAO()
+        await domains_dao.delete(domain_id, str(request.state.user_id))
+
+ return JSONResponse(
+ {"detail": "Domain deleted successfully"},
+ status_code=200,
+ )
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.post(
+ "/{organization_id}/domains/{domain_id}/verify",
+ operation_id="verify_organization_domain",
+)
+async def verify_organization_domain(
+ organization_id: str,
+ domain_id: str,
+ request: Request,
+):
+ """Verify a domain (marks it as verified)."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "Only organization owners can verify domains"},
+ status_code=403,
+ )
+
+        domains_dao = OrganizationDomainsDAO()
+        verified_domain = await domains_dao.update_flags(
+            domain_id, {"is_verified": True}, str(request.state.user_id)
+        )
+
+ if not verified_domain:
+ return JSONResponse(
+ {"detail": "Domain not found"},
+ status_code=404,
+ )
+
+ return {
+ "id": str(verified_domain.id),
+ "slug": verified_domain.slug,
+ "organization_id": str(verified_domain.organization_id),
+ "flags": verified_domain.flags,
+ "created_at": verified_domain.created_at.isoformat()
+ if verified_domain.created_at
+ else None,
+ "updated_at": verified_domain.updated_at.isoformat()
+ if verified_domain.updated_at
+ else None,
+ }
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+# ============================================================================
+# SSO/OIDC Provider Endpoints
+# ============================================================================
+
+
+@router.get("/{organization_id}/providers/", operation_id="list_organization_providers")
+async def list_organization_providers(
+ organization_id: str,
+ request: Request,
+):
+ """List all SSO providers for an organization."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "You do not have access to this organization"},
+ status_code=403,
+ )
+
+ from uuid import UUID
+
+ provider_service = SSOProviderService()
+ return await provider_service.list_providers(organization_id)
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.post(
+ "/{organization_id}/providers/", operation_id="create_organization_provider"
+)
+async def create_organization_provider(
+ organization_id: str,
+ request: Request,
+ payload: dict,
+):
+ """Add a new SSO provider to an organization."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "Only organization owners can add SSO providers"},
+ status_code=403,
+ )
+
+ from uuid import UUID
+
+ provider_service = SSOProviderService()
+ provider_create = OrganizationProviderCreate(
+ slug=payload.get("slug"),
+ organization_id=UUID(organization_id),
+ name=payload.get("name"),
+ description=payload.get("description"),
+ settings=payload.get("settings"),
+ flags=payload.get("flags"),
+ tags=payload.get("tags"),
+ meta=payload.get("meta"),
+ )
+ created_provider = await provider_service.create_provider(
+ organization_id=organization_id,
+ payload=provider_create,
+ user_id=str(request.state.user_id),
+ )
+
+ return created_provider
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.get(
+ "/{organization_id}/providers/{provider_id}",
+ operation_id="get_organization_provider",
+)
+async def get_organization_provider(
+ organization_id: str,
+ provider_id: str,
+ request: Request,
+):
+ """Get a single SSO provider by ID."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "You do not have access to this organization"},
+ status_code=403,
+ )
+
+ from uuid import UUID
+
+ provider_service = SSOProviderService()
+ return await provider_service.get_provider(
+ organization_id=organization_id,
+ provider_id=provider_id,
+ )
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.patch(
+ "/{organization_id}/providers/{provider_id}",
+ operation_id="update_organization_provider",
+)
+async def update_organization_provider(
+ organization_id: str,
+ provider_id: str,
+ request: Request,
+ payload: dict,
+):
+ """Update an SSO provider."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "Only organization owners can update SSO providers"},
+ status_code=403,
+ )
+
+ from uuid import UUID
+
+ provider_service = SSOProviderService()
+ provider_update = OrganizationProviderUpdate(
+ name=payload.get("name"),
+ description=payload.get("description"),
+ settings=payload.get("settings"),
+ flags=payload.get("flags"),
+ tags=payload.get("tags"),
+ meta=payload.get("meta"),
+ )
+ updated_provider = await provider_service.update_provider(
+ organization_id=organization_id,
+ provider_id=provider_id,
+ payload=provider_update,
+ user_id=str(request.state.user_id),
+ )
+
+ return updated_provider
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.delete(
+ "/{organization_id}/providers/{provider_id}",
+ operation_id="delete_organization_provider",
+)
+async def delete_organization_provider(
+ organization_id: str,
+ provider_id: str,
+ request: Request,
+):
+ """Delete an SSO provider from an organization."""
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, organization_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "Only organization owners can delete SSO providers"},
+ status_code=403,
+ )
+
+ from uuid import UUID
+
+ provider_service = SSOProviderService()
+ await provider_service.delete_provider(
+ organization_id=organization_id,
+ provider_id=provider_id,
+ user_id=str(request.state.user_id),
+ )
+
+ return JSONResponse(
+ {"detail": "Provider deleted successfully"},
+ status_code=200,
+ )
+
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise HTTPException(status_code=500, detail=str(e))
diff --git a/api/ee/src/services/admin_manager.py b/api/ee/src/services/admin_manager.py
index ec3b9bd61b..1716f4c7ce 100644
--- a/api/ee/src/services/admin_manager.py
+++ b/api/ee/src/services/admin_manager.py
@@ -60,21 +60,27 @@ class UserRequest(BaseModel):
class OrganizationRequest(BaseModel):
- name: str
- description: str
+ name: Optional[str] = None
+ description: Optional[str] = None
+ #
+ is_demo: bool = False
+ is_personal: bool = False
+ #
+ owner_id: UUID
class WorkspaceRequest(BaseModel):
- name: str
- description: str
+ name: Optional[str] = None
+ description: Optional[str] = None
+ #
is_default: bool
#
organization_ref: Reference
class ProjectRequest(BaseModel):
- name: str
- description: str
+ name: Optional[str] = None
+ description: Optional[str] = None
is_default: bool
#
workspace_ref: Reference
@@ -169,13 +175,13 @@ async def create_user(
session.add(user_db)
+ await session.commit()
+
log.info(
"[scopes] user created",
user_id=user_db.id,
)
- await session.commit()
-
response = Reference(id=user_db.id)
return response
@@ -186,24 +192,25 @@ async def create_organization(
) -> Reference:
async with engine.core_session() as session:
organization_db = OrganizationDB(
- # id=uuid7() # use default
- #
name=request.name,
description=request.description,
- #
- owner="", # move 'owner' from here to membership 'role'
- # type=... # remove 'type'
+ flags={
+ "is_demo": False,
+ "is_personal": request.is_personal,
+ },
+ owner_id=request.owner_id,
+ created_by_id=request.owner_id,
)
session.add(organization_db)
+ await session.commit()
+
log.info(
"[scopes] organization created",
organization_id=organization_db.id,
)
- await session.commit()
-
response = Reference(id=organization_db.id)
return response
@@ -225,14 +232,14 @@ async def create_workspace(
session.add(workspace_db)
+ await session.commit()
+
log.info(
"[scopes] workspace created",
organization_id=workspace_db.organization_id,
workspace_id=workspace_db.id,
)
- await session.commit()
-
response = Reference(id=workspace_db.id)
return response
@@ -255,6 +262,8 @@ async def create_project(
session.add(project_db)
+ await session.commit()
+
log.info(
"[scopes] project created",
organization_id=project_db.organization_id,
@@ -262,8 +271,6 @@ async def create_project(
project_id=project_db.id,
)
- await session.commit()
-
response = Reference(id=project_db.id)
return response
@@ -285,6 +292,8 @@ async def create_organization_membership(
session.add(membership_db)
+ await session.commit()
+
log.info(
"[scopes] organization membership created",
organization_id=request.organization_ref.id,
@@ -292,8 +301,6 @@ async def create_organization_membership(
membership_id=membership_db.id,
)
- await session.commit()
-
if request.role == "owner":
result = await session.execute(
select(OrganizationDB).filter_by(
@@ -303,7 +310,7 @@ async def create_organization_membership(
organization_db = result.scalars().first()
- organization_db.owner = str(request.user_ref.id)
+ organization_db.owner_id = request.user_ref.id
await session.commit()
@@ -335,6 +342,8 @@ async def create_workspace_membership(
session.add(membership_db)
+ await session.commit()
+
log.info(
"[scopes] workspace membership created",
organization_id=workspace_db.organization_id,
@@ -343,8 +352,6 @@ async def create_workspace_membership(
membership_id=membership_db.id,
)
- await session.commit()
-
response = Reference(id=membership_db.id)
return response
@@ -373,6 +380,8 @@ async def create_project_membership(
session.add(membership_db)
+ await session.commit()
+
log.info(
"[scopes] project membership created",
organization_id=project_db.organization_id,
@@ -382,8 +391,6 @@ async def create_project_membership(
membership_id=membership_db.id,
)
- await session.commit()
-
response = Reference(id=membership_db.id)
return response
diff --git a/api/ee/src/services/commoners.py b/api/ee/src/services/commoners.py
index 2f76c7facc..6aa65877af 100644
--- a/api/ee/src/services/commoners.py
+++ b/api/ee/src/services/commoners.py
@@ -1,7 +1,8 @@
from os import getenv
from json import loads
-from typing import List
+from typing import List, Optional
from traceback import format_exc
+from uuid import UUID
from pydantic import BaseModel
@@ -19,7 +20,12 @@
user_exists,
)
from ee.src.models.api.organization_models import CreateOrganization
-from oss.src.services.user_service import create_new_user
+from oss.src.services.user_service import (
+ create_new_user,
+ check_user_exists,
+ delete_user,
+)
+from oss.src.models.db_models import UserDB, OrganizationDB
from ee.src.services.email_helper import (
add_contact_to_loops,
)
@@ -30,6 +36,8 @@
from ee.src.core.subscriptions.service import SubscriptionsService
from ee.src.dbs.postgres.meters.dao import MetersDAO
from ee.src.core.meters.service import MetersService
+from oss.src.utils.caching import set_cache, get_cache
+from sqlalchemy.exc import IntegrityError
subscription_service = SubscriptionsService(
subscriptions_dao=SubscriptionsDAO(),
@@ -108,12 +116,20 @@ async def add_user_to_demos(user_id: str) -> None:
raise exc # TODO: handle exceptions
-async def create_accounts(payload: dict):
+async def create_accounts(
+ payload: dict,
+ organization_name: Optional[str] = None,
+ is_personal: bool = True,
+ use_reverse_trial: bool = True,
+):
"""Creates a user account and an associated organization based on the
provided payload.
Arguments:
payload (dict): The required payload. It consists of; user_id and user_email
+ organization_name (str): Name for the organization. Default: "Personal"
+ is_personal (bool): Whether this is a personal org. Default: True
+ use_reverse_trial (bool): Use reverse trial (True) or hobby plan (False). Default: True
"""
# Only keep fields expected by UserDB to avoid TypeErrors (e.g., organization_id)
@@ -123,59 +139,214 @@ async def create_accounts(payload: dict):
"username": payload["email"].split("@")[0],
}
- user = await db_manager.get_user_with_email(email=user_dict["email"])
- if user is None:
- log.info("[scopes] Yey! A new user is signing up!")
+ email = user_dict["email"]
- # Create user first
- user = await create_new_user(user_dict)
+ # Atomically acquire a distributed lock to prevent race conditions
+ # where multiple concurrent requests create duplicate accounts
+ from oss.src.utils.caching import acquire_lock, release_lock
- log.info("[scopes] User [%s] created", user.id)
+ lock_acquired = await acquire_lock(
+ namespace="account-creation",
+ key=email,
+ )
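+    # The lock key is the signup email, so only concurrent requests for the same
+    # address serialize; unrelated signups proceed in parallel.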
- # Prepare payload to create organization
- create_org_payload = CreateOrganization(
- name=user_dict["username"],
- description="Default Organization",
- owner=str(user.id),
- type="default",
- )
+ if not lock_acquired:
+ # Another request is already creating this account - just return the existing user
+ log.info("[scopes] account creation lock already taken")
+ user = await db_manager.get_user_with_email(email=email)
+ return user
+
+ # We have the lock - proceed with account creation
+ log.info("[scopes] account creation lock acquired")
+
+ try:
+ # Get or create user
+ user = await db_manager.get_user_with_email(email=user_dict["email"])
+ user_is_new = user is None
+
+ if user is None:
+ # Create user (idempotent - returns existing if found)
+ user = await create_new_user(user_dict)
+ log.info("[scopes] User [%s] created", user.id)
+
+ # Check if user already has organizations (to detect if setup already ran)
+ user_organizations = await db_manager.get_user_organizations(str(user.id))
+ user_has_organization = len(user_organizations) > 0
+
+ # Only run setup if user is new AND doesn't have organizations
+ if user_is_new and not user_has_organization:
+ # We successfully created the user and they have no orgs, proceed with setup
+ # If setup fails, delete the user to avoid orphaned records
+ try:
+ # Add the user to demos
+ await add_user_to_demos(str(user.id))
+
+ # Create organization with workspace and subscription
+ await create_organization_with_subscription(
+ user_id=UUID(str(user.id)),
+ organization_email=user_dict["email"],
+ organization_name="Personal",
+ organization_description=None,
+ is_personal=is_personal,
+ use_reverse_trial=use_reverse_trial,
+ )
+ except Exception as e:
+ # Setup failed - delete the user to avoid orphaned state
+ log.error(
+ "[scopes] setup failed for user [%s], deleting user: %s",
+ user.id,
+ str(e),
+ )
+ try:
+ await delete_user(str(user.id))
+ except Exception as delete_error:
+ log.error(
+ "[scopes] failed to delete user [%s]: %s",
+ user.id,
+ str(delete_error),
+ )
+ # Re-raise the original error
+ raise
+ else:
+ # User already has organization(s) - skip setup
+ if user_has_organization:
+ log.info(
+ "[scopes] User [%s] already has organization, skipping setup",
+ user.id,
+ )
+
+ log.info("[scopes] User [%s] authenticated", user.id)
+
+ try:
+ from oss.src.core.auth.service import AuthService
+
+ await AuthService().enforce_domain_policies(
+ email=user_dict["email"],
+ user_id=user.id,
+ )
+ except Exception as e:
+ log.debug("Error enforcing domain policies after signup: %s", e)
- # Create the user's default organization and workspace
+ if is_ee():
+ try:
+ # Adds contact to loops for marketing emails. TODO: Add opt-in checkbox to supertokens
+ add_contact_to_loops(user_dict["email"]) # type: ignore
+ except ConnectionError as ex:
+ log.warn("error adding contact to loops %s", ex)
+
+ return user
+
+ finally:
+ # Always release the lock when done (or on error)
+ released = await release_lock(
+ namespace="account-creation",
+ key=email,
+ )
+ if released:
+ log.info("[scopes] account creation lock released")
+ else:
+ log.warn("[scopes] account creation lock already expired")
+
+
+async def create_organization_with_subscription(
+ user_id: UUID,
+ organization_email: str,
+ organization_name: Optional[str] = None,
+ organization_description: Optional[str] = None,
+ is_personal: bool = False,
+ use_reverse_trial: bool = False,
+) -> OrganizationDB:
+ """Create an organization with workspace and subscription for an existing user.
+
+ Args:
+ user_id: The user's UUID
+ organization_email: The user's email for subscription
+ organization_name: Name for the organization
+ organization_description: Optional description
+ is_personal: Whether this is a personal org (default: False for collaborative)
+ use_reverse_trial: Use reverse trial (True) or hobby plan (False)
+
+ Returns:
+ OrganizationDB: The created organization
+ """
+ # Get user object
+ user = await db_manager.get_user(str(user_id))
+ if not user:
+ raise ValueError(f"User {user_id} not found")
+
+ if is_personal:
+ existing_orgs = await db_manager.get_user_organizations(str(user_id))
+ existing_personal = next(
+ (org for org in existing_orgs if (org.flags or {}).get("is_personal")),
+ None,
+ )
+ if existing_personal:
+ log.info(
+ "[scopes] Personal organization already exists",
+ organization_id=existing_personal.id,
+ user_id=user_id,
+ )
+ return existing_personal
+
+ # Prepare payload to create organization
+ create_org_payload = CreateOrganization(
+ name=organization_name,
+ description=organization_description,
+ is_demo=False,
+ is_personal=is_personal,
+ owner_id=user_id,
+ )
+
+ # Create organization and workspace
+ try:
organization = await create_organization(
payload=create_org_payload,
user=user,
)
+ except IntegrityError:
+ if is_personal:
+ existing_orgs = await db_manager.get_user_organizations(str(user_id))
+ existing_personal = next(
+ (org for org in existing_orgs if (org.flags or {}).get("is_personal")),
+ None,
+ )
+ if existing_personal:
+ log.info(
+ "[scopes] Personal organization already exists (race)",
+ organization_id=existing_personal.id,
+ user_id=user_id,
+ )
+ return existing_personal
+ raise
- log.info("[scopes] Organization [%s] created", organization.id)
-
- # Add the user to demos
- await add_user_to_demos(str(user.id))
+ log.info("[scopes] Organization [%s] created", organization.id)
- # Start reverse trial
- try:
+ # Start subscription based on type
+ try:
+ if use_reverse_trial:
await subscription_service.start_reverse_trial(
organization_id=str(organization.id),
organization_name=organization.name,
- organization_email=user_dict["email"],
+ organization_email=organization_email,
)
-
- except Exception as exc:
- raise exc # TODO: handle exceptions
- # await subscription_service.start_free_plan(
- # organization_id=str(organization.id),
- # )
-
- await check_entitlements(
- organization_id=str(organization.id),
- key=Gauge.USERS,
- delta=1,
+ else:
+ # Start hobby/free plan
+ await subscription_service.start_free_plan(
+ organization_id=str(organization.id),
+ )
+ except Exception as exc:
+ log.error(
+ "[scopes] Failed to create subscription for organization [%s]: %s",
+ organization.id,
+ exc,
)
+ raise exc
- log.info("[scopes] User [%s] authenticated", user.id)
+ # Check entitlements
+ await check_entitlements(
+ organization_id=str(organization.id),
+ key=Gauge.USERS,
+ delta=1,
+ )
- if is_ee():
- try:
- # Adds contact to loops for marketing emails. TODO: Add opt-in checkbox to supertokens
- add_contact_to_loops(user_dict["email"]) # type: ignore
- except ConnectionError as ex:
- log.warn("Error adding contact to loops %s", ex)
+ return organization
diff --git a/api/ee/src/services/db_manager_ee.py b/api/ee/src/services/db_manager_ee.py
index 55f3e55470..239cd4806c 100644
--- a/api/ee/src/services/db_manager_ee.py
+++ b/api/ee/src/services/db_manager_ee.py
@@ -42,6 +42,10 @@
UserDB,
InvitationDB,
)
+from ee.src.dbs.postgres.organizations.dao import (
+ OrganizationProvidersDAO,
+ OrganizationDomainsDAO,
+)
from ee.src.services.converters import get_workspace_in_format
from ee.src.services.selectors import get_org_default_workspace
@@ -127,7 +131,9 @@ async def get_organization_workspaces(organization_id: str):
result = await session.execute(
select(WorkspaceDB)
.filter_by(organization_id=uuid.UUID(organization_id))
- .options(load_only(WorkspaceDB.organization_id)) # type: ignore
+ .options( # type: ignore
+ load_only(WorkspaceDB.id, WorkspaceDB.organization_id)
+ )
)
workspaces = result.scalars().all()
return workspaces
@@ -203,6 +209,8 @@ async def create_project(
session.add(project_db)
+ await session.commit()
+
log.info(
"[scopes] project created",
organization_id=organization_id,
@@ -210,8 +218,6 @@ async def create_project(
project_id=project_db.id,
)
- await session.commit()
-
return project_db
@@ -231,7 +237,7 @@ async def create_default_project(
"""
project_db = await create_project(
- "Default Project",
+ "Default",
workspace_id=workspace_id,
organization_id=organization_id,
session=session,
@@ -296,13 +302,11 @@ async def _sync(db_session: AsyncSession) -> None:
member.user_id: member for member in existing_members_result.scalars().all()
}
- updated = False
for member in workspace_members:
project_member = existing_members.get(member.user_id)
if project_member:
if project_member.role != member.role:
project_member.role = member.role
- updated = True
continue
project_member = ProjectMemberDB(
@@ -311,6 +315,9 @@ async def _sync(db_session: AsyncSession) -> None:
role=member.role,
)
db_session.add(project_member)
+
+ await db_session.commit()
+
log.info(
"[scopes] project membership created",
organization_id=str(project.organization_id),
@@ -319,10 +326,6 @@ async def _sync(db_session: AsyncSession) -> None:
user_id=str(member.user_id),
membership_id=project_member.id,
)
- updated = True
-
- if updated:
- await db_session.commit()
if session is not None:
await _sync(session)
@@ -422,6 +425,8 @@ async def create_project_member(
session.add(project_member)
+ await session.commit()
+
log.info(
"[scopes] project membership created",
organization_id=project.organization_id,
@@ -431,8 +436,6 @@ async def create_project_member(
membership_id=project_member.id,
)
- await session.commit()
-
async def fetch_project_memberships_by_user_id(
user_id: str,
@@ -478,14 +481,14 @@ async def create_workspace_db_object(
session.add(workspace)
+ await session.commit()
+
log.info(
"[scopes] workspace created",
organization_id=organization.id,
workspace_id=workspace.id,
)
- await session.commit()
-
# add user as a member to the workspace with the owner role
workspace_member = WorkspaceMemberDB(
user_id=user.id,
@@ -494,6 +497,10 @@ async def create_workspace_db_object(
)
session.add(workspace_member)
+
+ await session.commit()
+ await session.refresh(workspace, attribute_names=["organization"])
+
log.info(
"[scopes] workspace membership created",
organization_id=workspace.organization_id,
@@ -502,10 +509,6 @@ async def create_workspace_db_object(
membership_id=workspace_member.id,
)
- await session.commit()
-
- await session.refresh(workspace, attribute_names=["organization"])
-
project_db = await create_default_project(
organization_id=str(organization.id),
workspace_id=str(workspace.id),
@@ -741,8 +744,11 @@ async def add_user_to_workspace_and_org(
user_organization = OrganizationMemberDB(
user_id=user.id, organization_id=organization.id
)
+
session.add(user_organization)
+ await session.commit()
+
log.info(
"[scopes] organization membership created",
organization_id=organization.id,
@@ -759,6 +765,8 @@ async def add_user_to_workspace_and_org(
session.add(workspace_member)
+ await session.commit()
+
log.info(
"[scopes] workspace membership created",
organization_id=organization.id,
@@ -793,8 +801,11 @@ async def add_user_to_workspace_and_org(
project_id=project.id,
role=role,
)
+
session.add(project_member)
+ await session.commit()
+
log.info(
"[scopes] project membership created",
organization_id=str(project.organization_id),
@@ -804,7 +815,6 @@ async def add_user_to_workspace_and_org(
membership_id=project_member.id,
)
- await session.commit()
return True
@@ -969,46 +979,58 @@ async def create_organization(
async with engine.core_session() as session:
create_org_data = payload.model_dump(exclude_unset=True)
- if "owner" not in create_org_data:
- create_org_data["owner"] = str(user.id)
+
+ is_demo = create_org_data.pop("is_demo", False)
+ is_personal = create_org_data.pop("is_personal", False)
+
+ create_org_data["flags"] = {
+ "is_demo": is_demo,
+ "is_personal": is_personal,
+ "allow_email": env.auth.email_enabled,
+ "allow_social": env.auth.oidc_enabled,
+ "allow_sso": False,
+ "allow_root": False,
+ "domains_only": False,
+ "auto_join": False,
+ }
+
+ # Set required audit fields
+ create_org_data["owner_id"] = user.id
+ create_org_data["created_by_id"] = user.id
# create organization
organization_db = OrganizationDB(**create_org_data)
session.add(organization_db)
+ await session.commit()
+
log.info(
"[scopes] organization created",
organization_id=organization_db.id,
)
- await session.commit()
-
# create joined organization for user
user_organization = OrganizationMemberDB(
- user_id=user.id, organization_id=organization_db.id
+ user_id=user.id,
+ organization_id=organization_db.id,
+ role="owner",
)
session.add(user_organization)
+ await session.commit()
+
log.info(
"[scopes] organization membership created",
organization_id=organization_db.id,
user_id=user.id,
+ role="owner",
membership_id=user_organization.id,
)
- await session.commit()
-
# construct workspace payload
workspace_payload = CreateWorkspace(
- name=payload.name,
- type=payload.type if payload.type else "",
- description=(
- "Default Workspace"
- if payload.type == "default"
- else payload.description
- if payload.description
- else ""
- ),
+ name="Default",
+ type="default",
)
# create workspace
@@ -1055,7 +1077,137 @@ async def update_organization(
if not organization:
raise NoResultFound(f"Organization with id {organization_id} not found")
- for key, value in payload.model_dump(exclude_unset=True).items():
+ # Validate slug updates before applying
+ payload_dict = payload.model_dump(exclude_unset=True)
+ if "slug" in payload_dict:
+ new_slug = payload_dict["slug"]
+
+ # Slug format validation: only lowercase letters and hyphens, max 64 characters
+ if new_slug is not None:
+ import re
+
+ if len(new_slug) > 64:
+ raise ValueError("Organization slug cannot exceed 64 characters.")
+ if not re.match(r"^[a-z-]+$", new_slug):
+ raise ValueError(
+ "Organization slug can only contain lowercase letters (a-z) and hyphens (-)."
+ )
+
+ # Personal organizations cannot have slugs
+ is_personal = organization.flags and organization.flags.get(
+ "is_personal", False
+ )
+ if is_personal:
+ raise ValueError(
+ "Personal organizations cannot have slugs. "
+ "Slugs are only available for collaborative organizations."
+ )
+
+ # Slug immutability: once set, cannot be changed
+ if organization.slug is not None and new_slug != organization.slug:
+ raise ValueError(
+ f"Organization slug cannot be changed once set. "
+ f"Current slug: '{organization.slug}'"
+ )
+
+ # Special handling for flags: merge instead of replace
+ if "flags" in payload_dict:
+ new_flags = payload_dict["flags"]
+ if new_flags is not None:
+ # Get existing flags or initialize with defaults
+ existing_flags = organization.flags or {}
+
+ # Start with complete defaults
+ default_flags = {
+ "is_demo": False,
+ "is_personal": False,
+ "allow_email": env.auth.email_enabled,
+ "allow_social": env.auth.oidc_enabled,
+ "allow_sso": False,
+ "allow_root": False,
+ "domains_only": False,
+ "auto_join": False,
+ }
+
+ # Merge: defaults <- existing <- new
+ merged_flags = {**default_flags, **existing_flags, **new_flags}
+
+ # VALIDATION: Ensure at least one auth method is enabled OR allow_root is true
+ # This prevents organizations from being locked out
+ allow_email = merged_flags.get("allow_email", False)
+ allow_social = merged_flags.get("allow_social", False)
+ allow_sso = merged_flags.get("allow_sso", False)
+ allow_root = merged_flags.get("allow_root", False)
+
+ changing_auth_flags = any(
+ key in new_flags
+ for key in ("allow_email", "allow_social", "allow_sso")
+ )
+ changing_auto_join = "auto_join" in new_flags
+ changing_domains_only = "domains_only" in new_flags
+
+ if changing_auth_flags and allow_sso:
+ providers_dao = OrganizationProvidersDAO(session)
+ providers = await providers_dao.list_by_organization(
+ organization_id
+ )
+ active_valid = [
+ provider
+ for provider in providers
+ if (provider.flags or {}).get("is_active")
+ and (provider.flags or {}).get("is_valid")
+ ]
+ if not active_valid:
+ raise ValueError(
+ "SSO cannot be enabled until at least one SSO provider is "
+ "active and verified."
+ )
+ if not allow_email and not allow_social:
+ if not active_valid:
+ raise ValueError(
+ "SSO-only authentication requires at least one SSO provider to "
+ "be active and verified."
+ )
+
+ if changing_auto_join and merged_flags.get("auto_join", False):
+ domains_dao = OrganizationDomainsDAO(session)
+ domains = await domains_dao.list_by_organization(organization_id)
+ has_verified_domain = any(
+ (domain.flags or {}).get("is_verified") for domain in domains
+ )
+ if not has_verified_domain:
+ raise ValueError(
+ "Auto-join requires at least one verified domain."
+ )
+
+ if changing_domains_only and merged_flags.get("domains_only", False):
+ domains_dao = OrganizationDomainsDAO(session)
+ domains = await domains_dao.list_by_organization(organization_id)
+ has_verified_domain = any(
+ (domain.flags or {}).get("is_verified") for domain in domains
+ )
+ if not has_verified_domain:
+ raise ValueError(
+ "Domains-only requires at least one verified domain."
+ )
+
+ # Check if all auth methods are disabled
+ all_auth_disabled = not (allow_email or allow_social or allow_sso)
+
+ if all_auth_disabled and not allow_root:
+ # Auto-enable allow_root to prevent lockout
+ merged_flags["allow_root"] = True
+ log.warning(
+ f"All authentication methods disabled for organization {organization_id}. "
+ f"Auto-enabling allow_root to prevent lockout."
+ )
+
+ organization.flags = merged_flags
+ # Remove flags from payload_dict to avoid setting it again below
+ del payload_dict["flags"]
+
+ # Set all other attributes
+ for key, value in payload_dict.items():
if hasattr(organization, key):
setattr(organization, key, value)
@@ -1064,6 +1216,33 @@ async def update_organization(
return organization
+async def delete_organization(organization_id: str) -> bool:
+ """
+ Delete an organization and all its related data.
+
+ Args:
+ organization_id (str): The organization ID to delete.
+
+ Returns:
+ bool: True if deletion was successful.
+
+ Raises:
+ NoResultFound: If organization not found.
+ """
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDB).filter_by(id=uuid.UUID(organization_id))
+ )
+ organization = result.scalars().first()
+
+ if not organization:
+ raise NoResultFound(f"Organization with id {organization_id} not found")
+
+ await session.delete(organization)
+ await session.commit()
+ return True
+
+
async def delete_invitation(invitation_id: str) -> bool:
"""
Delete an invitation from an organization.
@@ -1171,10 +1350,11 @@ async def get_org_details(organization: Organization) -> dict:
sample_organization = {
"id": str(organization.id),
+ "slug": organization.slug,
"name": organization.name,
"description": organization.description,
- "type": organization.type,
- "owner": organization.owner,
+ "flags": organization.flags,
+ "owner_id": str(organization.owner_id),
"workspaces": [str(workspace.id) for workspace in workspaces],
"default_workspace": default_workspace,
}
@@ -1206,22 +1386,6 @@ async def get_workspace_details(workspace: WorkspaceDB) -> WorkspaceResponse:
raise e
-async def get_organization_invitations(organization_id: str):
- """
- Gets the organization invitations.
-
- Args:
- organization_id (str): The ID of the organization
- """
-
- async with engine.core_session() as session:
- result = await session.execute(
- select(InvitationDB).filter_by(organization_id=organization_id)
- )
- invitations = result.scalars().all()
- return invitations
-
-
async def get_project_invitations(project_id: str, **kwargs):
"""
Gets the project invitations.
@@ -1376,25 +1540,28 @@ async def get_all_workspace_roles() -> List[WorkspaceRole]:
async def add_user_to_organization(
organization_id: str,
user_id: str,
+ role: str = "member",
# is_demo: bool = False,
) -> None:
async with engine.core_session() as session:
organization_member = OrganizationMemberDB(
user_id=user_id,
organization_id=organization_id,
+ role=role,
)
session.add(organization_member)
+ await session.commit()
+
log.info(
"[scopes] organization membership created",
organization_id=organization_id,
user_id=user_id,
+ role=role,
membership_id=organization_member.id,
)
- await session.commit()
-
async def add_user_to_workspace(
workspace_id: str,
@@ -1419,7 +1586,8 @@ async def add_user_to_workspace(
session.add(workspace_member)
- # TODO: add organization_id
+ await session.commit()
+
log.info(
"[scopes] workspace membership created",
organization_id=workspace.organization_id,
@@ -1428,8 +1596,6 @@ async def add_user_to_workspace(
membership_id=workspace_member.id,
)
- await session.commit()
-
async def add_user_to_project(
project_id: str,
@@ -1454,6 +1620,8 @@ async def add_user_to_project(
session.add(project_member)
+ await session.commit()
+
log.info(
"[scopes] project membership created",
organization_id=project.organization_id,
@@ -1463,4 +1631,148 @@ async def add_user_to_project(
membership_id=project_member.id,
)
+
+async def transfer_organization_ownership(
+ organization_id: str,
+ new_owner_id: str,
+ current_user_id: str,
+) -> OrganizationDB:
+ """Transfer organization ownership to another member.
+
+ Args:
+ organization_id: The ID of the organization
+ new_owner_id: The UUID of the new owner
+ current_user_id: The UUID of the current user (initiating the transfer)
+
+ Returns:
+ OrganizationDB: The updated organization
+
+ Raises:
+ ValueError: If new owner is not a member of the organization
+ """
+ from datetime import datetime, timezone
+ from ee.src.models.db_models import OrganizationMemberDB, WorkspaceMemberDB
+
+ async with engine.core_session() as session:
+ # Verify organization exists
+ org_result = await session.execute(
+ select(OrganizationDB).filter_by(id=uuid.UUID(organization_id))
+ )
+ organization = org_result.scalars().first()
+ if not organization:
+ raise ValueError(f"Organization {organization_id} not found")
+
+ # Check if new owner is a member
+ member_result = await session.execute(
+ select(OrganizationMemberDB).filter_by(
+ user_id=uuid.UUID(new_owner_id),
+ organization_id=uuid.UUID(organization_id),
+ )
+ )
+ member = member_result.scalars().first()
+ if not member:
+ raise ValueError("The new owner must be a member of the organization")
+
+ # Swap organization roles between current owner and new owner
+ current_owner_org_member_result = await session.execute(
+ select(OrganizationMemberDB).filter_by(
+ user_id=uuid.UUID(current_user_id),
+ organization_id=uuid.UUID(organization_id),
+ )
+ )
+ current_owner_org_member = current_owner_org_member_result.scalars().first()
+
+ if current_owner_org_member:
+ # Swap org roles
+ current_owner_org_old_role = current_owner_org_member.role
+ new_owner_org_old_role = member.role
+
+ current_owner_org_member.role = new_owner_org_old_role
+ member.role = current_owner_org_old_role
+
+ log.info(
+ "[organization] roles swapped",
+ organization_id=organization_id,
+ current_owner_id=current_user_id,
+ current_owner_old_role=current_owner_org_old_role,
+ current_owner_new_role=new_owner_org_old_role,
+ new_owner_id=new_owner_id,
+ new_owner_old_role=new_owner_org_old_role,
+ new_owner_new_role=current_owner_org_old_role,
+ )
+
+ # Get all workspaces in this organization
+ workspaces_result = await session.execute(
+ select(WorkspaceDB).filter_by(organization_id=uuid.UUID(organization_id))
+ )
+ workspaces = workspaces_result.scalars().all()
+
+ # Update workspace roles for both users in all workspaces - swap their roles
+ for workspace in workspaces:
+ # Get both members' workspace roles
+ current_owner_member_result = await session.execute(
+ select(WorkspaceMemberDB).filter_by(
+ user_id=uuid.UUID(current_user_id),
+ workspace_id=workspace.id,
+ )
+ )
+ current_owner_member = current_owner_member_result.scalars().first()
+
+ new_owner_member_result = await session.execute(
+ select(WorkspaceMemberDB).filter_by(
+ user_id=uuid.UUID(new_owner_id),
+ workspace_id=workspace.id,
+ )
+ )
+ new_owner_member = new_owner_member_result.scalars().first()
+
+ # Swap roles between the two users
+ if current_owner_member and new_owner_member:
+ current_owner_old_role = current_owner_member.role
+ new_owner_old_role = new_owner_member.role
+
+ # Swap the roles
+ current_owner_member.role = new_owner_old_role
+ new_owner_member.role = current_owner_old_role
+
+ log.info(
+ "[workspace] roles swapped",
+ workspace_id=str(workspace.id),
+ current_owner_id=current_user_id,
+ current_owner_old_role=current_owner_old_role,
+ current_owner_new_role=new_owner_old_role,
+ new_owner_id=new_owner_id,
+ new_owner_old_role=new_owner_old_role,
+ new_owner_new_role=current_owner_old_role,
+ )
+ elif current_owner_member:
+ # Only current owner is a member - keep their role
+ log.info(
+ "[workspace] new owner not a member",
+ workspace_id=str(workspace.id),
+ user_id=new_owner_id,
+ )
+ elif new_owner_member:
+ # Only new owner is a member - keep their role
+ log.info(
+ "[workspace] current owner not a member",
+ workspace_id=str(workspace.id),
+ user_id=current_user_id,
+ )
+
+ # Transfer ownership
+ organization.owner_id = uuid.UUID(new_owner_id)
+ organization.updated_at = datetime.now(timezone.utc)
+ organization.updated_by_id = uuid.UUID(current_user_id)
+
await session.commit()
+ await session.refresh(organization)
+
+ log.info(
+ "[organization] ownership transferred",
+ organization_id=organization_id,
+ old_owner_id=current_user_id,
+ new_owner_id=new_owner_id,
+ )
+
+ return organization
diff --git a/api/ee/src/services/organization_security_service.py b/api/ee/src/services/organization_security_service.py
new file mode 100644
index 0000000000..885ab264dc
--- /dev/null
+++ b/api/ee/src/services/organization_security_service.py
@@ -0,0 +1,729 @@
+"""Service layer for organization security features (domains and SSO providers)."""
+
+import secrets
+import hashlib
+import logging
+from typing import List, Optional
+from uuid import UUID
+from fastapi import HTTPException
+
+from oss.src.dbs.postgres.shared.engine import engine
+from oss.src.core.secrets.dtos import (
+ CreateSecretDTO,
+ UpdateSecretDTO,
+ SecretDTO,
+ SecretKind,
+ SSOProviderDTO,
+ SSOProviderSettingsDTO,
+)
+from oss.src.core.secrets.services import VaultService
+from oss.src.dbs.postgres.secrets.dao import SecretsDAO
+from oss.src.core.shared.dtos import Header
+from ee.src.dbs.postgres.organizations.dao import (
+ OrganizationDomainsDAO,
+ OrganizationProvidersDAO,
+)
+from ee.src.apis.fastapi.organizations.models import (
+ OrganizationDomainCreate,
+ OrganizationDomainResponse,
+ OrganizationProviderCreate,
+ OrganizationProviderUpdate,
+ OrganizationProviderResponse,
+)
+from ee.src.services import db_manager_ee
+
+logger = logging.getLogger(__name__)
+
+
+class DomainVerificationService:
+ """Service for managing domain verification."""
+
+ TOKEN_EXPIRY_HOURS = 48
+
+ @staticmethod
+ def generate_verification_token() -> str:
+ """Generate a unique verification token."""
+ # Generate a cryptographically secure random token (16 bytes = 32 hex chars)
+ random_part = secrets.token_hex(16)
+
+ # Return the raw token; DNS verification expects it published in a TXT record
+ # of the form "_agenta-verification=<token>" (see verify_domain_dns)
+ return random_part
+
+ @staticmethod
+ async def verify_domain_dns(domain: str, expected_token: str) -> bool:
+ """Verify domain ownership via DNS TXT record."""
+ import dns.resolver
+
+ try:
+ txt_record_name = f"_agenta-verification.{domain}"
+ logger.info(f"Attempting DNS verification for {txt_record_name}")
+ resolvers = [
+ ("system", None),
+ ("cloudflare+google", ["1.1.1.1", "8.8.8.8"]),
+ ]
+
+ def _resolve_txt(resolver_label: str, nameservers: list[str] | None):
+ resolver = dns.resolver.Resolver()
+ if nameservers:
+ resolver.nameservers = nameservers
+ logger.info(
+ f"DNS lookup using {resolver_label} resolver for {txt_record_name}"
+ )
+ return resolver.resolve(txt_record_name, "TXT")
+
+ for resolver_label, nameservers in resolvers:
+ try:
+ answers = _resolve_txt(resolver_label, nameservers)
+ except Exception as exc:
+ logger.warning(
+ f"DNS lookup failed via {resolver_label} resolver: {exc}"
+ )
+ continue
+
+ logger.info(f"Found {len(answers)} TXT records for {txt_record_name}")
+
+ for rdata in answers:
+ txt_value = rdata.to_text().strip('"')
+ logger.info(f"TXT record value: {txt_value}")
+
+ # Extract the token value from "_agenta-verification=TOKEN" format
+ if txt_value.startswith("_agenta-verification="):
+ token = txt_value.split("=", 1)[1]
+ logger.info(f"Extracted token from DNS: {token}")
+ logger.info(f"Expected token from DB: {expected_token}")
+ logger.info(f"Tokens match: {token == expected_token}")
+ if token == expected_token:
+ logger.info(f"Domain verification successful for {domain}")
+ return True
+ else:
+ logger.warning(
+ f"Token mismatch for {domain}. Expected length: {len(expected_token)}, Got length: {len(token)}"
+ )
+ logger.warning(f"Expected: {expected_token}")
+ logger.warning(f"Got: {token}")
+
+ logger.warning(
+ f"No matching verification token found in DNS records for {domain}"
+ )
+ return False
+ except dns.resolver.NXDOMAIN:
+ logger.warning(f"DNS record not found (NXDOMAIN) for {txt_record_name}")
+ return False
+ except dns.resolver.NoAnswer:
+ logger.warning(f"No TXT records found (NoAnswer) for {txt_record_name}")
+ return False
+ except dns.resolver.Timeout:
+ logger.error(f"DNS lookup timeout for {txt_record_name}")
+ return False
+ except Exception as e:
+ logger.error(
+ f"Unexpected error during DNS verification for {domain}: {e}",
+ exc_info=True,
+ )
+ return False
+
+ async def create_domain(
+ self,
+ organization_id: str,
+ payload: OrganizationDomainCreate,
+ user_id: str,
+ ) -> OrganizationDomainResponse:
+ """Create a new domain for verification.
+
+ Token expires after 48 hours and can be refreshed.
+ """
+ async with engine.core_session() as session:
+ dao = OrganizationDomainsDAO(session)
+
+ # Block if a verified domain already exists anywhere
+ existing_verified = await dao.get_verified_by_slug(payload.domain)
+ if existing_verified:
+ raise HTTPException(
+ status_code=409,
+ detail=f"Domain {payload.domain} is already verified",
+ )
+
+ # Reuse existing unverified domain for this organization, if any
+ existing = await dao.get_by_slug(payload.domain, organization_id)
+ if existing and not (existing.flags or {}).get("is_verified"):
+ from datetime import datetime, timezone
+
+ token = self.generate_verification_token()
+ existing.token = token
+ existing.created_at = datetime.now(timezone.utc)
+ existing.flags = {"is_verified": False}
+ existing.updated_by_id = user_id
+ await session.commit()
+ await session.refresh(existing)
+ domain = existing
+ else:
+ # Generate verification token
+ token = self.generate_verification_token()
+
+ # Create domain with token
+ domain = await dao.create(
+ organization_id=organization_id,
+ slug=payload.domain,
+ name=payload.name,
+ description=payload.description,
+ token=token,
+ created_by_id=user_id,
+ )
+
+ await session.commit()
+ await session.refresh(domain)
+
+ return OrganizationDomainResponse(
+ id=str(domain.id),
+ organization_id=str(domain.organization_id),
+ slug=domain.slug,
+ name=domain.name,
+ description=domain.description,
+ token=token,
+ flags=domain.flags or {},
+ created_at=domain.created_at,
+ updated_at=domain.updated_at,
+ )
+
+ async def verify_domain(
+ self, organization_id: str, domain_id: str, user_id: str
+ ) -> OrganizationDomainResponse:
+ """Verify a domain via DNS check."""
+ from datetime import datetime, timezone, timedelta
+
+ async with engine.core_session() as session:
+ dao = OrganizationDomainsDAO(session)
+
+ domain = await dao.get_by_id(domain_id, organization_id)
+ if not domain:
+ raise HTTPException(status_code=404, detail="Domain not found")
+
+ # Check if already verified by this organization
+ if domain.flags and domain.flags.get("is_verified"):
+ raise HTTPException(status_code=400, detail="Domain already verified")
+
+ # Check if domain is already verified by another organization
+ verified_by_other = await dao.get_verified_by_slug(domain.slug)
+ if (
+ verified_by_other
+ and str(verified_by_other.organization_id) != organization_id
+ ):
+ raise HTTPException(
+ status_code=409,
+ detail=f"Domain {domain.slug} is already verified by another organization",
+ )
+
+ # Check if token has expired (48 hours from creation)
+ token_age = datetime.now(timezone.utc) - domain.created_at
+ if token_age > timedelta(hours=self.TOKEN_EXPIRY_HOURS):
+ raise HTTPException(
+ status_code=400,
+ detail=f"Verification token expired after {self.TOKEN_EXPIRY_HOURS} hours. Please refresh the token.",
+ )
+
+ # Perform DNS verification
+ is_valid = await self.verify_domain_dns(domain.slug, domain.token)
+
+ if not is_valid:
+ raise HTTPException(
+ status_code=400,
+ detail="Domain verification failed. Please ensure the DNS TXT record is correctly configured.",
+ )
+
+ # Mark as verified and clear the token (one-time use)
+ domain.flags = {"is_verified": True}
+ domain.token = None
+ domain.updated_by_id = user_id
+ await session.commit()
+ await session.refresh(domain)
+
+ return OrganizationDomainResponse(
+ id=str(domain.id),
+ organization_id=str(domain.organization_id),
+ slug=domain.slug,
+ name=domain.name,
+ description=domain.description,
+ token=None,
+ flags=domain.flags or {},
+ created_at=domain.created_at,
+ updated_at=domain.updated_at,
+ )
+
+ async def list_domains(
+ self, organization_id: str
+ ) -> List[OrganizationDomainResponse]:
+ """List all domains for an organization.
+
+ Tokens are returned for unverified domains (within expiry period).
+ Verified domains have token=None (cleared after verification).
+ """
+ async with engine.core_session() as session:
+ dao = OrganizationDomainsDAO(session)
+ domains = await dao.list_by_organization(organization_id)
+
+ return [
+ OrganizationDomainResponse(
+ id=str(d.id),
+ organization_id=str(d.organization_id),
+ slug=d.slug,
+ name=d.name,
+ description=d.description,
+ token=d.token, # Token available for unverified domains, None for verified
+ flags=d.flags or {},
+ created_at=d.created_at,
+ updated_at=d.updated_at,
+ )
+ for d in domains
+ ]
+
+ async def refresh_token(
+ self, organization_id: str, domain_id: str, user_id: str
+ ) -> OrganizationDomainResponse:
+ """Refresh the verification token for a domain.
+
+ Generates a new token and resets the 48-hour expiry window.
+ For verified domains, this marks them as unverified for re-verification.
+ """
+ async with engine.core_session() as session:
+ dao = OrganizationDomainsDAO(session)
+
+ domain = await dao.get_by_id(domain_id, organization_id)
+ if not domain:
+ raise HTTPException(status_code=404, detail="Domain not found")
+
+ # Generate new token
+ new_token = self.generate_verification_token()
+
+ # Update domain with new token and reset created_at to restart the 48-hour expiry window
+ # If domain was verified, mark as unverified for re-verification
+ from datetime import datetime, timezone
+
+ domain.token = new_token
+ domain.created_at = datetime.now(timezone.utc)
+ domain.flags = {"is_verified": False}
+ domain.updated_by_id = user_id
+ await session.commit()
+ await session.refresh(domain)
+
+ return OrganizationDomainResponse(
+ id=str(domain.id),
+ organization_id=str(domain.organization_id),
+ slug=domain.slug,
+ name=domain.name,
+ description=domain.description,
+ token=new_token,
+ flags=domain.flags or {},
+ created_at=domain.created_at,
+ updated_at=domain.updated_at,
+ )
+
+ async def reset_domain(
+ self, organization_id: str, domain_id: str, user_id: str
+ ) -> OrganizationDomainResponse:
+ """Reset a verified domain to unverified state for re-verification.
+
+ Generates a new token and marks the domain as unverified.
+ """
+ async with engine.core_session() as session:
+ dao = OrganizationDomainsDAO(session)
+
+ domain = await dao.get_by_id(domain_id, organization_id)
+ if not domain:
+ raise HTTPException(status_code=404, detail="Domain not found")
+
+ # Generate new token
+ new_token = self.generate_verification_token()
+
+ # Reset domain to unverified state with new token
+ from datetime import datetime, timezone
+
+ domain.token = new_token
+ domain.created_at = datetime.now(timezone.utc)
+ domain.flags = {"is_verified": False}
+ domain.updated_by_id = user_id
+ await session.commit()
+ await session.refresh(domain)
+
+ return OrganizationDomainResponse(
+ id=str(domain.id),
+ organization_id=str(domain.organization_id),
+ slug=domain.slug,
+ name=domain.name,
+ description=domain.description,
+ token=new_token,
+ flags=domain.flags or {},
+ created_at=domain.created_at,
+ updated_at=domain.updated_at,
+ )
+
+ async def delete_domain(
+ self, organization_id: str, domain_id: str, user_id: str
+ ) -> bool:
+ """Delete a domain."""
+ async with engine.core_session() as session:
+ dao = OrganizationDomainsDAO(session)
+
+ domain = await dao.get_by_id(domain_id, organization_id)
+ if not domain:
+ raise HTTPException(status_code=404, detail="Domain not found")
+
+ deleted = await dao.delete(domain_id, user_id)
+ await session.commit()
+ return deleted
+
+
+class SSOProviderService:
+ """Service for managing SSO providers."""
+
+ @staticmethod
+ def _vault_service() -> VaultService:
+ return VaultService(SecretsDAO())
+
+ @staticmethod
+ def mask_secret(secret: str) -> str:
+ """Mask a secret for display."""
+ if len(secret) <= 8:
+ return "***"
+ return f"{secret[:4]}...{secret[-4:]}"
+
+ @staticmethod
+ async def test_oidc_connection(
+ issuer_url: str,
+ client_id: str,
+ client_secret: str,
+ ) -> bool:
+ """Test OIDC provider connection by fetching discovery document."""
+ import httpx
+
+ try:
+ # Try to fetch OIDC discovery document
+ discovery_url = f"{issuer_url.rstrip('/')}/.well-known/openid-configuration"
+
+ async with httpx.AsyncClient(timeout=10.0) as client:
+ response = await client.get(discovery_url)
+
+ if response.status_code != 200:
+ return False
+
+ config = response.json()
+
+ # Verify required OIDC endpoints exist
+ required_fields = [
+ "authorization_endpoint",
+ "token_endpoint",
+ "userinfo_endpoint",
+ ]
+ if not all(field in config for field in required_fields):
+ return False
+
+ return True
+ except Exception:
+ return False
+
+ async def create_provider(
+ self,
+ organization_id: str,
+ payload: OrganizationProviderCreate,
+ user_id: str,
+ ) -> OrganizationProviderResponse:
+ """Create a new SSO provider."""
+ async with engine.core_session() as session:
+ dao = OrganizationProvidersDAO(session)
+
+ # Use the slug from payload (already validated to be lowercase letters and hyphens)
+ slug = payload.slug
+
+ # Check if provider with this slug already exists
+ existing = await dao.get_by_slug(slug, organization_id)
+ if existing:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Provider with slug '{payload.slug}' already exists",
+ )
+
+ # Merge provided settings with defaults
+ settings = {
+ **payload.settings,
+ }
+
+ # Ensure scopes have default if not provided
+ if "scopes" not in settings or not settings["scopes"]:
+ settings["scopes"] = ["openid", "profile", "email"]
+
+ secret_payload = CreateSecretDTO(
+ header=Header(name=slug, description=payload.description),
+ secret=SecretDTO(
+ kind=SecretKind.SSO_PROVIDER,
+ data=SSOProviderDTO(
+ provider=SSOProviderSettingsDTO(
+ client_id=settings.get("client_id", ""),
+ client_secret=settings.get("client_secret", ""),
+ issuer_url=settings.get("issuer_url", ""),
+ scopes=settings.get("scopes", []),
+ extra=settings.get("extra", {}) or {},
+ )
+ ),
+ ),
+ )
+
+ secret_dto = await self._vault_service().create_secret(
+ organization_id=UUID(organization_id),
+ create_secret_dto=secret_payload,
+ )
+
+ # Merge provided flags with defaults
+ flags = payload.flags or {}
+ if "is_valid" not in flags:
+ flags["is_valid"] = False
+ if "is_active" not in flags:
+ flags["is_active"] = False
+
+ # Create provider
+ provider = await dao.create(
+ organization_id=organization_id,
+ slug=slug,
+ name=payload.name,
+ description=payload.description,
+ secret_id=str(secret_dto.id),
+ created_by_id=user_id,
+ flags=flags,
+ )
+
+ await session.commit()
+ await session.refresh(provider)
+
+ return await self._to_response(provider, organization_id)
+
+ async def update_provider(
+ self,
+ organization_id: str,
+ provider_id: str,
+ payload: OrganizationProviderUpdate,
+ user_id: str,
+ ) -> OrganizationProviderResponse:
+ """Update an SSO provider."""
+ async with engine.core_session() as session:
+ dao = OrganizationProvidersDAO(session)
+
+ provider = await dao.get_by_id(provider_id, organization_id)
+ if not provider:
+ raise HTTPException(status_code=404, detail="Provider not found")
+
+ # Update settings if provided
+ settings = await self._get_provider_settings(
+ organization_id, str(provider.secret_id)
+ )
+ settings_changed = False
+
+ if payload.settings is not None:
+ settings.update(payload.settings)
+ settings_changed = True
+
+ # Update flags if provided
+ flags = provider.flags.copy() if provider.flags else {}
+
+ if payload.flags is not None:
+ flags.update(payload.flags)
+
+ # If settings changed, invalidate the provider (needs re-testing)
+ if settings_changed:
+ flags["is_valid"] = False
+ flags["is_active"] = False
+
+ # Update slug if provided
+ if payload.slug is not None:
+ # Check if new slug already exists
+ existing = await dao.get_by_slug(payload.slug, organization_id)
+ if existing and str(existing.id) != str(provider_id):
+ raise HTTPException(
+ status_code=400,
+ detail=f"Provider with slug '{payload.slug}' already exists",
+ )
+ # Update slug in the provider
+ provider.slug = payload.slug
+
+ # Update name if provided
+ if payload.name is not None:
+ provider.name = payload.name
+
+ # Update description if provided
+ if payload.description is not None:
+ provider.description = payload.description
+
+ if settings_changed:
+ updated_secret = UpdateSecretDTO(
+ header=Header(name=provider.slug, description=provider.description),
+ secret=SecretDTO(
+ kind=SecretKind.SSO_PROVIDER,
+ data=SSOProviderDTO(
+ provider=SSOProviderSettingsDTO(
+ client_id=settings.get("client_id", ""),
+ client_secret=settings.get("client_secret", ""),
+ issuer_url=settings.get("issuer_url", ""),
+ scopes=settings.get("scopes", []),
+ extra=settings.get("extra", {}) or {},
+ )
+ ),
+ ),
+ )
+ await self._vault_service().update_secret(
+ secret_id=provider.secret_id,
+ organization_id=organization_id,
+ update_secret_dto=updated_secret,
+ )
+
+ provider = await dao.update(
+ provider_id=provider_id,
+ flags=flags,
+ updated_by_id=user_id,
+ )
+
+ await session.commit()
+ await session.refresh(provider)
+
+ return await self._to_response(provider, organization_id)
+
+ async def list_providers(
+ self, organization_id: str
+ ) -> List[OrganizationProviderResponse]:
+ """List all SSO providers for an organization."""
+ async with engine.core_session() as session:
+ dao = OrganizationProvidersDAO(session)
+ providers = await dao.list_by_organization(organization_id)
+
+ responses: List[OrganizationProviderResponse] = []
+ for provider in providers:
+ responses.append(await self._to_response(provider, organization_id))
+ return responses
+
+ async def get_provider(
+ self, organization_id: str, provider_id: str
+ ) -> OrganizationProviderResponse:
+ """Get a single SSO provider by ID."""
+ async with engine.core_session() as session:
+ dao = OrganizationProvidersDAO(session)
+ provider = await dao.get_by_id(provider_id, organization_id)
+ if not provider:
+ raise HTTPException(status_code=404, detail="Provider not found")
+ return await self._to_response(provider, organization_id)
+
+ async def test_provider(
+ self, organization_id: str, provider_id: str, user_id: str
+ ) -> OrganizationProviderResponse:
+ """Test SSO provider connection and mark as valid if successful."""
+ async with engine.core_session() as session:
+ dao = OrganizationProvidersDAO(session)
+
+ provider = await dao.get_by_id(provider_id, organization_id)
+ if not provider:
+ raise HTTPException(status_code=404, detail="Provider not found")
+
+ settings = await self._get_provider_settings(
+ organization_id, str(provider.secret_id)
+ )
+
+ # Test OIDC connection
+ is_valid = await self.test_oidc_connection(
+ issuer_url=settings.get("issuer_url", ""),
+ client_id=settings.get("client_id", ""),
+ client_secret=settings.get("client_secret", ""),
+ )
+
+ # Update flags based on test result
+ flags = provider.flags.copy() if provider.flags else {}
+ flags["is_valid"] = is_valid
+ if is_valid:
+ flags["is_active"] = True
+
+ # If validation failed, deactivate the provider
+ if not is_valid:
+ flags["is_active"] = False
+
+ provider = await dao.update(
+ provider_id=provider_id,
+ flags=flags,
+ updated_by_id=user_id,
+ )
+
+ await session.commit()
+ await session.refresh(provider)
+
+ return await self._to_response(provider, organization_id)
+
+ async def delete_provider(
+ self, organization_id: str, provider_id: str, user_id: str
+ ) -> bool:
+ """Delete an SSO provider."""
+ async with engine.core_session() as session:
+ dao = OrganizationProvidersDAO(session)
+
+ provider = await dao.get_by_id(provider_id, organization_id)
+ if not provider:
+ raise HTTPException(status_code=404, detail="Provider not found")
+
+ organization = await db_manager_ee.get_organization(organization_id)
+ flags = organization.flags or {}
+ if flags.get("allow_sso"):
+ providers = await dao.list_by_organization(organization_id)
+ remaining = [
+ p
+ for p in providers
+ if str(p.id) != str(provider_id)
+ and (p.flags or {}).get("is_active")
+ and (p.flags or {}).get("is_valid")
+ ]
+ if not remaining:
+ raise HTTPException(
+ status_code=400,
+ detail=(
+ "Cannot delete the last active and verified SSO provider while "
+ "SSO is enabled."
+ ),
+ )
+
+ await self._vault_service().delete_secret(
+ secret_id=provider.secret_id,
+ organization_id=organization_id,
+ )
+ deleted = await dao.delete(provider_id, user_id)
+ await session.commit()
+ return deleted
+
+ async def _get_provider_settings(
+ self, organization_id: str, secret_id: str
+ ) -> dict:
+ secret = await self._vault_service().get_secret(
+ secret_id=UUID(secret_id),
+ organization_id=UUID(organization_id),
+ )
+ if not secret:
+ raise HTTPException(status_code=404, detail="Provider secret not found")
+
+ data = secret.data
+ if hasattr(data, "provider"):
+ return data.provider.model_dump()
+ if isinstance(data, dict):
+ provider = data.get("provider") or {}
+ if isinstance(provider, dict):
+ return provider
+ raise HTTPException(status_code=500, detail="Invalid provider secret format")
+
+ async def _to_response(
+ self, provider, organization_id: str
+ ) -> OrganizationProviderResponse:
+ """Convert DBE to response model."""
+ settings = await self._get_provider_settings(
+ organization_id, str(provider.secret_id)
+ )
+
+ return OrganizationProviderResponse(
+ id=str(provider.id),
+ organization_id=str(provider.organization_id),
+ slug=provider.slug,
+ name=provider.name,
+ description=provider.description,
+ settings=settings,
+ flags=provider.flags or {},
+ created_at=provider.created_at,
+ updated_at=provider.updated_at,
+ )
diff --git a/api/ee/src/services/organization_service.py b/api/ee/src/services/organization_service.py
index 9c66c0c22f..463e528de9 100644
--- a/api/ee/src/services/organization_service.py
+++ b/api/ee/src/services/organization_service.py
@@ -12,6 +12,9 @@
)
from oss.src.utils.env import env
+from oss.src.utils.logging import get_module_logger
+
+log = get_module_logger(__name__)
async def update_an_organization(
@@ -19,8 +22,8 @@ async def update_an_organization(
) -> OrganizationDB:
org = await db_manager_ee.get_organization(organization_id)
if org is not None:
- await db_manager_ee.update_organization(str(org.id), payload)
- return org
+ updated_org = await db_manager_ee.update_organization(str(org.id), payload)
+ return updated_org
raise NotFound("Organization not found")
@@ -127,3 +130,30 @@ async def notify_org_admin_invitation(workspace: WorkspaceDB, user: UserDB) -> b
async def get_organization_details(organization_id: str) -> dict:
organization = await db_manager_ee.get_organization(organization_id)
return await db_manager_ee.get_org_details(organization)
+
+
+async def transfer_organization_ownership(
+ organization_id: str,
+ new_owner_id: str,
+ current_user_id: str,
+) -> OrganizationDB:
+ """Transfer organization ownership to another member.
+
+ Args:
+ organization_id: The ID of the organization
+ new_owner_id: The UUID of the new owner
+ current_user_id: The UUID of the current user (initiating the transfer)
+
+ Returns:
+ OrganizationDB: The updated organization
+
+ Raises:
+ NotFound: If organization or new owner member not found
+ ValueError: If new owner is not a member of the organization
+ """
+ # Delegate to db_manager_ee
+ return await db_manager_ee.transfer_organization_ownership(
+ organization_id=organization_id,
+ new_owner_id=new_owner_id,
+ current_user_id=current_user_id,
+ )
diff --git a/api/ee/src/services/workspace_manager.py b/api/ee/src/services/workspace_manager.py
index c252b19f66..dd34bfecae 100644
--- a/api/ee/src/services/workspace_manager.py
+++ b/api/ee/src/services/workspace_manager.py
@@ -30,6 +30,7 @@
check_valid_invitation,
)
from ee.src.services.organization_service import send_invitation_email
+from ee.src.dbs.postgres.organizations.dao import OrganizationDomainsDAO
log = get_module_logger(__name__)
@@ -155,6 +156,30 @@ async def invite_user_to_workspace(
organization = await db_manager_ee.get_organization(organization_id)
user_performing_action = await db_manager.get_user(user_uid)
+ # Check if domains_only is enabled for this organization
+ org_flags = organization.flags or {}
+ domains_only = org_flags.get("domains_only", False)
+
+ # If domains_only is enabled, get the list of verified domains
+ verified_domain_slugs = set()
+ if domains_only:
+ domains_dao = OrganizationDomainsDAO()
+ org_domains = await domains_dao.list_by_organization(organization_id)
+ verified_domain_slugs = {
+ d.slug.lower()
+ for d in org_domains
+ if d.flags and d.flags.get("is_verified", False)
+ }
+
+ # If domains_only is enabled but no verified domains exist, block all invitations
+ if not verified_domain_slugs:
+ return JSONResponse(
+ status_code=400,
+ content={
+ "error": "Cannot send invitations: domains_only is enabled but no verified domains exist"
+ },
+ )
+
for payload_invite in payload:
# Check that the user is not inviting themselves
if payload_invite.email == user_performing_action.email:
@@ -163,6 +188,17 @@ async def invite_user_to_workspace(
content={"error": "You cannot invite yourself to a workspace"},
)
+ # Check if domains_only is enabled and validate the email domain
+ if domains_only:
+ email_domain = payload_invite.email.split("@")[-1].lower()
+ if email_domain not in verified_domain_slugs:
+ return JSONResponse(
+ status_code=400,
+ content={
+ "error": f"Cannot invite {payload_invite.email}: domain '{email_domain}' is not a verified domain for this organization"
+ },
+ )
+
# Check if the user is already a member of the workspace
if await db_manager_ee.check_user_in_workspace_with_email(
payload_invite.email, str(workspace.id)
diff --git a/api/ee/src/utils/permissions.py b/api/ee/src/utils/permissions.py
index 5c575ef0cf..13a320bb8b 100644
--- a/api/ee/src/utils/permissions.py
+++ b/api/ee/src/utils/permissions.py
@@ -93,7 +93,7 @@ async def check_user_org_access(
if not organization:
log.error("Organization not found")
raise Exception("Organization not found")
- return organization.owner == str(user.id) # type: ignore
+ return organization.owner_id == user.id # type: ignore
else:
user_organizations: List = kwargs["organization_ids"]
user_exists_in_organizations = organization_id in user_organizations
@@ -290,7 +290,8 @@ async def check_rbac_permission(
if project_id is not None:
project = await db_manager.get_project_by_id(project_id)
if project is None:
- raise Exception("Project not found")
+ log.warning(f"Project {project_id} not found during permission check")
+ return False
workspace = await db_manager.get_workspace(str(project.workspace_id))
organization = await db_manager_ee.get_organization(
diff --git a/api/ee/tests/manual/auth/00-setup-verification.http b/api/ee/tests/manual/auth/00-setup-verification.http
new file mode 100644
index 0000000000..c430272175
--- /dev/null
+++ b/api/ee/tests/manual/auth/00-setup-verification.http
@@ -0,0 +1,471 @@
+###
+# Database Setup & Verification
+# SQL commands to set up test data and verify schema
+#
+# Run these in psql or your database client BEFORE running other tests
+###
+
+###
+# 1. Verify Migrations Applied
+###
+
+# Check user_identities table exists
+# \d user_identities
+
+# Expected columns:
+# - id (uuid)
+# - user_id (uuid, FK to users.id)
+# - method (text)
+# - subject (text)
+# - domain (text, nullable)
+# - created_at (timestamp)
+# - updated_at (timestamp)
+#
+# Expected constraints:
+# - UNIQUE (method, subject)
+# - INDEX on (user_id, method)
+# - INDEX on (domain)
+
+
+###
+# 2. Verify Organization Schema
+###
+
+# Check organizations table has updated schema
+# \d organizations
+
+# Expected columns:
+# - slug (text, unique, nullable)
+# - flags (jsonb, nullable) - contains is_personal, is_demo, auth policy flags
+# - owner_id (uuid, FK to users.id, NOT NULL)
+# - created_by_id, updated_by_id, deleted_by_id (uuid, nullable)
+# - created_at, updated_at, deleted_at (timestamp)
+#
+# Flags structure:
+# {
+# "is_personal": bool,
+# "is_demo": bool,
+# "allow_email": bool,
+# "allow_social": bool,
+# "allow_sso": bool,
+# "allow_root": bool,
+# "domains_only": bool,
+# "auto_join": bool
+# }
+
+
+###
+# 3. Setup Test Collaborative Organization (EE Mode)
+###
+
+-- Create test collaborative organization with slug
+-- INSERT INTO organizations (
+-- id,
+-- name,
+-- slug,
+-- description,
+-- flags,
+-- owner_id,
+-- created_by_id,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- 'ACME Corporation',
+-- 'acme',
+-- 'Test collaborative organization for SSO',
+-- '{"is_personal": false, "allow_email": true, "allow_social": true, "allow_sso": true}'::jsonb,
+-- '', -- Replace with actual user ID who will own this org
+-- '',
+-- now()
+-- )
+-- RETURNING id;
+-- Save the returned ID as @testOrgId
+
+
+###
+# 4. Setup Test Personal Organization (EE Mode)
+###
+
+-- Create test personal organization
+-- INSERT INTO organizations (
+-- id,
+-- name,
+-- slug,
+-- description,
+-- flags,
+-- owner_id,
+-- created_by_id,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- 'Personal',
+-- NULL, -- Personal orgs have no slug
+-- NULL,
+-- '{"is_personal": true}'::jsonb,
+-- '', -- Replace with actual user ID
+-- '',
+-- now()
+-- )
+-- RETURNING id;
+-- Save the returned ID as @testPersonalOrgId
+
+-- Add user as member to their personal org
+-- INSERT INTO organization_members (
+-- id,
+-- user_id,
+-- organization_id,
+-- role,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- '',
+-- '',
+-- 'owner',
+-- now()
+-- );
+
+
+###
+# 5. Setup Organization Auth Flags (EE Mode)
+###
+
+-- Update organization flags to set authentication policies
+-- UPDATE organizations
+-- SET flags = jsonb_set(
+-- jsonb_set(
+-- jsonb_set(
+-- jsonb_set(
+-- jsonb_set(
+-- flags,
+-- '{allow_email}', 'false'
+-- ),
+-- '{allow_social}', 'false'
+-- ),
+-- '{allow_sso}', 'true'
+-- ),
+-- '{auto_join}', 'true'
+-- ),
+-- '{domains_only}', 'true'
+-- )
+-- WHERE id = '';
+
+# Auth policy flags in organizations.flags:
+# - allow_email (boolean, default: true) - Allow email authentication (OTP/password)
+# - allow_social (boolean, default: true) - Allow social authentication (Google, GitHub)
+# - allow_sso (boolean, default: false) - Allow SSO/OIDC authentication
+# - allow_root (boolean, default: true) - Allow owner bypass of auth restrictions
+# - domains_only (boolean, default: false) - Only allow users with verified domain emails
+# - auto_join (boolean, default: false) - Auto-add users with verified domain emails
+
+
+###
+# 6. Setup Verified Domain (EE Mode - Collaborative Org)
+###
+
+-- Add verified domain for SSO on collaborative org
+-- INSERT INTO organization_domains (
+-- id,
+-- organization_id,
+-- slug,
+-- name,
+-- description,
+-- token,
+-- flags,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- '', -- Collaborative org ID
+-- 'acme.com',
+-- 'ACME Domain',
+-- 'Primary domain for ACME Corporation',
+-- NULL, -- Token only needed during verification
+-- '{"is_verified": true}'::jsonb,
+-- now()
+-- )
+-- RETURNING id;
+-- Save as @testDomainId
+
+# Note: domain column renamed to slug, verified moved to flags.is_verified
+
+
+###
+# 7. Test Domain Verification Restrictions (EE Mode)
+###
+
+-- Attempt to add domain to personal org (verification should fail)
+-- INSERT INTO organization_domains (
+-- id,
+-- organization_id,
+-- slug,
+-- name,
+-- token,
+-- flags,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- '', -- Personal org ID
+-- 'personal-test.com',
+-- 'Personal Test Domain',
+-- 'test-token-123',
+-- '{"is_verified": false}'::jsonb,
+-- now()
+-- )
+-- RETURNING id;
+-- Save as @personalOrgDomainId
+
+# NOTE: Attempting to verify this domain should fail with:
+# "Personal organizations cannot verify domains"
+
+
+###
+# 8. Test Domain Exclusivity (EE Mode)
+###
+
+-- Create second collaborative org to test exclusivity
+-- INSERT INTO organizations (
+-- id,
+-- name,
+-- slug,
+-- description,
+-- flags,
+-- owner_id,
+-- created_by_id,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- 'Second Corp',
+-- 'second',
+-- 'Test domain exclusivity',
+-- '{"is_personal": false}'::jsonb,
+-- '',
+-- '',
+-- now()
+-- )
+-- RETURNING id;
+-- Save as @secondOrgId
+
+-- Attempt to verify same domain as first org
+-- INSERT INTO organization_domains (
+-- id,
+-- organization_id,
+-- slug,
+-- name,
+-- token,
+-- flags,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- '',
+-- 'acme.com', -- Same domain as testOrgId
+-- 'Conflicting ACME Domain',
+-- 'conflict-token-456',
+-- '{"is_verified": false}'::jsonb,
+-- now()
+-- )
+-- RETURNING id;
+-- Save as @conflictingDomainId
+
+# NOTE: Attempting to verify this domain should fail with:
+# "Domain 'acme.com' is already verified by another organization"
+
+
+###
+# 9. Setup OIDC Provider (EE Mode)
+###
+
+-- Add OIDC provider configuration
+-- INSERT INTO organization_providers (
+-- id,
+-- organization_id,
+-- slug,
+-- name,
+-- description,
+-- settings,
+-- flags,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- '',
+-- 'okta',
+-- 'Okta SSO',
+-- 'ACME Okta integration',
+-- '{
+-- "issuer": "https://dev-12345.okta.com",
+-- "client_id": "0oa...",
+-- "client_secret": "xxx",
+-- "scopes": ["openid", "profile", "email"],
+-- "authorization_endpoint": "https://dev-12345.okta.com/oauth2/v1/authorize",
+-- "token_endpoint": "https://dev-12345.okta.com/oauth2/v1/token",
+-- "userinfo_endpoint": "https://dev-12345.okta.com/oauth2/v1/userinfo"
+-- }'::jsonb,
+-- '{"is_active": true}'::jsonb,
+-- now()
+-- )
+-- RETURNING id;
+-- Save as @testProviderId
+
+# Note:
+# - config renamed to settings
+# - enabled moved to flags.is_active
+# - domain_id removed (SSO provider can handle multiple domains)
+
+
+###
+# 10. Create Test User
+###
+
+-- Create test user
+-- INSERT INTO users (id, uid, username, email, created_at)
+-- VALUES (
+-- gen_random_uuid(),
+-- 'st_user_123', -- SuperTokens user ID
+-- 'Test User',
+-- 'test@acme.com',
+-- now()
+-- )
+-- RETURNING id;
+-- Save as @testUserId
+
+
+###
+# 11. Add User to Organization (EE Mode)
+###
+
+-- Create organization membership
+-- INSERT INTO organization_members (
+-- id,
+-- user_id,
+-- organization_id,
+-- role,
+-- created_at
+-- )
+-- VALUES (
+-- gen_random_uuid(),
+-- '',
+-- '',
+-- 'member',
+-- now()
+-- );
+
+# Role can be: 'owner', 'member'
+
+
+###
+# 12. Verification Queries
+###
+
+-- Check all test data created successfully
+
+-- Verify organizations (check flags)
+-- SELECT id, name, slug, flags FROM organizations ORDER BY flags->>'is_personal';
+
+-- Verify collaborative organization
+-- SELECT id, name, slug, flags FROM organizations WHERE slug = 'acme';
+
+-- Verify personal organization
+-- SELECT id, name, slug, flags FROM organizations WHERE flags->>'is_personal' = 'true';
+
+-- Verify organization auth flags
+-- SELECT
+-- id,
+-- name,
+-- flags->'allow_email' as allow_email,
+-- flags->'allow_social' as allow_social,
+-- flags->'allow_sso' as allow_sso,
+-- flags->'auto_join' as auto_join,
+-- flags->'domains_only' as domains_only,
+-- flags->'allow_root' as allow_root
+-- FROM organizations
+-- WHERE id = '';
+
+-- Verify domains
+-- SELECT
+-- od.id,
+-- od.slug as domain,
+-- od.flags->>'is_verified' as verified,
+-- o.name as org_name,
+-- o.flags->>'is_personal' as is_personal
+-- FROM organization_domains od
+-- JOIN organizations o ON o.id = od.organization_id
+-- ORDER BY o.flags->>'is_personal', od.slug;
+
+-- Verify provider
+-- SELECT
+-- slug,
+-- name,
+-- flags->>'is_active' as is_active,
+-- settings->>'issuer' as issuer
+-- FROM organization_providers
+-- WHERE organization_id = '';
+
+-- Verify user
+-- SELECT id, email FROM users WHERE email = 'test@acme.com';
+
+-- Verify membership
+-- SELECT
+-- om.id,
+-- u.email,
+-- o.name,
+-- om.role,
+-- o.flags->>'is_personal' as is_personal
+-- FROM organization_members om
+-- JOIN users u ON u.id = om.user_id
+-- JOIN organizations o ON o.id = om.organization_id
+-- WHERE u.email = 'test@acme.com'
+-- ORDER BY o.flags->>'is_personal';
+
+
+###
+# 13. Cleanup (Run after testing)
+###
+
+-- Clean up test data
+-- DELETE FROM organization_members WHERE user_id = '';
+-- DELETE FROM organization_providers WHERE organization_id IN ('', '');
+-- DELETE FROM organization_domains WHERE organization_id IN ('', '', '');
+-- DELETE FROM user_identities WHERE user_id = '';
+-- DELETE FROM users WHERE id = '';
+-- DELETE FROM organizations WHERE id IN ('', '', '');
+
+
+###
+# Quick Setup Script (OSS Mode - Email OTP Only)
+###
+
+-- For OSS mode testing, verify migrations:
+-- SELECT EXISTS (
+-- SELECT FROM information_schema.tables
+-- WHERE table_name = 'user_identities'
+-- ) as user_identities_exists;
+
+-- SELECT EXISTS (
+-- SELECT FROM information_schema.columns
+-- WHERE table_name = 'organizations' AND column_name = 'slug'
+-- ) as org_slug_exists;
+
+-- SELECT EXISTS (
+-- SELECT FROM information_schema.columns
+-- WHERE table_name = 'organizations' AND column_name = 'flags'
+-- ) as org_flags_exists;
+
+-- Verify OSS has exactly 1 collaborative organization
+-- SELECT
+-- COUNT(*) as org_count,
+-- flags->>'is_personal' as is_personal
+-- FROM organizations
+-- GROUP BY flags->>'is_personal';
+-- Expected: 1 row with is_personal=false (or null), count=1
+
+-- Verify no personal organizations exist in OSS
+-- SELECT COUNT(*) as personal_org_count
+-- FROM organizations
+-- WHERE flags->>'is_personal' = 'true';
+-- Expected: 0
diff --git a/api/ee/tests/manual/auth/00-setup-verification.md b/api/ee/tests/manual/auth/00-setup-verification.md
new file mode 100644
index 0000000000..2bae988343
--- /dev/null
+++ b/api/ee/tests/manual/auth/00-setup-verification.md
@@ -0,0 +1,576 @@
+# Database Setup & Verification
+
+SQL commands to set up test data and verify schema.
+
+Run these in psql or your database client BEFORE running other tests.
+
+---
+
+## 1. Verify Migrations Applied
+
+Check user_identities table exists:
+
+```sql
+\d user_identities
+```
+
+**Expected columns:**
+- `id` (uuid)
+- `user_id` (uuid, FK to users.id)
+- `method` (text)
+- `subject` (text)
+- `domain` (text, nullable)
+- `created_at` (timestamp)
+- `updated_at` (timestamp)
+- `updated_by_id` (uuid, nullable)
+
+**Expected constraints:**
+- UNIQUE (method, subject)
+- INDEX on (user_id, method)
+- INDEX on (domain)
+
+---
+
+## 2. Verify Organization Schema
+
+Check organizations table has updated schema:
+
+```sql
+\d organizations
+```
+
+**Expected columns:**
+- `slug` (text, unique, nullable)
+- `flags` (jsonb, nullable) - contains `is_personal`, `is_demo`, auth policy flags
+- `tags` (jsonb, nullable)
+- `meta` (jsonb, nullable)
+- `owner_id` (uuid, FK to users.id, NOT NULL)
+- `created_by_id` (uuid, FK to users.id, NOT NULL)
+- `updated_by_id` (uuid, nullable)
+- `deleted_by_id` (uuid, nullable)
+- `created_at` (timestamp, NOT NULL)
+- `updated_at` (timestamp, nullable)
+- `deleted_at` (timestamp, nullable)
+
+**Expected indexes:**
+- Unique index on `slug`
+- GIN index on `flags`
+
+---
+
+## 3. Setup Test Collaborative Organization (EE Mode)
+
+Create test collaborative organization with slug:
+
+```sql
+INSERT INTO organizations (
+ id,
+ name,
+ slug,
+ description,
+ flags,
+ owner_id,
+ created_by_id,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ 'ACME Corporation',
+ 'acme',
+ 'Test collaborative organization for SSO',
+ '{"is_personal": false, "allow_email": true, "allow_social": true, "allow_sso": true}'::jsonb,
+ '', -- Replace with actual user ID who will own this org
+ '',
+ now()
+)
+RETURNING id;
+```
+
+Save the returned ID as `@testOrgId`.
+
+---
+
+## 4. Setup Test Personal Organization (EE Mode)
+
+Create test personal organization:
+
+```sql
+INSERT INTO organizations (
+ id,
+ name,
+ slug,
+ description,
+ flags,
+ owner_id,
+ created_by_id,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ 'Personal',
+ NULL, -- Personal orgs have no slug
+ NULL,
+ '{"is_personal": true}'::jsonb,
+ '', -- Replace with actual user ID
+ '',
+ now()
+)
+RETURNING id;
+```
+
+Save the returned ID as `@testPersonalOrgId`.
+
+Add user as member to their personal org:
+
+```sql
+INSERT INTO organization_members (
+ id,
+ user_id,
+ organization_id,
+ role,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ '',
+ '',
+ 'owner',
+ now()
+);
+```
+
+---
+
+## 5. Setup Organization Policy Flags (EE Mode)
+
+Update organization flags to set authentication policies:
+
+```sql
+-- Allow only SSO, enforce verified domains, allow auto-join
+UPDATE organizations
+SET flags = jsonb_set(
+ jsonb_set(
+ jsonb_set(
+ jsonb_set(
+ jsonb_set(
+ flags,
+ '{allow_email}', 'false'
+ ),
+ '{allow_social}', 'false'
+ ),
+ '{allow_sso}', 'true'
+ ),
+ '{auto_join}', 'true'
+ ),
+ '{domains_only}', 'true'
+)
+WHERE id = '';
+```
+
+**Policy flags in `organizations.flags`:**
+- `allow_email` (boolean, default: true) - Allow email authentication (OTP/password)
+- `allow_social` (boolean, default: true) - Allow social authentication (Google, GitHub, etc.)
+- `allow_sso` (boolean, default: true) - Allow SSO/OIDC authentication
+- `auto_join` (boolean, default: false) - Allow users with verified domains to automatically join
+- `domains_only` (boolean, default: false) - Only allow users with verified domain emails
+- `allow_root` (boolean, default: true) - Allow organization owner to bypass auth restrictions
+
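+Since all of these are top-level keys, the nested `jsonb_set` calls above can also be collapsed into a single merge with the jsonb `||` operator (a sketch; pick the flag values your scenario needs):
+
+```sql
+-- Merge several auth policy flags in one statement
+UPDATE organizations
+SET flags = COALESCE(flags, '{}'::jsonb)
+    || '{"allow_email": false, "allow_social": false, "allow_sso": true, "auto_join": true, "domains_only": true}'::jsonb
+WHERE id = ''; -- Replace with the collaborative org ID
+```
+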
+---
+
+## 6. Setup Verified Domain (EE Mode - Collaborative Org)
+
+Add verified domain for SSO on collaborative org:
+
+```sql
+INSERT INTO organization_domains (
+ id,
+ organization_id,
+ slug,
+ name,
+ description,
+ token,
+ flags,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ '', -- Collaborative org ID
+ 'acme.com',
+ 'ACME Domain',
+ 'Primary domain for ACME Corporation',
+ NULL, -- Token only needed during verification
+ '{"is_verified": true}'::jsonb,
+ now()
+)
+RETURNING id;
+```
+
+Save as `@testDomainId`.
+
+**Note:** The `domain` column was renamed to `slug`, the boolean `verified` column moved to `flags.is_verified`, and `verification_token` was renamed to `token`.
+
+---
+
+## 7. Test Domain Verification Restrictions (EE Mode)
+
+Attempt to add domain to personal org (verification should fail):
+
+```sql
+INSERT INTO organization_domains (
+ id,
+ organization_id,
+ slug,
+ name,
+ token,
+ flags,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ '', -- Personal org ID
+ 'personal-test.com',
+ 'Personal Test Domain',
+ 'test-token-123',
+ '{"is_verified": false}'::jsonb,
+ now()
+)
+RETURNING id;
+```
+
+Save as `@personalOrgDomainId`.
+
+**NOTE:** Attempting to verify this domain should fail with:
+> "Personal organizations cannot verify domains"
+
+---
+
+## 8. Test Domain Exclusivity (EE Mode)
+
+Create second collaborative org to test exclusivity:
+
+```sql
+INSERT INTO organizations (
+ id,
+ name,
+ slug,
+ description,
+ flags,
+ owner_id,
+ created_by_id,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ 'Second Corp',
+ 'second',
+ 'Test domain exclusivity',
+ '{"is_personal": false}'::jsonb,
+ '',
+ '',
+ now()
+)
+RETURNING id;
+```
+
+Save as `@secondOrgId`.
+
+Attempt to verify same domain as first org:
+
+```sql
+INSERT INTO organization_domains (
+ id,
+ organization_id,
+ slug,
+ name,
+ token,
+ flags,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ '',
+ 'acme.com', -- Same domain as testOrgId
+ 'Conflicting ACME Domain',
+ 'conflict-token-456',
+ '{"is_verified": false}'::jsonb,
+ now()
+)
+RETURNING id;
+```
+
+Save as `@conflictingDomainId`.
+
+**NOTE:** Attempting to verify this domain should fail with:
+> "Domain 'acme.com' is already verified by another organization"
+
+---
+
+## 9. Setup OIDC Provider (EE Mode)
+
+Add OIDC provider configuration:
+
+```sql
+INSERT INTO organization_providers (
+ id,
+ organization_id,
+ slug,
+ name,
+ description,
+ settings,
+ flags,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ '',
+ 'okta',
+ 'Okta SSO',
+ 'ACME Okta integration',
+ '{
+ "issuer": "https://dev-12345.okta.com",
+ "client_id": "0oa...",
+ "client_secret": "xxx",
+ "scopes": ["openid", "profile", "email"],
+ "authorization_endpoint": "https://dev-12345.okta.com/oauth2/v1/authorize",
+ "token_endpoint": "https://dev-12345.okta.com/oauth2/v1/token",
+ "userinfo_endpoint": "https://dev-12345.okta.com/oauth2/v1/userinfo"
+ }'::jsonb,
+ '{"is_active": true}'::jsonb,
+ now()
+)
+RETURNING id;
+```
+
+Save as `@testProviderId`.
+
+**Note:**
+- `config` renamed to `settings`
+- `enabled` moved to `flags.is_active`
+- `domain_id` removed (SSO provider can handle multiple domains)
+
+---
+
+## 10. Create Test User
+
+Create test user:
+
+```sql
+INSERT INTO users (id, uid, username, email, created_at)
+VALUES (
+ gen_random_uuid(),
+ 'st_user_123', -- SuperTokens user ID
+ 'Test User',
+ 'test@acme.com',
+ now()
+)
+RETURNING id;
+```
+
+Save as `@testUserId`.
+
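+Optionally, seed an `email:otp` identity for this user so the discovery and identity-tracking tests see an existing identity (a sketch; in normal operation the backend creates these rows on login, and the columns follow section 1):
+
+```sql
+INSERT INTO user_identities (id, user_id, method, subject, domain, created_at)
+VALUES (
+    gen_random_uuid(),
+    '', -- Replace with @testUserId
+    'email:otp',
+    'test@acme.com',
+    'acme.com',
+    now()
+)
+RETURNING id;
+```
+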
+---
+
+## 11. Add User to Organization (EE Mode)
+
+Create organization membership:
+
+```sql
+INSERT INTO organization_members (
+ id,
+ user_id,
+ organization_id,
+ role,
+ created_at
+)
+VALUES (
+ gen_random_uuid(),
+ '',
+ '',
+ 'member',
+ now()
+);
+```
+
+**Note:** The `role` column was added (default: `"member"`; can also be `"owner"`).
+
+---
+
+## 12. Verification Queries
+
+Check all test data created successfully:
+
+### Verify organizations (check flags)
+
+```sql
+SELECT id, name, slug, flags
+FROM organizations
+ORDER BY flags->>'is_personal';
+```
+
+### Verify collaborative organization
+
+```sql
+SELECT id, name, slug, flags
+FROM organizations
+WHERE slug = 'acme';
+```
+
+### Verify personal organization
+
+```sql
+SELECT id, name, slug, flags
+FROM organizations
+WHERE flags->>'is_personal' = 'true';
+```
+
+### Verify organization policy flags
+
+```sql
+SELECT
+ id,
+ name,
+ flags->'allow_email' as allow_email,
+ flags->'allow_social' as allow_social,
+ flags->'allow_sso' as allow_sso,
+ flags->'auto_join' as auto_join,
+ flags->'domains_only' as domains_only,
+ flags->'allow_root' as allow_root
+FROM organizations
+WHERE id = '';
+```
+
+### Verify domains
+
+```sql
+SELECT
+ od.id,
+ od.slug as domain,
+ od.flags->>'is_verified' as verified,
+ o.name as org_name,
+ o.flags->>'is_personal' as is_personal
+FROM organization_domains od
+JOIN organizations o ON o.id = od.organization_id
+ORDER BY o.flags->>'is_personal', od.slug;
+```
+
+### Verify provider
+
+```sql
+SELECT
+ slug,
+ name,
+ flags->>'is_active' as enabled,
+ settings->>'issuer' as issuer
+FROM organization_providers
+WHERE organization_id = '';
+```
+
+### Verify user
+
+```sql
+SELECT id, email
+FROM users
+WHERE email = 'test@acme.com';
+```
+
+### Verify membership
+
+```sql
+SELECT
+ om.id,
+ u.email,
+ o.name,
+ om.role,
+ o.flags->>'is_personal' as is_personal
+FROM organization_members om
+JOIN users u ON u.id = om.user_id
+JOIN organizations o ON o.id = om.organization_id
+WHERE u.email = 'test@acme.com'
+ORDER BY o.flags->>'is_personal';
+```
+
+---
+
+## 13. Cleanup (Run after testing)
+
+Clean up test data:
+
+```sql
+DELETE FROM organization_members WHERE user_id = '';
+DELETE FROM organization_providers WHERE organization_id IN ('', '');
+DELETE FROM organization_domains WHERE organization_id IN ('', '', '');
+DELETE FROM user_identities WHERE user_id = '';
+DELETE FROM users WHERE id = '';
+DELETE FROM organizations WHERE id IN ('', '', '');
+```
+
+**Note:** There is nothing to delete from `organization_policies`; that table was removed and its settings now live in `organizations.flags`.
+
+---
+
+## Quick Setup Script (OSS Mode - Email OTP Only)
+
+For OSS mode testing, verify migrations:
+
+```sql
+-- Check user_identities table exists
+SELECT EXISTS (
+ SELECT FROM information_schema.tables
+ WHERE table_name = 'user_identities'
+) as user_identities_exists;
+
+-- Check organizations.slug exists
+SELECT EXISTS (
+ SELECT FROM information_schema.columns
+ WHERE table_name = 'organizations' AND column_name = 'slug'
+) as org_slug_exists;
+
+-- Check organizations.flags exists
+SELECT EXISTS (
+ SELECT FROM information_schema.columns
+ WHERE table_name = 'organizations' AND column_name = 'flags'
+) as org_flags_exists;
+
+-- Verify OSS has exactly 1 collaborative organization
+SELECT
+ COUNT(*) as org_count,
+ flags->>'is_personal' as is_personal
+FROM organizations
+GROUP BY flags->>'is_personal';
+-- Expected: 1 row with is_personal=false (or null), count=1
+
+-- Verify no personal organizations exist in OSS
+SELECT COUNT(*) as personal_org_count
+FROM organizations
+WHERE flags->>'is_personal' = 'true';
+-- Expected: 0
+```
+
+---
+
+## Schema Changes Summary
+
+### Organizations Table
+- **Removed:** `type`, `owner` (string), `kind`
+- **Added:** `owner_id` (UUID FK), `created_by_id`, `updated_by_id`, `deleted_by_id`, `deleted_at`, `flags`, `tags`, `meta`
+- **Flags structure:** `{"is_personal": bool, "is_demo": bool, "allow_email": bool, "allow_social": bool, "allow_sso": bool, "auto_join": bool, "domains_only": bool, "allow_root": bool}`
+
+### Organization Domains Table
+- **Renamed:** `domain` → `slug`, `verification_token` → `token`
+- **Removed:** `verified` (boolean column)
+- **Added:** `name`, `description`, `flags`, `tags`, `meta`
+- **Flags structure:** `{"is_verified": bool}`
+- **Lifecycle:** Changed from `LegacyLifecycle` to `Lifecycle` (added `deleted_at`, `created_by_id`, `deleted_by_id`)
+
+### Organization Providers Table
+- **Renamed:** `config` → `settings`
+- **Removed:** `enabled` (boolean), `domain_id` (FK)
+- **Added:** `name`, `description`, `flags`, `tags`, `meta`
+- **Flags structure:** `{"is_active": bool}`
+- **Lifecycle:** Changed from `LegacyLifecycle` to `Lifecycle`
+
+### Organization Members Table
+- **Added:** `role` (string, default: "member"), `created_at`, `updated_at`, `updated_by_id` (nullable)
+
+### Removed Tables
+- **organization_policies** - moved to `organizations.flags`
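+
+As a quick sanity check for the column renames above, the following query should return only the new column names (a sketch using `information_schema`):
+
+```sql
+-- Confirm renamed columns exist and the old ones are gone
+SELECT table_name, column_name
+FROM information_schema.columns
+WHERE (table_name = 'organization_domains'
+       AND column_name IN ('slug', 'token', 'domain', 'verified', 'verification_token'))
+   OR (table_name = 'organization_providers'
+       AND column_name IN ('settings', 'config', 'enabled'))
+ORDER BY table_name, column_name;
+-- Expected: only slug, token, and settings are returned
+```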
diff --git a/api/ee/tests/manual/auth/01-discovery.http b/api/ee/tests/manual/auth/01-discovery.http
new file mode 100644
index 0000000000..33d8e54b24
--- /dev/null
+++ b/api/ee/tests/manual/auth/01-discovery.http
@@ -0,0 +1,105 @@
+###
+# Auth Discovery Tests
+# Tests the /auth/discover endpoint which determines available authentication methods
+#
+# Setup:
+# 1. Start backend: cd vibes/api && uvicorn main:app --reload
+# 2. Run migrations: alembic upgrade head
+# 3. Update @apiUrl if needed
+###
+
+@baseUrl = http://localhost
+@apiUrl = {{baseUrl}}/api
+@contentType = application/json
+
+### Test 1: New User Discovery (No Account)
+# Expected: Returns available auth methods (email:[...], social:[...], oidc:[...])
+# Should show: exists=false, all globally enabled methods
+POST {{apiUrl}}/auth/discover
+Content-Type: {{contentType}}
+
+{
+ "email": "newuser@example.com"
+}
+
+### Test 2: Existing User Discovery (Email OTP)
+# Prerequisites: User exists with email:otp identity
+# Expected: Returns exists=true, methods based on their org policies
+POST {{apiUrl}}/auth/discover
+Content-Type: {{contentType}}
+
+{
+ "email": "existing@example.com"
+}
+
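+# Prerequisite check (SQL sketch, assuming the schema from 00-setup-verification):
+# SELECT u.email, ui.method
+# FROM users u
+# JOIN user_identities ui ON ui.user_id = u.id
+# WHERE u.email = 'existing@example.com';
+# Expected: at least one row with method = 'email:otp'
+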
+### Test 3: SSO Required Organization Member
+# Prerequisites:
+# - User exists and is member of org with sso_only policy
+# - Organization has verified domain
+# - OIDC provider configured
+# Expected: sso_required_by_some=true, shows SSO providers
+POST {{apiUrl}}/auth/discover
+Content-Type: {{contentType}}
+
+{
+ "email": "user@acme.com"
+}
+
+### Test 4: Multi-Org User Discovery
+# Prerequisites: User is member of multiple orgs with different policies
+# Expected: Returns union of all allowed methods across all orgs
+POST {{apiUrl}}/auth/discover
+Content-Type: {{contentType}}
+
+{
+ "email": "multiorg@example.com"
+}
+
+### Test 5: Domain with SSO Provider
+# Prerequisites:
+# - Domain "verified.com" is verified in organization_domains
+# - SSO provider linked to this domain
+# Expected: Returns SSO provider information
+POST {{apiUrl}}/auth/discover
+Content-Type: {{contentType}}
+
+{
+ "email": "user@verified.com"
+}
+
+### Test 6: Invalid Email Format
+# Expected: 400 Bad Request or validation error
+POST {{apiUrl}}/auth/discover
+Content-Type: {{contentType}}
+
+{
+ "email": "notanemail"
+}
+
+### Test 7: Empty Email
+# Expected: 422 Validation error
+POST {{apiUrl}}/auth/discover
+Content-Type: {{contentType}}
+
+{
+ "email": ""
+}
+
+###
+# Expected Response Structure:
+# {
+# "exists": boolean,
+# "methods": {
+# "email:otp": boolean,
+# "email:password": boolean,
+# "social:google": boolean,
+# "social:github": boolean,
+# "sso": [
+# {
+# "slug": string,
+# "name": string,
+# }
+# ]
+# }
+# }
+###
diff --git a/api/ee/tests/manual/auth/02-oidc-authorize.http b/api/ee/tests/manual/auth/02-oidc-authorize.http
new file mode 100644
index 0000000000..e8b6e35e6d
--- /dev/null
+++ b/api/ee/tests/manual/auth/02-oidc-authorize.http
@@ -0,0 +1,99 @@
+###
+# OIDC Authorization Tests (EE Only)
+# Tests the OIDC authorization flow initiation
+#
+# Setup:
+# 1. Ensure AGENTA_LICENSE=ee
+# 2. Create organization with SSO provider:
+# - Add organization_providers record with OIDC settings
+# - Configure issuer, client_id, client_secret in settings
+# - Set flags.is_active=true
+# 3. Update variables below with actual UUIDs
+###
+
+@baseUrl = http://localhost
+@apiUrl = {{baseUrl}}/api
+@contentType = application/json
+
+# REPLACE THESE WITH ACTUAL VALUES FROM YOUR DATABASE
+@organizationId = 00000000-0000-0000-0000-000000000000
+@providerId = 00000000-0000-0000-0000-000000000000
+
+### Test 1: Initiate OIDC Authorization (Valid Provider)
+# Expected: 302 redirect to IdP authorization endpoint
+# Should include state parameter and redirect_uri
+GET {{apiUrl}}/auth/authorize/oidc?provider_id={{providerId}}&redirect=/
+Accept: application/json
+
+### Test 2: OIDC Authorization with Custom Redirect
+# Expected: 302 redirect, redirect_uri includes custom path
+GET {{apiUrl}}/auth/authorize/oidc?provider_id={{providerId}}&redirect=/dashboard
+Accept: application/json
+
+### Test 3: Invalid Provider ID (Not Found)
+# Expected: 404 Not Found
+GET {{apiUrl}}/auth/authorize/oidc?provider_id=99999999-9999-9999-9999-999999999999&redirect=/
+Accept: application/json
+
+### Test 4: Disabled Provider (flags.is_active=false)
+# Prerequisites: Provider exists but flags.is_active=false
+# Expected: 403 Forbidden or 404
+GET {{apiUrl}}/auth/authorize/oidc?provider_id={{providerId}}&redirect=/
+Accept: application/json
+
+### Test 5: Missing Provider ID
+# Expected: 422 Validation error
+GET {{apiUrl}}/auth/authorize/oidc?redirect=/
+Accept: application/json
+
+### Test 6: OIDC in OSS Mode (Should Fail)
+# Prerequisites: AGENTA_LICENSE=oss
+# Expected: 404 Not Found with message "SSO/OIDC is only available in Enterprise Edition"
+GET {{apiUrl}}/auth/authorize/oidc?provider_id={{providerId}}&redirect=/
+Accept: application/json
+
+###
+# OIDC Flow Overview:
+# 1. User clicks SSO provider button
+# 2. Frontend calls GET /auth/authorize/oidc?provider_id=xxx
+# 3. Backend generates state, stores in state_store
+# 4. Backend redirects (302) to IdP authorization endpoint
+# 5. User authenticates at IdP
+# 6. IdP redirects back to /auth/callback with code + state
+# 7. Backend exchanges code for tokens
+# 8. Backend creates/links user_identity
+# 9. Backend creates session with identities array
+# 10. Backend redirects to application
+###
+
+###
+# Testing OIDC Provider Configuration:
+# You can verify provider config is loaded correctly by checking:
+# - Provider record exists in organization_providers table
+# - settings JSONB contains: issuer, client_id, client_secret, scopes
+# - Provider is active (flags.is_active=true)
+# - Organization exists and has allow_sso=true in flags
+###
+
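+###
+# One way to check these points directly in SQL (same query as in the setup guide; replace the organization ID):
+# SELECT slug, name, flags->>'is_active' as is_active, settings->>'issuer' as issuer
+# FROM organization_providers
+# WHERE organization_id = '';
+###
+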
+###
+# Example Provider Configuration (organization_providers table):
+# {
+# "id": "uuid",
+# "organization_id": "uuid",
+# "slug": "okta",
+# "name": "Okta SSO",
+# "settings": {
+# "issuer": "https://dev-12345.okta.com",
+# "client_id": "0oa...",
+# "client_secret": "xxx",
+# "scopes": ["openid", "profile", "email"]
+# },
+# "flags": {
+# "is_active": true
+# }
+# }
+#
+# Note:
+# - 'config' renamed to 'settings'
+# - 'enabled' moved to 'flags.is_active'
+###
diff --git a/api/ee/tests/manual/auth/03-domain-verification.http b/api/ee/tests/manual/auth/03-domain-verification.http
new file mode 100644
index 0000000000..76d091d3f5
--- /dev/null
+++ b/api/ee/tests/manual/auth/03-domain-verification.http
@@ -0,0 +1,291 @@
+###
+# Domain Verification Testing
+# Tests for organization type restrictions and domain exclusivity
+#
+# Prerequisites:
+# - Run 00-setup-verification.http first
+# - EE mode enabled (AGENTA_LICENSE=ee)
+# - Test organizations created (collaborative + personal)
+###
+
+@baseUrl = http://localhost
+@apiUrl = {{baseUrl}}/api
+@testOrgId =
+@testPersonalOrgId =
+@secondOrgId =
+@personalOrgDomainId =
+@conflictingDomainId =
+
+
+###
+# Test 1: Verify Domain on Collaborative Organization (Should Succeed)
+###
+
+# This should succeed - collaborative orgs can verify domains
+POST {{apiUrl}}/organizations/{{testOrgId}}/domains/verify
+Content-Type: application/json
+
+{
+ "domain_id": ""
+}
+
+# Expected: 200 OK
+# {
+# "id": "...",
+# "organization_id": "{{testOrgId}}",
+# "slug": "acme.com",
+# "flags": {"is_verified": true}
+# }
+
+
+###
+# Test 2: Attempt to Verify Domain on Personal Organization (Should Fail)
+###
+
+# This should fail - personal orgs cannot verify domains
+POST {{apiUrl}}/organizations/{{testPersonalOrgId}}/domains/verify
+Content-Type: application/json
+
+{
+ "domain_id": "{{personalOrgDomainId}}"
+}
+
+# Expected: 400 Bad Request
+# {
+# "error": "Personal organizations cannot verify domains. Domain verification is only available for collaborative organizations."
+# }
+
+
+###
+# Test 3: Attempt to Verify Already-Verified Domain (Should Fail)
+###
+
+# This should fail - domain already verified by another org
+POST {{apiUrl}}/organizations/{{secondOrgId}}/domains/verify
+Content-Type: application/json
+
+{
+ "domain_id": "{{conflictingDomainId}}"
+}
+
+# Expected: 400 Bad Request
+# {
+# "error": "Domain 'acme.com' is already verified by another organization. Each domain can only be verified by one organization at a time."
+# }
+
+
+###
+# Test 4: Add Domain to Collaborative Organization (Should Succeed)
+###
+
+POST {{apiUrl}}/organizations/{{testOrgId}}/domains
+Content-Type: application/json
+
+{
+ "slug": "newdomain.com",
+ "name": "New Domain"
+}
+
+# Expected: 201 Created
+# {
+# "id": "...",
+# "organization_id": "{{testOrgId}}",
+# "slug": "newdomain.com",
+# "flags": {"is_verified": false},
+# "token": "..."
+# }
+
+
+###
+# Test 5: List Domains for Collaborative Organization
+###
+
+GET {{apiUrl}}/organizations/{{testOrgId}}/domains
+
+# Expected: 200 OK
+# [
+# {
+# "id": "...",
+# "slug": "acme.com",
+# "flags": {"is_verified": true}
+# },
+# {
+# "id": "...",
+# "slug": "newdomain.com",
+# "flags": {"is_verified": false}
+# }
+# ]
+
+
+###
+# Test 6: List Domains for Personal Organization
+###
+
+GET {{apiUrl}}/organizations/{{testPersonalOrgId}}/domains
+
+# Expected: 200 OK
+# [
+# {
+# "id": "...",
+# "slug": "personal-test.com",
+# "flags": {"is_verified": false}
+# }
+# ]
+# NOTE: Personal org can have domains, but cannot verify them
+
+
+###
+# Test 7: Unverify Domain (Transfer Ownership)
+###
+
+# First, unverify the domain from the first org
+DELETE {{apiUrl}}/organizations/{{testOrgId}}/domains//verification
+
+# Expected: 200 OK
+# {
+# "id": "...",
+# "flags": {"is_verified": false}
+# }
+
+# Now verify it with the second org (should succeed now)
+POST {{apiUrl}}/organizations/{{secondOrgId}}/domains/verify
+Content-Type: application/json
+
+{
+ "domain_id": "{{conflictingDomainId}}"
+}
+
+# Expected: 200 OK
+# {
+# "flags": {"is_verified": true}
+# }
+
+
+###
+# Test 8: Check Organization Type (via flags)
+###
+
+GET {{apiUrl}}/organizations/{{testOrgId}}
+
+# Expected: 200 OK
+# {
+# "id": "{{testOrgId}}",
+# "name": "ACME Corporation",
+# "slug": "acme",
+# "flags": {"is_personal": false, "allow_email": true, ...}
+# }
+
+GET {{apiUrl}}/organizations/{{testPersonalOrgId}}
+
+# Expected: 200 OK
+# {
+# "id": "{{testPersonalOrgId}}",
+# "name": "Personal",
+# "slug": null,
+# "flags": {"is_personal": true}
+# }
+
+
+###
+# Test 9: Verify Auto-Join Flag with Verified Domain
+###
+
+# Get organization to check flags
+GET {{apiUrl}}/organizations/{{testOrgId}}
+
+# Expected: 200 OK
+# {
+# "id": "{{testOrgId}}",
+# "flags": {
+# "is_personal": false,
+# "allow_email": false,
+# "allow_social": false,
+# "allow_sso": true,
+# "domains_only": true,
+# "auto_join": true,
+# "allow_root": true
+# }
+# }
+
+# When a user authenticates with email from verified domain (e.g., user@acme.com),
+# and auto_join=true, they should automatically be added to the organization as a member
+# This is tested in the SSO flow tests
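+#
+# A quick SQL check after such a login (assumes the 'acme' test data from 00-setup-verification):
+# SELECT om.role, o.name
+# FROM organization_members om
+# JOIN users u ON u.id = om.user_id
+# JOIN organizations o ON o.id = om.organization_id
+# WHERE u.email = 'user@acme.com' AND o.slug = 'acme';
+# Expected: one row once auto-join has added the user as a member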
+
+
+###
+# Test 10: Update Auto-Join Flag
+###
+
+PATCH {{apiUrl}}/organizations/{{testOrgId}}
+Content-Type: application/json
+
+{
+ "flags": {
+ "auto_join": false
+ }
+}
+
+# Expected: 200 OK
+# {
+# "flags": {"auto_join": false, ...}
+# }
+
+# Now users with verified domain emails will NOT be auto-added
+# They must be explicitly invited
+
+
+###
+# Test 11: Test domains_only Enforcement
+###
+
+# When domains_only=true, only users with verified domain emails can access the org
+
+# Step 1: Set domains_only flag
+PATCH {{apiUrl}}/organizations/{{testOrgId}}
+Content-Type: application/json
+
+{
+ "flags": {
+ "domains_only": true
+ }
+}
+
+# Expected: 200 OK
+
+# Step 2: User with non-verified domain tries to access
+# Expected: 403 AUTH_DOMAIN_DENIED
+# {
+# "error": "AUTH_DOMAIN_DENIED",
+# "message": "Your email domain 'gmail.com' is not allowed for this organization"
+# }
+
+# Step 3: User with verified domain (acme.com) accesses
+# Expected: 200 OK (access granted)
+
+
+###
+# Test 12: Invitation Validation with domains_only
+###
+
+# When domains_only=true, invitations to non-verified domains should be blocked
+
+POST {{apiUrl}}/organizations/{{testOrgId}}/invitations
+Content-Type: application/json
+
+{
+ "email": "user@gmail.com"
+}
+
+# Expected: 400 Bad Request
+# {
+# "error": "Cannot invite user@gmail.com: domain 'gmail.com' is not a verified domain for this organization"
+# }
+
+POST {{apiUrl}}/organizations/{{testOrgId}}/invitations
+Content-Type: application/json
+
+{
+ "email": "user@acme.com"
+}
+
+# Expected: 201 Created (invitation sent)
diff --git a/api/ee/tests/manual/auth/03-identity-tracking.http b/api/ee/tests/manual/auth/03-identity-tracking.http
new file mode 100644
index 0000000000..286129838d
--- /dev/null
+++ b/api/ee/tests/manual/auth/03-identity-tracking.http
@@ -0,0 +1,225 @@
+###
+# Identity Tracking & Session Payload Tests
+# Tests that user_identities are created and sessions contain identities array
+#
+# These tests verify the SuperTokens override functions are working correctly
+#
+# Prerequisites:
+# 1. SuperTokens Core running
+# 2. Migrations applied
+# 3. Backend started with SuperTokens configured
+###
+
+@baseUrl = http://localhost
+@apiUrl = {{baseUrl}}/api
+@contentType = application/json
+
+###
+# Test Flow 1: Email OTP Login → Check Identity Created
+###
+
+### Step 1: Request OTP Code
+# This triggers SuperTokens passwordless flow
+# You'll need to complete this via SuperTokens UI or SDK
+# After successful login, check database:
+
+# SQL to verify email:otp identity created:
+# SELECT * FROM user_identities
+# WHERE method = 'email:otp'
+# ORDER BY created_at DESC
+# LIMIT 5;
+
+# Expected:
+# - id: uuid
+# - user_id: matches SuperTokens user
+# - method: 'email:otp'
+# - subject: user's email address
+# - domain: extracted from email (e.g., 'example.com')
+
+
+### Step 2: Check Session Payload Contains Identities
+# After successful OTP login, use SuperTokens session verification
+# to check the access token payload
+
+# The session should contain:
+# {
+# "userId": "...",
+# "identities": ["email:otp"],
+# ...other claims
+# }
+
+
+###
+# Test Flow 2: Social Login (Google) → Check Identity Created
+###
+
+### Step 1: Complete Google OAuth Flow
+# Navigate to: http://localhost/api/auth
+# Click "Continue with Google"
+# Complete Google auth
+# Check database:
+
+# SQL to verify social:google identity:
+# SELECT * FROM user_identities
+# WHERE method = 'social:google'
+# ORDER BY created_at DESC
+# LIMIT 5;
+
+# Expected:
+# - method: 'social:google'
+# - subject: Google user ID (stable identifier)
+# - domain: extracted from Google email
+
+
+### Step 2: Check Session After Social Login
+# Session should now contain:
+# {
+# "identities": ["email:otp", "social:google"]
+# }
+# (assuming user previously logged in with OTP)
+
+
+###
+# Test Flow 3: SSO Login (OIDC) → Check Identity Created
+###
+
+### Step 1: Complete OIDC Flow (EE Only)
+# Prerequisites:
+# - Organization provider configured
+# - User initiates SSO via provider_id
+
+# After successful SSO login, check:
+
+# SQL to verify sso identity:
+# SELECT * FROM user_identities
+# WHERE method LIKE 'sso:%'
+# ORDER BY created_at DESC
+# LIMIT 5;
+
+# Expected:
+# - method: 'sso:acme:okta' (or sso:{org_id}:okta if slug not set)
+# - subject: OIDC subject claim from IdP
+# - domain: extracted from OIDC email
+
+
+### Step 2: Check Multi-Method Session
+# After logging in via all three methods, session should contain:
+# {
+# "identities": [
+# "email:otp",
+# "social:google",
+# "sso:acme:okta"
+# ]
+# }
+
+
+###
+# Verification Queries
+###
+
+# Query 1: Check all identities for a specific user
+# SELECT
+# ui.method,
+# ui.subject,
+# ui.domain,
+# ui.created_at
+# FROM user_identities ui
+# WHERE ui.user_id = ''
+# ORDER BY ui.created_at;
+
+
+# Query 2: Count identities by method
+# SELECT
+# method,
+# COUNT(*) as count
+# FROM user_identities
+# GROUP BY method
+# ORDER BY count DESC;
+
+
+# Query 3: Find users with multiple identities
+# SELECT
+# ui.user_id,
+# u.email,
+# COUNT(*) as identity_count,
+# array_agg(ui.method) as methods
+# FROM user_identities ui
+# JOIN users u ON u.id = ui.user_id
+# GROUP BY ui.user_id, u.email
+# HAVING COUNT(*) > 1
+# ORDER BY identity_count DESC;
+
+
+# Query 4: Check identity created after OTP login
+# SELECT
+# ui.*,
+# u.email as user_email
+# FROM user_identities ui
+# JOIN users u ON u.id = ui.user_id
+# WHERE ui.method = 'email:otp'
+# AND ui.created_at > now() - interval '5 minutes'
+# ORDER BY ui.created_at DESC;
+
+
+###
+# Testing Session Payload (via SuperTokens API)
+###
+
+# You can verify session payload using SuperTokens' session verification:
+# 1. Log in to get session cookie
+# 2. Make authenticated request to any protected endpoint
+# 3. Backend should verify session and have access to identities array
+
+# Example protected endpoint (if you have one):
+# GET {{apiUrl}}/me
+# Cookie: sAccessToken=...; sRefreshToken=...
+
+# The endpoint handler can access session like:
+# session = await verify_session(request)
+# payload = session.get_access_token_payload()
+# identities = payload.get("identities", [])
+
+
+###
+# Edge Cases to Test
+###
+
+# 1. User logs in with email:otp twice
+# → Should NOT create duplicate identity
+# → Check UNIQUE constraint works
+
+# 2. User logs in with social:google, then email:otp
+# → Should have 2 identities
+# → Both should appear in session
+
+# 3. User with SSO identity logs in with email:otp
+# → Should accumulate both
+# → Session should reflect all methods
+
+# 4. Database query errors (simulate by removing permissions)
+# → Should log error but not block authentication
+# → Session should still be created with fallback
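+
+# A quick check for edge case 1 above (UNIQUE (method, subject) should prevent duplicates):
+# SELECT method, subject, COUNT(*) AS copies
+# FROM user_identities
+# GROUP BY method, subject
+# HAVING COUNT(*) > 1;
+# Expected: no rows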
+
+
+###
+# Common Issues & Debugging
+###
+
+# Issue: Identity not created after login
+# Check:
+# - SuperTokens override functions are registered
+# - Database has user_identities table
+# - User exists in users table
+# - No errors in backend logs
+
+# Issue: Session doesn't contain identities array
+# Check:
+# - Session override function is registered
+# - user_context["existing_identities"] is set in sign_in_up/consume_code
+# - SuperTokens session.init includes override config
+
+# Issue: Wrong method format (e.g., 'sso:undefined:okta')
+# Check:
+# - Organization has slug set
+# - db_manager.get_organization_by_id returns org
+# - Fallback to org_id works if slug is null
diff --git a/api/ee/tests/manual/auth/04-policy-enforcement.http b/api/ee/tests/manual/auth/04-policy-enforcement.http
new file mode 100644
index 0000000000..6c187bf570
--- /dev/null
+++ b/api/ee/tests/manual/auth/04-policy-enforcement.http
@@ -0,0 +1,329 @@
+###
+# Organization Flag Enforcement Tests (EE Only)
+# Tests the auth flag enforcement and auth upgrade requirements
+#
+# Prerequisites:
+# 1. AGENTA_LICENSE=ee
+# 2. Organizations set up with different flag configurations
+# 3. Users with various authentication methods
+###
+
+@baseUrl = http://localhost
+@apiUrl = {{baseUrl}}/api
+@contentType = application/json
+
+# REPLACE THESE WITH ACTUAL VALUES
+@organizationId = 00000000-0000-0000-0000-000000000000
+@projectId = 00000000-0000-0000-0000-000000000000
+
+###
+# Scenario 1: User with email:otp tries to access SSO-only organization
+###
+
+### Step 1: Login with Email OTP
+# Complete OTP login flow first to get session with identities=["email:otp"]
+
+### Step 2: Try to Access SSO-Required Organization
+# Expected: 403 Forbidden with AUTH_UPGRADE_REQUIRED
+GET {{apiUrl}}/projects?organization_id={{organizationId}}
+Accept: application/json
+# Cookie: sAccessToken=...; sRefreshToken=...
+
+# Expected Response:
+# {
+# "error": "AUTH_UPGRADE_REQUIRED",
+# "message": "Additional authentication required",
+# "required_methods": ["sso:*"],
+# "current_identities": ["email:otp"]
+# }
+
+
+###
+# Scenario 2: User completes SSO, then retries access
+###
+
+### Step 1: Complete SSO Authentication
+# User is redirected to SSO provider
+# After successful SSO, session updated to:
+# identities = ["email:otp", "sso:acme:okta"]
+
+### Step 2: Retry Organization Access
+# Expected: 200 OK, access granted
+GET {{apiUrl}}/projects?organization_id={{organizationId}}
+Accept: application/json
+# Cookie: sAccessToken=...; sRefreshToken=...
+
+# Expected: Successful response with project data
+
+
+###
+# Scenario 3: Organization allows multiple methods (via flags)
+###
+
+### Setup:
+# Organization flags: allow_email=true, allow_social=true, allow_sso=true
+
+### Test: Access with any valid method
+# User with identities=["email:otp"] should be allowed
+GET {{apiUrl}}/projects?organization_id={{organizationId}}
+Accept: application/json
+
+# Expected: 200 OK
+
+
+###
+# Scenario 4: No organization_id in request
+###
+
+### Test: Request without organization context
+# Middleware should skip flag enforcement
+GET {{apiUrl}}/health
+Accept: application/json
+
+# Expected: 200 OK (no flag enforcement)
+
+
+###
+# Scenario 5: User not a member of organization
+###
+
+### Setup:
+# User exists but not in organization_members for this org
+
+### Test: Try to access organization
+GET {{apiUrl}}/projects?organization_id={{organizationId}}
+Accept: application/json
+
+# Expected: 403 Forbidden
+# {
+# "error": "NOT_A_MEMBER",
+# "message": "You are not a member of this organization"
+# }
+
+
+###
+# Scenario 6: Organization has default flags (all methods allowed)
+###
+
+### Setup:
+# Organization flags: allow_email=true (default), allow_social=true (default)
+
+### Test: Access should work with any auth method
+GET {{apiUrl}}/projects?organization_id={{organizationId}}
+Accept: application/json
+
+# Expected: 200 OK (no restrictions)
+
+
+###
+# Scenario 7: Owner Bypass (allow_root=true)
+###
+
+### Setup:
+# Organization flags: allow_email=false, allow_social=false, allow_sso=true, allow_root=true
+# User is owner of organization
+
+### Test: Owner accesses with email:otp (normally blocked method)
+GET {{apiUrl}}/projects?organization_id={{organizationId}}
+Accept: application/json
+
+# Expected: 200 OK (owner bypasses all auth restrictions when allow_root=true)
+
+
+###
+# Scenario 8: domains_only Enforcement
+###
+
+### Setup:
+# Organization flags: domains_only=true
+# Verified domain: acme.com
+# User email: user@gmail.com
+
+### Test: User with non-verified domain tries to access
+GET {{apiUrl}}/projects?organization_id={{organizationId}}
+Accept: application/json
+
+# Expected: 403 AUTH_DOMAIN_DENIED
+# {
+# "error": "AUTH_DOMAIN_DENIED",
+# "message": "Your email domain 'gmail.com' is not allowed for this organization",
+# "current_domain": "gmail.com",
+# "allowed_domains": ["acme.com"]
+# }
+
+
+###
+# Scenario 9: SSO Provider Disabled
+###
+
+### Setup:
+# User has session with sso:acme:okta identity
+# Admin sets provider flags.is_active=false
+
+### Test: User tries to access SSO-only org
+GET {{apiUrl}}/projects?organization_id={{organizationId}}
+Accept: application/json
+
+# Expected: 403 AUTH_SSO_DENIED
+# {
+# "error": "AUTH_SSO_DENIED",
+# "message": "SSO provider is disabled or no longer valid"
+# }
+
+
+###
+# Testing Flag Configurations
+###
+
+# SQL to set different flag configurations:
+
+# Config 1: SSO Only
+# UPDATE organizations
+# SET flags = jsonb_set(jsonb_set(jsonb_set(flags,
+# '{allow_email}', 'false'),
+# '{allow_social}', 'false'),
+# '{allow_sso}', 'true')
+# WHERE id = '';
+
+# Config 2: Email or Social (no SSO)
+# UPDATE organizations
+# SET flags = jsonb_set(jsonb_set(jsonb_set(flags,
+# '{allow_email}', 'true'),
+# '{allow_social}', 'true'),
+# '{allow_sso}', 'false')
+# WHERE id = '';
+
+# Config 3: Everything allowed (default)
+# UPDATE organizations
+# SET flags = jsonb_set(jsonb_set(jsonb_set(flags,
+# '{allow_email}', 'true'),
+# '{allow_social}', 'true'),
+# '{allow_sso}', 'true')
+# WHERE id = '';
+
+# Config 4: SSO only with owner bypass
+# UPDATE organizations
+# SET flags = jsonb_set(jsonb_set(jsonb_set(jsonb_set(flags,
+# '{allow_email}', 'false'),
+# '{allow_social}', 'false'),
+# '{allow_sso}', 'true'),
+# '{allow_root}', 'true')
+# WHERE id = '';
+
+
+###
+# Verification Queries
+###
+
+# Check user's current identities
+# SELECT ui.method
+# FROM user_identities ui
+# WHERE ui.user_id = ''
+# ORDER BY ui.created_at;
+
+# Check organization flags
+# SELECT
+# id,
+# name,
+# flags->'allow_email' as allow_email,
+# flags->'allow_social' as allow_social,
+# flags->'allow_sso' as allow_sso,
+# flags->'allow_root' as allow_root,
+# flags->'domains_only' as domains_only
+# FROM organizations
+# WHERE id = '';
+
+# Check if user is organization member
+# SELECT EXISTS (
+# SELECT 1
+# FROM organization_members
+# WHERE user_id = ''
+# AND organization_id = ''
+# ) as is_member;
+
+# Check if user is owner
+# SELECT owner_id = '' as is_owner
+# FROM organizations
+# WHERE id = '';
+
+
+###
+# Middleware Bypass Routes (Should NOT Check Flags)
+###
+
+### Auth routes (no flag check)
+GET {{apiUrl}}/auth/discover
+Accept: application/json
+
+### Health check (no flag check)
+GET {{apiUrl}}/health
+Accept: application/json
+
+### Public routes (no flag check)
+GET {{apiUrl}}/public/status
+Accept: application/json
+
+
+###
+# Edge Cases
+###
+
+# 1. User session has no identities array
+# → Should treat as empty, enforce flags
+
+# 2. All allow_* flags are false
+# → System auto-enables allow_root to prevent lockout
+# → Owner can still access
+
+# 3. User has multiple identities, only one matches
+# → Should allow access (OR logic)
+
+# 4. Organization deleted but membership remains
+# → Should handle gracefully
+
+# 5. Concurrent flag updates during request
+# → Should use consistent flag snapshot
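+
+# For edge case 2 above, a query to spot organizations with every auth method disabled
+# (potential lockout; assumes the policy flags from 00-setup-verification):
+# SELECT id, name
+# FROM organizations
+# WHERE flags->>'allow_email' = 'false'
+#   AND flags->>'allow_social' = 'false'
+#   AND flags->>'allow_sso' = 'false';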
+
+
+###
+# Error Codes Summary
+###
+
+# AUTH_UPGRADE_REQUIRED
+# - Trigger: User's session identities don't match any allowed method
+# - Response: 403 with required_methods list
+# - User action: Complete additional authentication
+
+# AUTH_SSO_DENIED
+# - Trigger: SSO provider is disabled (flags.is_active=false)
+# - Response: 403 with message
+# - User action: Contact admin or use different auth method
+
+# AUTH_DOMAIN_DENIED
+# - Trigger: User's email domain not in verified domains list (when domains_only=true)
+# - Response: 403 with allowed_domains list
+# - User action: Use email from verified domain or contact admin
+
+
+###
+# Common Issues & Debugging
+###
+
+# Issue: Flags not enforced
+# Check:
+# - Middleware is registered in FastAPI app
+# - Request includes organization_id param
+# - AGENTA_LICENSE=ee
+# - organizations.flags contains auth flags
+
+# Issue: Wrong flags applied
+# Check:
+# - organization_id in request matches flags
+# - No caching issues
+# - Database query returns correct organization
+
+# Issue: Session identities not checked
+# Check:
+# - Session payload includes "identities" array
+# - Middleware can verify session correctly
+# - check_organization_access() function logic is correct
diff --git a/api/ee/tests/manual/auth/05-slug-immutability.http b/api/ee/tests/manual/auth/05-slug-immutability.http
new file mode 100644
index 0000000000..3fe4ca45b5
--- /dev/null
+++ b/api/ee/tests/manual/auth/05-slug-immutability.http
@@ -0,0 +1,192 @@
+###
+# Organization Slug Immutability Tests
+# Tests that organization slug cannot be changed once set
+#
+# This validates the business rule:
+# - Slug can be null (for backward compatibility)
+# - Slug can be set once (from null to a value)
+# - Slug CANNOT be changed once set (immutable)
+###
+
+@baseUrl = http://localhost
+@apiUrl = {{baseUrl}}/api
+@contentType = application/json
+
+# REPLACE WITH ACTUAL VALUES
+@organizationId = 00000000-0000-0000-0000-000000000000
+@apikey = your_api_key_here
+###
+# Scenario 1: Set slug for organization without slug (Should Succeed)
+###
+
+# Step 1: Verify organization has no slug
+# SQL: SELECT id, name, slug FROM organizations WHERE id = '';
+# Expected: slug IS NULL
+
+# Step 2: Set slug for the first time
+# This should SUCCEED
+# Note: Replace with your actual organization update endpoint
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+ "slug": "some-slug"
+}
+
+# Expected Response: 200 OK
+# Slug should now be set to "some-slug"
+
+
+###
+# Scenario 2: Try to change existing slug (Should Fail)
+###
+
+# Prerequisites: Organization already has slug="some-slug"
+
+# Step 1: Try to change slug to different value
+# This should FAIL with ValueError
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+ "slug": "different-slug"
+}
+
+# Expected Response: 400 Bad Request or 422 Validation Error
+# {
+# "detail": "Organization slug cannot be changed once set. Current slug: 'some-slug'"
+# }
+
+
+###
+# Scenario 3: Update slug to same value (Should Succeed)
+###
+
+# Prerequisites: Organization has slug="some-slug"
+
+# Step 1: Set slug to the same value
+# This should SUCCEED (idempotent operation)
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+ "slug": "some-slug"
+}
+
+# Expected Response: 200 OK
+# Slug remains "some-slug"
+
+###
+# Scenario 4: Update other fields without touching slug (Should Succeed)
+###
+
+# Prerequisites: Organization has slug="some-slug"
+
+# Step 1: Update organization name without changing slug
+# This should SUCCEED
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+ "name": "ACME Corporation Updated",
+ "description": "New description"
+}
+
+# Expected Response: 200 OK
+# Name and description updated, slug unchanged
+
+
+###
+# Scenario 5: Try to set slug to null/empty (Should Fail)
+###
+
+# Prerequisites: Organization has slug="some-slug"
+
+# Step 1: Try to clear the slug
+# This should FAIL (changing from value to null)
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+ "slug": null
+}
+
+# Expected Response: 400 Bad Request
+# Slug should remain "some-slug"
+
+
+###
+# Verification Queries
+###
+
+# Check slug value after operations
+# SQL: SELECT id, name, slug FROM organizations WHERE id = '';
+
+# Check slug is unique across organizations
+# SQL: SELECT slug, COUNT(*) FROM organizations WHERE slug IS NOT NULL GROUP BY slug HAVING COUNT(*) > 1;
+# Expected: No rows (all slugs unique)
+
+# Find organizations without slugs (legacy)
+# SQL: SELECT id, name, created_at FROM organizations WHERE slug IS NULL ORDER BY created_at;
+
+
+###
+# Edge Cases
+###
+
+# Edge Case 1: Slug with special characters
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+ "slug": "some-slug-2025!#$%"
+}
+
+###
+
+# Should fail if slug validation only allows lowercase letters, numbers, and hyphens (depends on validation rules)
+
+# Edge Case 2: Very long slug
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+ "slug": "this-looooooooooooooooooooooooooooooooooooooooooooooooooooooong-slug"
+}
+
+###
+
+# Should fail if the slug column has a length constraint
+
+# Edge Case 3: Duplicate slug (different organization)
+# Prerequisites: Organization A has slug="some-slug"
+# Try to set Organization B's slug to "some-slug"
+# Expected: Database constraint violation (unique constraint)
+
+PATCH {{apiUrl}}/organizations/{{organizationId}}
+Content-Type: {{contentType}}
+Authorization: ApiKey {{apikey}}
+
+{
+ "slug": "some-slug"
+}
+
+###
+# Test Summary
+###
+
+# ✅ Setting slug first time (null → value): ALLOWED
+# ❌ Changing slug (value → different value): BLOCKED
+# ✅ Updating slug (value → same value): ALLOWED
+# ❌ Clearing slug (value → null): BLOCKED
+# ✅ Updating other fields with slug set: ALLOWED
+# ❌ Special characters in slug: DEPENDS ON VALIDATION
+# ❌ Very long slug: DEPENDS ON LENGTH CONSTRAINT
+# ❌ Duplicate slugs across orgs: BLOCKED (DB constraint)
diff --git a/api/ee/tests/manual/auth/QUICK-START.md b/api/ee/tests/manual/auth/QUICK-START.md
new file mode 100644
index 0000000000..dd48a1ced6
--- /dev/null
+++ b/api/ee/tests/manual/auth/QUICK-START.md
@@ -0,0 +1,100 @@
+# 🚀 Quick Start - SSO/OIDC Testing
+
+Get started testing in 5 minutes!
+
+## 1️⃣ Start Services (1 minute)
+
+Make sure the backend API, the database, and SuperTokens Core are running, and that migrations have been applied (`alembic upgrade head`).
+
+## 2️⃣ Test Discovery (30 seconds)
+
+```bash
+curl -X POST http://localhost:8000/auth/discover \
+ -H "Content-Type: application/json" \
+ -d '{"email": "test@example.com"}'
+```
+
+**Expected Response:**
+```json
+{
+ "user_exists": false,
+ "methods": {
+ "email:otp": true,
+ "sso": {
+ "available": false,
+ "required_by_some_orgs": false,
+ "providers": []
+ }
+ }
+}
+```
+
+## 3️⃣ Test OTP Flow (2 minutes)
+
+1. Open browser: `http://localhost:8000/auth`
+2. Enter email and click "Send OTP"
+3. Check backend logs for OTP code (dev mode)
+4. Enter code and submit
+5. Verify in database:
+
+```sql
+SELECT * FROM user_identities WHERE method = 'email:otp' ORDER BY created_at DESC LIMIT 1;
+```
+
+**Expected:** New row with your email as subject
+
+## 4️⃣ Verify Session (30 seconds)
+
+After login, check session cookie contains identities:
+
+```bash
+# Get session from browser dev tools
+# Cookie: sAccessToken=...
+
+# Make authenticated request
+curl http://localhost:8000/api/me \
+ -H "Cookie: sAccessToken="
+```
+
+The backend should verify the session and see `identities: ["email:otp"]`.
+
+---
+
+## ✅ Basic Test Complete!
+
+You've verified:
+- ✅ Migrations applied
+- ✅ Discovery endpoint works
+- ✅ Email OTP login functional
+- ✅ Identity tracking creates records
+- ✅ Session contains identities array
+
+## 🎯 Next Steps
+
+### For OSS Mode Testing:
+Continue with `01-discovery.http` tests
+
+### For EE Mode Testing:
+1. Switch to EE mode: `export AGENTA_LICENSE=ee`
+2. Set up test organization (use SQL in `00-setup-verification.http`)
+3. Configure SSO provider
+4. Test with `02-oidc-authorize.http`
+
+## 🐛 Something Not Working?
+
+### Discovery returns error
+- Check backend is running on port 8000
+- Verify SuperTokens Core is accessible
+
+### OTP not sent
+- Check email provider configuration
+- Look for OTP code in backend logs (dev mode)
+- Verify SuperTokens Core connection
+
+### Identity not created
+- Check `user_identities` table exists
+- Verify override functions registered
+- Check backend logs for errors
+
+### Need Help?
+See `README.md` for detailed troubleshooting guide.
diff --git a/api/ee/tests/manual/auth/README.md b/api/ee/tests/manual/auth/README.md
new file mode 100644
index 0000000000..c909b02c24
--- /dev/null
+++ b/api/ee/tests/manual/auth/README.md
@@ -0,0 +1,224 @@
+# SSO/OIDC Manual Testing Guide
+
+This directory contains `.http` files for manually testing the SSO/OIDC authentication implementation.
+
+## 🧪 Test Execution Order
+
+### Phase 1: Setup & Verification
+1. **`00-setup-verification.http`** - Run SQL setup commands first
+ - Create test organizations with flags
+ - Set up domains and providers (EE only)
+ - Create test users
+ - Verify schema
+
+### Phase 2: Discovery Testing
+2. **`01-discovery.http`** - Test auth method discovery
+ - Test new user discovery
+ - Test existing user discovery
+ - Test SSO-required scenarios
+ - Test multi-org users
+
+### Phase 3: Domain Verification Testing (EE Only)
+3. **`03-domain-verification.http`** - Test domain verification and governance
+ - Verify domains on collaborative organizations
+ - Prevent personal orgs from verifying domains
+ - Enforce domain exclusivity (one domain, one org)
+ - Test auto-join flag configuration
+ - Domain transfer scenarios
+
+### Phase 4: OIDC Flow Testing (EE Only)
+4. **`02-oidc-authorize.http`** - Test OIDC initiation
+ - Valid provider authorization
+ - Invalid provider handling
+ - OSS mode blocking
+
+### Phase 5: Identity Tracking
+5. **`03-identity-tracking.http`** - Verify identity creation
+ - Email OTP identity tracking
+ - Social login identity tracking
+ - SSO identity tracking
+ - Session payload verification
+
+### Phase 6: Flag Enforcement (EE Only)
+6. **`04-policy-enforcement.http`** - Test access control
+ - SSO-only organization access (via flags)
+ - Multi-method flag combinations
+ - Auth upgrade requirements
+ - Membership validation
+
+### Phase 7: Slug Immutability
+7. **`05-slug-immutability.http`** - Test slug constraints
+ - Setting slug first time (null → value)
+ - Preventing slug changes (immutability)
+ - Updating organization without changing slug
+ - Edge cases and validation
+
+## 🔧 Using the .http Files
+
+### Option 1: VS Code REST Client
+1. Install "REST Client" extension by Huachao Mao
+2. Open any `.http` file
+3. Click "Send Request" above each test
+4. View response in split pane
+
+### Option 2: IntelliJ HTTP Client
+1. Open `.http` file in IntelliJ IDEA
+2. Click ▶️ button next to each request
+3. View response in tool window
+
+### Option 3: Manual with curl
+```bash
+# Discovery example
+curl -X POST http://localhost:8000/auth/discover \
+ -H "Content-Type: application/json" \
+ -d '{"email": "test@example.com"}'
+```
+
+## 📊 Expected Test Results
+
+### OSS Mode Tests (Should Pass)
+- ✅ Discovery returns `email:otp` and social methods
+- ✅ Email OTP login creates `user_identity` record
+- ✅ Social login creates `user_identity` record
+- ✅ Session contains `identities` array
+- ✅ Exactly 1 collaborative organization exists
+- ✅ No personal organizations exist
+- ✅ Organization has `flags.is_personal = false`
+- ❌ SSO endpoints return 404 "EE only"
+- ❌ Flag enforcement not active (EE only)
+- ❌ Domain verification not available
+
+### EE Mode Tests (Should Pass)
+- ✅ All OSS tests pass
+- ✅ Organizations have `flags.is_personal` (true or false)
+- ✅ Personal organizations cannot verify domains
+- ✅ Domain exclusivity enforced (each domain verified by at most one organization)
+- ✅ Auto-join flag (`flags.auto_join`) can be configured
+- ✅ Discovery returns SSO providers for verified domains
+- ✅ OIDC authorization redirects to IdP
+- ✅ SSO login creates `user_identity` with `sso:*` method
+- ✅ Flag-based access control blocks unauthorized methods
+- ✅ Auth upgrade flow works
+
+## 🐛 Troubleshooting
+
+### Discovery Returns No Methods
+**Check:**
+- Backend environment variables configured
+- SuperTokens Core is running
+- Database migrations applied
+- No errors in backend logs
+
+### Identities Not Created
+**Check:**
+- `user_identities` table exists
+- SuperTokens overrides registered in config
+- User exists in `users` table
+- Database has write permissions
+
+### Flags Not Enforced
+**Check:**
+- `AGENTA_LICENSE=ee`
+- Middleware registered in FastAPI app
+- Request includes `organization_id` parameter
+- `organizations.flags` JSONB contains policy flags
+
+### SSO Flow Fails
+**Check:**
+- OIDC provider configuration in `organization_providers`
+- Provider `flags.is_active = true`
+- Domain verified in `organization_domains` (`flags.is_verified = true`)
+- Organization is collaborative (`flags.is_personal = false`)
+- IdP credentials valid
+- Callback URL configured at IdP
+
+### Domain Verification Fails
+**Check:**
+- Organization is collaborative (`flags.is_personal = false`)
+- Domain not already verified by another organization
+- `organization_domains` table exists and populated
+
+## 🔍 Debugging Tips
+
+### Inspect Database State
+```sql
+-- Check identities created
+SELECT * FROM user_identities ORDER BY created_at DESC LIMIT 10;
+
+-- Check organization types (personal vs collaborative)
+SELECT id, name, slug, flags->>'is_personal' as is_personal
+FROM organizations
+ORDER BY flags->>'is_personal';
+
+-- Check organization flags (auth policy)
+SELECT
+ id,
+ name,
+ flags->'is_personal' as is_personal,
+ flags->'allow_email' as allow_email,
+ flags->'allow_social' as allow_social,
+ flags->'allow_sso' as allow_sso,
+ flags->'domains_only' as domains_only,
+ flags->'auto_join' as auto_join,
+ flags->'allow_root' as allow_root
+FROM organizations;
+
+-- Check domain verification
+SELECT
+ od.slug as domain,
+ od.flags->>'is_verified' as verified,
+ o.name as org_name,
+ o.flags->>'is_personal' as is_personal
+FROM organization_domains od
+JOIN organizations o ON o.id = od.organization_id
+ORDER BY od.flags->>'is_verified' DESC;
+
+-- Check SSO providers
+SELECT
+ o.name,
+ o.flags->>'is_personal' as is_personal,
+ op.slug,
+ op.flags->>'is_active' as is_active,
+ op.settings->>'issuer' as issuer
+FROM organizations o
+JOIN organization_providers op ON op.organization_id = o.id;
+```
+
+### Check SuperTokens Core
+```bash
+# Verify SuperTokens is running
+curl http://localhost:3567/hello
+
+# Check user list
+curl http://localhost:3567/users?limit=10
+```
+
+## ✅ Test Coverage
+
+These manual tests cover:
+- ✅ Email OTP authentication (OSS + EE)
+- ✅ Social authentication (OSS + EE)
+- ✅ SSO/OIDC authentication (EE only)
+- ✅ Identity tracking and accumulation
+- ✅ Session payload with identities
+- ✅ Organization membership queries
+- ✅ Flag-based access control
+- ✅ Auth method discovery
+- ✅ Multi-organization support
+- ✅ Auth upgrade requirements
+- ✅ Organization slug immutability
+- ✅ Slug validation and constraints
+- ✅ Organization classification (personal vs collaborative via `flags.is_personal`)
+- ✅ Domain verification on collaborative organizations
+- ✅ Domain verification restrictions on personal organizations
+- ✅ Domain exclusivity enforcement (each domain verified by at most one organization)
+- ✅ Auto-join flag configuration
+- ✅ Auto-join behavior with verified domains
+
+## Error Codes
+
+| Error Code | Trigger | HTTP Status |
+|------------|---------|-------------|
+| `AUTH_UPGRADE_REQUIRED` | Auth method not in allowed list | 403 |
+| `AUTH_SSO_DENIED` | SSO provider disabled or inactive | 403 |
+| `AUTH_DOMAIN_DENIED` | Email domain not in verified list | 403 |
diff --git a/api/entrypoints/routers.py b/api/entrypoints/routers.py
index e9c0ac03f6..9d12da4865 100644
--- a/api/entrypoints/routers.py
+++ b/api/entrypoints/routers.py
@@ -73,6 +73,7 @@
# Routers
from oss.src.apis.fastapi.vault.router import VaultRouter
+from oss.src.apis.fastapi.auth.router import auth_router
from oss.src.apis.fastapi.otlp.router import OTLPRouter
from oss.src.apis.fastapi.tracing.router import TracingRouter
from oss.src.apis.fastapi.invocations.router import InvocationsRouter
@@ -225,8 +226,8 @@ async def lifespan(*args, **kwargs):
)
# Redis client and TracingWorker for publishing spans to Redis Streams
-if env.REDIS_URI_DURABLE:
- redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False)
+if env.redis.uri_durable:
+ redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False)
tracing_worker = TracingWorker(
service=tracing_service,
redis_client=redis_client,
@@ -391,11 +392,20 @@ async def lifespan(*args, **kwargs):
tags=["Observability"],
)
+app.include_router(
+ router=auth_router,
+ prefix="/auth",
+ tags=["Auth"],
+)
+
+## DEPRECATED
app.include_router(
router=tracing.router,
prefix="/preview/tracing",
tags=["Deprecated"],
+ include_in_schema=False,
)
+## DEPRECATED
app.include_router(
router=tracing.router,
diff --git a/api/entrypoints/worker_evaluations.py b/api/entrypoints/worker_evaluations.py
index 6955bfc03e..12aec47860 100644
--- a/api/entrypoints/worker_evaluations.py
+++ b/api/entrypoints/worker_evaluations.py
@@ -103,8 +103,8 @@
)
# Redis client and TracingWorker for publishing spans to Redis Streams
-if env.REDIS_URI_DURABLE:
- redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False)
+if env.redis.uri_durable:
+ redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False)
tracing_worker = TracingWorker(
service=tracing_service,
redis_client=redis_client,
diff --git a/api/oss/databases/postgres/migrations/core/data_migrations/evaluators.py b/api/oss/databases/postgres/migrations/core/data_migrations/evaluators.py
index ac015ac6a2..a17c8e085d 100644
--- a/api/oss/databases/postgres/migrations/core/data_migrations/evaluators.py
+++ b/api/oss/databases/postgres/migrations/core/data_migrations/evaluators.py
@@ -39,8 +39,8 @@
)
# Redis client and TracingWorker for publishing spans to Redis Streams
-if env.REDIS_URI_DURABLE:
- redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False)
+if env.redis.uri_durable:
+ redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False)
tracing_worker = TracingWorker(
service=tracing_service,
redis_client=redis_client,
diff --git a/api/oss/databases/postgres/migrations/core/data_migrations/projects.py b/api/oss/databases/postgres/migrations/core/data_migrations/projects.py
index ad2f342fe6..0a97e71236 100644
--- a/api/oss/databases/postgres/migrations/core/data_migrations/projects.py
+++ b/api/oss/databases/postgres/migrations/core/data_migrations/projects.py
@@ -64,7 +64,7 @@ def check_for_multiple_default_projects(session: Session) -> Sequence[ProjectDB]
def create_default_project():
- PROJECT_NAME = "Default Project"
+ PROJECT_NAME = "Default"
engine = create_engine(env.postgres.uri_core)
sync_session = sessionmaker(engine, expire_on_commit=False)
diff --git a/api/oss/databases/postgres/migrations/core/data_migrations/secrets.py b/api/oss/databases/postgres/migrations/core/data_migrations/secrets.py
index 4399873507..cf70e04fc9 100644
--- a/api/oss/databases/postgres/migrations/core/data_migrations/secrets.py
+++ b/api/oss/databases/postgres/migrations/core/data_migrations/secrets.py
@@ -2,11 +2,9 @@
import traceback
import click
-from sqlalchemy.future import select
-from sqlalchemy import Connection, update, func
+from sqlalchemy import Connection, MetaData, Table, func, select, update
from oss.src.utils.env import env
-from oss.src.dbs.postgres.secrets.dbes import SecretsDBE
from oss.src.core.secrets.dtos import (
StandardProviderDTO,
StandardProviderSettingsDTO,
@@ -17,15 +15,22 @@
BATCH_SIZE = 500
+def _secrets_table(session: Connection) -> Table:
+ metadata = MetaData()
+ return Table("secrets", metadata, autoload_with=session)
+
+
def rename_and_update_secrets_data_schema(session: Connection):
try:
TOTAL_MIGRATED = 0
+ secrets_table = _secrets_table(session)
+
# Count total rows in secrets table
- total_query = select(func.count()).select_from(SecretsDBE)
+ total_query = select(func.count()).select_from(secrets_table)
result = session.execute(total_query).scalar()
TOTAL_SECRETS = result or 0
- print(f"Total rows in {SecretsDBE.__tablename__}: {TOTAL_SECRETS}")
+ print(f"Total rows in secrets: {TOTAL_SECRETS}")
encryption_key = env.agenta.crypt_key
if not encryption_key:
@@ -37,31 +42,36 @@ def rename_and_update_secrets_data_schema(session: Connection):
while True:
with set_data_encryption_key(data_encryption_key=encryption_key):
- # Fetch a batch of records using keyset pagination (ID-based)
- stmt = select(SecretsDBE).order_by(SecretsDBE.id).limit(BATCH_SIZE)
+ data_expr = func.pgp_sym_decrypt(
+ secrets_table.c.data, encryption_key
+ ).label("data")
+ stmt = (
+ select(secrets_table.c.id, data_expr)
+ .order_by(secrets_table.c.id)
+ .limit(BATCH_SIZE)
+ )
if last_processed_id:
- stmt = stmt.where(SecretsDBE.id > last_processed_id)
+ stmt = stmt.where(secrets_table.c.id > last_processed_id)
- secrets_dbes = session.execute(stmt).fetchall()
- if not secrets_dbes:
- break # No more records to process
+ secrets_rows = session.execute(stmt).fetchall()
+ if not secrets_rows:
+ break
- actual_batch_size = len(secrets_dbes)
+ actual_batch_size = len(secrets_rows)
if actual_batch_size == 0:
break
- # Update the schema structure of data for each record in the batch
- for secret_dbe in secrets_dbes:
- last_processed_id = secret_dbe.id # Update checkpoint
+ for secret_row in secrets_rows:
+ secret_id = secret_row.id
+ last_processed_id = secret_id
- # Load and validate JSON
- secret_json_data = json.loads(secret_dbe.data)
+ secret_json_data = json.loads(secret_row.data)
if (
"provider" not in secret_json_data
and "key" not in secret_json_data
):
raise ValueError(
- f"Invalid secret data format for ID {secret_dbe.id}. Data format: {secret_json_data}"
+ f"Invalid secret data format for ID {secret_id}. Data format: {secret_json_data}"
)
secret_data_dto = StandardProviderDTO(
@@ -72,9 +82,14 @@ def rename_and_update_secrets_data_schema(session: Connection):
)
update_statement = (
- update(SecretsDBE)
- .where(SecretsDBE.id == secret_dbe.id)
- .values(data=secret_data_dto.model_dump_json())
+ update(secrets_table)
+ .where(secrets_table.c.id == secret_id)
+ .values(
+ data=func.pgp_sym_encrypt(
+ secret_data_dto.model_dump_json(),
+ encryption_key,
+ )
+ )
)
session.execute(update_statement)
@@ -83,7 +98,7 @@ def rename_and_update_secrets_data_schema(session: Connection):
click.echo(
click.style(
- f"Processed {len(secrets_dbes)} records in this batch. "
+ f"Processed {len(secrets_rows)} records in this batch. "
f"Total migrated: {TOTAL_MIGRATED}. Remaining: {remaining_secrets}",
fg="yellow",
)
@@ -108,10 +123,12 @@ def revert_rename_and_update_secrets_data_schema(session: Connection):
try:
TOTAL_MIGRATED = 0
+ secrets_table = _secrets_table(session)
+
# Count total rows in secrets table
- total_query = select(func.count()).select_from(SecretsDBE)
+ total_query = select(func.count()).select_from(secrets_table)
TOTAL_SECRETS = session.execute(total_query).scalar() or 0
- print(f"Total rows in {SecretsDBE.__tablename__}: {TOTAL_SECRETS}")
+ print(f"Total rows in secrets: {TOTAL_SECRETS}")
encryption_key = env.agenta.crypt_key
if not encryption_key:
@@ -123,47 +140,53 @@ def revert_rename_and_update_secrets_data_schema(session: Connection):
while True:
with set_data_encryption_key(data_encryption_key=encryption_key):
- # Fetch a batch of records using keyset pagination
- stmt = select(SecretsDBE).order_by(SecretsDBE.id).limit(BATCH_SIZE)
+ data_expr = func.pgp_sym_decrypt(
+ secrets_table.c.data, encryption_key
+ ).label("data")
+ stmt = (
+ select(secrets_table.c.id, data_expr)
+ .order_by(secrets_table.c.id)
+ .limit(BATCH_SIZE)
+ )
if last_processed_id:
- stmt = stmt.where(SecretsDBE.id > last_processed_id)
+ stmt = stmt.where(secrets_table.c.id > last_processed_id)
- secrets_dbes = session.execute(stmt).fetchall()
- if not secrets_dbes:
- break # No more records to process
+ secrets_rows = session.execute(stmt).fetchall()
+ if not secrets_rows:
+ break
- for secret_dbe in secrets_dbes:
- last_processed_id = secret_dbe.id # Update checkpoint
+ for secret_row in secrets_rows:
+ secret_id = secret_row.id
+ last_processed_id = secret_id
- # Load and validate JSON
- secret_json_data = json.loads(secret_dbe.data)
+ secret_json_data = json.loads(secret_row.data)
if (
"kind" not in secret_json_data
and "provider" not in secret_json_data
):
- raise ValueError(
- f"Invalid secret format for ID {secret_dbe.id}"
- )
+ raise ValueError(f"Invalid secret format for ID {secret_id}")
- # Convert back to old schema
old_format_data = {
"provider": secret_json_data["kind"],
"key": secret_json_data["provider"]["key"],
}
- # Update record with encryption
session.execute(
- update(SecretsDBE)
- .where(SecretsDBE.id == secret_dbe.id)
- .values(data=json.dumps(old_format_data))
+ update(secrets_table)
+ .where(secrets_table.c.id == secret_id)
+ .values(
+ data=func.pgp_sym_encrypt(
+ json.dumps(old_format_data), encryption_key
+ )
+ )
)
- TOTAL_MIGRATED += len(secrets_dbes)
+ TOTAL_MIGRATED += len(secrets_rows)
remaining_secrets = TOTAL_SECRETS - TOTAL_MIGRATED
click.echo(
click.style(
- f"Processed {len(secrets_dbes)} records in this batch. "
+ f"Processed {len(secrets_rows)} records in this batch. "
f"Total reverted: {TOTAL_MIGRATED}. Remaining: {remaining_secrets}",
fg="yellow",
)
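
The rewritten migration drops the `SecretsDBE` ORM dependency: it reflects the `secrets` table at runtime, decrypts and re-encrypts the `data` column in SQL via `pgp_sym_decrypt`/`pgp_sym_encrypt`, and pages through rows with an ID-based keyset cursor. A self-contained sketch of that pattern against a generic table; the table name `records`, the column `payload`, and the DSN are placeholders, not the project's schema:

```python
# Illustrative keyset-pagination loop over a reflected table with pgcrypto;
# "records" and "payload" are placeholder names, not the project's schema.
from sqlalchemy import MetaData, Table, create_engine, func, select, update

engine = create_engine("postgresql+psycopg2://user:pass@localhost/db")  # placeholder DSN
BATCH_SIZE = 500
ENCRYPTION_KEY = "example-key"  # in the real migration this comes from env.agenta.crypt_key

with engine.begin() as conn:
    records = Table("records", MetaData(), autoload_with=conn)  # runtime reflection

    last_id = None
    while True:
        decrypted = func.pgp_sym_decrypt(records.c.payload, ENCRYPTION_KEY).label("payload")
        stmt = select(records.c.id, decrypted).order_by(records.c.id).limit(BATCH_SIZE)
        if last_id is not None:
            stmt = stmt.where(records.c.id > last_id)  # keyset cursor: strictly after last row

        rows = conn.execute(stmt).fetchall()
        if not rows:
            break

        for row in rows:
            last_id = row.id
            transformed = row.payload  # transform the decrypted JSON here
            conn.execute(
                update(records)
                .where(records.c.id == row.id)
                .values(payload=func.pgp_sym_encrypt(transformed, ENCRYPTION_KEY))
            )
```
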
diff --git a/api/oss/databases/postgres/migrations/core/utils.py b/api/oss/databases/postgres/migrations/core/utils.py
index c5ce9cd838..d47ccf7f8d 100644
--- a/api/oss/databases/postgres/migrations/core/utils.py
+++ b/api/oss/databases/postgres/migrations/core/utils.py
@@ -117,7 +117,9 @@ async def get_pending_migration_head():
def run_alembic_migration():
"""
- Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users.
+ Applies migration for first-time users and also checks the environment variable
+ "ALEMBIC_AUTO_MIGRATIONS" (legacy: "AGENTA_AUTO_MIGRATIONS") to determine whether
+ to apply migrations for returning users.
"""
try:
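
The updated docstring names `ALEMBIC_AUTO_MIGRATIONS` as the primary flag and keeps `AGENTA_AUTO_MIGRATIONS` as a legacy alias. A minimal sketch of reading such a pair with fallback; the helper name and the accepted truthy values are assumptions, since the actual parsing code is not shown in this hunk:

```python
# Hypothetical flag lookup with legacy fallback; the real helper may differ.
import os


def auto_migrations_enabled() -> bool:
    raw = os.getenv("ALEMBIC_AUTO_MIGRATIONS") or os.getenv("AGENTA_AUTO_MIGRATIONS") or "false"
    return raw.strip().lower() in {"1", "true", "yes", "on"}


if auto_migrations_enabled():
    print("Applying Alembic migrations automatically.")
```
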
diff --git a/api/oss/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py b/api/oss/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py
new file mode 100644
index 0000000000..a22123f1e7
--- /dev/null
+++ b/api/oss/databases/postgres/migrations/core/versions/12d23a8f7dde_add_slug_to_organizations.py
@@ -0,0 +1,51 @@
+"""add slug to organizations
+
+Revision ID: 12d23a8f7dde
+Revises: 59b85eb7516c
+Create Date: 2025-12-25 00:00:00.000000+00:00
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "12d23a8f7dde"
+down_revision: Union[str, None] = "59b85eb7516c"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # Add slug column to organizations table
+ op.add_column(
+ "organizations",
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+
+ # Add unique constraint on slug
+ op.create_unique_constraint(
+ "uq_organizations_slug",
+ "organizations",
+ ["slug"],
+ )
+
+ # Add index for faster lookups
+ op.create_index(
+ "ix_organizations_slug",
+ "organizations",
+ ["slug"],
+ )
+
+
+def downgrade() -> None:
+ # Drop in reverse order
+ op.drop_index("ix_organizations_slug", table_name="organizations")
+ op.drop_constraint("uq_organizations_slug", "organizations", type_="unique")
+ op.drop_column("organizations", "slug")
diff --git a/api/oss/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py b/api/oss/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py
new file mode 100644
index 0000000000..f1282a85c0
--- /dev/null
+++ b/api/oss/databases/postgres/migrations/core/versions/59b85eb7516c_add_sso_oidc_tables.py
@@ -0,0 +1,132 @@
+"""add sso oidc tables
+
+Revision ID: 59b85eb7516c
+Revises: 80910d2fa9a4
+Create Date: 2025-12-10 08:53:56.000000+00:00
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "59b85eb7516c"
+down_revision: Union[str, None] = "80910d2fa9a4"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # 1. user_identities table
+ op.create_table(
+ "user_identities",
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "user_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "method",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "subject",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "domain",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ondelete="CASCADE",
+ ),
+ sa.UniqueConstraint(
+ "method",
+ "subject",
+ name="uq_user_identities_method_subject",
+ ),
+ sa.Index(
+ "ix_user_identities_user_method",
+ "user_id",
+ "method",
+ ),
+ sa.Index(
+ "ix_user_identities_domain",
+ "domain",
+ ),
+ )
+
+ # EE-only tables (organization_policies, organization_domains, organization_providers, organization_invitations)
+ # are defined in the EE migration version of this file
+
+ # 2. Add is_active to users table
+ op.add_column(
+ "users",
+ sa.Column(
+ "is_active",
+ sa.Boolean(),
+ nullable=False,
+ server_default="true",
+ ),
+ )
+
+
+def downgrade() -> None:
+ # Drop in reverse order
+ op.drop_column("users", "is_active")
+
+ # EE-only table drops are in the EE migration version of this file
+
+ op.drop_index(
+ "ix_user_identities_domain",
+ table_name="user_identities",
+ )
+ op.drop_index(
+ "ix_user_identities_user_method",
+ table_name="user_identities",
+ )
+ op.drop_table("user_identities")
diff --git a/api/oss/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py b/api/oss/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py
new file mode 100644
index 0000000000..1905a6a320
--- /dev/null
+++ b/api/oss/databases/postgres/migrations/core/versions/a9f3e8b7c5d1_clean_up_organizations.py
@@ -0,0 +1,335 @@
+"""clean up organizations
+
+Revision ID: a9f3e8b7c5d1
+Revises: 12d23a8f7dde
+Create Date: 2025-12-26 00:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy import text
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "a9f3e8b7c5d1"
+down_revision: Union[str, None] = "12d23a8f7dde"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ """
+ Clean up organizations table and introduce new schema.
+
+ Changes:
+ - Add flags (JSONB, nullable) with is_personal and is_demo fields
+ - Migrate type='view-only' to flags.is_demo=true
+ - Set is_personal=false for the single organization
+ - Drop type column
+ - Convert owner (String) to owner_id (UUID, NOT NULL)
+ - Add created_by_id (UUID, NOT NULL)
+ - Ensure created_at is NOT NULL, remove default from updated_at
+ - Add updated_by_id (UUID, nullable)
+ - Add deleted_at (DateTime, nullable)
+ - Add deleted_by_id (UUID, nullable)
+ - Drop user_organizations table (replaced by organization_members)
+ - Drop invitations table (obsolete)
+
+ OSS Mode:
+ - Must have exactly 1 organization (fail-fast if not)
+ - Set is_personal=false (no personal organizations in OSS)
+ """
+ conn = op.get_bind()
+
+ # OSS: Must have exactly 1 organization
+ org_count = conn.execute(text("SELECT COUNT(*) FROM organizations")).scalar()
+
+ if org_count == 0:
+ raise ValueError(
+ "OSS mode: No organizations found. Cannot proceed with migration."
+ )
+ elif org_count > 1:
+ raise ValueError(
+ f"OSS mode: Found {org_count} organizations. OSS supports exactly 1 collaborative organization. "
+ "Please consolidate organizations before migrating."
+ )
+
+ # Step 1: Add JSONB columns (flags, tags, meta - all nullable)
+ op.add_column(
+ "organizations",
+ sa.Column(
+ "flags",
+ postgresql.JSONB(astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column(
+ "meta",
+ postgresql.JSONB(astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+
+ # Step 2: Add new UUID columns (all nullable initially for migration)
+ op.add_column(
+ "organizations",
+ sa.Column("owner_id", postgresql.UUID(as_uuid=True), nullable=True),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column("created_by_id", postgresql.UUID(as_uuid=True), nullable=True),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column("updated_by_id", postgresql.UUID(as_uuid=True), nullable=True),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True),
+ )
+ op.add_column(
+ "organizations",
+ sa.Column("deleted_by_id", postgresql.UUID(as_uuid=True), nullable=True),
+ )
+
+ # Step 3: Migrate type='view-only' to is_demo=true, set is_personal=false
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET flags = jsonb_build_object(
+ 'is_demo', CASE WHEN type = 'view-only' THEN true ELSE false END,
+ 'is_personal', false
+ )
+ WHERE flags IS NULL OR flags = '{}'::jsonb
+ """)
+ )
+
+ # Step 4: Migrate owner (String) to owner_id (UUID)
+ # Set owner_id = owner::uuid for existing org
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET owner_id = owner::uuid
+ WHERE owner IS NOT NULL
+ """)
+ )
+
+ # Step 5: Set created_by_id = owner_id for existing org
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET created_by_id = owner_id
+ WHERE owner_id IS NOT NULL
+ """)
+ )
+
+ # Step 6: Set updated_by_id = owner_id for existing org
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET updated_by_id = owner_id
+ WHERE owner_id IS NOT NULL
+ """)
+ )
+
+ # Step 7: Ensure created_at has a value for all existing records
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET created_at = COALESCE(created_at, NOW())
+ WHERE created_at IS NULL
+ """)
+ )
+
+ # Step 8: Make owner_id, created_by_id, and created_at NOT NULL; remove updated_at default
+ op.alter_column("organizations", "owner_id", nullable=False)
+ op.alter_column("organizations", "created_by_id", nullable=False)
+ op.alter_column("organizations", "created_at", nullable=False)
+ op.alter_column("organizations", "updated_at", server_default=None)
+
+ # Step 9: Add foreign key constraints
+ op.create_foreign_key(
+ "fk_organizations_owner_id_users",
+ "organizations",
+ "users",
+ ["owner_id"],
+ ["id"],
+ ondelete="RESTRICT",
+ )
+ op.create_foreign_key(
+ "fk_organizations_created_by_id_users",
+ "organizations",
+ "users",
+ ["created_by_id"],
+ ["id"],
+ ondelete="RESTRICT",
+ )
+ op.create_foreign_key(
+ "fk_organizations_updated_by_id_users",
+ "organizations",
+ "users",
+ ["updated_by_id"],
+ ["id"],
+ ondelete="SET NULL",
+ )
+ op.create_foreign_key(
+ "fk_organizations_deleted_by_id_users",
+ "organizations",
+ "users",
+ ["deleted_by_id"],
+ ["id"],
+ ondelete="SET NULL",
+ )
+
+ # Step 9b: Ensure workspaces cascade on organization delete
+ try:
+ op.drop_constraint(
+ "workspaces_organization_id_fkey",
+ "workspaces",
+ type_="foreignkey",
+ )
+ except Exception:
+ pass # Constraint might not exist yet
+ op.create_foreign_key(
+ "workspaces_organization_id_fkey",
+ "workspaces",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ # Step 9c: Ensure workspace_members cascade on workspace delete
+ try:
+ op.drop_constraint(
+ "workspace_members_workspace_id_fkey",
+ "workspace_members",
+ type_="foreignkey",
+ )
+ except Exception:
+ pass # Constraint might not exist yet
+ op.create_foreign_key(
+ "workspace_members_workspace_id_fkey",
+ "workspace_members",
+ "workspaces",
+ ["workspace_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ # Step 9d: Ensure projects cascade on organization delete
+ try:
+ op.drop_constraint(
+ "projects_organization_id_fkey",
+ "projects",
+ type_="foreignkey",
+ )
+ except Exception:
+ pass # Constraint might not exist yet
+ op.create_foreign_key(
+ "projects_organization_id_fkey",
+ "projects",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+ # Note: Other tables (testsets, evaluations, scenarios, etc.) are linked to
+ # organizations via projects, so they will cascade delete through projects.
+ # They should keep SET NULL on organization_id for direct references.
+
+ # Step 10: Drop type and owner columns
+ op.drop_column("organizations", "type")
+ op.drop_column("organizations", "owner")
+
+ # Step 11: Drop obsolete tables
+ conn.execute(text("DROP TABLE IF EXISTS user_organizations CASCADE"))
+ conn.execute(text("DROP TABLE IF EXISTS invitations CASCADE"))
+
+
+def downgrade() -> None:
+ """Restore organizations type and owner columns and revert schema changes."""
+ conn = op.get_bind()
+
+ # Drop foreign key constraints
+ op.drop_constraint(
+ "fk_organizations_deleted_by_id_users", "organizations", type_="foreignkey"
+ )
+ op.drop_constraint(
+ "fk_organizations_updated_by_id_users", "organizations", type_="foreignkey"
+ )
+ op.drop_constraint(
+ "fk_organizations_created_by_id_users", "organizations", type_="foreignkey"
+ )
+ op.drop_constraint(
+ "fk_organizations_owner_id_users", "organizations", type_="foreignkey"
+ )
+
+ # Recreate type column
+ op.add_column("organizations", sa.Column("type", sa.String(), nullable=True))
+
+ # Migrate flags back to type
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET type = CASE
+ WHEN flags->>'is_demo' = 'true' THEN 'view-only'
+ ELSE 'default'
+ END
+ """)
+ )
+
+ op.alter_column("organizations", "type", nullable=False)
+
+ # Recreate owner column
+ op.add_column("organizations", sa.Column("owner", sa.String(), nullable=True))
+
+ # Migrate owner_id back to owner (UUID to String)
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET owner = owner_id::text
+ WHERE owner_id IS NOT NULL
+ """)
+ )
+
+ # Restore updated_at default
+ conn.execute(
+ text("""
+ UPDATE organizations
+ SET updated_at = COALESCE(updated_at, NOW())
+ WHERE updated_at IS NULL
+ """)
+ )
+ op.alter_column(
+ "organizations",
+ "updated_at",
+ server_default=sa.text("NOW()"),
+ nullable=False,
+ )
+
+ # Drop new columns
+ op.drop_column("organizations", "deleted_by_id")
+ op.drop_column("organizations", "deleted_at")
+ op.drop_column("organizations", "updated_by_id")
+ op.drop_column("organizations", "created_by_id")
+ op.drop_column("organizations", "owner_id")
+ op.drop_column("organizations", "meta")
+ op.drop_column("organizations", "tags")
+ op.drop_column("organizations", "flags")
+
+ # Note: We don't recreate user_organizations and invitations tables
+ # as they contain no data at this point
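
The upgrade folds the old `type` column into the JSONB `flags` column (`is_demo`, `is_personal`) and the downgrade maps the flags back to a `type` value. A pure-Python restatement of that mapping, mirroring the SQL in the migration above:

```python
# Round-trip of the type <-> flags mapping used by this migration (pure-Python illustration).
def type_to_flags(org_type: str | None) -> dict:
    return {"is_demo": org_type == "view-only", "is_personal": False}


def flags_to_type(flags: dict | None) -> str:
    return "view-only" if (flags or {}).get("is_demo") is True else "default"


assert type_to_flags("view-only") == {"is_demo": True, "is_personal": False}
assert flags_to_type({"is_demo": True, "is_personal": False}) == "view-only"
assert flags_to_type({"is_demo": False, "is_personal": False}) == "default"
```
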
diff --git a/api/oss/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py b/api/oss/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py
new file mode 100644
index 0000000000..d62826b2f5
--- /dev/null
+++ b/api/oss/databases/postgres/migrations/core/versions/c3b2a1d4e5f6_add_secret_org_scope.py
@@ -0,0 +1,48 @@
+"""add organization scope to secrets
+
+Revision ID: c3b2a1d4e5f6
+Revises: a9f3e8b7c5d1
+Create Date: 2025-01-10 00:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision: str = "c3b2a1d4e5f6"
+down_revision: Union[str, None] = "a9f3e8b7c5d1"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ connection = op.get_bind()
+
+ op.execute("ALTER TYPE secretkind_enum ADD VALUE IF NOT EXISTS 'SSO_PROVIDER'")
+
+ inspector = sa.inspect(connection)
+ columns = {col["name"] for col in inspector.get_columns("secrets")}
+
+ if "organization_id" not in columns:
+ op.add_column("secrets", sa.Column("organization_id", sa.UUID(), nullable=True))
+
+ op.alter_column("secrets", "project_id", nullable=True)
+
+ op.create_foreign_key(
+ "secrets_organization_id_fkey",
+ "secrets",
+ "organizations",
+ ["organization_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+
+
+def downgrade() -> None:
+ op.drop_constraint("secrets_organization_id_fkey", "secrets", type_="foreignkey")
+ op.drop_column("secrets", "organization_id")
+ op.alter_column("secrets", "project_id", nullable=False)
diff --git a/api/oss/databases/postgres/migrations/tracing/utils.py b/api/oss/databases/postgres/migrations/tracing/utils.py
index b6065ac8a0..10815051c3 100644
--- a/api/oss/databases/postgres/migrations/tracing/utils.py
+++ b/api/oss/databases/postgres/migrations/tracing/utils.py
@@ -109,7 +109,9 @@ async def get_pending_migration_head():
def run_alembic_migration():
"""
- Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users.
+ Applies migration for first-time users and also checks the environment variable
+ "ALEMBIC_AUTO_MIGRATIONS" (legacy: "AGENTA_AUTO_MIGRATIONS") to determine whether
+ to apply migrations for returning users.
"""
try:
diff --git a/api/oss/src/__init__.py b/api/oss/src/__init__.py
index f76b6b286a..9712dde450 100644
--- a/api/oss/src/__init__.py
+++ b/api/oss/src/__init__.py
@@ -6,9 +6,6 @@
from supertokens_python import init, InputAppInfo, SupertokensConfig
from supertokens_python.asyncio import get_user as get_user_from_supertokens
from supertokens_python.recipe.thirdparty import (
- ProviderInput,
- ProviderConfig,
- ProviderClientConfig,
SignInAndUpFeature,
)
from supertokens_python.recipe import (
@@ -193,6 +190,8 @@ async def _create_account(email: str, uid: str) -> None:
- Organization assignment (OSS only)
- Account creation
+ This function is idempotent - if the user already exists, it returns early.
+
Args:
email: The user's normalized email address
uid: The SuperTokens user ID
@@ -200,6 +199,11 @@ async def _create_account(email: str, uid: str) -> None:
Raises:
UnauthorizedException: If email is blocked or user not invited (OSS only)
"""
+ # Check if user already exists (idempotent - skip if adding new auth method)
+ existing_user = await get_user_with_email(email=email)
+ if existing_user is not None:
+ return
+
# Check email blocking (EE only)
if is_ee() and await _is_blocked(email):
raise UnauthorizedException(detail="This email is not allowed.")
@@ -207,6 +211,8 @@ async def _create_account(email: str, uid: str) -> None:
payload = {
"uid": uid,
"email": email,
+ "name": "Personal",
+ "is_personal": True,
}
# For OSS: compute organization before calling create_accounts
@@ -269,6 +275,8 @@ async def consume_code_post(
if isinstance(response, ConsumeCodeOkResult):
email = response.user.emails[0].lower()
await _create_account(email, response.user.id)
+ # Note: Identity tracking is now handled by the recipe-level override (override_passwordless_functions)
+ # which runs before session creation and properly injects user_identities into the JWT payload
return response
@@ -304,6 +312,8 @@ async def thirdparty_sign_in_up_post(
if isinstance(response, SignInUpPostOkResult):
email = response.user.emails[0].lower()
await _create_account(email, response.user.id)
+ # Note: Identity tracking is now handled by the recipe-level override (override_thirdparty_functions)
+ # which runs before session creation and properly injects user_identities into the JWT payload
return response
@@ -361,14 +371,27 @@ async def sign_up_post(
api_options: EmailPasswordAPIOptions,
user_context: Dict[str, Any],
):
- # FLOW 1: Sign in (redirect existing users)
+ # FLOW 1: Sign in (redirect existing users with emailpassword credential)
email = form_fields[0].value.lower()
if is_ee() and await _is_blocked(email):
raise UnauthorizedException(detail="This email is not allowed.")
user_info_from_st = await list_users_by_account_info(
tenant_id="public", account_info=AccountInfo(email=email)
)
- if len(user_info_from_st) >= 1 or await get_user_with_email(email=email):
+
+ # Check if user has an emailpassword login method
+ has_emailpassword_method = False
+ for user in user_info_from_st:
+ for lm in user.login_methods:
+ if lm.recipe_id == "emailpassword":
+ has_emailpassword_method = True
+ break
+ if has_emailpassword_method:
+ break
+
+ # Only redirect to sign_in if user has emailpassword credential
+ # This allows users who signed up via OAuth to add email/password
+ if has_emailpassword_method:
return await sign_in_post(
form_fields,
tenant_id,
@@ -388,20 +411,14 @@ async def sign_up_post(
user_context,
)
- # FLOW 3: Create application user (organization assignment is handled in create_accounts)
+ # FLOW 3: Create application user (idempotent - skips if user exists)
if isinstance(response, EmailPasswordSignUpPostOkResult):
- # sign up successful
actual_email = ""
for field in form_fields:
if field.id == "email":
actual_email = field.value
- if actual_email == "":
- # User did not provide an email.
- # This is possible since we set optional: true
- # in the form field config
- pass
- else:
+ if actual_email != "":
email = (
actual_email
if "@" in actual_email
@@ -439,7 +456,7 @@ def _init_supertokens():
# Validate auth configuration
try:
- env.auth.validate()
+ env.auth.validate_config()
except ValueError as e:
logger.error(f"[AUTH CONFIG ERROR] {e}")
raise
@@ -449,6 +466,10 @@ def _init_supertokens():
# Email Password Authentication
if env.auth.email_method == "password":
+ from oss.src.core.auth.supertokens_overrides import (
+ override_emailpassword_functions,
+ )
+
logger.info("✓ Email/Password authentication enabled")
recipe_list.append(
emailpassword.init(
@@ -466,87 +487,80 @@ def _init_supertokens():
),
override=InputOverrideConfig(
apis=override_password_apis,
+ functions=override_emailpassword_functions,
),
)
)
# Email OTP Authentication
if env.auth.email_method == "otp":
+ from oss.src.core.auth.supertokens_overrides import (
+ override_passwordless_functions,
+ )
+
logger.info("✓ Email/OTP authentication enabled")
recipe_list.append(
passwordless.init(
flow_type="USER_INPUT_CODE",
contact_config=ContactEmailOnlyConfig(),
override=passwordless.InputOverrideConfig(
- functions=override_passwordless_apis
+ apis=override_passwordless_apis,
+ functions=override_passwordless_functions,
),
)
)
# Third-Party OIDC Authentication
- oidc_providers = []
- if env.auth.google_enabled:
- logger.info("✓ Google OAuth enabled")
- oidc_providers.append(
- ProviderInput(
- config=ProviderConfig(
- third_party_id="google",
- clients=[
- ProviderClientConfig(
- client_id=env.auth.google_oauth_client_id,
- client_secret=env.auth.google_oauth_client_secret,
- ),
- ],
- ),
- )
- )
-
- if env.auth.github_enabled:
- logger.info("✓ GitHub OAuth enabled")
- oidc_providers.append(
- ProviderInput(
- config=ProviderConfig(
- third_party_id="github",
- clients=[
- ProviderClientConfig(
- client_id=env.auth.github_oauth_client_id,
- client_secret=env.auth.github_oauth_client_secret,
- )
- ],
- ),
- )
- )
+ # Always initialize thirdparty recipe for dynamic OIDC support (EE)
+ from oss.src.core.auth.supertokens_config import get_thirdparty_providers
+ from oss.src.core.auth.supertokens_overrides import override_thirdparty_functions
+ from oss.src.utils.common import is_ee
+ oidc_providers = get_thirdparty_providers()
if oidc_providers:
+ enabled_providers = [
+ provider.config.third_party_id for provider in oidc_providers
+ ]
+ logger.info("✓ OIDC providers enabled: %s", ", ".join(enabled_providers))
+
+ # Initialize thirdparty recipe if we have static providers OR if EE is enabled (for dynamic OIDC)
+ if oidc_providers or is_ee():
recipe_list.append(
thirdparty.init(
sign_in_and_up_feature=SignInAndUpFeature(providers=oidc_providers),
- override=thirdparty.InputOverrideConfig(apis=override_thirdparty_apis),
+ override=thirdparty.InputOverrideConfig(
+ apis=override_thirdparty_apis,
+ functions=override_thirdparty_functions,
+ ),
)
)
+ if is_ee() and not oidc_providers:
+ logger.info("✓ Third-party recipe enabled for dynamic OIDC (EE)")
# Sessions always required if auth is enabled
+ from oss.src.core.auth.supertokens_overrides import override_session_functions
+
recipe_list.append(
- session.init(expose_access_token_to_frontend_in_cookie_based_auth=True)
+ session.init(
+ expose_access_token_to_frontend_in_cookie_based_auth=True,
+ override=session.InputOverrideConfig(
+ functions=override_session_functions,
+ ),
+ )
)
# Dashboard for admin management
recipe_list.append(dashboard.init())
# Initialize SuperTokens with selected recipes
+ from oss.src.core.auth.supertokens_config import (
+ get_app_info,
+ get_supertokens_config,
+ )
+
init(
- app_info=InputAppInfo(
- app_name="agenta",
- api_domain=api_domain,
- website_domain=env.agenta.web_url,
- api_gateway_path=api_gateway_path,
- api_base_path="/auth/",
- website_base_path="/auth",
- ),
- supertokens_config=SupertokensConfig(
- uri_core=env.supertokens.uri_core,
- api_key=env.supertokens.api_key,
- ),
+ app_info=get_app_info(),
+ supertokens_config=SupertokensConfig(**get_supertokens_config()),
framework="fastapi",
recipe_list=recipe_list,
mode="asgi",
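
The inline Google/GitHub `ProviderInput` construction is replaced by `get_thirdparty_providers()` from `oss.src.core.auth.supertokens_config`, which is not included in this diff. A hedged reconstruction of what the static-provider half of that helper might look like, reusing the env fields and SuperTokens classes from the removed code; the dynamic EE providers mentioned in the comments are omitted:

```python
# Hypothetical sketch of get_thirdparty_providers(); the real supertokens_config module may differ.
from typing import List

from supertokens_python.recipe.thirdparty import (
    ProviderClientConfig,
    ProviderConfig,
    ProviderInput,
)

from oss.src.utils.env import env


def get_thirdparty_providers() -> List[ProviderInput]:
    providers: List[ProviderInput] = []

    if env.auth.google_enabled:
        providers.append(
            ProviderInput(
                config=ProviderConfig(
                    third_party_id="google",
                    clients=[
                        ProviderClientConfig(
                            client_id=env.auth.google_oauth_client_id,
                            client_secret=env.auth.google_oauth_client_secret,
                        )
                    ],
                )
            )
        )

    if env.auth.github_enabled:
        providers.append(
            ProviderInput(
                config=ProviderConfig(
                    third_party_id="github",
                    clients=[
                        ProviderClientConfig(
                            client_id=env.auth.github_oauth_client_id,
                            client_secret=env.auth.github_oauth_client_secret,
                        )
                    ],
                )
            )
        )

    return providers
```
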
diff --git a/api/oss/src/apis/fastapi/auth/__init__.py b/api/oss/src/apis/fastapi/auth/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/oss/src/apis/fastapi/auth/models.py b/api/oss/src/apis/fastapi/auth/models.py
new file mode 100644
index 0000000000..6013b2329e
--- /dev/null
+++ b/api/oss/src/apis/fastapi/auth/models.py
@@ -0,0 +1,46 @@
+from pydantic import BaseModel, EmailStr
+from typing import Optional, List, Dict
+
+
+# ============================================================================
+# AUTH DISCOVER
+# ============================================================================
+
+
+class DiscoverRequest(BaseModel):
+ email: EmailStr
+
+
+class SSOProviderInfo(BaseModel):
+ id: str
+ slug: str
+ third_party_id: str
+
+
+class SSOProviders(BaseModel):
+ providers: List[SSOProviderInfo]
+
+
+class DiscoverResponse(BaseModel):
+ exists: bool
+ methods: Dict[str, bool | SSOProviders]
+
+
+# ============================================================================
+# OIDC AUTHORIZE
+# ============================================================================
+
+
+class OIDCAuthorizeRequest(BaseModel):
+ provider_id: str
+ redirect: Optional[str] = "/"
+
+
+# ============================================================================
+# OIDC CALLBACK
+# ============================================================================
+
+
+class OIDCCallbackRequest(BaseModel):
+ code: str
+ state: str
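
A quick illustration of the payload shapes these models describe. The concrete method keys (`"password"`, `"sso"`) are assumptions based on the auth methods configured elsewhere in this diff; the `third_party_id` format follows the `sso:{organization_slug}:{provider_slug}` convention documented in the router below:

```python
# Example payload built from the models above; the method keys shown are illustrative.
from oss.src.apis.fastapi.auth.models import (
    DiscoverResponse,
    SSOProviderInfo,
    SSOProviders,
)

response = DiscoverResponse(
    exists=True,
    methods={
        "password": True,  # email/password sign-in available
        "sso": SSOProviders(
            providers=[
                SSOProviderInfo(
                    id="3f8e...",  # organization_providers UUID (truncated placeholder)
                    slug="okta",
                    third_party_id="sso:acme:okta",
                )
            ]
        ),
    },
)

print(response.model_dump())
```
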
diff --git a/api/oss/src/apis/fastapi/auth/router.py b/api/oss/src/apis/fastapi/auth/router.py
new file mode 100644
index 0000000000..1697d8c582
--- /dev/null
+++ b/api/oss/src/apis/fastapi/auth/router.py
@@ -0,0 +1,339 @@
+from fastapi import APIRouter, HTTPException, Request
+from fastapi.responses import RedirectResponse
+from pydantic import BaseModel
+from supertokens_python.recipe.session.asyncio import get_session
+
+from oss.src.apis.fastapi.auth.models import (
+ DiscoverRequest,
+ DiscoverResponse,
+)
+from oss.src.core.auth.service import AuthService
+from oss.src.utils.common import is_ee
+from oss.src.utils.logging import get_module_logger
+
+
+auth_router = APIRouter()
+auth_service = AuthService()
+log = get_module_logger(__name__)
+
+
+class SessionIdentitiesUpdate(BaseModel):
+ session_identities: list[str]
+
+
+@auth_router.post("/discover", response_model=DiscoverResponse)
+async def discover(request: DiscoverRequest):
+ """
+ Discover authentication methods available for a given email.
+
+ This endpoint does NOT reveal:
+ - Organization names
+ - User existence (optional; the current implementation does reveal it for UX)
+ - Detailed policy information
+
+ Returns the minimal information needed for the authentication flow.
+ """
+ try:
+ result = await auth_service.discover(request.email)
+ return DiscoverResponse(**result)
+ except Exception as e:
+ import traceback
+
+ print(f"❌ Discovery error: {e}")
+ print(traceback.format_exc())
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@auth_router.get("/organization/access")
+async def check_organization_access(request: Request, organization_id: str):
+ """
+ Check if the current session satisfies the organization's auth policy.
+
+ Returns 200 when access is allowed, 403 with AUTH_UPGRADE_REQUIRED when not.
+ """
+ try:
+ session = await get_session(request) # type: ignore
+ except Exception:
+ raise HTTPException(status_code=401, detail="Unauthorized")
+
+ payload = session.get_access_token_payload() if session else {}
+ session_identities = payload.get("session_identities") or []
+ user_identities = payload.get("user_identities", [])
+
+ try:
+ from uuid import UUID
+ from oss.src.services import db_manager
+
+ user_uid = session.get_user_id()
+ user = await db_manager.get_user_with_uid(user_uid)
+ if not user:
+ raise HTTPException(
+ status_code=403,
+ detail={
+ "error": "AUTH_DOMAIN_DENIED",
+ "message": "Organization available but access restricted to verified domain(s).",
+ "required_methods": [],
+ },
+ )
+
+ user_id = UUID(str(user.id))
+ org_id = UUID(organization_id)
+ except HTTPException:
+ raise
+ except Exception:
+ raise HTTPException(status_code=400, detail="Invalid organization_id")
+
+ policy_error = await auth_service.check_organization_access(
+ user_id, org_id, session_identities
+ )
+
+ if policy_error and policy_error.get("error") in {
+ "AUTH_UPGRADE_REQUIRED",
+ "AUTH_SSO_DENIED",
+ "AUTH_DOMAIN_DENIED",
+ }:
+ detail = {
+ "error": policy_error.get("error"),
+ "message": policy_error.get("message"),
+ "required_methods": policy_error.get("required_methods", []),
+ "session_identities": session_identities,
+ "user_identities": user_identities,
+ "sso_providers": policy_error.get("sso_providers", []),
+ "current_domain": policy_error.get("current_domain"),
+ "allowed_domains": policy_error.get("allowed_domains", []),
+ }
+ raise HTTPException(status_code=403, detail=detail)
+
+ return {"ok": True}
+
+
+@auth_router.post("/session/identities")
+async def update_session_identities(request: Request, payload: SessionIdentitiesUpdate):
+ try:
+ session = await get_session(request) # type: ignore
+ except Exception:
+ raise HTTPException(status_code=401, detail="Unauthorized")
+
+ access_payload = session.get_access_token_payload() if session else {}
+ current = access_payload.get("session_identities") or []
+ merged = list(dict.fromkeys(current + payload.session_identities))
+ log.debug(
+ "[AUTH-IDENTITY] session_identities update",
+ {
+ "user_id": session.get_user_id() if session else None,
+ "current": current,
+ "incoming": payload.session_identities,
+ "merged": merged,
+ },
+ )
+
+ if hasattr(session, "update_access_token_payload"):
+ access_payload["session_identities"] = merged
+ await session.update_access_token_payload(access_payload)
+ elif hasattr(session, "merge_into_access_token_payload"):
+ await session.merge_into_access_token_payload({"session_identities": merged})
+ else:
+ raise HTTPException(
+ status_code=500, detail="Session payload update not supported"
+ )
+ return {"session_identities": merged, "previous": current}
+
+
+@auth_router.get("/authorize/oidc")
+async def oidc_authorize(request: Request, provider_id: str, redirect: str = "/"):
+ """
+ Initiate OIDC/SSO authorization flow using SuperTokens third-party recipe (EE only).
+
+ Query params:
+ - provider_id: UUID of the organization_providers entry
+ - redirect: Where to redirect after successful authentication (stored in state)
+
+ This endpoint redirects to SuperTokens third-party signinup with:
+ - third_party_id: "sso:{organization_slug}:{provider_slug}"
+ - redirect_uri: Frontend URL after authentication
+
+ SuperTokens will handle:
+ 1. Building OIDC authorization URL (via our get_dynamic_oidc_provider)
+ 2. Redirecting user to IdP
+ 3. Handling callback at /auth/callback/sso:{organization_slug}:{provider_slug}
+ 4. Creating session with user_identities (via our overrides)
+ 5. Redirecting to frontend
+ """
+ if not is_ee():
+ raise HTTPException(
+ status_code=404,
+ detail="SSO/OIDC is only available in Enterprise Edition",
+ )
+
+ try:
+ # Get provider to build third_party_id
+ from uuid import UUID
+ from ee.src.dbs.postgres.organizations.dao import OrganizationProvidersDAO
+ import httpx
+
+ from oss.src.utils.env import env
+ from oss.src.utils.helpers import parse_url
+
+ providers_dao = OrganizationProvidersDAO()
+ provider = await providers_dao.get_by_id_any(str(provider_id))
+
+ if not provider or not (provider.flags and provider.flags.get("is_active")):
+ raise HTTPException(
+ status_code=404, detail="Provider not found or disabled"
+ )
+
+ from oss.src.services import db_manager
+
+ organization = await db_manager.get_organization_by_id(
+ str(provider.organization_id)
+ )
+ if not organization or not organization.slug:
+ raise HTTPException(
+ status_code=400,
+ detail="Organization slug is required for SSO providers",
+ )
+
+ # Build third_party_id for SuperTokens
+ # Format: "sso:{organization_slug}:{provider_slug}"
+ third_party_id = f"sso:{organization.slug}:{provider.slug}"
+
+ callback_url = (
+ f"{env.agenta.web_url.rstrip('/')}/auth/callback/{third_party_id}"
+ )
+ print(f"[OIDC-AUTH] Expected redirect URI: {callback_url}")
+ api_url = parse_url(env.agenta.api_url)
+ request_base_url = str(request.base_url).rstrip("/")
+
+ authorisation_urls = [
+ f"{request_base_url}/auth/authorisationurl",
+ f"{api_url}/auth/authorisationurl",
+ ]
+
+ print(
+ "[OIDC-AUTH] Request context: "
+ f"request_url={request.url} base_url={request_base_url} api_url={api_url} "
+ f"candidates={authorisation_urls}"
+ )
+
+ response = None
+ async with httpx.AsyncClient(timeout=10.0) as client:
+ for candidate in authorisation_urls:
+ print(
+ f"[OIDC-AUTH] Resolving auth URL. third_party_id={third_party_id} "
+ f"authorisation_url={candidate} callback_url={callback_url}"
+ )
+ try:
+ response = await client.get(
+ candidate,
+ params={
+ "thirdPartyId": third_party_id,
+ "redirectURIOnProviderDashboard": callback_url,
+ },
+ )
+ except Exception as exc:
+ print(f"[OIDC-AUTH] Request failed for {candidate}: {exc}")
+ continue
+ content_type = response.headers.get("content-type", "")
+ print(
+ f"[OIDC-AUTH] SuperTokens response status={response.status_code} "
+ f"content_type={content_type} body={response.text}"
+ )
+ if response.status_code == 200 and "application/json" in content_type:
+ break
+
+ if not response or response.status_code != 200:
+ raise HTTPException(
+ status_code=502,
+ detail="Failed to fetch authorization URL from auth provider.",
+ )
+
+ data = response.json()
+ redirect_url = data.get("urlWithQueryParams") or data.get("url")
+ if not redirect_url:
+ raise HTTPException(
+ status_code=502,
+ detail="Auth provider response missing authorization URL.",
+ )
+
+ return RedirectResponse(url=redirect_url, status_code=302)
+
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@auth_router.get("/sso/callback/{organization_slug}/{provider_slug}")
+async def sso_callback_redirect(
+ organization_slug: str, provider_slug: str, request: Request
+):
+ """
+ Custom SSO callback endpoint that redirects to SuperTokens.
+
+ This endpoint:
+ 1. Accepts clean URL path: /auth/sso/callback/{organization_slug}/{provider_slug}
+ 2. Validates the organization and provider exist
+ 3. Builds SuperTokens thirdPartyId: sso:{organization_slug}:{provider_slug}
+ 4. Redirects to SuperTokens callback: /auth/callback/{thirdPartyId}
+
+ SuperTokens then handles:
+ 1. Exchange code for tokens (using our dynamic provider config)
+ 2. Get user info
+ 3. Call our sign_in_up override (creates user_identity, adds user_identities to session)
+ 4. Redirect to frontend with session cookie
+ """
+ if not is_ee():
+ raise HTTPException(
+ status_code=404,
+ detail="SSO/OIDC is only available in Enterprise Edition",
+ )
+
+ try:
+ from ee.src.dbs.postgres.organizations.dao import OrganizationProvidersDAO
+ from oss.src.services import db_manager
+
+ # Validate organization exists
+ organization = await db_manager.get_organization_by_slug(organization_slug)
+ if not organization:
+ raise HTTPException(
+ status_code=404,
+ detail=f"Organization '{organization_slug}' not found",
+ )
+
+ # Validate provider exists and is active
+ providers_dao = OrganizationProvidersDAO()
+ provider = await providers_dao.get_by_slug(provider_slug, str(organization.id))
+
+ if not provider:
+ raise HTTPException(
+ status_code=404,
+ detail=f"SSO provider '{provider_slug}' not found for organization '{organization_slug}'",
+ )
+
+ if not (provider.flags and provider.flags.get("is_active")):
+ raise HTTPException(
+ status_code=400,
+ detail=f"SSO provider '{provider_slug}' is not active",
+ )
+
+ # Build thirdPartyId and redirect to SuperTokens callback
+ third_party_id = f"sso:{organization.slug}:{provider.slug}"
+
+ # Get the original query parameters from the IdP callback (code, state, etc.)
+ # SuperTokens expects them at /auth/callback/{thirdPartyId}?code=...&state=...
+ query_params = request.query_params
+
+ # Build SuperTokens callback URL with query params
+ supertokens_callback_url = f"/auth/callback/{third_party_id}"
+ if query_params:
+ query_string = "&".join(f"{k}={v}" for k, v in query_params.items())
+ supertokens_callback_url = f"{supertokens_callback_url}?{query_string}"
+
+ return RedirectResponse(url=supertokens_callback_url, status_code=302)
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+# Note: Final SSO callback is handled by SuperTokens at /auth/callback/{thirdPartyId}
+# After our custom endpoint redirects to it with the code and state parameters
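
A hedged client-side sketch of calling the new `/auth/discover` endpoint with `httpx`; the base URL is a placeholder, and the `/auth` prefix matches the router mount added in the entrypoint hunk above:

```python
# Minimal client sketch for the discover endpoint; base URL is a placeholder.
import httpx

BASE_URL = "http://localhost:8000"  # placeholder; use the deployment's API URL

with httpx.Client(base_url=BASE_URL, timeout=10.0) as client:
    resp = client.post("/auth/discover", json={"email": "user@example.com"})
    resp.raise_for_status()
    body = resp.json()
    # body["exists"] indicates whether the account is known;
    # body["methods"] maps each auth method to availability (bool or SSO provider list).
    print(body["exists"], body["methods"])
```
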
diff --git a/api/oss/src/apis/fastapi/shared/utils.py b/api/oss/src/apis/fastapi/shared/utils.py
index b7e2c4300b..b231e2d440 100644
--- a/api/oss/src/apis/fastapi/shared/utils.py
+++ b/api/oss/src/apis/fastapi/shared/utils.py
@@ -44,6 +44,7 @@ def compute_next_windowing(
entities: List[Any],
attribute: str,
windowing: Optional[Windowing],
+ order: str = "ascending",
) -> Optional[Windowing]:
if not windowing or not windowing.limit or not entities:
return None
@@ -68,20 +69,46 @@ def compute_next_windowing(
order_attribute_name = attribute.lower()
- if not time_attribute_value or not id_attribute_value:
+ if not id_attribute_value:
return None
- if order_attribute_name in id_attributes:
- next_value = id_attribute_value
- elif order_attribute_name in time_attributes:
- next_value = time_attribute_value
- else:
- return None
+ # Determine effective order (windowing.order overrides default)
+ effective_order = (windowing.order or order).lower()
- return Windowing(
- newest=windowing.newest,
- oldest=windowing.oldest,
- next=next_value,
- limit=windowing.limit,
- order=windowing.order,
- )
+ # For ID-based ordering (UUID7), just use the ID as cursor
+ if order_attribute_name in id_attributes:
+ return Windowing(
+ newest=windowing.newest,
+ oldest=windowing.oldest,
+ next=id_attribute_value,
+ limit=windowing.limit,
+ order=windowing.order,
+ )
+
+ # For time-based ordering (UUID5/content-hashed IDs), we need both:
+ # - next: the ID for tie-breaking when timestamps are equal
+ # - oldest/newest: the timestamp boundary for the cursor
+ if order_attribute_name in time_attributes:
+ if not time_attribute_value:
+ return None
+
+ if effective_order == "ascending":
+ # Ascending: set oldest to last record's timestamp
+ return Windowing(
+ newest=windowing.newest,
+ oldest=time_attribute_value,
+ next=id_attribute_value,
+ limit=windowing.limit,
+ order=windowing.order,
+ )
+ else:
+ # Descending: set newest to last record's timestamp
+ return Windowing(
+ newest=time_attribute_value,
+ oldest=windowing.oldest,
+ next=id_attribute_value,
+ limit=windowing.limit,
+ order=windowing.order,
+ )
+
+ return None
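
The reworked helper advances an ID-only cursor for UUID7-ordered queries, but for time-ordered queries it also moves the `oldest` or `newest` boundary depending on the effective order. A self-contained restatement of that cursor rule (not the real helper; field names mirror the `Windowing` fields used above):

```python
# Self-contained illustration of the cursor rule implemented above (not the real helper):
# - id-ordered pages advance only the `next` ID;
# - time-ordered ascending pages move `oldest` to the last row's timestamp, with `next` as tie-breaker;
# - time-ordered descending pages move `newest` instead.
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Optional


@dataclass
class Cursor:
    newest: Optional[datetime] = None
    oldest: Optional[datetime] = None
    next: Optional[str] = None
    limit: int = 50
    order: str = "ascending"


def advance(cursor: Cursor, last_id: str, last_created_at: datetime, attribute: str) -> Cursor:
    if attribute == "id":
        return Cursor(cursor.newest, cursor.oldest, last_id, cursor.limit, cursor.order)
    if cursor.order == "ascending":
        return Cursor(cursor.newest, last_created_at, last_id, cursor.limit, cursor.order)
    return Cursor(last_created_at, cursor.oldest, last_id, cursor.limit, cursor.order)


now = datetime.now(timezone.utc)
print(advance(Cursor(order="ascending"), "tc_123", now, attribute="created_at"))
```
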
diff --git a/api/oss/src/apis/fastapi/testcases/router.py b/api/oss/src/apis/fastapi/testcases/router.py
index f56a5cadb4..67fd20159c 100644
--- a/api/oss/src/apis/fastapi/testcases/router.py
+++ b/api/oss/src/apis/fastapi/testcases/router.py
@@ -150,7 +150,7 @@ async def query_testcases(
# Revision not found or has no testcases
return TestcasesResponse()
- testcases = await self.testcases_service.fetch_testcases(
+ testcases = await self.testcases_service.query_testcases(
project_id=UUID(request.state.project_id),
#
testcase_ids=testcase_ids,
@@ -162,8 +162,9 @@ async def query_testcases(
next_windowing = compute_next_windowing(
entities=testcases,
- attribute="id", # UUID7 - use id for cursor-based pagination
+ attribute="created_at", # Testcase IDs are content-hashed (UUID5), use timestamp
windowing=testcases_query_request.windowing,
+ order="ascending", # Must match order used in BlobsDAO.query_blobs
)
testcase_response = TestcasesResponse(
diff --git a/api/oss/src/apis/fastapi/testsets/models.py b/api/oss/src/apis/fastapi/testsets/models.py
index 515eab1f14..3f7273236e 100644
--- a/api/oss/src/apis/fastapi/testsets/models.py
+++ b/api/oss/src/apis/fastapi/testsets/models.py
@@ -23,7 +23,6 @@
TestsetRevisionCreate,
TestsetRevisionEdit,
TestsetRevisionCommit,
- TestsetRevisionPatch,
#
SimpleTestset,
SimpleTestsetCreate,
@@ -61,6 +60,7 @@ class TestsetResponse(BaseModel):
class TestsetsResponse(BaseModel):
count: int = 0
testsets: List[Testset] = []
+ windowing: Optional[Windowing] = None
# TESTSET VARIANTS -------------------------------------------------------------
@@ -126,10 +126,6 @@ class TestsetRevisionCommitRequest(BaseModel):
include_testcases: Optional[bool] = None
-class TestsetRevisionPatchRequest(BaseModel):
- testset_revision_patch: TestsetRevisionPatch
-
-
class TestsetRevisionRetrieveRequest(BaseModel):
testset_ref: Optional[Reference] = None
testset_variant_ref: Optional[Reference] = None
diff --git a/api/oss/src/apis/fastapi/testsets/router.py b/api/oss/src/apis/fastapi/testsets/router.py
index e0e3bf8f2d..ca84ac23b1 100644
--- a/api/oss/src/apis/fastapi/testsets/router.py
+++ b/api/oss/src/apis/fastapi/testsets/router.py
@@ -25,6 +25,8 @@
from oss.src.utils.exceptions import intercept_exceptions, suppress_exceptions
from oss.src.utils.caching import get_cache, set_cache, invalidate_cache
+from oss.src.apis.fastapi.shared.utils import compute_next_windowing
+
from oss.src.core.shared.dtos import (
Reference,
)
@@ -79,7 +81,6 @@
TestsetRevisionRetrieveRequest,
TestsetRevisionCommitRequest,
TestsetRevisionsLogRequest,
- TestsetRevisionPatchRequest,
TestsetRevisionResponse,
TestsetRevisionsResponse,
#
@@ -128,6 +129,143 @@
}
+def _to_plain_dict(value: Any) -> Dict[str, Any]:
+ """Convert a value to a plain Python dict, handling Pydantic models."""
+ if value is None:
+ return {}
+ if hasattr(value, "model_dump"):
+ return value.model_dump()
+ if hasattr(value, "dict"):
+ return value.dict()
+ if isinstance(value, dict):
+ return dict(value) # Make a copy to be safe
+ return {}
+
+
+def _serialize_value(value: Any) -> Any:
+ """Serialize a value to a JSON-safe type.
+
+ Handles Pydantic models, dicts, lists, and primitives.
+ Returns the serialized value (not a JSON string).
+ """
+ if value is None:
+ return None
+ if isinstance(value, (str, int, float, bool)):
+ return value
+ if hasattr(value, "model_dump"):
+ return value.model_dump()
+ if hasattr(value, "dict"):
+ return value.dict()
+ if isinstance(value, dict):
+ return {k: _serialize_value(v) for k, v in value.items()}
+ if isinstance(value, (list, tuple)):
+ return [_serialize_value(v) for v in value]
+ # Fallback: convert to string
+ return str(value)
+
+
+def _serialize_value_for_csv(value: Any) -> Any:
+ """Serialize complex values to JSON strings for CSV export.
+
+ Polars cannot serialize dicts, lists, or other complex objects to CSV,
+ so we convert them to JSON strings. This includes Pydantic models.
+ """
+ if value is None:
+ return ""
+ # Handle primitive types directly
+ if isinstance(value, (str, int, float, bool)):
+ return value
+ # Handle Pydantic models by converting to dict first
+ if hasattr(value, "model_dump"):
+ return orjson.dumps(value.model_dump()).decode("utf-8")
+ if hasattr(value, "dict"):
+ return orjson.dumps(value.dict()).decode("utf-8")
+ # Handle dicts and lists
+ if isinstance(value, (dict, list)):
+ return orjson.dumps(value).decode("utf-8")
+ # Fallback: convert to string
+ return str(value)
+
+
+def _prepare_testcases_for_csv(
+ testcases_data: List[Dict[str, Any]],
+) -> List[Dict[str, Any]]:
+ """Prepare testcases data for CSV export by serializing complex values."""
+ return [
+ {key: _serialize_value_for_csv(val) for key, val in row.items()}
+ for row in testcases_data
+ ]
+
+
+def _drop_empty_export_columns(testcases_data: List[Dict[str, Any]]) -> None:
+ """Drop metadata columns that are None for every row (CSV export only)."""
+ if not testcases_data:
+ return
+ for column in ("__flags__", "__tags__", "__meta__"):
+ if all(row.get(column) is None for row in testcases_data):
+ for row in testcases_data:
+ row.pop(column, None)
+
+
+def _normalize_testcase_dedup_ids(testcases_data: List[Dict[str, Any]]) -> None:
+ """Normalize legacy dedup keys to the canonical testcase_dedup_id field."""
+ for testcase_data in testcases_data:
+ if not isinstance(testcase_data, dict):
+ continue
+ legacy_dedup_id = testcase_data.pop("__dedup_id__", None)
+ existing_dedup_id = testcase_data.get("testcase_dedup_id")
+ if legacy_dedup_id not in (None, "") and existing_dedup_id in (None, ""):
+ testcase_data["testcase_dedup_id"] = legacy_dedup_id
+
+
+def _normalize_testcase_dedup_ids_in_request(
+ testcases: Optional[List[Testcase]],
+) -> None:
+ """Normalize CSV-style dedup keys in JSON body requests."""
+ for testcase in testcases or []:
+ testcase_data = testcase.data
+ if not isinstance(testcase_data, dict):
+ continue
+ legacy_dedup_id = testcase_data.pop("__dedup_id__", None)
+ existing_dedup_id = testcase_data.get("testcase_dedup_id")
+ if legacy_dedup_id not in (None, "") and existing_dedup_id in (None, ""):
+ testcase_data["testcase_dedup_id"] = legacy_dedup_id
+
+
+def _build_testcase_export_row(testcase: Any) -> Dict[str, Any]:
+ """Build a dict for exporting a testcase, properly handling Pydantic models.
+
+ Extracts and serializes all testcase fields into a flat dict suitable for export.
+ """
+ # Extract the data field - handle both Pydantic models and plain dicts
+ data_dict = _to_plain_dict(testcase.data)
+
+ # Serialize all values in the data dict to ensure they're JSON-safe
+ serialized_data = {key: _serialize_value(val) for key, val in data_dict.items()}
+ if "__dedup_id__" not in serialized_data and "testcase_dedup_id" in serialized_data:
+ serialized_data["__dedup_id__"] = serialized_data["testcase_dedup_id"]
+ if "__dedup_id__" in serialized_data and "testcase_dedup_id" in serialized_data:
+ serialized_data.pop("testcase_dedup_id", None)
+
+ export_row = {
+ **serialized_data,
+ "__id__": str(testcase.id) if testcase.id else None,
+ }
+
+ flags = _serialize_value(testcase.flags)
+ tags = _serialize_value(testcase.tags)
+ meta = _serialize_value(testcase.meta)
+
+ if flags is not None:
+ export_row["__flags__"] = flags
+ if tags is not None:
+ export_row["__tags__"] = tags
+ if meta is not None:
+ export_row["__meta__"] = meta
+
+ return export_row
+
+
class TestsetsRouter:
TESTCASES_FLAGS = TestsetFlags(
has_testcases=True,
@@ -331,6 +469,15 @@ def __init__(self, *, testsets_service: TestsetsService):
response_model_exclude=TESTSET_REVISION_RESPONSE_EXCLUDE,
)
+ # POST /api/preview/testsets/revisions/{testset_revision_id}/download
+ self.router.add_api_route(
+ "/revisions/{testset_revision_id}/download",
+ self.fetch_testset_revision_to_file,
+ methods=["POST"],
+ operation_id="fetch_testset_revision_to_file",
+ status_code=status.HTTP_200_OK,
+ )
+
self.router.add_api_route(
"/revisions/query",
self.query_testset_revisions,
@@ -353,16 +500,6 @@ def __init__(self, *, testsets_service: TestsetsService):
response_model_exclude=TESTSET_REVISION_RESPONSE_EXCLUDE,
)
- self.router.add_api_route(
- "/revisions/patch",
- self.patch_testset_revision,
- methods=["POST"],
- operation_id="patch_testset_revision",
- status_code=status.HTTP_200_OK,
- response_model=TestsetRevisionResponse,
- response_model_exclude_none=True,
- )
-
self.router.add_api_route(
"/revisions/log",
self.log_testset_revisions,
@@ -550,9 +687,16 @@ async def query_testsets(
windowing=testset_query_request.windowing,
)
+ next_windowing = compute_next_windowing(
+ entities=testsets,
+ attribute="id", # UUID7 - use id for cursor-based pagination
+ windowing=testset_query_request.windowing,
+ )
+
testsets_response = TestsetsResponse(
count=len(testsets),
testsets=testsets,
+ windowing=next_windowing,
)
return testsets_response
@@ -943,6 +1087,92 @@ async def unarchive_testset_revision(
return testset_revision_response
+ @intercept_exceptions()
+ async def fetch_testset_revision_to_file(
+ self,
+ request: Request,
+ *,
+ testset_revision_id: UUID,
+ #
+ file_type: Optional[Literal["csv", "json"]] = Query(
+ "csv",
+ description="File type to download. Supported: 'csv' or 'json'. Default: 'csv'.",
+ ),
+ file_name: Optional[str] = Query(
+ None,
+ description="Optional custom filename for the download.",
+ ),
+ ) -> StreamingResponse: # type: ignore
+ if is_ee():
+ if not await check_action_access( # type: ignore
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.VIEW_TESTSETS, # type: ignore
+ ):
+ raise FORBIDDEN_EXCEPTION # type: ignore
+
+ if file_type is None or file_type not in ["csv", "json"]:
+ raise HTTPException(
+ status_code=400,
+ detail="Invalid file type. Supported types are 'csv' and 'json'.",
+ )
+
+ # Fetch the revision with testcases
+ testset_revision_response = await self.fetch_testset_revision(
+ request=request,
+ testset_revision_id=testset_revision_id,
+ include_testcases=True,
+ )
+
+ if (
+ not testset_revision_response.count
+ or not testset_revision_response.testset_revision
+ ):
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail="Testset revision not found. Please check the revision_id and try again.",
+ )
+
+ revision = testset_revision_response.testset_revision
+
+ filename = (
+ file_name or f"revision_{testset_revision_id}"
+ ) + f".{file_type.lower()}"
+ testcases = revision.data.testcases if revision.data else []
+
+ # Build export data using helper that properly handles Pydantic models
+ testcases_data = [
+ _build_testcase_export_row(testcase) for testcase in testcases or []
+ ]
+
+ if file_type.lower() == "json":
+ buffer = BytesIO(orjson.dumps(testcases_data))
+
+ return StreamingResponse(
+ buffer,
+ media_type="application/json",
+ headers={"Content-Disposition": f"attachment; filename={filename}"},
+ )
+
+ elif file_type.lower() == "csv":
+ buffer = BytesIO()
+ _drop_empty_export_columns(testcases_data)
+ csv_data = _prepare_testcases_for_csv(testcases_data)
+ pl.DataFrame(csv_data).write_csv(buffer)
+ buffer.seek(0)
+
+ return StreamingResponse(
+ buffer,
+ media_type="text/csv",
+ headers={"Content-Disposition": f"attachment; filename={filename}"},
+ )
+
+ else:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail="Invalid file type. Supported types are 'csv' and 'json'.",
+ )
+
async def query_testset_revisions(
self,
request: Request,
@@ -993,11 +1223,23 @@ async def commit_testset_revision(
):
raise FORBIDDEN_EXCEPTION # type: ignore
+ commit = testset_revision_commit_request.testset_revision_commit
+ if commit.data and commit.delta:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail="Provide either data or delta for a commit, not both.",
+ )
+ if not commit.data and not commit.delta:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail="Provide either data or delta for a commit.",
+ )
+
testset_revision = await self.testsets_service.commit_testset_revision(
project_id=UUID(request.state.project_id),
user_id=UUID(request.state.user_id),
#
- testset_revision_commit=testset_revision_commit_request.testset_revision_commit,
+ testset_revision_commit=commit,
include_testcases=testset_revision_commit_request.include_testcases,
)
@@ -1008,34 +1250,6 @@ async def commit_testset_revision(
return testset_revision_response
- async def patch_testset_revision(
- self,
- request: Request,
- *,
- testset_revision_patch_request: TestsetRevisionPatchRequest,
- ) -> TestsetRevisionResponse:
- if is_ee():
- if not await check_action_access( # type: ignore
- user_uid=request.state.user_id,
- project_id=request.state.project_id,
- permission=Permission.EDIT_EVALUATORS, # type: ignore
- ):
- raise FORBIDDEN_EXCEPTION # type: ignore
-
- testset_revision = await self.testsets_service.patch_testset_revision(
- project_id=UUID(request.state.project_id),
- user_id=UUID(request.state.user_id),
- #
- testset_revision_patch=testset_revision_patch_request.testset_revision_patch,
- )
-
- testset_revision_response = TestsetRevisionResponse(
- count=1 if testset_revision else 0,
- testset_revision=testset_revision,
- )
-
- return testset_revision_response
-
async def log_testset_revisions(
self,
request: Request,
@@ -1211,6 +1425,10 @@ async def create_simple_testset(
):
raise FORBIDDEN_EXCEPTION # type: ignore
+ _normalize_testcase_dedup_ids_in_request(
+ simple_testset_create_request.testset.data.testcases
+ )
+
simple_testset = await self.simple_testsets_service.create(
project_id=UUID(request.state.project_id),
user_id=UUID(request.state.user_id),
@@ -1322,6 +1540,10 @@ async def edit_simple_testset(
if str(testset_id) != str(simple_testset_edit_request.testset.id):
return SimpleTestsetResponse()
+ _normalize_testcase_dedup_ids_in_request(
+ simple_testset_edit_request.testset.data.testcases
+ )
+
simple_testset: Optional[
SimpleTestset
] = await self.simple_testsets_service.edit(
@@ -1605,6 +1827,7 @@ async def create_simple_testset_from_file(
)
try:
+ _normalize_testcase_dedup_ids(testcases_data)
testcases_data = json_array_to_json_object(
data=testcases_data,
testcase_id_key="__id__",
@@ -1737,6 +1960,7 @@ async def edit_simple_testset_from_file(
)
try:
+ _normalize_testcase_dedup_ids(testcases_data)
testcases_data = json_array_to_json_object(
data=testcases_data,
testcase_id_key="__id__",
@@ -1852,15 +2076,9 @@ async def fetch_simple_testset_to_file(
filename = (file_name or f"testset_{testset_id}") + f".{file_type.lower()}"
testcases = testset.data.testcases
+ # Build export data using helper that properly handles Pydantic models
testcases_data = [
- {
- **testcase.data,
- "__id__": testcase.id,
- "__flags__": testcase.flags,
- "__tags__": testcase.tags,
- "__meta__": testcase.meta,
- }
- for testcase in testcases or []
+ _build_testcase_export_row(testcase) for testcase in testcases or []
]
if file_type.lower() == "json":
@@ -1874,7 +2092,10 @@ async def fetch_simple_testset_to_file(
elif file_type.lower() == "csv":
buffer = BytesIO()
- pl.DataFrame(testcases_data).write_csv(buffer)
+ _drop_empty_export_columns(testcases_data)
+ csv_data = _prepare_testcases_for_csv(testcases_data)
+ df = pl.DataFrame(csv_data)
+ df.write_csv(buffer)
buffer.seek(0)
return StreamingResponse(
diff --git a/api/oss/src/apis/fastapi/tracing/router.py b/api/oss/src/apis/fastapi/tracing/router.py
index 2349eef759..45831fdc93 100644
--- a/api/oss/src/apis/fastapi/tracing/router.py
+++ b/api/oss/src/apis/fastapi/tracing/router.py
@@ -9,6 +9,8 @@
from oss.src.utils.exceptions import intercept_exceptions, suppress_exceptions
from oss.src.utils.caching import get_cache, set_cache, invalidate_cache
+from oss.src.core.tracing.dtos import ListOperator, ComparisonOperator, Condition
+
from oss.src.apis.fastapi.tracing.utils import (
merge_queries,
parse_query_from_params_request,
@@ -225,17 +227,26 @@ async def query_spans( # QUERY
merged_query = merge_queries(query, query_from_body)
- try:
- span_dtos = await self.service.query(
+ # Optimize: detect simple trace_id queries and use fetch() instead
+ trace_ids = self._extract_trace_ids_from_query(merged_query)
+
+ if trace_ids is not None:
+ span_dtos = await self.service.fetch(
project_id=UUID(request.state.project_id),
- #
- query=merged_query,
+ trace_ids=trace_ids,
)
- except FilteringException as e:
- raise HTTPException(
- status_code=400,
- detail=str(e),
- ) from e
+ else:
+ try:
+ span_dtos = await self.service.query(
+ project_id=UUID(request.state.project_id),
+ #
+ query=merged_query,
+ )
+ except FilteringException as e:
+ raise HTTPException(
+ status_code=400,
+ detail=str(e),
+ ) from e
spans_or_traces = parse_spans_into_response(
span_dtos,
@@ -269,6 +280,42 @@ async def query_spans( # QUERY
return spans_response
+ def _extract_trace_ids_from_query(
+ self, query: TracingQuery
+ ) -> Optional[List[UUID]]:
+ """
+ Detect if query is a simple trace_id filter and extract trace IDs.
+ Returns trace_ids if query can be optimized to use fetch(), else None.
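+
+        For example, a query whose only condition is `trace_id IS <uuid>` or
+        `trace_id IN [<uuid>, ...]` is resolved via fetch(); any other filter
+        shape falls back to the generic query path.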
+ """
+ if not query.filtering or not query.filtering.conditions:
+ return None
+
+ if len(query.filtering.conditions) != 1:
+ return None
+
+ condition = query.filtering.conditions[0]
+
+ if not isinstance(condition, Condition):
+ return None
+
+ if condition.field != "trace_id":
+ return None
+
+ if condition.operator not in [ComparisonOperator.IS, ListOperator.IN]:
+ return None
+
+ # Extract trace IDs from value
+ try:
+ if isinstance(condition.value, list):
+ # IN operator with list of trace_ids
+ return [UUID(str(tid)) for tid in condition.value]
+ else:
+ # IS operator with single trace_id
+ return [UUID(str(condition.value))]
+ except (ValueError, TypeError):
+ # Invalid UUID format
+ return None
+
@intercept_exceptions()
@suppress_exceptions(default=AnalyticsResponse())
async def fetch_analytics(
diff --git a/api/oss/src/core/auth/__init__.py b/api/oss/src/core/auth/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/oss/src/core/auth/middleware.py b/api/oss/src/core/auth/middleware.py
new file mode 100644
index 0000000000..4839219490
--- /dev/null
+++ b/api/oss/src/core/auth/middleware.py
@@ -0,0 +1,139 @@
+"""Organization policy enforcement middleware (EE)."""
+
+from typing import Optional, Callable, List
+from uuid import UUID
+from fastapi import Request, Response
+from fastapi.responses import JSONResponse
+from starlette.middleware.base import BaseHTTPMiddleware
+
+from supertokens_python.recipe.session import SessionContainer
+from supertokens_python.recipe.session.framework.fastapi import verify_session
+
+from oss.src.core.auth.types import MethodKind
+from oss.src.utils.common import is_ee
+
+# Note: This middleware requires EE organization tables
+# Organization policy enforcement is only available in EE
+# TODO: Policy enforcement needs to be reimplemented to read from organizations.flags
+# Previously used organization_policies table (now removed)
+policies_dao = None
+
+
+def matches_policy(identities: List[str], allowed_methods: List[str]) -> bool:
+ """
+ Check if user's identities satisfy the organization's allowed_methods policy.
+
+ Supports wildcards defined in MethodKind:
+ - "email:*" matches "email:otp", "email:password"
+ - "social:*" matches "social:google", "social:github"
+ - "sso:*" matches any SSO provider
+ - "sso:acme:*" matches any provider for organization 'acme'
+
+ Args:
+ identities: List of authentication methods used by user (from session)
+ allowed_methods: List of MethodKind patterns allowed by organization policy
+
+ Returns:
+ True if any identity matches any allowed method pattern
+
+ Examples:
+ identities = ["email:otp", "social:google"]
+ allowed_methods = ["email:*", "social:*"]
+ → True
+
+ identities = ["email:otp"]
+ allowed_methods = ["sso:*"]
+ → False
+
+ identities = ["sso:acme:okta"]
+ allowed_methods = ["sso:acme:*"]
+ → True
+ """
+ for identity in identities:
+ for allowed in allowed_methods:
+ if MethodKind.matches_pattern(identity, allowed):
+ return True
+
+ return False
+
+
+async def check_organization_policy(
+ session: SessionContainer,
+ organization_id: UUID,
+) -> Optional[dict]:
+ """
+ Check if user's session satisfies organization policy.
+
+ Returns:
+ None if policy satisfied
+ Dict with error details if upgrade required
+ """
+ # Get session identities
+ payload = session.get_access_token_payload()
+ identities = payload.get("session_identities") or []
+
+ # Get user ID
+ user_id = UUID(session.get_user_id())
+
+ # Use AuthService for policy enforcement
+ from oss.src.core.auth.service import AuthService
+
+ auth_service = AuthService()
+ return await auth_service.check_organization_access(
+ user_id, organization_id, identities
+ )
+
+
+class OrganizationPolicyMiddleware(BaseHTTPMiddleware):
+ """
+ Middleware to enforce organization authentication policies (EE).
+
+    Applies to requests that provide an organization_id as a query parameter.
+ Only active when EE features are enabled.
+ """
+
+ async def dispatch(self, request: Request, call_next: Callable) -> Response:
+ # Skip if EE not enabled
+ if not is_ee():
+ return await call_next(request)
+
+ # Skip auth routes
+ if request.url.path.startswith("/auth"):
+ return await call_next(request)
+
+ # Skip non-org routes
+ # Check if organization_id is in query params
+ organization_id_str = request.query_params.get("organization_id")
+
+ if not organization_id_str:
+ # No organization context, skip policy check
+ return await call_next(request)
+
+ try:
+ organization_id = UUID(organization_id_str)
+ except ValueError:
+ return JSONResponse(
+ status_code=400,
+ content={"error": "Invalid organization_id"},
+ )
+
+ # Verify session
+ try:
+ session = await verify_session(request)
+ except Exception:
+ return JSONResponse(
+ status_code=401,
+ content={"error": "Unauthorized", "message": "No valid session"},
+ )
+
+ # Check organization policy
+ policy_error = await check_organization_policy(session, organization_id)
+
+ if policy_error:
+ return JSONResponse(
+ status_code=403,
+ content=policy_error,
+ )
+
+ # Policy satisfied, continue
+ return await call_next(request)
diff --git a/api/oss/src/core/auth/oidc.py b/api/oss/src/core/auth/oidc.py
new file mode 100644
index 0000000000..de44dfd78b
--- /dev/null
+++ b/api/oss/src/core/auth/oidc.py
@@ -0,0 +1,94 @@
+"""OIDC helper utilities for authorization and token exchange."""
+
+import secrets
+import httpx
+from typing import Dict, Any
+from urllib.parse import urlencode
+
+
+class OIDCState:
+ """Manages OIDC state for CSRF protection."""
+
+ def __init__(self, provider_id: str, redirect_uri: str):
+ self.state_id = secrets.token_urlsafe(32)
+ self.nonce = secrets.token_urlsafe(32)
+ self.provider_id = provider_id
+ self.redirect_uri = redirect_uri
+
+ def to_dict(self) -> Dict[str, str]:
+ return {
+ "state_id": self.state_id,
+ "nonce": self.nonce,
+ "provider_id": self.provider_id,
+ "redirect_uri": self.redirect_uri,
+ }
+
+
+class OIDCClient:
+ """OIDC client for building authorization URLs and exchanging tokens."""
+
+ def __init__(self, config: Dict[str, Any], callback_url: str):
+ self.issuer = config["issuer"]
+ self.client_id = config["client_id"]
+ self.client_secret = config["client_secret"]
+ self.scopes = config.get("scopes", ["openid", "profile", "email"])
+ self.callback_url = callback_url
+
+ # Endpoints can be explicit or discovered
+ self.authorization_endpoint = config.get("authorization_endpoint")
+ self.token_endpoint = config.get("token_endpoint")
+ self.userinfo_endpoint = config.get("userinfo_endpoint")
+
+ async def discover_endpoints(self):
+ """Discover OIDC endpoints from .well-known/openid-configuration."""
+ if not self.authorization_endpoint or not self.token_endpoint:
+ discovery_url = f"{self.issuer}/.well-known/openid-configuration"
+ async with httpx.AsyncClient() as client:
+ response = await client.get(discovery_url)
+ response.raise_for_status()
+ config = response.json()
+
+ self.authorization_endpoint = config["authorization_endpoint"]
+ self.token_endpoint = config["token_endpoint"]
+ self.userinfo_endpoint = config.get("userinfo_endpoint")
+
+ def build_authorization_url(self, state: OIDCState) -> str:
+ """Build the OIDC authorization URL."""
+ params = {
+ "client_id": self.client_id,
+ "redirect_uri": self.callback_url,
+ "response_type": "code",
+ "scope": " ".join(self.scopes),
+ "state": state.state_id,
+ "nonce": state.nonce,
+ }
+ return f"{self.authorization_endpoint}?{urlencode(params)}"
+
+ async def exchange_code_for_tokens(self, code: str) -> Dict[str, Any]:
+ """Exchange authorization code for tokens."""
+ data = {
+ "grant_type": "authorization_code",
+ "code": code,
+ "client_id": self.client_id,
+ "client_secret": self.client_secret,
+ "redirect_uri": self.callback_url,
+ }
+
+ async with httpx.AsyncClient() as client:
+ response = await client.post(
+ self.token_endpoint,
+ data=data,
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ )
+ response.raise_for_status()
+ return response.json()
+
+ async def get_userinfo(self, access_token: str) -> Dict[str, Any]:
+ """Fetch user info from the userinfo endpoint."""
+ async with httpx.AsyncClient() as client:
+ response = await client.get(
+ self.userinfo_endpoint,
+ headers={"Authorization": f"Bearer {access_token}"},
+ )
+ response.raise_for_status()
+ return response.json()
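+
+
+# Typical flow (sketch, not wired to a specific route): discover endpoints,
+# redirect the user to the authorization URL, then exchange the returned code
+# and fetch user info:
+#
+#   client = OIDCClient(config, callback_url)
+#   await client.discover_endpoints()
+#   url = client.build_authorization_url(OIDCState(provider_id, redirect_uri))
+#   # ...redirect the user to `url`, receive `code` at the callback...
+#   tokens = await client.exchange_code_for_tokens(code)
+#   userinfo = await client.get_userinfo(tokens["access_token"])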
diff --git a/api/oss/src/core/auth/service.py b/api/oss/src/core/auth/service.py
new file mode 100644
index 0000000000..689da967ea
--- /dev/null
+++ b/api/oss/src/core/auth/service.py
@@ -0,0 +1,887 @@
+"""Authentication and authorization service.
+
+This service provides three main capabilities:
+1. Discovery: Determine available authentication methods for a user
+2. Authentication: Support authentication flows (via SuperTokens + helpers)
+3. Authorization: Validate user access based on organization policies
+"""
+
+from typing import Optional, Dict, List, Any
+from uuid import UUID
+
+from oss.src.utils.common import is_ee
+from oss.src.dbs.postgres.users.dao import IdentitiesDAO
+from oss.src.services import db_manager
+from oss.src.utils.env import env
+from oss.src.utils.logging import get_module_logger
+from oss.src.models.db_models import InvitationDB, ProjectDB
+from oss.src.dbs.postgres.shared.engine import engine
+from sqlalchemy import select
+
+# Organization DAOs and models (EE only)
+if is_ee():
+ from ee.src.dbs.postgres.organizations.dao import (
+ OrganizationDomainsDAO,
+ OrganizationProvidersDAO,
+ )
+ from oss.src.models.db_models import OrganizationDB
+ from ee.src.models.db_models import OrganizationMemberDB
+
+log = get_module_logger(__name__)
+
+
+class AuthService:
+ """
+ Centralized authentication and authorization service.
+
+ Note: Actual authentication flows are handled by SuperTokens recipes.
+ This service provides supporting logic for discovery, validation, and policy enforcement.
+ """
+
+ def __init__(self):
+ self.identities_dao = IdentitiesDAO()
+
+ # Initialize EE DAOs if available
+ if is_ee():
+ self.domains_dao = OrganizationDomainsDAO()
+ self.providers_dao = OrganizationProvidersDAO()
+ else:
+ self.domains_dao = None
+ self.providers_dao = None
+
+ # ============================================================================
+ # DISCOVERY: Determine available authentication methods
+ # ============================================================================
+
+ async def discover(self, email: str) -> Dict[str, Any]:
+ """
+ Discover authentication methods available for a given email.
+
+ This is the pre-authentication discovery endpoint that helps the frontend
+ determine which auth flows to present to the user.
+
+ Logic:
+ 1. Check user's organization memberships and pending invitations
+ 2. Check for organizations with verified domains matching user's email
+ 3. For each relevant organization:
+ - If org has verified domain + active SSO: enforce SSO-only
+ - Otherwise: aggregate allowed methods from org policy flags
+ 4. SSO providers are shown if user has access to orgs with active SSO
+
+ SSO Enforcement Rules:
+ - SSO can ONLY be the sole auth method if org has BOTH:
+ a) Verified domain matching user's email domain
+ b) Active SSO provider configured
+ - When SSO is enforced, email and social auth are not available
+
+ Auto-join and Domain Restrictions (enforced at login, not discovery):
+ - auto_join: User is auto-added to org on login if domain matches
+ - domains_only: Only users with matching domain can access org
+
+ Response format:
+ {
+ "exists": bool, # Whether user account exists
+ "methods": {
+ "email:password": true, # Only present if available
+ "email:otp": true, # Only present if available
+ "social:google": true, # Only present if available
+ "social:github": true, # Only present if available
+                "sso": {                 # Only present if SSO available
+                    "providers": [
+                        {"id": "...", "slug": "okta", "third_party_id": "sso:acme:okta"}
+                    ]
+                }
+ }
+ }
+
+ Note: Only methods that are available (true) are included in the response.
+ Missing methods should be assumed false on the client side.
+ """
+ print(f"[DISCOVERY] Starting discovery for email: {email}")
+
+ # Extract domain from email (if provided)
+ domain = email.split("@")[1] if email and "@" in email else None
+ print(f"[DISCOVERY] Extracted domain: {domain}")
+
+ # Check if user exists only when email looks valid
+ user = None
+ user_exists = False
+ user_id = None
+ if email and "@" in email:
+ user = await db_manager.get_user_with_email(email)
+ user_exists = user is not None
+ user_id = UUID(str(user.id)) if user else None
+ print(f"[DISCOVERY] User exists: {user_exists}, user_id: {user_id}")
+
+ # Get relevant organization IDs (EE only)
+ # Include: memberships, pending invitations, and domain-based access
+ org_ids: List[UUID] = []
+        # Orgs with a verified domain matching the user's email domain
+        domain_org_ids: List[UUID] = []
+
+ print(f"[DISCOVERY] Is EE: {is_ee()}")
+
+ if is_ee():
+ # 1. User's existing memberships
+ if user_exists and user_id:
+ try:
+ orgs = await db_manager.get_user_organizations(str(user_id))
+ org_ids = [org.id for org in orgs]
+ print(f"[DISCOVERY] User organizations: {org_ids}")
+ except Exception as e:
+ print(f"[DISCOVERY] Error fetching user organizations: {e}")
+ org_ids = []
+
+ # 2. Organizations with pending project invitations
+ if email:
+ try:
+ async with engine.core_session() as session:
+ # Query project_invitations for this email, join with projects to get organization_id
+ stmt = (
+ select(ProjectDB.organization_id)
+ .join(InvitationDB, InvitationDB.project_id == ProjectDB.id)
+ .where(InvitationDB.email == email)
+ .where(~InvitationDB.used)
+ .distinct()
+ )
+ result = await session.execute(stmt)
+ invitation_org_ids = [row[0] for row in result.fetchall()]
+
+ print(
+ f"[DISCOVERY] Pending invitation orgs: {invitation_org_ids}"
+ )
+
+ # Add to org_ids if not already present
+ for invitation_org_id in invitation_org_ids:
+ if invitation_org_id not in org_ids:
+ org_ids.append(invitation_org_id)
+ except Exception as e:
+ print(f"[DISCOVERY] Error fetching pending invitations: {e}")
+
+ # 3. Organizations with verified domain matching user's email
+ if domain and self.domains_dao:
+ domain_dto = await self.domains_dao.get_verified_by_slug(domain)
+ print(f"[DISCOVERY] Domain lookup for {domain}: {domain_dto}")
+ if domain_dto:
+ domain_org_ids.append(domain_dto.organization_id)
+ print(f"[DISCOVERY] Domain org: {domain_dto.organization_id}")
+ # Include in org_ids for policy aggregation
+ if domain_dto.organization_id not in org_ids:
+ org_ids.append(domain_dto.organization_id)
+
+ print(f"[DISCOVERY] Final org_ids: {org_ids}")
+ print(f"[DISCOVERY] Domain org_ids: {domain_org_ids}")
+
+ # Aggregate allowed methods across all organizations (EE only)
+ all_allowed_methods: set[str] = set()
+ has_sso_enforcement = False # Track if any org has SSO + verified domain
+
+ print(
+ f"[DISCOVERY] Starting policy aggregation. EE={is_ee()}, org_ids={len(org_ids) if org_ids else 0}"
+ )
+
+ if is_ee() and org_ids:
+ # Check policy flags for each organization
+ for org_id in org_ids:
+ print(f"[DISCOVERY] Checking org {org_id}")
+ org_flags = await self._get_organization_flags(org_id)
+ print(f"[DISCOVERY] Org {org_id} flags: {org_flags}")
+
+ if org_flags:
+ # Check if this org has verified domain (enables SSO enforcement)
+ has_verified_domain = org_id in domain_org_ids
+ print(
+ f"[DISCOVERY] Org {org_id} has verified domain: {has_verified_domain}"
+ )
+
+ # Check if this org has active SSO providers
+ has_active_sso = False
+ if self.providers_dao:
+ providers = await self.providers_dao.list_by_organization(
+ str(org_id)
+ )
+ print(
+ f"[DISCOVERY] Org {org_id} SSO providers: {[(p.slug, p.flags) for p in providers]}"
+ )
+ has_active_sso = any(
+ p.flags and p.flags.get("is_active", False)
+ for p in providers
+ )
+ print(
+ f"[DISCOVERY] Org {org_id} has active SSO: {has_active_sso}"
+ )
+
+ # SSO enforcement: only SSO allowed if org has both verified domain + active SSO
+ if has_verified_domain and has_active_sso:
+ print(f"[DISCOVERY] Org {org_id} enforcing SSO-only")
+ has_sso_enforcement = True
+ all_allowed_methods.add("sso:*")
+ # Skip adding email/social methods for this org
+ continue
+
+ # Otherwise, check normal policy flags
+                    # Fall back to the global auth env defaults when not explicitly set
+ if org_flags.get("allow_email", env.auth.email_enabled):
+ print(f"[DISCOVERY] Org {org_id} allows email")
+ all_allowed_methods.add("email:*")
+ if org_flags.get("allow_social", env.auth.oidc_enabled):
+ print(f"[DISCOVERY] Org {org_id} allows social")
+ all_allowed_methods.add("social:*")
+ if org_flags.get("allow_sso", False):
+ print(f"[DISCOVERY] Org {org_id} allows SSO")
+ all_allowed_methods.add("sso:*")
+
+ print(f"[DISCOVERY] Aggregated methods: {all_allowed_methods}")
+ print(f"[DISCOVERY] SSO enforcement: {has_sso_enforcement}")
+
+ # If user has no organizations, show globally configured auth methods
+ if not all_allowed_methods:
+ # Check what's actually enabled in the SuperTokens configuration
+ if env.auth.email_method == "password":
+ all_allowed_methods.add("email:password")
+ elif env.auth.email_method == "otp":
+ all_allowed_methods.add("email:otp")
+
+ if env.auth.google_enabled:
+ all_allowed_methods.add("social:google")
+
+ if env.auth.google_workspaces_enabled:
+ all_allowed_methods.add("social:google-workspaces")
+
+ if env.auth.github_enabled:
+ all_allowed_methods.add("social:github")
+
+ if env.auth.facebook_enabled:
+ all_allowed_methods.add("social:facebook")
+
+ if env.auth.apple_enabled:
+ all_allowed_methods.add("social:apple")
+
+ if env.auth.discord_enabled:
+ all_allowed_methods.add("social:discord")
+
+ if env.auth.twitter_enabled:
+ all_allowed_methods.add("social:twitter")
+
+ if env.auth.gitlab_enabled:
+ all_allowed_methods.add("social:gitlab")
+
+ if env.auth.bitbucket_enabled:
+ all_allowed_methods.add("social:bitbucket")
+
+ if env.auth.linkedin_enabled:
+ all_allowed_methods.add("social:linkedin")
+
+ if env.auth.okta_enabled:
+ all_allowed_methods.add("social:okta")
+
+ if env.auth.azure_ad_enabled:
+ all_allowed_methods.add("social:azure-ad")
+
+ if env.auth.boxy_saml_enabled:
+ all_allowed_methods.add("social:boxy-saml")
+
+ # Get SSO providers (EE only)
+        # Show SSO providers from the organizations gathered above (memberships, pending invitations, verified domains)
+ sso_providers = []
+ print(
+ f"[DISCOVERY] Collecting SSO providers. EE={is_ee()}, has_providers_dao={self.providers_dao is not None}, org_ids={org_ids}"
+ )
+
+ if is_ee() and self.providers_dao and org_ids:
+ provider_map = {} # Use dict to deduplicate by slug
+
+ # Get SSO providers from all user's organizations
+ for org_id in org_ids:
+ organization = await db_manager.get_organization_by_id(str(org_id))
+ if not organization or not organization.slug:
+ print(
+ f"[DISCOVERY] Org {org_id} missing slug; skipping SSO providers"
+ )
+ continue
+
+ providers = await self.providers_dao.list_by_organization(str(org_id))
+ print(
+ f"[DISCOVERY] Org {org_id} SSO providers (raw): {[(p.slug, p.name, p.flags) for p in providers]}"
+ )
+ for p in providers:
+ is_active = p.flags and p.flags.get("is_active", False)
+ print(f"[DISCOVERY] Provider {p.slug}: is_active={is_active}")
+ if is_active:
+ provider_map[p.slug] = {
+ "id": str(p.id),
+ "slug": p.slug,
+ "third_party_id": f"sso:{organization.slug}:{p.slug}",
+ }
+ print(f"[DISCOVERY] Added provider {p.slug} to map")
+
+ sso_providers = list(provider_map.values())
+ print(f"[DISCOVERY] Final SSO providers: {sso_providers}")
+
+ # Build methods dict - only include methods that are true
+ methods = {}
+
+ print(
+ f"[DISCOVERY] Building response. has_sso_enforcement={has_sso_enforcement}"
+ )
+
+ # If SSO enforcement is active, ONLY return SSO methods
+ if has_sso_enforcement:
+ print("[DISCOVERY] SSO enforcement active, returning SSO-only")
+ # SSO enforcement: only SSO providers, no email or social
+ if sso_providers:
+ methods["sso"] = {"providers": sso_providers}
+ response = {
+ "exists": user_exists,
+ "methods": methods,
+ }
+ print(f"[DISCOVERY] Final response (SSO enforcement): {response}")
+ return response
+
+ # Otherwise, include all allowed methods based on policy
+ # Email methods - check both specific method and wildcard
+ # But respect the configured email_method (only one can be active)
+ if "email:*" in all_allowed_methods:
+ # Organization allows email, use the globally configured method
+ if env.auth.email_method == "password":
+ methods["email:password"] = True
+ elif env.auth.email_method == "otp":
+ methods["email:otp"] = True
+ else:
+ # Use specific methods from all_allowed_methods
+ if "email:password" in all_allowed_methods:
+ methods["email:password"] = True
+ if "email:otp" in all_allowed_methods:
+ methods["email:otp"] = True
+
+ # Social methods - respect environment configuration
+ has_social_wildcard = "social:*" in all_allowed_methods
+
+ if "social:google" in all_allowed_methods or (
+ has_social_wildcard and env.auth.google_enabled
+ ):
+ methods["social:google"] = True
+ if "social:google-workspaces" in all_allowed_methods or (
+ has_social_wildcard and env.auth.google_workspaces_enabled
+ ):
+ methods["social:google-workspaces"] = True
+ if "social:github" in all_allowed_methods or (
+ has_social_wildcard and env.auth.github_enabled
+ ):
+ methods["social:github"] = True
+ if "social:facebook" in all_allowed_methods or (
+ has_social_wildcard and env.auth.facebook_enabled
+ ):
+ methods["social:facebook"] = True
+ if "social:apple" in all_allowed_methods or (
+ has_social_wildcard and env.auth.apple_enabled
+ ):
+ methods["social:apple"] = True
+ if "social:discord" in all_allowed_methods or (
+ has_social_wildcard and env.auth.discord_enabled
+ ):
+ methods["social:discord"] = True
+ if "social:twitter" in all_allowed_methods or (
+ has_social_wildcard and env.auth.twitter_enabled
+ ):
+ methods["social:twitter"] = True
+ if "social:gitlab" in all_allowed_methods or (
+ has_social_wildcard and env.auth.gitlab_enabled
+ ):
+ methods["social:gitlab"] = True
+ if "social:bitbucket" in all_allowed_methods or (
+ has_social_wildcard and env.auth.bitbucket_enabled
+ ):
+ methods["social:bitbucket"] = True
+ if "social:linkedin" in all_allowed_methods or (
+ has_social_wildcard and env.auth.linkedin_enabled
+ ):
+ methods["social:linkedin"] = True
+ if "social:okta" in all_allowed_methods or (
+ has_social_wildcard and env.auth.okta_enabled
+ ):
+ methods["social:okta"] = True
+ if "social:azure-ad" in all_allowed_methods or (
+ has_social_wildcard and env.auth.azure_ad_enabled
+ ):
+ methods["social:azure-ad"] = True
+ if "social:boxy-saml" in all_allowed_methods or (
+ has_social_wildcard and env.auth.boxy_saml_enabled
+ ):
+ methods["social:boxy-saml"] = True
+
+ # SSO - only include if providers are available
+ if sso_providers:
+ methods["sso"] = {"providers": sso_providers}
+ print(f"[DISCOVERY] Including SSO providers in response: {sso_providers}")
+
+ response = {
+ "exists": user_exists,
+ "methods": methods,
+ }
+
+ print(f"[DISCOVERY] Final response: {response}")
+ return response
+
+ # ============================================================================
+ # AUTHENTICATION: Support authentication flows
+ # ============================================================================
+ # Note: Actual authentication is handled by SuperTokens recipes.
+ # See supertokens_overrides.py for:
+ # - Dynamic OIDC provider configuration (get_dynamic_oidc_provider)
+ # - Post-authentication hooks (sign_in_up override)
+ # - Session creation with identities (create_new_session override)
+
+ async def get_user_identities(self, user_id: UUID) -> List[str]:
+ """
+ Get all authentication methods (identities) for a user.
+
+ Returns list of method strings like:
+ - ["email:otp", "social:google", "sso:acme:okta"]
+
+ Used to populate session payload and for policy validation.
+ """
+ identities = await self.identities_dao.list_by_user(user_id)
+ return [identity.method for identity in identities]
+
+ async def validate_provider_access(
+ self, provider_id: UUID, email: Optional[str] = None
+ ) -> bool:
+ """
+ Validate if a user can access a given SSO provider (EE only).
+
+ Checks:
+ 1. Provider exists and is enabled
+ 2. If provider has domain restriction, user's email domain matches
+
+ Args:
+ provider_id: UUID of the organization_providers entry
+ email: User's email (optional, for domain validation)
+
+ Returns:
+ True if user can access this provider
+ """
+ if not is_ee() or not self.providers_dao:
+ return False
+
+ provider = await self.providers_dao.get_by_id(provider_id)
+
+ if not provider or not (provider.flags and provider.flags.get("is_active")):
+ return False
+
+ # Note: domain_id FK removed - SSO providers can handle multiple domains
+ # Domain validation is now handled at discovery time, not provider validation time
+
+ return True
+
+ async def enforce_domain_policies(self, email: str, user_id: UUID) -> None:
+ """
+ Enforce domain-based policies after successful authentication:
+ 1. Auto-join: Automatically add user to organizations with verified domain + auto_join flag
+ 2. Domains-only validation: Block if user's domain doesn't match org's verified domains
+
+ This should be called during login/callback after user is authenticated.
+
+ Args:
+ email: User's email address
+ user_id: Internal user UUID
+
+ Raises:
+ Exception: If domains-only enforcement blocks access
+ """
+ if not is_ee() or not self.domains_dao:
+ return
+
+ # Extract domain from email
+ domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None
+ if not domain:
+ return
+
+ # Check for verified domain matching user's email
+ domain_dto = await self.domains_dao.get_verified_by_slug(domain)
+ if not domain_dto:
+ return
+
+ # Get organization and check flags
+ org_id = domain_dto.organization_id
+ org_flags = await self._get_organization_flags(org_id)
+ if not org_flags:
+ return
+
+ # 1. Auto-join: Add user to organization if auto_join flag is enabled
+ auto_join = org_flags.get("auto_join", False)
+ if auto_join:
+ try:
+ # Check if user is already a member of this organization
+ user_orgs = await db_manager.get_user_organizations(str(user_id))
+ is_member = any(org.id == org_id for org in user_orgs)
+
+ if not is_member:
+ from ee.src.services import db_manager_ee
+ from ee.src.models.db_models import (
+ OrganizationMemberDB,
+ WorkspaceMemberDB,
+ ProjectMemberDB,
+ )
+ from oss.src.dbs.postgres.shared.engine import engine as db_engine
+ from sqlalchemy import select
+
+ organization = await db_manager.get_organization_by_id(str(org_id))
+ user = await db_manager.get_user_with_id(user_id=str(user_id))
+ workspaces = await db_manager_ee.get_organization_workspaces(
+ str(org_id)
+ )
+
+ if not organization or not user or not workspaces:
+ raise ValueError(
+ "Auto-join requires organization, user, and at least one workspace"
+ )
+
+ async with db_engine.core_session() as session:
+ existing_org_member = await session.execute(
+ select(OrganizationMemberDB).filter_by(
+ user_id=user.id, organization_id=organization.id
+ )
+ )
+ if not existing_org_member.scalars().first():
+ session.add(
+ OrganizationMemberDB(
+ user_id=user.id,
+ organization_id=organization.id,
+ role="member",
+ )
+ )
+
+ for workspace in workspaces:
+ existing_workspace_member = await session.execute(
+ select(WorkspaceMemberDB).filter_by(
+ user_id=user.id, workspace_id=workspace.id
+ )
+ )
+ if not existing_workspace_member.scalars().first():
+ session.add(
+ WorkspaceMemberDB(
+ user_id=user.id,
+ workspace_id=workspace.id,
+ role="editor",
+ )
+ )
+
+ projects = await db_manager.fetch_projects_by_workspace(
+ str(workspace.id)
+ )
+ if not projects:
+ continue
+
+ existing_project_members = await session.execute(
+ select(ProjectMemberDB).filter(
+ ProjectMemberDB.project_id.in_(
+ [project.id for project in projects]
+ ),
+ ProjectMemberDB.user_id == user.id,
+ )
+ )
+ existing_project_ids = {
+ member.project_id
+ for member in existing_project_members.scalars().all()
+ }
+
+ for project in projects:
+ if project.id in existing_project_ids:
+ continue
+ session.add(
+ ProjectMemberDB(
+ user_id=user.id,
+ project_id=project.id,
+ role="editor",
+ )
+ )
+
+ await session.commit()
+
+ log.info(
+ "Auto-join: added user to organization with editor access",
+ user_id=str(user_id),
+ organization_id=str(org_id),
+ )
+ except Exception as e:
+ log.error("Error during auto-join: %s", e)
+
+ # 2. Domains-only enforcement: Check if user has access
+ # This is enforced at the organization level via check_organization_access()
+ # when the user tries to access organization resources through the middleware.
+ # No action needed here during login - enforcement happens at access time.
+
+ # ============================================================================
+ # AUTHORIZATION: Validate access based on policies
+ # ============================================================================
+
+ async def check_organization_access(
+ self, user_id: UUID, organization_id: UUID, session_identities: List[str]
+ ) -> Optional[Dict[str, Any]]:
+ """
+ Check if user's identities satisfy organization policy (EE only).
+
+ This is the core authorization logic used by the middleware.
+
+ Args:
+ user_id: User's UUID
+ organization_id: Organization's UUID
+ session_identities: List of authentication methods verified in session
+
+ Returns:
+ None if access allowed
+ Dict with error details if access denied
+
+ Possible error responses:
+            - AUTH_UPGRADE_REQUIRED: User must re-authenticate with an allowed method
+            - AUTH_SSO_DENIED: Session used SSO, but SSO is not allowed or the provider is inactive
+            - AUTH_DOMAIN_DENIED: User's email domain is not among the org's verified domains
+ """
+ # If EE not enabled, allow access (no policy enforcement in OSS)
+ if not is_ee():
+ return None
+
+ # Note: We don't check membership here - that's the responsibility of route handlers
+ # This function only validates authentication method policies
+
+ # Get organization flags
+ org_flags = await self._get_organization_flags(organization_id)
+
+ if not org_flags:
+ # No flags means no restrictions (default allow all)
+ return None
+
+ # Check for root bypass: if user is owner and allow_root is True, bypass policy
+ is_owner = await self._is_organization_owner(user_id, organization_id)
+
+ if is_owner and org_flags.get("allow_root", False):
+ # Owner with root access bypasses policy
+ return None
+
+ # Build allowed methods from flags
+        # Fall back to the global auth env defaults when not explicitly set (SSO defaults to off)
+ allowed_methods = []
+
+ allow_email = org_flags.get("allow_email", env.auth.email_enabled)
+ allow_social = org_flags.get("allow_social", env.auth.oidc_enabled)
+ allow_sso = org_flags.get("allow_sso", False)
+
+ if allow_email:
+ allowed_methods.append("email:*")
+ if allow_social:
+ allowed_methods.append("social:*")
+ if allow_sso:
+ allowed_methods.append("sso:*")
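+        # e.g. {"allow_email": True, "allow_sso": True} -> ["email:*", "sso:*"]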
+
+ # If no methods are allowed, deny access
+ if not allowed_methods:
+ return {
+ "error": "AUTH_UPGRADE_REQUIRED",
+ "message": "No authentication methods are allowed for this organization",
+ "required_methods": [],
+ "current_identities": session_identities,
+ }
+
+ # Check if identities satisfy allowed_methods
+ matches = self._matches_policy(session_identities, allowed_methods)
+
+ if not matches:
+ # If the session used SSO but the org doesn't allow it (or provider inactive),
+ # block and instruct user to re-auth with allowed methods.
+ sso_identity = next(
+ (
+ identity
+ for identity in session_identities
+ if identity.startswith("sso:")
+ ),
+ None,
+ )
+ if sso_identity and self.providers_dao:
+ org_slug = await self._get_organization_slug(organization_id)
+ provider_slug = (
+ sso_identity.split(":")[2]
+ if len(sso_identity.split(":")) > 2
+ else None
+ )
+ providers = await self.providers_dao.list_by_organization(
+ str(organization_id)
+ )
+ active_provider_slugs = {
+ p.slug
+ for p in providers
+ if p.flags and p.flags.get("is_active", False)
+ }
+ sso_matches_org = bool(
+ org_slug and sso_identity.startswith(f"sso:{org_slug}:")
+ )
+ sso_provider_active = bool(
+ provider_slug and provider_slug in active_provider_slugs
+ )
+
+ if not allow_sso or not sso_matches_org or not sso_provider_active:
+ required_methods = []
+ if allow_email:
+ required_methods.append("email:*")
+ if allow_social:
+ required_methods.append("social:*")
+ return {
+ "error": "AUTH_SSO_DENIED",
+ "message": "SSO is denied for this organization",
+ "required_methods": required_methods,
+ "current_identities": session_identities,
+ }
+ sso_providers = []
+ if "sso:*" in allowed_methods:
+ sso_providers = await self._get_active_sso_providers(organization_id)
+ return {
+ "error": "AUTH_UPGRADE_REQUIRED",
+ "message": "Additional authentication required",
+ "required_methods": allowed_methods,
+ "current_identities": session_identities,
+ "sso_providers": sso_providers,
+ }
+
+ # Check domains_only enforcement
+ domains_only = org_flags.get("domains_only", False)
+ if domains_only and self.domains_dao:
+ # Get user's email to check domain
+ user = await db_manager.get_user(str(user_id))
+ if user and user.email:
+ email_domain = user.email.split("@")[-1].lower()
+
+ # Get verified domains for this organization
+ org_domains = await self.domains_dao.list_by_organization(
+ str(organization_id)
+ )
+ verified_domain_slugs = {
+ d.slug.lower()
+ for d in org_domains
+ if d.flags and d.flags.get("is_verified", False)
+ }
+
+ # If user's domain is not in the verified domains, deny access
+ if email_domain not in verified_domain_slugs:
+ return {
+ "error": "AUTH_DOMAIN_DENIED",
+ "message": f"Your email domain '{email_domain}' is not allowed for this organization",
+ "current_domain": email_domain,
+ "allowed_domains": list(verified_domain_slugs),
+ }
+
+ return None
+
+ def _matches_policy(
+ self, identities: List[str], allowed_methods: List[str]
+ ) -> bool:
+ """
+ Check if user's identities satisfy the allowed_methods policy.
+
+ Supports wildcards:
+ - "email:*" matches "email:otp", "email:password"
+ - "social:*" matches "social:google", "social:github"
+ - "sso:*" matches any SSO provider
+ - "sso:acme:*" matches any provider for organization 'acme'
+
+ This is the same logic as middleware.matches_policy().
+ """
+ for identity in identities:
+ for allowed in allowed_methods:
+ # Exact match
+ if identity == allowed:
+ return True
+
+ # Wildcard match
+ if allowed.endswith(":*"):
+ prefix = allowed[:-2] # Remove ":*"
+ if identity.startswith(f"{prefix}:"):
+ return True
+
+ return False
+
+ async def _get_active_sso_providers(
+ self, organization_id: UUID
+ ) -> List[Dict[str, str]]:
+ if not is_ee() or not self.providers_dao:
+ return []
+
+ organization = await db_manager.get_organization_by_id(str(organization_id))
+ if not organization or not organization.slug:
+ return []
+
+ providers = await self.providers_dao.list_by_organization(str(organization_id))
+ results = []
+ for provider in providers:
+ if provider.flags and provider.flags.get("is_active", False):
+ results.append(
+ {
+ "id": str(provider.id),
+ "slug": provider.slug,
+ "third_party_id": f"sso:{organization.slug}:{provider.slug}",
+ }
+ )
+ return results
+
+ async def _get_organization_flags(
+ self, organization_id: UUID
+ ) -> Optional[Dict[str, Any]]:
+ """
+ Get organization flags from organizations table (EE only).
+
+ Returns flags JSONB field or None if organization not found.
+ """
+ if not is_ee():
+ return None
+
+ async with db_manager.engine.core_session() as session:
+ stmt = select(OrganizationDB.flags).where(
+ OrganizationDB.id == organization_id
+ )
+ result = await session.execute(stmt)
+ flags = result.scalar()
+ return flags or {}
+
+ async def _get_organization_slug(self, organization_id: UUID) -> Optional[str]:
+ if not is_ee():
+ return None
+
+ async with db_manager.engine.core_session() as session:
+ stmt = select(OrganizationDB.slug).where(
+ OrganizationDB.id == organization_id
+ )
+ result = await session.execute(stmt)
+ slug = result.scalar()
+ return slug or None
+
+ async def _is_organization_member(
+ self, user_id: UUID, organization_id: UUID
+ ) -> bool:
+ """
+ Check if user is a member of the organization (EE only).
+ """
+ if not is_ee():
+ return False
+
+ async with db_manager.engine.core_session() as session:
+ stmt = select(OrganizationMemberDB).where(
+ OrganizationMemberDB.user_id == user_id,
+ OrganizationMemberDB.organization_id == organization_id,
+ )
+ result = await session.execute(stmt)
+ return result.scalar() is not None
+
+ async def _is_organization_owner(
+ self, user_id: UUID, organization_id: UUID
+ ) -> bool:
+ """
+ Check if user is the owner of the organization (EE only).
+ """
+ if not is_ee():
+ return False
+
+ async with db_manager.engine.core_session() as session:
+ stmt = select(OrganizationMemberDB.role).where(
+ OrganizationMemberDB.user_id == user_id,
+ OrganizationMemberDB.organization_id == organization_id,
+ )
+ result = await session.execute(stmt)
+ role = result.scalar()
+ return role == "owner"
diff --git a/api/oss/src/core/auth/state_store.py b/api/oss/src/core/auth/state_store.py
new file mode 100644
index 0000000000..8d9569be27
--- /dev/null
+++ b/api/oss/src/core/auth/state_store.py
@@ -0,0 +1,50 @@
+"""In-memory state store for OIDC flows. TODO: Move to Redis for production."""
+
+from typing import Dict, Optional
+from datetime import datetime, timedelta
+
+
+class StateStore:
+ """Simple in-memory state store with expiration."""
+
+ def __init__(self):
+ self._store: Dict[str, Dict] = {}
+ self._expiry: Dict[str, datetime] = {}
+
+ async def set(self, key: str, value: Dict, ttl_seconds: int = 600) -> None:
+ """Store a value with TTL (default 10 minutes)."""
+ self._store[key] = value
+ self._expiry[key] = datetime.utcnow() + timedelta(seconds=ttl_seconds)
+ await self._cleanup_expired()
+
+ async def get(self, key: str) -> Optional[Dict]:
+ """Get a value, return None if expired or not found."""
+ await self._cleanup_expired()
+
+ if key not in self._store:
+ return None
+
+ if key in self._expiry and datetime.utcnow() > self._expiry[key]:
+ del self._store[key]
+ del self._expiry[key]
+ return None
+
+ return self._store[key]
+
+ async def delete(self, key: str) -> None:
+ """Delete a value."""
+ self._store.pop(key, None)
+ self._expiry.pop(key, None)
+
+ async def _cleanup_expired(self) -> None:
+ """Remove expired entries."""
+ now = datetime.utcnow()
+ expired_keys = [k for k, exp in self._expiry.items() if now > exp]
+ for key in expired_keys:
+ self._store.pop(key, None)
+ self._expiry.pop(key, None)
+
+
+# Singleton instance
+state_store = StateStore()
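+
+# Usage (sketch): persist an OIDCState payload keyed by its state_id when the
+# flow starts, then look it up (and delete it) in the callback handler:
+#
+#   await state_store.set(state.state_id, state.to_dict(), ttl_seconds=600)
+#   payload = await state_store.get(state_id)  # None once expired or unknown
+#   await state_store.delete(state_id)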
diff --git a/api/oss/src/core/auth/supertokens_config.py b/api/oss/src/core/auth/supertokens_config.py
new file mode 100644
index 0000000000..256e049f89
--- /dev/null
+++ b/api/oss/src/core/auth/supertokens_config.py
@@ -0,0 +1,306 @@
+"""SuperTokens configuration and initialization."""
+
+from typing import Dict, List, Any, Optional
+from supertokens_python import init, InputAppInfo, SupertokensConfig
+from supertokens_python.recipe import (
+ passwordless,
+ session,
+ dashboard,
+ thirdparty,
+)
+from supertokens_python.recipe.passwordless import (
+ ContactEmailOnlyConfig,
+ InputOverrideConfig as PasswordlessInputOverrideConfig,
+)
+from supertokens_python.recipe.thirdparty import (
+ ProviderInput,
+ ProviderConfig,
+ ProviderClientConfig,
+ InputOverrideConfig as ThirdPartyInputOverrideConfig,
+)
+from supertokens_python.recipe.session import (
+ InputOverrideConfig as SessionInputOverrideConfig,
+)
+
+from oss.src.utils.env import env
+from oss.src.core.auth.supertokens_overrides import (
+ override_thirdparty_functions,
+ override_thirdparty_apis,
+ override_passwordless_functions,
+ override_session_functions,
+)
+
+
+def get_supertokens_config() -> Dict[str, Any]:
+ """Get SuperTokens configuration from environment."""
+ return {
+ "connection_uri": env.supertokens.uri_core,
+ "api_key": env.supertokens.api_key,
+ }
+
+
+def get_app_info() -> InputAppInfo:
+ """Get SuperTokens app info."""
+ # Extract domain from full URL (e.g., "http://localhost/api" -> "http://localhost")
+ from urllib.parse import urlparse
+
+ api_parsed = urlparse(env.agenta.api_url)
+ api_domain = f"{api_parsed.scheme}://{api_parsed.netloc}"
+ api_gateway_path = api_parsed.path or "/"
+ # Avoid double /api when app is already mounted under root_path="/api".
+ if api_gateway_path == "/api":
+ api_gateway_path = "/"
+
+ app_info = InputAppInfo(
+ app_name="Agenta",
+ api_domain=api_domain,
+ website_domain=env.agenta.web_url,
+ api_gateway_path=api_gateway_path,
+ api_base_path="/auth",
+ website_base_path="/auth",
+ )
+ print(
+ "[SUPERTOKENS] AppInfo: "
+ f"api_domain={api_domain} api_gateway_path={api_gateway_path} "
+ f"api_base_path=/auth website_domain={env.agenta.web_url}"
+ )
+ return app_info
+
+
+def get_thirdparty_providers() -> List[ProviderInput]:
+ """
+ Get third-party OAuth providers configuration.
+
+ This includes:
+ - Social providers (Google, GitHub, etc.)
+ - Dynamic OIDC providers will be added at runtime via override callbacks
+ """
+ providers = []
+
+ def add_provider(
+ *,
+ provider_id: str,
+ client_id: str,
+ client_secret: str | None,
+ additional_config: Dict[str, Any] | None = None,
+ ) -> None:
+ providers.append(
+ ProviderInput(
+ config=ProviderConfig(
+ third_party_id=provider_id,
+ clients=[
+ ProviderClientConfig(
+ client_id=client_id,
+ client_secret=client_secret,
+ additional_config=additional_config,
+ ),
+ ],
+ )
+ )
+ )
+
+ # Google OAuth
+ if env.auth.google_enabled:
+ assert env.auth.google_oauth_client_id is not None
+ assert env.auth.google_oauth_client_secret is not None
+ add_provider(
+ provider_id="google",
+ client_id=env.auth.google_oauth_client_id,
+ client_secret=env.auth.google_oauth_client_secret,
+ )
+
+ # Google Workspaces OAuth
+ if env.auth.google_workspaces_enabled:
+ assert env.auth.google_workspaces_oauth_client_id is not None
+ assert env.auth.google_workspaces_oauth_client_secret is not None
+ add_provider(
+ provider_id="google-workspaces",
+ client_id=env.auth.google_workspaces_oauth_client_id,
+ client_secret=env.auth.google_workspaces_oauth_client_secret,
+ additional_config={
+ "hd": env.auth.google_workspaces_hd,
+ }
+ if env.auth.google_workspaces_hd
+ else None,
+ )
+
+ # GitHub OAuth
+ if env.auth.github_enabled:
+ assert env.auth.github_oauth_client_id is not None
+ assert env.auth.github_oauth_client_secret is not None
+ add_provider(
+ provider_id="github",
+ client_id=env.auth.github_oauth_client_id,
+ client_secret=env.auth.github_oauth_client_secret,
+ additional_config={"scope": ["user:email"]},
+ )
+
+ # Facebook OAuth
+ if env.auth.facebook_enabled:
+ assert env.auth.facebook_oauth_client_id is not None
+ assert env.auth.facebook_oauth_client_secret is not None
+ add_provider(
+ provider_id="facebook",
+ client_id=env.auth.facebook_oauth_client_id,
+ client_secret=env.auth.facebook_oauth_client_secret,
+ )
+
+ # Apple OAuth
+ if env.auth.apple_enabled:
+ assert env.auth.apple_oauth_client_id is not None
+ additional_config = None
+ if (
+ env.auth.apple_key_id
+ and env.auth.apple_team_id
+ and env.auth.apple_private_key
+ ):
+ additional_config = {
+ "keyId": env.auth.apple_key_id,
+ "teamId": env.auth.apple_team_id,
+ "privateKey": env.auth.apple_private_key,
+ }
+ add_provider(
+ provider_id="apple",
+ client_id=env.auth.apple_oauth_client_id,
+ client_secret=env.auth.apple_oauth_client_secret,
+ additional_config=additional_config,
+ )
+
+ # Discord OAuth
+ if env.auth.discord_enabled:
+ assert env.auth.discord_oauth_client_id is not None
+ assert env.auth.discord_oauth_client_secret is not None
+ add_provider(
+ provider_id="discord",
+ client_id=env.auth.discord_oauth_client_id,
+ client_secret=env.auth.discord_oauth_client_secret,
+ )
+
+ # Twitter OAuth
+ if env.auth.twitter_enabled:
+ assert env.auth.twitter_oauth_client_id is not None
+ assert env.auth.twitter_oauth_client_secret is not None
+ add_provider(
+ provider_id="twitter",
+ client_id=env.auth.twitter_oauth_client_id,
+ client_secret=env.auth.twitter_oauth_client_secret,
+ )
+
+ # GitLab OAuth
+ if env.auth.gitlab_enabled:
+ assert env.auth.gitlab_oauth_client_id is not None
+ assert env.auth.gitlab_oauth_client_secret is not None
+ add_provider(
+ provider_id="gitlab",
+ client_id=env.auth.gitlab_oauth_client_id,
+ client_secret=env.auth.gitlab_oauth_client_secret,
+ additional_config={
+ "gitlabBaseUrl": env.auth.gitlab_base_url,
+ }
+ if env.auth.gitlab_base_url
+ else None,
+ )
+
+ # Bitbucket OAuth
+ if env.auth.bitbucket_enabled:
+ assert env.auth.bitbucket_oauth_client_id is not None
+ assert env.auth.bitbucket_oauth_client_secret is not None
+ add_provider(
+ provider_id="bitbucket",
+ client_id=env.auth.bitbucket_oauth_client_id,
+ client_secret=env.auth.bitbucket_oauth_client_secret,
+ )
+
+ # LinkedIn OAuth
+ if env.auth.linkedin_enabled:
+ assert env.auth.linkedin_oauth_client_id is not None
+ assert env.auth.linkedin_oauth_client_secret is not None
+ add_provider(
+ provider_id="linkedin",
+ client_id=env.auth.linkedin_oauth_client_id,
+ client_secret=env.auth.linkedin_oauth_client_secret,
+ )
+
+ # Okta OAuth
+ if env.auth.okta_enabled:
+ assert env.auth.okta_oauth_client_id is not None
+ assert env.auth.okta_oauth_client_secret is not None
+ assert env.auth.okta_domain is not None
+ add_provider(
+ provider_id="okta",
+ client_id=env.auth.okta_oauth_client_id,
+ client_secret=env.auth.okta_oauth_client_secret,
+ additional_config={
+ "oktaDomain": env.auth.okta_domain,
+ },
+ )
+
+ # Azure AD OAuth
+ if env.auth.azure_ad_enabled:
+ assert env.auth.azure_ad_oauth_client_id is not None
+ assert env.auth.azure_ad_oauth_client_secret is not None
+ assert env.auth.azure_ad_directory_id is not None
+ add_provider(
+ provider_id="azure-ad",
+ client_id=env.auth.azure_ad_oauth_client_id,
+ client_secret=env.auth.azure_ad_oauth_client_secret,
+ additional_config={
+ "directoryId": env.auth.azure_ad_directory_id,
+ },
+ )
+
+ # BoxySAML OAuth
+ if env.auth.boxy_saml_enabled:
+ assert env.auth.boxy_saml_oauth_client_id is not None
+ assert env.auth.boxy_saml_oauth_client_secret is not None
+ assert env.auth.boxy_saml_url is not None
+ add_provider(
+ provider_id="boxy-saml",
+ client_id=env.auth.boxy_saml_oauth_client_id,
+ client_secret=env.auth.boxy_saml_oauth_client_secret,
+ additional_config={
+ "boxyURL": env.auth.boxy_saml_url,
+ },
+ )
+
+ return providers
+
+
+def init_supertokens():
+ """Initialize SuperTokens with recipes and dynamic provider support."""
+
+ init(
+ supertokens_config=SupertokensConfig(**get_supertokens_config()),
+ app_info=get_app_info(),
+ framework="fastapi",
+ recipe_list=[
+ # Email OTP (passwordless)
+ passwordless.init(
+ contact_config=ContactEmailOnlyConfig(),
+ flow_type="USER_INPUT_CODE_AND_MAGIC_LINK",
+ override=PasswordlessInputOverrideConfig(
+ functions=override_passwordless_functions,
+ ),
+ ),
+ # Third-party OAuth (social + dynamic OIDC)
+ thirdparty.init(
+ sign_in_and_up_feature=thirdparty.SignInAndUpFeature(
+ providers=get_thirdparty_providers()
+ ),
+ override=ThirdPartyInputOverrideConfig(
+ functions=override_thirdparty_functions,
+ apis=override_thirdparty_apis,
+ ),
+ ),
+ # Session management with custom identities payload
+ session.init(
+ get_token_transfer_method=lambda _, __, ___: "cookie",
+ override=SessionInputOverrideConfig(
+ functions=override_session_functions,
+ ),
+ ),
+ # SuperTokens dashboard
+ dashboard.init(),
+ ],
+ mode="asgi",
+ )
diff --git a/api/oss/src/core/auth/supertokens_overrides.py b/api/oss/src/core/auth/supertokens_overrides.py
new file mode 100644
index 0000000000..f373756f8c
--- /dev/null
+++ b/api/oss/src/core/auth/supertokens_overrides.py
@@ -0,0 +1,762 @@
+"""SuperTokens override functions for dynamic OIDC providers and custom session handling."""
+
+from typing import Dict, Any, List, Optional, Union
+from uuid import UUID
+
+from oss.src.utils.logging import get_module_logger
+
+from supertokens_python.recipe.thirdparty.provider import (
+ Provider,
+ ProviderInput,
+ ProviderConfig,
+ ProviderClientConfig,
+)
+from supertokens_python.recipe.thirdparty.interfaces import (
+ RecipeInterface as ThirdPartyRecipeInterface,
+ APIInterface as ThirdPartyAPIInterface,
+ SignInUpOkResult,
+)
+from supertokens_python.recipe.passwordless.interfaces import (
+ RecipeInterface as PasswordlessRecipeInterface,
+ ConsumeCodeOkResult,
+)
+from supertokens_python.recipe.emailpassword.interfaces import (
+ RecipeInterface as EmailPasswordRecipeInterface,
+ SignInOkResult as EmailPasswordSignInOkResult,
+ SignUpOkResult as EmailPasswordSignUpOkResult,
+)
+from supertokens_python.recipe.session.interfaces import (
+ RecipeInterface as SessionRecipeInterface,
+)
+from supertokens_python.types import User, RecipeUserId
+
+from oss.src.utils.common import is_ee
+from oss.src.dbs.postgres.users.dao import IdentitiesDAO
+from oss.src.core.users.types import UserIdentityCreate
+from oss.src.services import db_manager
+from oss.src.core.auth.service import AuthService
+
+log = get_module_logger(__name__)
+
+# DAOs for accessing user identities (always available)
+identities_dao = IdentitiesDAO()
+
+# Organization providers DAO (EE only)
+if is_ee():
+ from ee.src.dbs.postgres.organizations.dao import OrganizationProvidersDAO
+ from oss.src.core.secrets.services import VaultService
+ from oss.src.dbs.postgres.secrets.dao import SecretsDAO
+
+ providers_dao = OrganizationProvidersDAO()
+else:
+ providers_dao = None
+
+# Auth service for domain policy enforcement
+auth_service = AuthService()
+
+
+def _merge_session_identities(
+ session: Optional[Any], method: Optional[str]
+) -> List[str]:
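+    """Merge identities already present on the session payload with the method
+    just used, avoiding duplicates; falls back to [method] when the session has
+    no identities or no session is provided."""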
+ session_identities: List[str] = []
+ if session is not None:
+ try:
+ payload = session.get_access_token_payload()
+ session_identities = payload.get("session_identities") or []
+ except Exception:
+ session_identities = []
+ if method:
+ if method not in session_identities:
+ session_identities = list(session_identities) + [method]
+ return session_identities or ([method] if method else [])
+
+
+async def get_dynamic_oidc_provider(third_party_id: str) -> Optional[ProviderInput]:
+ """
+ Fetch dynamic OIDC provider configuration from database (EE only).
+
+ third_party_id format: "sso:{organization_slug}:{provider_slug}"
+ """
+ # OIDC providers require EE
+ if not is_ee() or providers_dao is None:
+ log.debug(f"SSO provider {third_party_id} requested but EE not enabled")
+ return None
+
+ try:
+ # Parse third_party_id: "sso:{organization_slug}:{provider_slug}"
+ if not third_party_id.startswith("sso:"):
+ return None
+
+ parts = third_party_id.split(":", 2)
+ if len(parts) != 3:
+ return None
+
+ _, organization_slug, provider_slug = parts
+
+ from oss.src.services import db_manager
+
+ organization = await db_manager.get_organization_by_slug(organization_slug)
+ if not organization:
+ return None
+
+ # Fetch provider from database by organization_id and provider_slug
+ provider = await providers_dao.get_by_slug(provider_slug, str(organization.id))
+ if not provider or not (provider.flags and provider.flags.get("is_active")):
+ return None
+
+ # Extract OIDC config
+ vault_service = VaultService(SecretsDAO())
+ secret = await vault_service.get_secret(
+ secret_id=provider.secret_id,
+ organization_id=organization.id,
+ )
+ if not secret:
+ log.debug(f"Secret not found for provider id={provider.id}")
+ return None
+
+ data = secret.data
+ provider_settings = None
+ if hasattr(data, "provider"):
+ provider_settings = data.provider.model_dump()
+ elif isinstance(data, dict):
+ provider_settings = data.get("provider")
+
+ if not isinstance(provider_settings, dict):
+ log.debug(f"Invalid provider secret format for provider id={provider.id}")
+ return None
+
+ issuer_url = provider_settings.get("issuer_url")
+ client_id = provider_settings.get("client_id")
+ client_secret = provider_settings.get("client_secret")
+ scopes = provider_settings.get("scopes", ["openid", "profile", "email"])
+
+ if not issuer_url or not client_id or not client_secret:
+ return None
+
+ # Build ProviderInput for SuperTokens
+ return ProviderInput(
+ config=ProviderConfig(
+ third_party_id=third_party_id,
+ clients=[
+ ProviderClientConfig(
+ client_id=client_id,
+ client_secret=client_secret,
+ scope=scopes,
+ )
+ ],
+                # Normalize the issuer URL to avoid a double slash in the discovery endpoint
+                oidc_discovery_endpoint=f"{issuer_url.rstrip('/')}/.well-known/openid-configuration",
+ )
+ )
+ except Exception as e:
+ # Log error but don't crash
+ log.debug(f"Error fetching dynamic OIDC provider {third_party_id}: {e}")
+ return None
+
+
+def override_thirdparty_functions(
+ original_implementation: ThirdPartyRecipeInterface,
+) -> ThirdPartyRecipeInterface:
+ """Override third-party recipe functions to support dynamic providers."""
+
+ original_sign_in_up = original_implementation.sign_in_up
+ original_get_provider = original_implementation.get_provider
+
+ async def sign_in_up(
+ third_party_id: str,
+ third_party_user_id: str,
+ email: str,
+ is_verified: bool,
+ oauth_tokens: Dict[str, Any],
+ raw_user_info_from_provider: Dict[str, Any],
+ session: Optional[Any],
+ should_try_linking_with_session_user: Optional[bool],
+ tenant_id: str,
+ user_context: Dict[str, Any],
+ ) -> SignInUpOkResult:
+ """
+ Override sign_in_up to:
+ 1. Create user_identity record after successful authentication
+ 2. Populate session with user_identities array
+ """
+ internal_user = None
+ # Call original implementation
+ result = await original_sign_in_up(
+ third_party_id=third_party_id,
+ third_party_user_id=third_party_user_id,
+ email=email,
+ is_verified=is_verified,
+ oauth_tokens=oauth_tokens,
+ raw_user_info_from_provider=raw_user_info_from_provider,
+ session=session,
+ should_try_linking_with_session_user=should_try_linking_with_session_user,
+ tenant_id=tenant_id,
+ user_context=user_context,
+ )
+
+ # Determine method string based on third_party_id
+ if third_party_id.startswith("sso:"):
+ # Format: sso:{organization_slug}:{provider_slug}
+ method = third_party_id
+ elif third_party_id == "google":
+ method = "social:google"
+ elif third_party_id == "github":
+ method = "social:github"
+ else:
+ method = f"social:{third_party_id}"
+
+ log.debug(
+ f"[AUTH-IDENTITY] third_party_id={third_party_id} method={method} email={email}"
+ )
+
+ # Extract domain from email
+ domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None
+
+ # Create or update user_identity
+ try:
+ from oss.src.services.db_manager import get_user_with_email
+
+ # Get internal user ID from database (not SuperTokens ID)
+ internal_user = await get_user_with_email(email)
+ if not internal_user:
+ raise Exception(f"User not found for email {email}")
+
+ internal_user_id = internal_user.id
+
+ # Check if identity already exists
+ existing = await identities_dao.get_by_method_subject(
+ method=method,
+ subject=third_party_user_id,
+ )
+
+ if not existing:
+ # Create new identity
+ await identities_dao.create(
+ UserIdentityCreate(
+ user_id=internal_user_id,
+ method=method,
+ subject=third_party_user_id,
+ domain=domain,
+ )
+ )
+ log.debug(
+ "[AUTH-IDENTITY] created",
+ {
+ "user_id": str(internal_user_id),
+ "method": method,
+ "subject": third_party_user_id,
+ },
+ )
+ else:
+ log.debug(
+ "[AUTH-IDENTITY] existing",
+ {
+ "user_id": str(internal_user_id),
+ "method": method,
+ "subject": third_party_user_id,
+ },
+ )
+ except Exception:
+ # Log error but don't block authentication
+ log.debug("[AUTH-IDENTITY] create failed", exc_info=True)
+
+ # Fetch all user identities for session payload
+ try:
+ from oss.src.services.db_manager import get_user_with_email
+
+ internal_user = await get_user_with_email(email)
+ if internal_user:
+ all_identities = await identities_dao.list_by_user(internal_user.id)
+ identities_array = [identity.method for identity in all_identities]
+ else:
+ identities_array = [method]
+ except Exception:
+ identities_array = [method] # Fallback to current method only
+
+ # Store identity context for session creation
+ # user_identities = all known methods for user
+ # session_identities = methods verified in this session (accumulated)
+ user_context["user_identities"] = identities_array
+ session_identities = _merge_session_identities(session, method)
+ user_context["session_identities"] = session_identities
+ log.debug(
+ "[AUTH-IDENTITY] session_identities merge",
+ {
+ "method": method,
+ "session_identities": session_identities,
+ "user_identities": identities_array,
+ },
+ )
+ log.debug(
+ "[AUTH-IDENTITY] session user_identities",
+ {
+ "user_id": str(internal_user.id) if internal_user else None,
+ "user_identities": identities_array,
+ },
+ )
+
+ # Enforce domain-based policies (auto-join, domains-only)
+ if internal_user:
+ try:
+ await auth_service.enforce_domain_policies(
+ email=email,
+ user_id=internal_user.id,
+ )
+ except Exception as e:
+ log.debug(f"Error enforcing domain policies: {e}")
+
+ return result
+
+ original_implementation.sign_in_up = sign_in_up
+
+ async def get_provider(
+ third_party_id: str,
+ client_type: Optional[str],
+ tenant_id: str,
+ user_context: Dict[str, Any],
+ ):
+ provider = await original_get_provider(
+ third_party_id=third_party_id,
+ client_type=client_type,
+ tenant_id=tenant_id,
+ user_context=user_context,
+ )
+ if provider is not None:
+ return provider
+
+ if not third_party_id.startswith("sso:"):
+ return None
+
+ provider_input = await get_dynamic_oidc_provider(third_party_id)
+ if provider_input is None:
+ return None
+
+ from supertokens_python.recipe.thirdparty.recipe_implementation import (
+ find_and_create_provider_instance,
+ )
+
+ return await find_and_create_provider_instance(
+ [provider_input],
+ third_party_id,
+ client_type,
+ user_context,
+ )
+
+ original_implementation.get_provider = get_provider
+ return original_implementation
+
+
+def override_thirdparty_apis(
+ original_implementation: ThirdPartyAPIInterface,
+) -> ThirdPartyAPIInterface:
+ """Override third-party API interface if needed."""
+ # For now, no API overrides needed
+ return original_implementation
+
+
+def override_session_functions(
+ original_implementation: SessionRecipeInterface,
+) -> SessionRecipeInterface:
+ """Override session functions to include identities in payload."""
+
+ original_create_new_session = original_implementation.create_new_session
+
+ async def create_new_session(
+ user_id: str,
+ recipe_user_id: RecipeUserId,
+ access_token_payload: Optional[Dict[str, Any]],
+ session_data_in_database: Optional[Dict[str, Any]],
+ disable_anti_csrf: Optional[bool],
+ tenant_id: str,
+ user_context: Dict[str, Any],
+ ):
+ """
+ Override create_new_session to inject user_identities array into access token payload.
+ """
+ # Get identity context from user_context (populated by auth overrides)
+ user_identities = user_context.get("user_identities", [])
+ session_identities = user_context.get("session_identities", user_identities)
+
+ # Merge with existing payload
+ if access_token_payload is None:
+ access_token_payload = {}
+
+ access_token_payload["user_identities"] = user_identities
+ access_token_payload["session_identities"] = session_identities
+
+ # Call original implementation
+ result = await original_create_new_session(
+ user_id=user_id,
+ recipe_user_id=recipe_user_id,
+ access_token_payload=access_token_payload,
+ session_data_in_database=session_data_in_database,
+ disable_anti_csrf=disable_anti_csrf,
+ tenant_id=tenant_id,
+ user_context=user_context,
+ )
+
+ return result
+
+ original_implementation.create_new_session = create_new_session
+ return original_implementation
+
+
+def override_passwordless_functions(
+ original_implementation: PasswordlessRecipeInterface,
+) -> PasswordlessRecipeInterface:
+ """Override passwordless recipe functions to track email:otp identity."""
+
+ original_consume_code = original_implementation.consume_code
+
+ async def consume_code(
+ pre_auth_session_id: str,
+ user_input_code: Optional[str],
+ device_id: Optional[str],
+ link_code: Optional[str],
+ session: Optional[Any],
+ should_try_linking_with_session_user: Optional[bool],
+ tenant_id: str,
+ user_context: Dict[str, Any],
+ ) -> Union[ConsumeCodeOkResult, Any]:
+ """
+ Override consume_code to:
+ 1. Create user_identity record for email:otp after successful login
+ 2. Populate session with user_identities array
+ """
+ # Call original implementation
+ result = await original_consume_code(
+ pre_auth_session_id=pre_auth_session_id,
+ user_input_code=user_input_code,
+ device_id=device_id,
+ link_code=link_code,
+ session=session,
+ should_try_linking_with_session_user=should_try_linking_with_session_user,
+ tenant_id=tenant_id,
+ user_context=user_context,
+ )
+
+ # Only process if successful
+ if not isinstance(result, ConsumeCodeOkResult):
+ return result
+
+ # Determine method and subject
+ method = "email:otp"
+ email = result.user.emails[0] if result.user.emails else None
+
+ if not email:
+ # Can't create identity without email
+ user_context["user_identities"] = [method]
+ session_identities = _merge_session_identities(session, method)
+ user_context["session_identities"] = session_identities
+ log.debug(
+ "[AUTH-IDENTITY] session_identities merge",
+ {
+ "method": method,
+ "session_identities": session_identities,
+ "user_identities": [method],
+ },
+ )
+ return result
+
+ # Extract domain from email
+ domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None
+
+ # Create or update user_identity
+ try:
+ from oss.src.services.db_manager import get_user_with_email
+
+ # Get internal user ID from database (not SuperTokens ID)
+ internal_user = await get_user_with_email(email)
+ if not internal_user:
+ raise Exception(f"User not found for email {email}")
+
+ internal_user_id = internal_user.id
+
+ # Check if identity already exists
+ existing = await identities_dao.get_by_method_subject(
+ method=method,
+ subject=email, # For email:otp, subject is the email
+ )
+
+ if not existing:
+ # Create new identity
+ await identities_dao.create(
+ UserIdentityCreate(
+ user_id=internal_user_id,
+ method=method,
+ subject=email,
+ domain=domain,
+ # created_by_id is optional, leaving it as None
+ )
+ )
+ except Exception:
+ # Log error but don't block authentication
+            log.debug("[AUTH-IDENTITY] create failed", exc_info=True)
+
+ # Fetch all user identities for session payload
+ try:
+ from oss.src.services.db_manager import get_user_with_email
+
+ internal_user = await get_user_with_email(email)
+ if internal_user:
+ all_identities = await identities_dao.list_by_user(internal_user.id)
+ identities_array = [identity.method for identity in all_identities]
+ else:
+ identities_array = [method]
+ except Exception:
+ identities_array = [method] # Fallback to current method only
+
+ # Store identity context for session creation
+ # user_identities = all known methods for user
+ # session_identities = methods verified in this session (accumulated)
+ user_context["user_identities"] = identities_array
+ session_identities = _merge_session_identities(session, method)
+ user_context["session_identities"] = session_identities
+ log.debug(
+ "[AUTH-IDENTITY] session_identities merge",
+ {
+ "method": method,
+ "session_identities": session_identities,
+ "user_identities": identities_array,
+ },
+ )
+
+ # Enforce domain-based policies (auto-join, domains-only)
+ if internal_user:
+ try:
+ await auth_service.enforce_domain_policies(
+ email=email,
+ user_id=internal_user.id,
+ )
+ except Exception as e:
+ log.debug(f"Error enforcing domain policies: {e}")
+
+ return result
+
+ original_implementation.consume_code = consume_code
+ return original_implementation
+
+
+def override_emailpassword_functions(
+ original_implementation: EmailPasswordRecipeInterface,
+) -> EmailPasswordRecipeInterface:
+ """Override email/password recipe functions to track email:password identity."""
+
+ original_sign_in = original_implementation.sign_in
+ original_sign_up = original_implementation.sign_up
+
+ async def sign_in(
+ email: str,
+ password: str,
+ tenant_id: str,
+ session: Optional[Any],
+ should_try_linking_with_session_user: Optional[bool],
+ user_context: Dict[str, Any],
+ ) -> Union[EmailPasswordSignInOkResult, Any]:
+ """
+ Override sign_in to:
+ 1. Create user_identity record for email:password after successful login
+ 2. Populate session with user_identities array
+ """
+
+ # Call original implementation
+ result = await original_sign_in(
+ email=email,
+ password=password,
+ tenant_id=tenant_id,
+ session=session,
+ should_try_linking_with_session_user=should_try_linking_with_session_user,
+ user_context=user_context,
+ )
+
+ # Only process if successful
+ if not isinstance(result, EmailPasswordSignInOkResult):
+ return result
+
+ # Method for email/password
+ method = "email:password"
+
+ # Extract domain from email
+ domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None
+
+ # Create or update user_identity
+ try:
+ from oss.src.services.db_manager import get_user_with_email
+
+ # Get internal user ID from database (not SuperTokens ID)
+ internal_user = await get_user_with_email(email)
+ if not internal_user:
+ raise Exception(f"User not found for email {email}")
+
+ internal_user_id = internal_user.id
+
+ # Check if identity already exists
+ existing = await identities_dao.get_by_method_subject(
+ method=method,
+ subject=email, # For email:password, subject is the email
+ )
+
+ if not existing:
+ # Create new identity
+ await identities_dao.create(
+ UserIdentityCreate(
+ user_id=internal_user_id,
+ method=method,
+ subject=email,
+ domain=domain,
+ # created_by_id is optional, leaving it as None
+ )
+ )
+ except Exception:
+ # Log error but don't block authentication
+            log.debug("[AUTH-IDENTITY] create failed", exc_info=True)
+
+ # Fetch all user identities for session payload
+ try:
+ from oss.src.services.db_manager import get_user_with_email
+
+ internal_user = await get_user_with_email(email)
+ if internal_user:
+ all_identities = await identities_dao.list_by_user(internal_user.id)
+ identities_array = [identity.method for identity in all_identities]
+ else:
+ identities_array = [method]
+ except Exception:
+ identities_array = [method] # Fallback to current method only
+
+ # Store identity context for session creation
+ # user_identities = all known methods for user
+ # session_identities = methods verified in this session (accumulated)
+ user_context["user_identities"] = identities_array
+ session_identities = _merge_session_identities(session, method)
+ user_context["session_identities"] = session_identities
+ log.debug(
+ "[AUTH-IDENTITY] session_identities merge",
+ {
+ "method": method,
+ "session_identities": session_identities,
+ "user_identities": identities_array,
+ },
+ )
+
+ # Enforce domain-based policies (auto-join, domains-only)
+ if internal_user:
+ try:
+ await auth_service.enforce_domain_policies(
+ email=email,
+ user_id=internal_user.id,
+ )
+ except Exception as e:
+ log.debug(f"Error enforcing domain policies: {e}")
+
+ return result
+
+ async def sign_up(
+ email: str,
+ password: str,
+ tenant_id: str,
+ session: Optional[Any],
+ should_try_linking_with_session_user: Optional[bool],
+ user_context: Dict[str, Any],
+ ) -> Union[EmailPasswordSignUpOkResult, Any]:
+ """
+ Override sign_up to:
+ 1. Create user_identity record for email:password after successful signup
+ 2. Populate session with user_identities array
+ """
+
+ # Call original implementation
+ result = await original_sign_up(
+ email=email,
+ password=password,
+ tenant_id=tenant_id,
+ session=session,
+ should_try_linking_with_session_user=should_try_linking_with_session_user,
+ user_context=user_context,
+ )
+
+ # Only process if successful
+ if not isinstance(result, EmailPasswordSignUpOkResult):
+ return result
+
+ # Method for email/password
+ method = "email:password"
+
+ # Extract domain from email
+ domain = email.split("@")[1] if "@" in email and email.count("@") == 1 else None
+
+ # Create or update user_identity
+ try:
+ from oss.src.services.db_manager import get_user_with_email
+
+ # Get internal user ID from database (not SuperTokens ID)
+ internal_user = await get_user_with_email(email)
+ if not internal_user:
+ raise Exception(f"User not found for email {email}")
+
+ internal_user_id = internal_user.id
+
+ # Check if identity already exists
+ existing = await identities_dao.get_by_method_subject(
+ method=method,
+ subject=email, # For email:password, subject is the email
+ )
+
+ if not existing:
+ # Create new identity
+ await identities_dao.create(
+ UserIdentityCreate(
+ user_id=internal_user_id,
+ method=method,
+ subject=email,
+ domain=domain,
+ # created_by_id is optional, leaving it as None
+ )
+ )
+ except Exception:
+ # Log error but don't block authentication
+            log.debug("[AUTH-IDENTITY] create failed", exc_info=True)
+
+ # Fetch all user identities for session payload
+ try:
+ from oss.src.services.db_manager import get_user_with_email
+
+ internal_user = await get_user_with_email(email)
+ if internal_user:
+ all_identities = await identities_dao.list_by_user(internal_user.id)
+ identities_array = [identity.method for identity in all_identities]
+ else:
+ identities_array = [method]
+ except Exception:
+ identities_array = [method] # Fallback to current method only
+
+ # Store identity context for session creation
+ # user_identities = all known methods for user
+ # session_identities = methods verified in this session (accumulated)
+ user_context["user_identities"] = identities_array
+ session_identities = _merge_session_identities(session, method)
+ user_context["session_identities"] = session_identities
+ log.debug(
+ "[AUTH-IDENTITY] session_identities merge",
+ {
+ "method": method,
+ "session_identities": session_identities,
+ "user_identities": identities_array,
+ },
+ )
+
+ # Enforce domain-based policies (auto-join, domains-only)
+ if internal_user:
+ try:
+ await auth_service.enforce_domain_policies(
+ email=email,
+ user_id=internal_user.id,
+ )
+ except Exception as e:
+ log.debug(f"Error enforcing domain policies: {e}")
+
+ return result
+
+ original_implementation.sign_in = sign_in
+ original_implementation.sign_up = sign_up
+ return original_implementation
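
For reference, the identity bookkeeping above boils down to two small rules. The sketch below is illustrative only (plain Python, no SuperTokens dependency; the helper names are not part of the module) and mirrors how `sign_in_up` derives a `method` string from `third_party_id` and how `_merge_session_identities` accumulates methods across a session.

```python
# Illustrative sketch only - mirrors sign_in_up and _merge_session_identities above.
from typing import List, Optional


def method_from_third_party_id(third_party_id: str) -> str:
    # "sso:{organization_slug}:{provider_slug}" is kept verbatim,
    # anything else becomes "social:{provider}".
    if third_party_id.startswith("sso:"):
        return third_party_id
    return f"social:{third_party_id}"


def merge_session_identities(
    existing: Optional[List[str]], method: Optional[str]
) -> List[str]:
    # Same accumulation rule: keep whatever the current access token already
    # lists and append the new method once.
    identities = list(existing or [])
    if method and method not in identities:
        identities.append(method)
    return identities


assert method_from_third_party_id("google") == "social:google"
assert method_from_third_party_id("sso:acme:okta") == "sso:acme:okta"
assert merge_session_identities(["email:otp"], "social:github") == [
    "email:otp",
    "social:github",
]
```
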
diff --git a/api/oss/src/core/auth/types.py b/api/oss/src/core/auth/types.py
new file mode 100644
index 0000000000..68ce3f6381
--- /dev/null
+++ b/api/oss/src/core/auth/types.py
@@ -0,0 +1,82 @@
+"""Core authentication types (OSS)."""
+
+from enum import Enum
+
+
+class MethodKind(str, Enum):
+ """
+ Valid authentication method patterns.
+
+ Supports exact matches and wildcards:
+ - email:otp - Email OTP authentication
+ - email:password - Email/password authentication (future)
+ - email:* - Any email-based authentication
+ - social:google - Google OAuth
+ - social:github - GitHub OAuth
+ - social:* - Any social provider
+ - sso:{organization_slug}:{provider_slug} - Specific SSO provider for organization
+ - sso:{organization_slug}:* - Any SSO provider for organization
+ - sso:* - Any SSO provider (any organization)
+ """
+
+ EMAIL_OTP = "email:otp"
+ EMAIL_PASSWORD = "email:password"
+ EMAIL_WILDCARD = "email:*"
+ SOCIAL_GOOGLE = "social:google"
+ SOCIAL_GITHUB = "social:github"
+ SOCIAL_WILDCARD = "social:*"
+ SSO_WILDCARD = "sso:*"
+
+ @classmethod
+ def is_valid_pattern(cls, pattern: str) -> bool:
+ """
+ Check if a pattern is a valid method kind.
+
+ Allows:
+ - Exact enum values
+ - SSO patterns: sso:{organization_slug}:{provider_slug} or sso:{organization_slug}:*
+ """
+ # Check if it's a known enum value
+ if pattern in cls._value2member_map_:
+ return True
+
+ # Check SSO patterns
+ if pattern.startswith("sso:"):
+ parts = pattern.split(":")
+ if len(parts) == 3:
+ organization_slug, provider = parts[1], parts[2]
+                # Both the organization slug and the provider segment ("*" or a slug) must be non-empty
+                if organization_slug and provider:
+ return True
+
+ return False
+
+ @classmethod
+ def matches_pattern(cls, identity: str, allowed_pattern: str) -> bool:
+ """
+ Check if an identity matches an allowed pattern.
+
+ Args:
+ identity: Authentication method (e.g., "email:otp", "social:google")
+ allowed_pattern: Pattern to match against (supports wildcards)
+
+ Returns:
+ True if identity matches the pattern
+
+ Examples:
+ matches_pattern("email:otp", "email:*") → True
+ matches_pattern("social:google", "social:*") → True
+ matches_pattern("sso:acme:okta", "sso:acme:*") → True
+ matches_pattern("email:otp", "sso:*") → False
+ """
+ # Exact match
+ if identity == allowed_pattern:
+ return True
+
+ # Wildcard match
+ if allowed_pattern.endswith(":*"):
+ prefix = allowed_pattern[:-2] # Remove ":*"
+ if identity.startswith(f"{prefix}:"):
+ return True
+
+ return False
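
A quick usage sketch of the pattern rules above, assuming the module is importable from the API package root as `oss.src.core.auth.types`; the expected results follow the docstring examples.

```python
# Usage sketch for MethodKind; expected results follow the docstring examples.
from oss.src.core.auth.types import MethodKind

# Exact enum values and SSO patterns are accepted; arbitrary strings are not.
assert MethodKind.is_valid_pattern("email:otp")
assert MethodKind.is_valid_pattern("sso:acme:okta")
assert MethodKind.is_valid_pattern("sso:acme:*")
assert not MethodKind.is_valid_pattern("magic:link")

# Wildcards match any identity sharing the prefix before ":*".
assert MethodKind.matches_pattern("email:otp", "email:*")
assert MethodKind.matches_pattern("sso:acme:okta", "sso:acme:*")
assert MethodKind.matches_pattern("sso:acme:okta", "sso:*")
assert not MethodKind.matches_pattern("email:otp", "sso:*")
```
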
diff --git a/api/oss/src/core/evaluations/tasks/batch.py b/api/oss/src/core/evaluations/tasks/batch.py
index 99bff2e6fd..1556bce18e 100644
--- a/api/oss/src/core/evaluations/tasks/batch.py
+++ b/api/oss/src/core/evaluations/tasks/batch.py
@@ -157,8 +157,8 @@
)
# Redis client and TracingWorker for publishing spans to Redis Streams
-if env.REDIS_URI_DURABLE:
- redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False)
+if env.redis.uri_durable:
+ redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False)
tracing_worker = TracingWorker(
service=tracing_service,
redis_client=redis_client,
diff --git a/api/oss/src/core/evaluations/tasks/legacy.py b/api/oss/src/core/evaluations/tasks/legacy.py
index 9d7e86ac25..ca1e937623 100644
--- a/api/oss/src/core/evaluations/tasks/legacy.py
+++ b/api/oss/src/core/evaluations/tasks/legacy.py
@@ -466,7 +466,7 @@ async def setup_evaluation(
references={
"testset": testset_references["artifact"],
# "testset_variant":
- # "testset_revision":
+ "testset_revision": testset_references["revision"],
},
)
if testset and testset.id
diff --git a/api/oss/src/core/evaluations/tasks/live.py b/api/oss/src/core/evaluations/tasks/live.py
index b22e963824..9006f393ad 100644
--- a/api/oss/src/core/evaluations/tasks/live.py
+++ b/api/oss/src/core/evaluations/tasks/live.py
@@ -139,8 +139,8 @@
)
# Redis client and TracingWorker for publishing spans to Redis Streams
-if env.REDIS_URI_DURABLE:
- redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False)
+if env.redis.uri_durable:
+ redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False)
tracing_worker = TracingWorker(
service=tracing_service,
redis_client=redis_client,
diff --git a/api/oss/src/core/evaluators/service.py b/api/oss/src/core/evaluators/service.py
index b0a76728f9..c8ef569393 100644
--- a/api/oss/src/core/evaluators/service.py
+++ b/api/oss/src/core/evaluators/service.py
@@ -1,13 +1,9 @@
from typing import Optional, List
from uuid import UUID, uuid4
-from json import loads
from oss.src.utils.helpers import get_slug_from_name_and_id
from oss.src.services.db_manager import fetch_evaluator_config
from oss.src.core.workflows.dtos import (
- WorkflowFlags,
- WorkflowQueryFlags,
- #
WorkflowCreate,
WorkflowEdit,
WorkflowQuery,
@@ -17,8 +13,6 @@
WorkflowVariantEdit,
WorkflowVariantQuery,
#
- WorkflowRevisionData,
- #
WorkflowRevisionCreate,
WorkflowRevisionEdit,
WorkflowRevisionCommit,
@@ -35,11 +29,7 @@
SimpleEvaluatorEdit,
SimpleEvaluatorQuery,
SimpleEvaluatorFlags,
- SimpleEvaluatorQueryFlags,
- #
EvaluatorFlags,
- EvaluatorQueryFlags,
- #
Evaluator,
EvaluatorQuery,
EvaluatorRevisionsLog,
@@ -1435,11 +1425,33 @@ def _transfer_evaluator_revision_data(
else None
)
headers = None
+ # TODO: This function reconstructs output schemas from old evaluator settings.
+ # When fully migrating to the new workflow-based evaluator system, the output
+ # schema should be stored directly in the evaluator revision (workflow revision)
+ # at configuration time, rather than being inferred from settings here.
+ # For evaluators with dynamic outputs (auto_ai_critique, json_multi_field_match),
+ # the frontend/API should build and save the complete output schema when the
+ # user configures the evaluator.
outputs_schema = None
if str(old_evaluator.evaluator_key) == "auto_ai_critique":
json_schema = old_evaluator.settings_values.get("json_schema", None)
if json_schema and isinstance(json_schema, dict):
outputs_schema = json_schema.get("schema", None)
+ # Handle json_multi_field_match with dynamic field-based properties
+ if str(old_evaluator.evaluator_key) == "json_multi_field_match":
+ # Build dynamic properties based on configured fields
+ fields = old_evaluator.settings_values.get("fields", [])
+ properties = {"aggregate_score": {"type": "number"}}
+ for field in fields:
+ # Each field becomes a numeric score (0 or 1)
+ properties[field] = {"type": "number"}
+ outputs_schema = {
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "type": "object",
+ "properties": properties,
+ "required": ["aggregate_score"],
+ "additionalProperties": False,
+ }
if not outputs_schema:
properties = (
{"score": {"type": "number"}, "success": {"type": "boolean"}}
diff --git a/api/oss/src/core/git/dtos.py b/api/oss/src/core/git/dtos.py
index ff49f5ba89..9ebd98863c 100644
--- a/api/oss/src/core/git/dtos.py
+++ b/api/oss/src/core/git/dtos.py
@@ -90,6 +90,7 @@ class RevisionCommit(Slug, Header, Metadata):
artifact_id: Optional[UUID] = None
variant_id: Optional[UUID] = None
+ revision_id: Optional[UUID] = None
class RevisionsLog(BaseModel):
diff --git a/api/oss/src/core/organizations/__init__.py b/api/oss/src/core/organizations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/oss/src/core/organizations/types.py b/api/oss/src/core/organizations/types.py
new file mode 100644
index 0000000000..a7b1683051
--- /dev/null
+++ b/api/oss/src/core/organizations/types.py
@@ -0,0 +1,57 @@
+"""Core authentication method types (OSS)."""
+
+from enum import Enum
+
+
+# ============================================================================
+# AUTHENTICATION METHOD KINDS
+# ============================================================================
+
+
+class MethodKind(str, Enum):
+ """
+ Valid authentication method patterns for organization policies.
+
+ Supports exact matches and wildcards:
+ - email:otp - Email OTP authentication
+ - email:password - Email/password authentication (future)
+ - email:* - Any email-based authentication
+ - social:google - Google OAuth
+ - social:github - GitHub OAuth
+ - social:* - Any social provider
+ - sso:{organization_slug}:{provider_slug} - Specific SSO provider for organization
+ - sso:{organization_slug}:* - Any SSO provider for organization
+ - sso:* - Any SSO provider (any organization)
+ """
+
+ EMAIL_OTP = "email:otp"
+ EMAIL_PASSWORD = "email:password"
+ EMAIL_WILDCARD = "email:*"
+ SOCIAL_GOOGLE = "social:google"
+ SOCIAL_GITHUB = "social:github"
+ SOCIAL_WILDCARD = "social:*"
+ SSO_WILDCARD = "sso:*"
+
+ @classmethod
+ def is_valid_pattern(cls, pattern: str) -> bool:
+ """
+ Check if a pattern is a valid method kind.
+
+ Allows:
+ - Exact enum values
+ - SSO patterns: sso:{organization_slug}:{provider_slug} or sso:{organization_slug}:*
+ """
+ # Check if it's a known enum value
+ if pattern in cls._value2member_map_:
+ return True
+
+ # Check SSO patterns
+ if pattern.startswith("sso:"):
+ parts = pattern.split(":")
+ if len(parts) == 3:
+ organization_slug, provider = parts[1], parts[2]
+                # Both the organization slug and the provider segment ("*" or a slug) must be non-empty
+                if organization_slug and provider:
+ return True
+
+ return False
diff --git a/api/oss/src/core/secrets/dtos.py b/api/oss/src/core/secrets/dtos.py
index 7286701488..86825cb1fd 100644
--- a/api/oss/src/core/secrets/dtos.py
+++ b/api/oss/src/core/secrets/dtos.py
@@ -1,6 +1,6 @@
from typing import Optional, Union, List, Dict, Any
-from pydantic import BaseModel, field_validator, model_validator
+from pydantic import BaseModel, Field, field_validator, model_validator
from oss.src.core.secrets.enums import (
SecretKind,
@@ -45,14 +45,31 @@ class CustomProviderDTO(BaseModel):
model_keys: Optional[List[str]] = None
+class SSOProviderSettingsDTO(BaseModel):
+ client_id: str
+ client_secret: str
+ issuer_url: str
+ scopes: List[str]
+ extra: Dict[str, Any] = Field(default_factory=dict)
+
+
+class SSOProviderDTO(BaseModel):
+ provider: SSOProviderSettingsDTO
+
+
class SecretDTO(BaseModel):
kind: SecretKind
- data: Union[StandardProviderDTO, CustomProviderDTO]
+ data: Union[StandardProviderDTO, CustomProviderDTO, SSOProviderDTO]
@model_validator(mode="before")
def validate_secret_data_based_on_kind(cls, values: Dict[str, Any]):
kind = values.get("kind")
+ if isinstance(kind, SecretKind):
+ kind = kind.value
data = values.get("data", {})
+ if isinstance(data, BaseModel):
+ data = data.model_dump()
+ values["data"] = data
if kind == SecretKind.PROVIDER_KEY.value:
if not isinstance(data, dict):
@@ -82,6 +99,21 @@ def validate_secret_data_based_on_kind(cls, values: Dict[str, Any]):
raise ValueError(
"The provided kind in data is not a valid CustomProviderKind enum"
)
+ elif kind == SecretKind.SSO_PROVIDER.value:
+ if not isinstance(data, dict):
+ raise ValueError(
+ "The provided request secret dto is not a valid type for SSOProviderDTO"
+ )
+ provider = data.get("provider")
+ if not isinstance(provider, dict):
+ raise ValueError(
+                "The provided request secret dto is missing a valid provider object for SSOProviderDTO"
+ )
+ required_fields = {"client_id", "client_secret", "issuer_url", "scopes"}
+ if not required_fields.issubset(provider.keys()):
+ raise ValueError(
+ "The provided request secret dto is missing required fields for SSOProviderSettingsDTO"
+ )
else:
raise ValueError("The provided kind is not a valid SecretKind enum")
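
Under the validator above, an SSO provider secret is shaped roughly as follows. This is a hedged sketch of the payload only; the values are placeholders and how the DTO is wired into a request lives elsewhere in the codebase.

```python
# Sketch of a payload accepted by validate_secret_data_based_on_kind
# for kind == "sso_provider"; all field values are placeholders.
from oss.src.core.secrets.dtos import SecretDTO
from oss.src.core.secrets.enums import SecretKind

secret = SecretDTO(
    kind=SecretKind.SSO_PROVIDER,
    data={
        "provider": {
            "client_id": "example-client-id",
            "client_secret": "example-client-secret",
            "issuer_url": "https://idp.example.com",
            "scopes": ["openid", "profile", "email"],
        }
    },
)
# Pydantic resolves the data union; for sso_provider it should parse into
# SSOProviderDTO (provider.client_id / provider.issuer_url / provider.scopes).
```
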
diff --git a/api/oss/src/core/secrets/enums.py b/api/oss/src/core/secrets/enums.py
index ee7b24751a..695260ffa2 100644
--- a/api/oss/src/core/secrets/enums.py
+++ b/api/oss/src/core/secrets/enums.py
@@ -4,6 +4,7 @@
class SecretKind(str, Enum):
PROVIDER_KEY = "provider_key"
CUSTOM_PROVIDER = "custom_provider"
+ SSO_PROVIDER = "sso_provider"
class StandardProviderKind(str, Enum):
diff --git a/api/oss/src/core/secrets/interfaces.py b/api/oss/src/core/secrets/interfaces.py
index 6bc27078c9..6c2eda45a7 100644
--- a/api/oss/src/core/secrets/interfaces.py
+++ b/api/oss/src/core/secrets/interfaces.py
@@ -15,32 +15,38 @@ def __init__(self):
async def create(
self,
*,
- project_id: UUID,
+ project_id: Optional[UUID] = None,
+ organization_id: Optional[UUID] = None,
create_secret_dto: CreateSecretDTO,
) -> SecretResponseDTO:
raise NotImplementedError
async def get(
self,
- project_id: UUID,
secret_id: UUID,
+ project_id: Optional[UUID] = None,
+ organization_id: Optional[UUID] = None,
) -> Optional[SecretResponseDTO]:
raise NotImplementedError
- async def list(self, project_id: UUID) -> List[SecretResponseDTO]:
+ async def list(
+ self, project_id: Optional[UUID] = None, organization_id: Optional[UUID] = None
+ ) -> List[SecretResponseDTO]:
raise NotImplementedError
async def update(
self,
- project_id: UUID,
secret_id: UUID,
update_secret_dto: UpdateSecretDTO,
+ project_id: Optional[UUID] = None,
+ organization_id: Optional[UUID] = None,
) -> Optional[SecretResponseDTO]:
raise NotImplementedError
async def delete(
self,
- project_id: UUID,
secret_id: UUID,
+ project_id: Optional[UUID] = None,
+ organization_id: Optional[UUID] = None,
) -> None:
raise NotImplementedError
diff --git a/api/oss/src/core/secrets/services.py b/api/oss/src/core/secrets/services.py
index 540adfb3aa..f986ed911a 100644
--- a/api/oss/src/core/secrets/services.py
+++ b/api/oss/src/core/secrets/services.py
@@ -14,7 +14,8 @@ def __init__(self, secrets_dao: SecretsDAOInterface):
async def create_secret(
self,
*,
- project_id: UUID,
+ project_id: UUID | None = None,
+ organization_id: UUID | None = None,
create_secret_dto: CreateSecretDTO,
):
with set_data_encryption_key(
@@ -22,57 +23,71 @@ async def create_secret(
):
secret_dto = await self.secrets_dao.create(
project_id=project_id,
+ organization_id=organization_id,
create_secret_dto=create_secret_dto,
)
return secret_dto
async def get_secret(
self,
- project_id: UUID,
secret_id: UUID,
+ project_id: UUID | None = None,
+ organization_id: UUID | None = None,
):
with set_data_encryption_key(
data_encryption_key=self._data_encryption_key,
):
secret_dto = await self.secrets_dao.get(
- project_id=project_id,
secret_id=secret_id,
+ project_id=project_id,
+ organization_id=organization_id,
)
return secret_dto
- async def list_secrets(self, project_id: UUID):
+ async def list_secrets(
+ self,
+ project_id: UUID | None = None,
+ organization_id: UUID | None = None,
+ ):
with set_data_encryption_key(
data_encryption_key=self._data_encryption_key,
):
- secrets_dtos = await self.secrets_dao.list(project_id=project_id)
+ secrets_dtos = await self.secrets_dao.list(
+ project_id=project_id,
+ organization_id=organization_id,
+ )
return secrets_dtos
async def update_secret(
self,
- project_id: UUID,
secret_id: UUID,
update_secret_dto: UpdateSecretDTO,
+ project_id: UUID | None = None,
+ organization_id: UUID | None = None,
):
with set_data_encryption_key(
data_encryption_key=self._data_encryption_key,
):
secret_dto = await self.secrets_dao.update(
- project_id=project_id,
secret_id=secret_id,
update_secret_dto=update_secret_dto,
+ project_id=project_id,
+ organization_id=organization_id,
)
return secret_dto
async def delete_secret(
self,
- project_id: UUID,
secret_id: UUID,
+ project_id: UUID | None = None,
+ organization_id: UUID | None = None,
) -> None:
with set_data_encryption_key(
data_encryption_key=self._data_encryption_key,
):
await self.secrets_dao.delete(
- project_id=project_id,
secret_id=secret_id,
+ project_id=project_id,
+ organization_id=organization_id,
)
return
diff --git a/api/oss/src/core/secrets/utils.py b/api/oss/src/core/secrets/utils.py
index 9edc02035f..9d7e1e838b 100644
--- a/api/oss/src/core/secrets/utils.py
+++ b/api/oss/src/core/secrets/utils.py
@@ -33,12 +33,12 @@ async def get_user_llm_providers_secrets(project_id: str) -> Dict[str, Any]:
if not secrets:
return {}
- # 2: exclude custom_provider secrets
+ # 2: include only standard provider keys
# value of secrets: [{data: {kind: ..., provider: {key: ...}}}]
secrets = [
secret.model_dump(include={"data"})
for secret in secrets
- if secret.kind != "custom_provider"
+ if secret.kind == "provider_key"
]
# 3: convert secrets to readable format
diff --git a/api/oss/src/core/testcases/service.py b/api/oss/src/core/testcases/service.py
index 3afd298dd4..1b1db82f20 100644
--- a/api/oss/src/core/testcases/service.py
+++ b/api/oss/src/core/testcases/service.py
@@ -58,6 +58,31 @@ async def fetch_testcases(
project_id: UUID,
#
testcase_ids: Optional[List[UUID]] = None,
+ ) -> List[Testcase]:
+ blobs = await self.testcases_dao.fetch_blobs(
+ project_id=project_id,
+ #
+ blob_ids=testcase_ids or [],
+ )
+
+ if not blobs:
+ return []
+
+ _testcases = [
+ Testcase(
+ **blob.model_dump(mode="json"),
+ )
+ for blob in blobs
+ ]
+
+ return _testcases
+
+ async def query_testcases(
+ self,
+ *,
+ project_id: UUID,
+ #
+ testcase_ids: Optional[List[UUID]] = None,
#
testset_id: Optional[UUID] = None,
#
diff --git a/api/oss/src/core/testsets/dtos.py b/api/oss/src/core/testsets/dtos.py
index a4f88381e1..5c372dd81b 100644
--- a/api/oss/src/core/testsets/dtos.py
+++ b/api/oss/src/core/testsets/dtos.py
@@ -1,4 +1,4 @@
-from typing import Optional, List, Dict
+from typing import Optional, List, Tuple
from uuid import UUID
from pydantic import BaseModel, Field
@@ -171,74 +171,52 @@ class TestsetRevisionQuery(RevisionQuery):
flags: Optional[TestsetFlags] = None
-class TestsetRevisionCommit(
- RevisionCommit,
- TestsetIdAlias,
- TestsetVariantIdAlias,
-):
- flags: Optional[TestsetFlags] = None
-
- data: Optional[TestsetRevisionData] = None
-
- def model_post_init(self, __context) -> None:
- sync_alias("testset_id", "artifact_id", self)
- sync_alias("testset_variant_id", "variant_id", self)
-
+class TestsetRevisionDeltaColumns(BaseModel):
+ """Column-level operations applied to ALL testcases in the revision."""
-class TestsetColumnRename(BaseModel):
- """Column rename operation"""
-
- old_name: str
- new_name: str
+ # Add columns: array of column names to add
+ add: Optional[List[str]] = None
+ # Remove columns: array of column names to remove
+ remove: Optional[List[str]] = None
+    # Replace columns: array of (old column name, new column name) pairs
+ replace: Optional[List[Tuple[str, str]]] = None
-class TestsetColumnOperations(BaseModel):
- """Column-level operations applied to ALL testcases in the revision"""
+class TestsetRevisionDeltaRows(BaseModel):
+ """Row-level operations applied to testcases in the revision."""
- # Rename columns: array of {old_name, new_name}
- rename: Optional[List[TestsetColumnRename]] = None
- # Add columns: array of column names to add (initialized to empty string)
- add: Optional[List[str]] = None
- # Delete columns: array of column names to remove
- delete: Optional[List[str]] = None
+ # Add rows: array of testcases to add
+ add: Optional[List[Testcase]] = None
+ # Remove rows: array of testcase IDs to remove
+ remove: Optional[List[UUID]] = None
+ # Replace rows: array of testcases to replace
+ replace: Optional[List[Testcase]] = None
-class TestsetRevisionPatchOperations(BaseModel):
- """Operations to apply to a testset revision"""
+class TestsetRevisionDelta(BaseModel):
+ """Operations to apply to a testset revision."""
- # Testcases to update (existing testcases with modified data)
- update: Optional[List[Testcase]] = None
- # New testcases to create
- create: Optional[List[Testcase]] = None
- # Testcase IDs to delete
- delete: Optional[List[UUID]] = None
- # Column-level operations (applied to ALL testcases)
- columns: Optional[TestsetColumnOperations] = None
+ # Row-level operations
+ rows: Optional[TestsetRevisionDeltaRows] = None
+ # Column-level operations
+ columns: Optional[TestsetRevisionDeltaColumns] = None
-class TestsetRevisionPatch(
+class TestsetRevisionCommit(
+ RevisionCommit,
TestsetIdAlias,
TestsetVariantIdAlias,
+ TestsetRevisionIdAlias,
):
- """Patch request for updating a testset revision with delta changes"""
-
flags: Optional[TestsetFlags] = None
- # Base revision to apply patch to (defaults to latest if not specified)
- base_revision_id: Optional[UUID] = None
-
- # Commit message
- message: Optional[str] = None
-
- # Revision description (for the new revision)
- description: Optional[str] = None
-
- # Patch operations
- operations: Optional[TestsetRevisionPatchOperations] = None
+ data: Optional[TestsetRevisionData] = None
+ delta: Optional[TestsetRevisionDelta] = None
def model_post_init(self, __context) -> None:
sync_alias("testset_id", "artifact_id", self)
sync_alias("testset_variant_id", "variant_id", self)
+ sync_alias("testset_revision_id", "revision_id", self)
class SimpleTestset(Identifier, Slug, Lifecycle, Header, Metadata):
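
With the old patch endpoint folded into the commit model, a delta-based commit body can be expressed roughly like this. This is a sketch of the shape only: the keys follow `TestsetRevisionCommit` / `TestsetRevisionDelta` above and the IDs are placeholders.

```python
from uuid import uuid4

# Sketch of a delta-based revision commit payload (illustrative values only).
delta_commit = {
    "slug": uuid4().hex[-12:],
    "testset_id": str(uuid4()),
    "testset_revision_id": str(uuid4()),  # base revision the delta is applied to
    "message": "Rename a column and append a row",
    "delta": {
        "columns": {
            "replace": [["question", "prompt"]],  # [old column name, new column name]
            "add": ["expected_answer"],
        },
        "rows": {
            "add": [{"data": {"prompt": "2 + 2 = ?", "expected_answer": "4"}}],
        },
    },
}
```
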
diff --git a/api/oss/src/core/testsets/service.py b/api/oss/src/core/testsets/service.py
index 5c3ba2274f..847ae594c5 100644
--- a/api/oss/src/core/testsets/service.py
+++ b/api/oss/src/core/testsets/service.py
@@ -1,4 +1,4 @@
-from typing import Dict, Optional, List
+from typing import Dict, Optional, List, Any
from uuid import UUID, uuid4
from oss.src.utils.logging import get_module_logger
@@ -50,7 +50,6 @@
TestsetRevisionEdit,
TestsetRevisionQuery,
TestsetRevisionCommit,
- TestsetRevisionPatch,
)
from oss.src.apis.fastapi.testsets.utils import (
csv_file_to_json_array,
@@ -779,6 +778,14 @@ async def commit_testset_revision(
#
include_testcases: Optional[bool] = None,
) -> Optional[TestsetRevision]:
+ if testset_revision_commit.delta and not testset_revision_commit.data:
+ return await self._commit_testset_revision_delta(
+ project_id=project_id,
+ user_id=user_id,
+ testset_revision_commit=testset_revision_commit,
+ include_testcases=include_testcases,
+ )
+
if testset_revision_commit.data and testset_revision_commit.data.testcases:
if testset_revision_commit.data.testcases:
for testcase in testset_revision_commit.data.testcases:
@@ -862,55 +869,40 @@ async def log_testset_revisions(
return testset_revisions
- async def patch_testset_revision(
+ async def _commit_testset_revision_delta(
self,
*,
project_id: UUID,
user_id: UUID,
#
- testset_revision_patch: TestsetRevisionPatch,
+ testset_revision_commit: TestsetRevisionCommit,
+ #
+ include_testcases: Optional[bool] = None,
) -> Optional[TestsetRevision]:
- """
- Apply a patch to a testset revision.
-
- This method:
- 1. Fetches the base revision (latest if not specified) with all testcases
- 2. Loads all current testcase data
- 3. Applies the patch operations to build a complete testcases list:
- - update: Replace testcase data for matching IDs
- - create: Add new testcases
- - delete: Remove testcases by ID
- 4. Calls the regular commit flow with the full testcases data
-
- This approach ensures consistency with the regular commit flow and
- avoids any deduplication issues.
- """
+ """Apply delta operations to a base revision and commit as a new revision."""
# Get the base revision to patch
base_revision = await self.fetch_testset_revision(
project_id=project_id,
- testset_ref=Reference(id=testset_revision_patch.testset_id),
+ testset_ref=Reference(id=testset_revision_commit.testset_id),
testset_revision_ref=(
- Reference(id=testset_revision_patch.base_revision_id)
- if testset_revision_patch.base_revision_id
+ Reference(id=testset_revision_commit.revision_id)
+ if testset_revision_commit.revision_id
else None
),
)
if not base_revision:
log.error(
- f"Base revision not found for testset {testset_revision_patch.testset_id}"
+ f"Base revision not found for testset {testset_revision_commit.testset_id}"
)
return None
- # Load all current testcases from the base revision
+ # Load all current testcases from the base revision, preserving order.
current_testcases: List[Testcase] = []
- if base_revision.data and base_revision.data.testcase_ids:
- current_testcases = await self.testcases_service.fetch_testcases(
- project_id=project_id,
- testcase_ids=base_revision.data.testcase_ids,
- )
+ if base_revision.data and base_revision.data.testcases:
+ current_testcases = list(base_revision.data.testcases)
- operations = testset_revision_patch.operations
+ operations = testset_revision_commit.delta
if not operations:
# No operations, just return the base revision
return base_revision
@@ -918,20 +910,20 @@ async def patch_testset_revision(
# Apply column operations to ALL testcases first
# This ensures column changes are applied even to testcases not in update list
if operations.columns:
+ replace_map = {}
+ if operations.columns.replace:
+ replace_map = {old: new for old, new in operations.columns.replace}
+ remove_set = set(operations.columns.remove or [])
for tc in current_testcases:
if tc.data:
- # Apply column renames
- if operations.columns.rename:
- for rename_op in operations.columns.rename:
- if rename_op.old_name in tc.data:
- tc.data[rename_op.new_name] = tc.data.pop(
- rename_op.old_name
- )
-
- # Apply column deletions
- if operations.columns.delete:
- for col_name in operations.columns.delete:
- tc.data.pop(col_name, None)
+ # Preserve column order for replace/remove.
+ updated_data: Dict[str, Any] = {}
+ for key, value in tc.data.items():
+ if key in remove_set:
+ continue
+ new_key = replace_map.get(key, key)
+ updated_data[new_key] = value
+ tc.data = updated_data
# Apply column additions (initialize to empty string)
if operations.columns.add:
@@ -939,88 +931,69 @@ async def patch_testset_revision(
if col_name not in tc.data:
tc.data[col_name] = ""
- # Build a map of current testcases by ID for easy lookup
- testcases_by_id: Dict[UUID, Testcase] = {
- tc.id: tc for tc in current_testcases if tc.id
- }
-
- # Track IDs to delete
- ids_to_delete: set[UUID] = set()
- if operations.delete:
- ids_to_delete.update(operations.delete)
-
- # Apply update operations - replace data for matching IDs
- if operations.update:
- for updated_tc in operations.update:
- if updated_tc.id and updated_tc.id in testcases_by_id:
- # Create a new Testcase with updated data
- testcases_by_id[updated_tc.id] = Testcase(
- id=None, # Will be assigned by create_testcases
- set_id=testset_revision_patch.testset_id,
- data=updated_tc.data,
- )
- # Mark old ID for removal (we'll create a new testcase)
- ids_to_delete.add(updated_tc.id)
+ # Build final testcases list, preserving base order.
+ remove_set: set[UUID] = (
+ set(operations.rows.remove or []) if operations.rows else set()
+ )
+ replace_map: Dict[UUID, Testcase] = {}
+ if operations.rows and operations.rows.replace:
+ replace_map = {
+ tc.id: tc for tc in operations.rows.replace if tc.id is not None
+ }
- # Build final testcases list:
- # 1. Keep existing testcases that weren't deleted or updated
- # 2. Add updated testcases (with new data)
- # 3. Add new testcases from create operations
+ # 1) Replace in place, 2) remove wherever it appears, 3) add at the end.
final_testcases: List[Testcase] = []
-
- # Add existing testcases that weren't deleted
- for tc_id, tc in testcases_by_id.items():
- if tc_id not in ids_to_delete:
- # Keep existing testcase data
- final_testcases.append(
- Testcase(
- id=None, # Will be assigned by create_testcases
- set_id=testset_revision_patch.testset_id,
- data=tc.data,
- )
+ for tc in current_testcases:
+ if not tc.id:
+ continue
+ updated_tc = replace_map.get(tc.id)
+ if updated_tc is not None:
+ candidate = Testcase(
+ id=None,
+ set_id=testset_revision_commit.testset_id,
+ data=updated_tc.data,
)
+ else:
+ candidate = Testcase(
+ id=None,
+ set_id=testset_revision_commit.testset_id,
+ data=tc.data,
+ )
+ if tc.id in remove_set:
+ continue
+ final_testcases.append(candidate)
- # Add updated testcases
- if operations.update:
- for updated_tc in operations.update:
- if updated_tc.id:
- final_testcases.append(
- Testcase(
- id=None,
- set_id=testset_revision_patch.testset_id,
- data=updated_tc.data,
- )
- )
-
- # Add new testcases from create operations
- if operations.create:
- for new_tc in operations.create:
+ # 3) Add at the end.
+ if operations.rows and operations.rows.add:
+ for new_tc in operations.rows.add:
final_testcases.append(
Testcase(
id=None,
- set_id=testset_revision_patch.testset_id,
+ set_id=testset_revision_commit.testset_id,
data=new_tc.data,
)
)
# Get variant_id from base revision (required for commit)
variant_id = (
- testset_revision_patch.testset_variant_id
+ testset_revision_commit.testset_variant_id
or base_revision.testset_variant_id
)
- # Generate a unique slug for the new revision
- revision_slug = uuid4().hex[-12:]
+ # Generate a unique slug for the new revision if missing
+ revision_slug = testset_revision_commit.slug or uuid4().hex[-12:]
# Create commit request with full testcases data
# This will go through the regular commit flow
testset_revision_commit = TestsetRevisionCommit(
slug=revision_slug,
- testset_id=testset_revision_patch.testset_id,
+ testset_id=testset_revision_commit.testset_id,
testset_variant_id=variant_id,
- message=testset_revision_patch.message or "Patched testset revision",
- description=testset_revision_patch.description or base_revision.description,
- flags=testset_revision_patch.flags,
+ message=testset_revision_commit.message or "Patched testset revision",
+ description=(
+ testset_revision_commit.description or base_revision.description
+ ),
+ flags=testset_revision_commit.flags,
data=TestsetRevisionData(
testcases=final_testcases,
),
@@ -1031,6 +1004,7 @@ async def patch_testset_revision(
project_id=project_id,
user_id=user_id,
testset_revision_commit=testset_revision_commit,
+ include_testcases=include_testcases,
)
## -------------------------------------------------------------------------
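
The ordering guarantees of `_commit_testset_revision_delta` (replace rows in place, drop removed rows wherever they appear, append additions at the end) reduce to the small pure-Python sketch below; it is illustrative only and keys rows by id the same way the service does.

```python
# Pure-Python sketch of the delta ordering rules above (illustrative only).
base_rows = [
    {"id": "a", "data": {"q": "1"}},
    {"id": "b", "data": {"q": "2"}},
    {"id": "c", "data": {"q": "3"}},
]
replace = {"b": {"q": "2 (edited)"}}  # replaced in place, keeps its position
remove = {"c"}                        # removed wherever it appears
add = [{"q": "4"}]                    # appended at the end

final_rows = [
    replace.get(row["id"], row["data"])
    for row in base_rows
    if row["id"] not in remove
] + add

assert final_rows == [{"q": "1"}, {"q": "2 (edited)"}, {"q": "4"}]
```
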
diff --git a/api/oss/src/core/users/__init__.py b/api/oss/src/core/users/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/oss/src/core/users/types.py b/api/oss/src/core/users/types.py
new file mode 100644
index 0000000000..ecebaa0b60
--- /dev/null
+++ b/api/oss/src/core/users/types.py
@@ -0,0 +1,45 @@
+from datetime import datetime
+from uuid import UUID
+from pydantic import BaseModel, field_validator
+from typing import Optional
+
+from oss.src.core.auth.types import MethodKind
+
+
+class UserIdentity(BaseModel):
+ id: UUID
+ user_id: UUID
+ method: str
+ subject: str
+ domain: Optional[str] = None
+ created_at: datetime
+ updated_at: Optional[datetime] = None
+ deleted_at: Optional[datetime] = None
+ created_by_id: Optional[UUID] = None
+ updated_by_id: Optional[UUID] = None
+ deleted_by_id: Optional[UUID] = None
+
+ class Config:
+ from_attributes = True
+
+ @field_validator("method")
+ @classmethod
+ def validate_method(cls, value: str) -> str:
+ if not MethodKind.is_valid_pattern(value):
+ raise ValueError(f"Invalid auth method: {value}")
+ return value
+
+
+class UserIdentityCreate(BaseModel):
+ user_id: UUID
+ method: str
+ subject: str
+ domain: Optional[str] = None
+ created_by_id: Optional[UUID] = None
+
+ @field_validator("method")
+ @classmethod
+ def validate_method(cls, value: str) -> str:
+ if not MethodKind.is_valid_pattern(value):
+ raise ValueError(f"Invalid auth method: {value}")
+ return value
diff --git a/api/oss/src/crons/queries.sh b/api/oss/src/crons/queries.sh
index b9e8c7a6e1..e742275b03 100644
--- a/api/oss/src/crons/queries.sh
+++ b/api/oss/src/crons/queries.sh
@@ -1,7 +1,8 @@
#!/bin/sh
set -eu
-AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2-)
+AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2- || true)
+AGENTA_AUTH_KEY="${AGENTA_AUTH_KEY:-replace-me}"
TRIGGER_INTERVAL=$(awk 'NR==2 {split($1, a, "/"); print (a[2] ? a[2] : 1)}' /etc/cron.d/queries-cron)
NOW_UTC=$(date -u "+%Y-%m-%dT%H:%M:00Z")
MINUTE=$(date -u "+%M" | sed 's/^0*//')
@@ -21,4 +22,4 @@ curl \
-H "Authorization: Access ${AGENTA_AUTH_KEY}" \
"http://api:8000/admin/evaluations/runs/refresh?trigger_interval=${TRIGGER_INTERVAL}&trigger_datetime=${TRIGGER_DATETIME}" || echo "❌ CURL failed"
-echo "[$(date)] queries.sh done" >> /proc/1/fd/1
\ No newline at end of file
+echo "[$(date)] queries.sh done" >> /proc/1/fd/1
diff --git a/api/oss/src/dbs/postgres/blobs/dao.py b/api/oss/src/dbs/postgres/blobs/dao.py
index 85a8616e87..0929ae517c 100644
--- a/api/oss/src/dbs/postgres/blobs/dao.py
+++ b/api/oss/src/dbs/postgres/blobs/dao.py
@@ -452,7 +452,7 @@ async def query_blobs(
stmt = apply_windowing(
stmt=stmt,
DBE=self.BlobDBE,
- attribute="id", # UUID7 - use id for cursor-based pagination
+ attribute="created_at", # Blob IDs are content-hashed (UUID5), use timestamp for ordering
order="ascending", # data-style
windowing=windowing,
)
@@ -464,6 +464,24 @@ async def query_blobs(
if not blob_dbes:
return []
+ # If blob_ids were provided, preserve their order in the result
+ if blob_query.blob_ids:
+ _blobs = {
+ blob_dbe.id: map_dbe_to_dto( # type: ignore
+ DTO=Blob,
+ dbe=blob_dbe, # type: ignore
+ )
+ for blob_dbe in blob_dbes
+ }
+
+ blobs = [
+ _blobs[blob_id]
+ for blob_id in blob_query.blob_ids
+ if blob_id in _blobs
+ ]
+
+ return blobs
+
blobs = [
map_dbe_to_dto(
DTO=Blob,
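
The id-order-preserving lookup added above is a standard pattern; a minimal sketch on plain data (not the DAO itself):

```python
# Sketch of reordering fetched rows to match the requested id order.
fetched = [{"id": 2, "v": "b"}, {"id": 1, "v": "a"}, {"id": 3, "v": "c"}]
requested_ids = [3, 1, 2]

by_id = {row["id"]: row for row in fetched}
ordered = [by_id[i] for i in requested_ids if i in by_id]

assert [row["id"] for row in ordered] == [3, 1, 2]
```
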
diff --git a/api/oss/src/dbs/postgres/organizations/__init__.py b/api/oss/src/dbs/postgres/organizations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/oss/src/dbs/postgres/secrets/dao.py b/api/oss/src/dbs/postgres/secrets/dao.py
index 51811500eb..62981cebb0 100644
--- a/api/oss/src/dbs/postgres/secrets/dao.py
+++ b/api/oss/src/dbs/postgres/secrets/dao.py
@@ -21,13 +21,32 @@ class SecretsDAO(SecretsDAOInterface):
def __init__(self):
pass
+ @staticmethod
+ def _validate_scope(project_id: UUID | None, organization_id: UUID | None) -> None:
+ if bool(project_id) == bool(organization_id):
+ raise ValueError(
+ "Exactly one of project_id or organization_id must be provided."
+ )
+
+ @staticmethod
+ def _scope_filter(project_id: UUID | None, organization_id: UUID | None) -> dict:
+ SecretsDAO._validate_scope(project_id, organization_id)
+ return (
+ {"project_id": project_id}
+ if project_id
+ else {"organization_id": organization_id}
+ )
+
async def create(
self,
- project_id: UUID,
+ project_id: UUID | None,
+ organization_id: UUID | None,
create_secret_dto: CreateSecretDTO,
):
+ self._validate_scope(project_id, organization_id)
secrets_dbe = map_secrets_dto_to_dbe(
project_id=project_id,
+ organization_id=organization_id,
secret_dto=create_secret_dto,
)
async with engine.core_session() as session:
@@ -39,13 +58,15 @@ async def create(
async def get(
self,
- project_id: UUID,
secret_id: UUID,
+ project_id: UUID | None,
+ organization_id: UUID | None,
):
async with engine.core_session() as session:
+ scope_filter = self._scope_filter(project_id, organization_id)
stmt = select(SecretsDBE).filter_by(
id=secret_id,
- project_id=project_id,
+ **scope_filter,
)
result = await session.execute(stmt) # type: ignore
secrets_dbe = result.scalar()
@@ -56,9 +77,10 @@ async def get(
secrets_dto = map_secrets_dbe_to_dto(secrets_dbe=secrets_dbe)
return secrets_dto
- async def list(self, project_id: UUID):
+ async def list(self, project_id: UUID | None, organization_id: UUID | None):
async with engine.core_session() as session:
- stmt = select(SecretsDBE).filter_by(project_id=project_id)
+ scope_filter = self._scope_filter(project_id, organization_id)
+ stmt = select(SecretsDBE).filter_by(**scope_filter)
results = await session.execute(stmt) # type: ignore
secrets_dbes = results.scalars().all()
@@ -70,14 +92,16 @@ async def list(self, project_id: UUID):
async def update(
self,
- project_id: UUID,
secret_id: UUID,
update_secret_dto: UpdateSecretDTO,
+ project_id: UUID | None,
+ organization_id: UUID | None,
):
async with engine.core_session() as session:
+ scope_filter = self._scope_filter(project_id, organization_id)
stmt = select(SecretsDBE).filter_by(
id=secret_id,
- project_id=project_id,
+ **scope_filter,
)
result = await session.execute(stmt)
secrets_dbe = result.scalar()
@@ -97,13 +121,15 @@ async def update(
async def delete(
self,
- project_id: UUID,
secret_id: UUID,
+ project_id: UUID | None,
+ organization_id: UUID | None,
):
async with engine.core_session() as session:
+ scope_filter = self._scope_filter(project_id, organization_id)
stmt = select(SecretsDBE).filter_by(
id=secret_id,
- project_id=project_id,
+ **scope_filter,
)
result = await session.execute(stmt) # type: ignore
vault_secret_dbe = result.scalar()
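
The scope rule enforced by `_validate_scope` / `_scope_filter` is exactly-one-of; the behavior is summarized by this standalone sketch (not the DAO itself, which also needs a database session).

```python
# Sketch of the exactly-one-of scope rule enforced by _validate_scope above.
from uuid import uuid4


def scope_filter(project_id=None, organization_id=None) -> dict:
    if bool(project_id) == bool(organization_id):
        raise ValueError("Exactly one of project_id or organization_id must be provided.")
    return {"project_id": project_id} if project_id else {"organization_id": organization_id}


assert scope_filter(project_id=uuid4()).keys() == {"project_id"}
assert scope_filter(organization_id=uuid4()).keys() == {"organization_id"}
```
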
diff --git a/api/oss/src/dbs/postgres/secrets/dbas.py b/api/oss/src/dbs/postgres/secrets/dbas.py
index ede7cac05f..6f01fff7c5 100644
--- a/api/oss/src/dbs/postgres/secrets/dbas.py
+++ b/api/oss/src/dbs/postgres/secrets/dbas.py
@@ -3,14 +3,13 @@
from oss.src.core.secrets.enums import SecretKind
from oss.src.dbs.postgres.shared.dbas import (
- ProjectScopeDBA,
LegacyLifecycleDBA,
HeaderDBA,
)
from oss.src.dbs.postgres.secrets.custom_fields import PGPString
-class SecretsDBA(ProjectScopeDBA, LegacyLifecycleDBA, HeaderDBA):
+class SecretsDBA(LegacyLifecycleDBA, HeaderDBA):
__abstract__ = True
id = Column(
@@ -22,3 +21,11 @@ class SecretsDBA(ProjectScopeDBA, LegacyLifecycleDBA, HeaderDBA):
)
kind = Column(SQLEnum(SecretKind, name="secretkind_enum")) # type: ignore
data = Column(PGPString()) # type: ignore
+ project_id = Column(
+ UUID(as_uuid=True),
+ nullable=True,
+ )
+ organization_id = Column(
+ UUID(as_uuid=True),
+ nullable=True,
+ )
diff --git a/api/oss/src/dbs/postgres/secrets/mappings.py b/api/oss/src/dbs/postgres/secrets/mappings.py
index e79aeadd87..14397c3194 100644
--- a/api/oss/src/dbs/postgres/secrets/mappings.py
+++ b/api/oss/src/dbs/postgres/secrets/mappings.py
@@ -13,12 +13,16 @@
def map_secrets_dto_to_dbe(
- *, project_id: uuid.UUID, secret_dto: CreateSecretDTO
+ *,
+ project_id: uuid.UUID | None,
+ organization_id: uuid.UUID | None,
+ secret_dto: CreateSecretDTO,
) -> SecretsDBE:
vault_secret_dbe = SecretsDBE(
name=secret_dto.header.name if secret_dto.header else None,
description=(secret_dto.header.description if secret_dto.header else None),
project_id=project_id,
+ organization_id=organization_id,
kind=secret_dto.secret.kind.value,
data=json.dumps(secret_dto.secret.data.model_dump(exclude_none=True)),
)
diff --git a/api/oss/src/dbs/postgres/shared/dbas.py b/api/oss/src/dbs/postgres/shared/dbas.py
index c6ada32dff..5b86d86c66 100644
--- a/api/oss/src/dbs/postgres/shared/dbas.py
+++ b/api/oss/src/dbs/postgres/shared/dbas.py
@@ -37,7 +37,7 @@ class LegacyLifecycleDBA:
created_at = Column(
TIMESTAMP(timezone=True),
server_default=func.current_timestamp(),
- nullable=False,
+ nullable=True,
)
updated_at = Column(
TIMESTAMP(timezone=True),
diff --git a/api/oss/src/dbs/postgres/users/__init__.py b/api/oss/src/dbs/postgres/users/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/oss/src/dbs/postgres/users/dao.py b/api/oss/src/dbs/postgres/users/dao.py
new file mode 100644
index 0000000000..2fbe1c71b9
--- /dev/null
+++ b/api/oss/src/dbs/postgres/users/dao.py
@@ -0,0 +1,67 @@
+from uuid import UUID
+from typing import Optional, List
+from sqlalchemy import select
+from sqlalchemy.exc import IntegrityError
+
+from oss.src.dbs.postgres.shared.engine import engine
+from oss.src.dbs.postgres.users.dbes import UserIdentityDBE
+from oss.src.dbs.postgres.users.mappings import (
+ map_identity_dbe_to_dto,
+ map_create_dto_to_dbe,
+)
+from oss.src.core.users.types import UserIdentity, UserIdentityCreate
+
+
+class IdentitiesDAO:
+ async def create(self, dto: UserIdentityCreate) -> UserIdentity:
+ identity_dbe = map_create_dto_to_dbe(dto)
+
+ async with engine.core_session() as session:
+ try:
+ session.add(identity_dbe)
+ await session.commit()
+ await session.refresh(identity_dbe)
+ except IntegrityError:
+ await session.rollback()
+ stmt = select(UserIdentityDBE).filter_by(
+ method=dto.method,
+ subject=dto.subject,
+ )
+ result = await session.execute(stmt)
+ identity_dbe = result.scalar()
+ if identity_dbe is None:
+ raise
+
+ return map_identity_dbe_to_dto(identity_dbe)
+
+ async def get_by_method_subject(
+ self, method: str, subject: str
+ ) -> Optional[UserIdentity]:
+ async with engine.core_session() as session:
+ stmt = select(UserIdentityDBE).filter_by(
+ method=method,
+ subject=subject,
+ )
+ result = await session.execute(stmt)
+ identity_dbe = result.scalar()
+
+ if identity_dbe is None:
+ return None
+
+ return map_identity_dbe_to_dto(identity_dbe)
+
+ async def list_by_user(self, user_id: UUID) -> List[UserIdentity]:
+ async with engine.core_session() as session:
+ stmt = select(UserIdentityDBE).filter_by(user_id=user_id)
+ result = await session.execute(stmt)
+ identity_dbes = result.scalars().all()
+
+ return [map_identity_dbe_to_dto(dbe) for dbe in identity_dbes]
+
+ async def list_by_domain(self, domain: str) -> List[UserIdentity]:
+ async with engine.core_session() as session:
+ stmt = select(UserIdentityDBE).filter_by(domain=domain)
+ result = await session.execute(stmt)
+ identity_dbes = result.scalars().all()
+
+ return [map_identity_dbe_to_dto(dbe) for dbe in identity_dbes]
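
A usage sketch for the new `IdentitiesDAO` (assumes a reachable database and that `user_id` points at an existing `users.id` row; field names follow the mappings further down in this diff):

```python
from uuid import UUID

from oss.src.core.users.types import UserIdentityCreate
from oss.src.dbs.postgres.users.dao import IdentitiesDAO


async def demo(existing_user_id: UUID) -> None:
    dao = IdentitiesDAO()

    # create() is idempotent on (method, subject): the unique constraint's
    # IntegrityError falls back to fetching the already-inserted row.
    identity = await dao.create(
        UserIdentityCreate(
            user_id=existing_user_id,  # must reference users.id (FK, CASCADE)
            method="oidc",
            subject="google|1234567890",
            domain="example.com",
        )
    )

    same = await dao.get_by_method_subject(method="oidc", subject="google|1234567890")
    assert same is not None and same.id == identity.id
```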
diff --git a/api/oss/src/dbs/postgres/users/dbas.py b/api/oss/src/dbs/postgres/users/dbas.py
new file mode 100644
index 0000000000..9726fd6e40
--- /dev/null
+++ b/api/oss/src/dbs/postgres/users/dbas.py
@@ -0,0 +1,32 @@
+import uuid_utils.compat as uuid
+from sqlalchemy import Column, String, UUID
+
+from oss.src.dbs.postgres.shared.dbas import LifecycleDBA
+
+
+class UserIdentityDBA(LifecycleDBA):
+ __abstract__ = True
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ user_id = Column(
+ UUID(as_uuid=True),
+ nullable=False,
+ )
+ method = Column(
+ String,
+ nullable=False,
+ )
+ subject = Column(
+ String,
+ nullable=False,
+ )
+ domain = Column(
+ String,
+ nullable=True,
+ )
diff --git a/api/oss/src/dbs/postgres/users/dbes.py b/api/oss/src/dbs/postgres/users/dbes.py
new file mode 100644
index 0000000000..40f2b7c4cb
--- /dev/null
+++ b/api/oss/src/dbs/postgres/users/dbes.py
@@ -0,0 +1,34 @@
+from sqlalchemy import (
+ ForeignKeyConstraint,
+ UniqueConstraint,
+ Index,
+)
+
+from oss.src.dbs.postgres.shared.base import Base
+from oss.src.dbs.postgres.users.dbas import UserIdentityDBA
+
+
+class UserIdentityDBE(Base, UserIdentityDBA):
+ __tablename__ = "user_identities"
+
+ __table_args__ = (
+ ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ondelete="CASCADE",
+ ),
+ UniqueConstraint(
+ "method",
+ "subject",
+ name="uq_user_identities_method_subject",
+ ),
+ Index(
+ "ix_user_identities_user_method",
+ "user_id",
+ "method",
+ ),
+ Index(
+ "ix_user_identities_domain",
+ "domain",
+ ),
+ )
diff --git a/api/oss/src/dbs/postgres/users/mappings.py b/api/oss/src/dbs/postgres/users/mappings.py
new file mode 100644
index 0000000000..f11d6aaf1e
--- /dev/null
+++ b/api/oss/src/dbs/postgres/users/mappings.py
@@ -0,0 +1,23 @@
+from oss.src.core.users.types import UserIdentity, UserIdentityCreate
+from oss.src.dbs.postgres.users.dbes import UserIdentityDBE
+
+
+def map_identity_dbe_to_dto(identity_dbe: UserIdentityDBE) -> UserIdentity:
+ return UserIdentity(
+ id=identity_dbe.id,
+ user_id=identity_dbe.user_id,
+ method=identity_dbe.method,
+ subject=identity_dbe.subject,
+ domain=identity_dbe.domain,
+ created_at=identity_dbe.created_at,
+ updated_at=identity_dbe.updated_at,
+ )
+
+
+def map_create_dto_to_dbe(dto: UserIdentityCreate) -> UserIdentityDBE:
+ return UserIdentityDBE(
+ user_id=dto.user_id,
+ method=dto.method,
+ subject=dto.subject,
+ domain=dto.domain,
+ )
diff --git a/api/oss/src/models/api/evaluation_model.py b/api/oss/src/models/api/evaluation_model.py
index 82e9f35cd1..dc006e11d5 100644
--- a/api/oss/src/models/api/evaluation_model.py
+++ b/api/oss/src/models/api/evaluation_model.py
@@ -20,6 +20,7 @@ class LegacyEvaluator(BaseModel):
oss: Optional[bool] = False
requires_llm_api_keys: Optional[bool] = False
tags: List[str]
+ archived: Optional[bool] = False
class EvaluatorConfig(BaseModel):
diff --git a/api/oss/src/models/api/organization_models.py b/api/oss/src/models/api/organization_models.py
index 8809330d5f..02e8232e6a 100644
--- a/api/oss/src/models/api/organization_models.py
+++ b/api/oss/src/models/api/organization_models.py
@@ -1,4 +1,5 @@
from typing import Optional, List, Dict, Any
+from uuid import UUID
from pydantic import BaseModel, Field
@@ -7,10 +8,19 @@
class Organization(BaseModel):
id: str
- name: str
- owner: str
- description: str
- type: Optional[str] = None
+ slug: Optional[str] = None
+ #
+ name: Optional[str] = None
+ description: Optional[str] = None
+ #
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
+ #
+ owner_id: UUID
+ #
+ members: List[str] = Field(default_factory=list)
+ invitations: List = Field(default_factory=list)
workspaces: List[str] = Field(default_factory=list)
diff --git a/api/oss/src/models/db_models.py b/api/oss/src/models/db_models.py
index aafb1f9ccb..02fe011341 100644
--- a/api/oss/src/models/db_models.py
+++ b/api/oss/src/models/db_models.py
@@ -32,18 +32,54 @@ class OrganizationDB(Base):
unique=True,
nullable=False,
)
- name = Column(String, default="agenta")
- description = Column(
+ slug = Column(
String,
- default="The open-source LLM developer platform for cross-functional teams.",
+ unique=True,
+ nullable=True,
)
- type = Column(String, nullable=True)
- owner = Column(String, nullable=True) # TODO: deprecate and remove
+ #
+ name = Column(String, nullable=True)
+ description = Column(String, nullable=True)
+ #
+ flags = Column(JSONB, nullable=True)
+ tags = Column(JSONB, nullable=True)
+ meta = Column(JSONB, nullable=True)
+ #
+ owner_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("users.id", ondelete="RESTRICT"),
+ nullable=False,
+ )
+ #
created_at = Column(
- DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ DateTime(timezone=True),
+ default=lambda: datetime.now(timezone.utc),
+ nullable=False,
)
+ #
updated_at = Column(
- DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ DateTime(timezone=True),
+ default=lambda: datetime.now(timezone.utc),
+ nullable=True,
+ )
+ deleted_at = Column(
+ DateTime(timezone=True),
+ nullable=True,
+ )
+ created_by_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("users.id", ondelete="RESTRICT"),
+ nullable=False,
+ )
+ updated_by_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("users.id", ondelete="SET NULL"),
+ nullable=True,
+ )
+ deleted_by_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("users.id", ondelete="SET NULL"),
+ nullable=True,
)
diff --git a/api/oss/src/models/shared_models.py b/api/oss/src/models/shared_models.py
index c2849bc6f5..149a69509e 100644
--- a/api/oss/src/models/shared_models.py
+++ b/api/oss/src/models/shared_models.py
@@ -3,6 +3,16 @@
from typing import Any, Dict, Optional
+class OrganizationFlags(BaseModel):
+ is_demo: bool = False
+ is_personal: bool = False
+
+
+class OrganizationQueryFlags(BaseModel):
+ is_demo: Optional[bool] = None
+ is_personal: Optional[bool] = None
+
+
class ConfigDB(BaseModel):
config_name: str
parameters: Dict[str, Any] = Field(default_factory=dict)
diff --git a/api/oss/src/resources/evaluators/evaluators.py b/api/oss/src/resources/evaluators/evaluators.py
index aaac1a9ef7..392b23be45 100644
--- a/api/oss/src/resources/evaluators/evaluators.py
+++ b/api/oss/src/resources/evaluators/evaluators.py
@@ -375,6 +375,7 @@
"name": "JSON Field Match",
"key": "field_match_test",
"direct_use": False,
+ "archived": True, # Deprecated - use json_multi_field_match instead
"settings_template": {
"json_field": {
"label": "JSON Field",
@@ -398,6 +399,33 @@
"oss": True,
"tags": ["classifiers"],
},
+ {
+ "name": "JSON Multi-Field Match",
+ "key": "json_multi_field_match",
+ "direct_use": False,
+ "settings_template": {
+ "fields": {
+ "label": "Fields to Compare",
+ "type": "fields_tags_editor", # Custom type - tag-based add/remove editor
+ "required": True,
+ "description": "Add fields to compare using dot notation for nested paths (e.g., user.name)",
+ },
+ "correct_answer_key": {
+ "label": "Expected Answer Column",
+ "default": "correct_answer",
+ "type": "string",
+ "required": True,
+ "description": "Column name containing the expected JSON object",
+ "ground_truth_key": True,
+ "advanced": True, # Hidden in advanced section
+ },
+ },
+ "description": "Compares configured fields in expected JSON against LLM output. Each field becomes a separate metric (0 or 1), with an aggregate_score showing the percentage of matching fields. Useful for entity extraction validation.",
+ "requires_testcase": "always",
+ "requires_trace": "always",
+ "oss": True,
+ "tags": ["classifiers"],
+ },
{
"name": "JSON Diff Match",
"key": "auto_json_diff",
diff --git a/api/oss/src/routers/admin_router.py b/api/oss/src/routers/admin_router.py
index 42c2d8ef36..8a2134b79b 100644
--- a/api/oss/src/routers/admin_router.py
+++ b/api/oss/src/routers/admin_router.py
@@ -435,9 +435,12 @@ async def create_account(
user = LegacyUserResponse(id=str(user_db.id))
create_org_payload = CreateOrganization(
- name=account.scope.name,
- owner=str(user.id),
- type="default",
+ name="Organization",
+ #
+ is_demo=False,
+ is_personal=False,
+ #
+ owner_id=UUID(str(user_db.id)),
)
organization_db, workspace_db, project_db = await legacy_create_organization(
diff --git a/api/oss/src/routers/evaluation_router.py b/api/oss/src/routers/evaluation_router.py
index 65cdc87c0a..27b1af4e31 100644
--- a/api/oss/src/routers/evaluation_router.py
+++ b/api/oss/src/routers/evaluation_router.py
@@ -106,8 +106,8 @@
)
# Redis client and TracingWorker for publishing spans to Redis Streams
-if env.REDIS_URI_DURABLE:
- redis_client = Redis.from_url(env.REDIS_URI_DURABLE, decode_responses=False)
+if env.redis.uri_durable:
+ redis_client = Redis.from_url(env.redis.uri_durable, decode_responses=False)
tracing_worker = TracingWorker(
service=tracing_service,
redis_client=redis_client,
diff --git a/api/oss/src/routers/organization_router.py b/api/oss/src/routers/organization_router.py
index b9df822d28..1cf27d9343 100644
--- a/api/oss/src/routers/organization_router.py
+++ b/api/oss/src/routers/organization_router.py
@@ -72,10 +72,17 @@ async def list_organizations(
response = [
Organization(
id=str(organization_db.id),
+ slug=str(organization_db.slug),
+ #
name=str(organization_db.name),
- owner=organization_db.owner,
description=str(organization_db.description),
- type=organization_db.type, # type: ignore
+ #
+ flags=organization_db.flags,
+ tags=organization_db.tags,
+ meta=organization_db.meta,
+ #
+ owner_id=organization_db.owner_id,
+ #
workspaces=[str(active_workspace.id)] if not is_ee() else [],
).model_dump(exclude_unset=True)
for organization_db in organizations_db
@@ -151,10 +158,17 @@ async def fetch_organization_details(
return OrganizationDetails(
id=str(organization_db.id),
+ slug=str(organization_db.slug),
+ #
name=str(organization_db.name),
- owner=organization_db.owner,
description=str(organization_db.description),
- type=organization_db.type, # type: ignore
+ #
+ flags=organization_db.flags,
+ tags=organization_db.tags,
+ meta=organization_db.meta,
+ #
+ owner_id=organization_db.owner_id,
+ #
default_workspace={
"id": str(active_workspace.id),
"name": str(active_workspace.name),
@@ -192,60 +206,71 @@ async def invite_user_to_organization(
HTTPException: If there is an error assigning the role to the user.
"""
- if len(payload) != 1:
- return JSONResponse(
- status_code=400,
- content={"detail": "Only one user can be invited at a time."},
- )
-
- if is_ee():
- user_org_workspace_data = await get_user_org_and_workspace_id(
- request.state.user_id
- )
- project = await db_manager_ee.get_project_by_workspace(workspace_id)
- has_permission = await check_rbac_permission(
- user_org_workspace_data=user_org_workspace_data,
- project_id=str(project.id),
- role=WorkspaceRole.WORKSPACE_ADMIN,
- )
- if not has_permission:
+ try:
+ if len(payload) != 1:
return JSONResponse(
- status_code=403,
- content={
- "detail": "You do not have permission to perform this action. Please contact your Organization Owner"
- },
+ status_code=400,
+ content={"detail": "Only one user can be invited at a time."},
)
- owner = await db_manager.get_organization_owner(organization_id)
- owner_domain = owner.email.split("@")[-1].lower() if owner else ""
- user_domain = payload[0].email.split("@")[-1].lower()
- skip_meter = owner_domain != "agenta.ai" and user_domain == "agenta.ai"
-
- if not skip_meter:
- check, _, _ = await check_entitlements(
- organization_id=request.state.organization_id,
- key=Gauge.USERS,
- delta=1,
+ if is_ee():
+ user_org_workspace_data = await get_user_org_and_workspace_id(
+ request.state.user_id
)
+ project = await db_manager_ee.get_project_by_workspace(workspace_id)
+ has_permission = await check_rbac_permission(
+ user_org_workspace_data=user_org_workspace_data,
+ project_id=str(project.id),
+ role=WorkspaceRole.WORKSPACE_ADMIN,
+ )
+ if not has_permission:
+ return JSONResponse(
+ status_code=403,
+ content={
+ "detail": "You do not have permission to perform this action. Please contact your Organization Owner"
+ },
+ )
+
+ owner = await db_manager.get_organization_owner(organization_id)
+ owner_domain = owner.email.split("@")[-1].lower() if owner else ""
+ user_domain = payload[0].email.split("@")[-1].lower()
+ skip_meter = owner_domain != "agenta.ai" and user_domain == "agenta.ai"
+
+ if not skip_meter:
+ check, _, _ = await check_entitlements(
+ organization_id=request.state.organization_id,
+ key=Gauge.USERS,
+ delta=1,
+ )
+
+ if not check:
+ return NOT_ENTITLED_RESPONSE(Tracker.GAUGES)
+
+ invite_user = await workspace_manager.invite_user_to_workspace(
+ payload=payload,
+ organization_id=organization_id,
+ project_id=str(project.id),
+ workspace_id=workspace_id,
+ user_uid=request.state.user_id,
+ )
+ return invite_user
- if not check:
- return NOT_ENTITLED_RESPONSE(Tracker.GAUGES)
-
- invite_user = await workspace_manager.invite_user_to_workspace(
- payload=payload,
+ invitation_response = await organization_service.invite_user_to_organization(
+ payload=payload[0],
+ project_id=request.state.project_id,
+ user_id=request.state.user_id,
+ )
+ return invitation_response
+ except Exception:
+ log.error(
+ "Invite user failed",
organization_id=organization_id,
- project_id=str(project.id),
workspace_id=workspace_id,
- user_uid=request.state.user_id,
+ project_id=getattr(request.state, "project_id", None),
+ user_id=getattr(request.state, "user_id", None),
+ exc_info=True,
)
- return invite_user
-
- invitation_response = await organization_service.invite_user_to_organization(
- payload=payload[0],
- project_id=request.state.project_id,
- user_id=request.state.user_id,
- )
- return invitation_response
+ raise
@router.post(
diff --git a/api/oss/src/routers/projects_router.py b/api/oss/src/routers/projects_router.py
index 8a9c0b29de..4bf76680be 100644
--- a/api/oss/src/routers/projects_router.py
+++ b/api/oss/src/routers/projects_router.py
@@ -58,7 +58,7 @@ async def _assert_org_owner(request: Request):
if not organization:
raise HTTPException(status_code=404, detail="Organization not found")
- if str(organization.owner) != str(user_id):
+ if str(organization.owner_id) != str(user_id):
raise HTTPException(
status_code=403,
detail="Only the organization owner can perform this action",
@@ -69,7 +69,7 @@ async def _assert_org_owner(request: Request):
def _get_oss_user_role(organization, user_id: str) -> str:
"""Owner vs editor logic used across OSS endpoints."""
- return "owner" if str(organization.owner) == str(user_id) else "editor"
+ return "owner" if str(organization.owner_id) == str(user_id) else "editor"
async def _get_ee_membership_for_project(user_id, project_id):
diff --git a/api/oss/src/routers/user_profile.py b/api/oss/src/routers/user_profile.py
index 0b082467b6..2d8ceda662 100644
--- a/api/oss/src/routers/user_profile.py
+++ b/api/oss/src/routers/user_profile.py
@@ -1,12 +1,13 @@
-from fastapi import Request
+from fastapi import Request, HTTPException
from fastapi.responses import JSONResponse
from oss.src.utils.logging import get_module_logger
-from oss.src.utils.caching import get_cache, set_cache
+from oss.src.utils.caching import get_cache, set_cache, invalidate_cache
from oss.src.utils.common import is_ee
from oss.src.utils.common import APIRouter
from oss.src.models.api.user_models import User
+from oss.src.models.api.user_models import UserUpdate
from oss.src.services import db_manager, user_service
@@ -63,6 +64,33 @@ async def user_profile(request: Request):
return user
+@router.put("/username", operation_id="update_user_username")
+async def update_user_username(request: Request, payload: UserUpdate):
+ username = (payload.username or "").strip()
+ if not username:
+ raise HTTPException(status_code=400, detail="Username is required.")
+
+ user = await db_manager.update_user_username(
+ user_id=request.state.user_id,
+ username=username,
+ )
+
+ await invalidate_cache(
+ project_id=request.state.project_id,
+ user_id=request.state.user_id,
+ namespace="user_profile",
+ )
+
+ return User(
+ id=str(user.id),
+ uid=str(user.uid),
+ email=str(user.email),
+ username=str(user.username),
+ created_at=str(user.created_at),
+ updated_at=str(user.updated_at),
+ )
+
+
@router.post("/reset-password", operation_id="reset_user_password")
async def reset_user_password(request: Request, user_id: str):
if is_ee():
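
A client-side sketch for the new `update_user_username` endpoint; the `/api/profile/username` path and the auth header are assumptions, since the router's mount point and middleware live outside this hunk:

```python
import httpx

resp = httpx.put(
    "http://localhost/api/profile/username",      # assumed mount point for this router
    json={"username": "new-handle"},              # UserUpdate payload
    headers={"Authorization": "Bearer <token>"},  # whatever auth the middleware accepts
)
resp.raise_for_status()
print(resp.json()["username"])
```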
diff --git a/api/oss/src/services/admin_manager.py b/api/oss/src/services/admin_manager.py
index 0541bc2c1a..750e47fd0b 100644
--- a/api/oss/src/services/admin_manager.py
+++ b/api/oss/src/services/admin_manager.py
@@ -30,9 +30,12 @@
class CreateOrganization(BaseModel):
name: str
- owner: str
description: Optional[str] = None
- type: Optional[str] = None
+ #
+ is_demo: bool = False
+ is_personal: bool = False
+ #
+ owner_id: UUID
class CreateWorkspace(BaseModel):
@@ -73,21 +76,24 @@ class UserRequest(BaseModel):
class OrganizationRequest(BaseModel):
- name: str
- description: str
+ name: Optional[str] = None
+ description: Optional[str] = None
+ #
+ is_personal: bool
class WorkspaceRequest(BaseModel):
- name: str
- description: str
+ name: Optional[str] = None
+ description: Optional[str] = None
+ #
is_default: bool
#
organization_ref: Reference
class ProjectRequest(BaseModel):
- name: str
- description: str
+ name: Optional[str] = None
+ description: Optional[str] = None
is_default: bool
#
workspace_ref: Reference
@@ -159,8 +165,15 @@ async def legacy_create_organization(
) -> Union[OrganizationDB, WorkspaceDB]:
async with engine.core_session() as session:
create_org_data = payload.model_dump(exclude_unset=True)
- if "owner" not in create_org_data:
- create_org_data["owner"] = str(user.id)
+
+ create_org_data["flags"] = {
+ "is_demo": payload.is_demo,
+ "is_personal": payload.is_personal,
+ }
+
+ # Set required audit fields
+ create_org_data["owner_id"] = user.id
+ create_org_data["created_by_id"] = user.id
# create organization
organization_db = OrganizationDB(**create_org_data)
@@ -172,14 +185,7 @@ async def legacy_create_organization(
# construct workspace payload
workspace_payload = CreateWorkspace(
name=payload.name,
- type=payload.type if payload.type else "",
- description=(
- "Default Workspace"
- if payload.type == "default"
- else payload.description
- if payload.description
- else ""
- ),
+ type="default",
)
# create workspace
@@ -210,7 +216,7 @@ async def legacy_create_workspace(
await session.refresh(workspace, attribute_names=["organization"])
project_db = await legacy_create_project(
- project_name="Default Project",
+ project_name="Default",
organization_id=str(organization.id),
workspace_id=str(workspace.id),
session=session,
@@ -275,13 +281,13 @@ async def create_user(
session.add(user_db)
+ await session.commit()
+
log.info(
"[scopes] user created",
user_id=user_db.id,
)
- await session.commit()
-
response = Reference(id=user_db.id)
return response
@@ -289,27 +295,26 @@ async def create_user(
async def create_organization(
request: OrganizationRequest,
+ created_by_id: uuid.UUID,
) -> Reference:
async with engine.core_session() as session:
organization_db = OrganizationDB(
- # id=uuid7() # use default
- #
name=request.name,
description=request.description,
- #
- owner="", # move 'owner' from here to membership 'role'
- # type=... # remove 'type'
+ flags={"is_demo": False, "is_personal": False},
+ owner_id=created_by_id,
+ created_by_id=created_by_id,
)
session.add(organization_db)
+ await session.commit()
+
log.info(
"[scopes] organization created",
organization_id=organization_db.id,
)
- await session.commit()
-
response = Reference(id=organization_db.id)
return response
@@ -331,14 +336,14 @@ async def create_workspace(
session.add(workspace_db)
+ await session.commit()
+
log.info(
"[scopes] workspace created",
organization_id=workspace_db.organization_id,
workspace_id=workspace_db.id,
)
- await session.commit()
-
response = Reference(id=workspace_db.id)
return response
@@ -361,6 +366,8 @@ async def create_project(
session.add(project_db)
+ await session.commit()
+
log.info(
"[scopes] project created",
organization_id=project_db.organization_id,
@@ -368,8 +375,6 @@ async def create_project(
project_id=project_db.id,
)
- await session.commit()
-
response = Reference(id=project_db.id)
return response
diff --git a/api/oss/src/services/auth_service.py b/api/oss/src/services/auth_service.py
index fc44984ae0..88ffa87e02 100644
--- a/api/oss/src/services/auth_service.py
+++ b/api/oss/src/services/auth_service.py
@@ -93,7 +93,17 @@ async def authentication_middleware(request: Request, call_next):
"""
try:
- await _authenticate(request)
+ if "authorisationurl" in request.url.path:
+ log.info(
+ "[AUTH-ROUTE] authorisationurl path=%s root_path=%s raw_path=%s",
+ request.scope.get("path"),
+ request.scope.get("root_path"),
+ request.scope.get("raw_path"),
+ )
+
+ await _check_authentication_token(request)
+
+ await _check_organization_policy(request)
response = await call_next(request)
@@ -134,7 +144,7 @@ async def authentication_middleware(request: Request, call_next):
)
-async def _authenticate(request: Request):
+async def _check_authentication_token(request: Request):
try:
if request.url.path.startswith(_PUBLIC_ENDPOINTS):
return
@@ -155,6 +165,7 @@ async def _authenticate(request: Request):
access_token = auth_header[len(_ACCESS_TOKEN_PREFIX) :]
return await verify_access_token(
+ request=request,
access_token=access_token,
)
@@ -233,6 +244,7 @@ async def _authenticate(request: Request):
async def verify_access_token(
+ request: Request,
access_token: str,
):
try:
@@ -242,6 +254,8 @@ async def verify_access_token(
if access_token != _SECRET_KEY:
raise UnauthorizedException()
+ request.state.admin = True
+
return
except UnauthorizedException as exc:
@@ -450,8 +464,6 @@ async def verify_bearer_token(
organization_id = project.organization_id
elif not query_project_id and query_workspace_id:
- log.warning("[AUTH] Missing project_id in query params!")
-
workspace = await db_manager.get_workspace(
workspace_id=query_workspace_id,
)
@@ -474,8 +486,6 @@ async def verify_bearer_token(
organization_id = workspace.organization_id
else:
- log.warning("[AUTH] Missing project_id in query params!")
-
if is_ee():
workspace_id = await db_manager_ee.get_default_workspace_id(
user_id=user_id,
@@ -753,3 +763,98 @@ async def sign_secret_token(
except Exception as exc: # pylint: disable=bare-except
raise InternalServerErrorException() from exc
+
+
+async def _check_organization_policy(request: Request):
+ """
+ Check organization authentication policy for EE mode.
+
+ This is called after authentication to ensure the user's authentication method
+ is allowed by the organization's policy flags.
+
+ Skips policy checks for:
+ - Admin endpoints (using ACCESS_TOKEN)
+ - Invitation-related routes to allow users to accept invitations
+ """
+ if not is_ee():
+ return
+
+ if hasattr(request.state, "admin") and request.state.admin:
+ return
+
+ # Skip policy check for invitation routes
+ # Users must be able to accept invitations regardless of org auth policies
+ invitation_paths = [
+ "/invite/accept",
+ "/invite/resend",
+ "/invite",
+ ]
+
+ if any(path in request.url.path for path in invitation_paths):
+ return
+
+ # Skip policy checks for org-agnostic endpoints (no explicit org context).
+ # This prevents SSO logins from being blocked by the default org policy
+ # before the frontend can redirect to the intended SSO org.
+ if (
+ request.url.path in {"/api/profile", "/api/organizations"}
+ or request.url.path.startswith("/api/projects")
+ or request.url.path.startswith("/api/organizations/")
+ ):
+ # NOTE: These endpoints are hit during initial login bootstrap before the FE
+ # redirects to the intended org (e.g., SSO org). Enforcing org policy here
+ # can incorrectly fail against the default org and log the user out.
+ return
+
+ organization_id = (
+ request.state.organization_id
+ if hasattr(request.state, "organization_id")
+ else None
+ )
+ user_id = request.state.user_id if hasattr(request.state, "user_id") else None
+
+ if not organization_id or not user_id:
+ return
+
+ from uuid import UUID
+ from oss.src.core.auth.service import AuthService
+
+ # Get identities from session
+ try:
+ session = await get_session(request) # type: ignore
+ payload = session.get_access_token_payload() if session else {} # type: ignore
+ session_identities = payload.get("session_identities") or []
+ user_identities = payload.get("user_identities", [])
+ except Exception:
+ session_identities = []
+ user_identities = []
+ return # Skip policy check on session errors
+
+ auth_service = AuthService()
+ policy_error = await auth_service.check_organization_access(
+ UUID(user_id), UUID(organization_id), session_identities
+ )
+
+ if policy_error:
+ # Only enforce auth policy errors; skip membership errors (route handlers handle those)
+ error_code = policy_error.get("error")
+ if error_code in {
+ "AUTH_UPGRADE_REQUIRED",
+ "AUTH_SSO_DENIED",
+ "AUTH_DOMAIN_DENIED",
+ }:
+ detail = {
+ "error": policy_error.get("error"),
+ "message": policy_error.get(
+ "message",
+ "Authentication method not allowed for this organization",
+ ),
+ "required_methods": policy_error.get("required_methods", []),
+ "session_identities": session_identities,
+ "user_identities": user_identities,
+ "sso_providers": policy_error.get("sso_providers", []),
+ "current_domain": policy_error.get("current_domain"),
+ "allowed_domains": policy_error.get("allowed_domains", []),
+ }
+ raise HTTPException(status_code=403, detail=detail)
+ # If NOT_A_MEMBER, skip - let route handlers deal with it
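
For clarity, the dictionary shape `_check_organization_policy` expects back from `AuthService.check_organization_access`, inferred only from the `.get(...)` calls above (the service's actual contract may carry more fields):

```python
# Inferred example -- not the authoritative contract of check_organization_access().
policy_error = {
    "error": "AUTH_SSO_DENIED",                 # one of the enforced error codes
    "message": "This organization requires SSO sign-in",
    "required_methods": ["sso"],
    "sso_providers": ["okta"],
    "current_domain": "example.com",
    "allowed_domains": ["example.com"],
}
```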
diff --git a/api/oss/src/services/db_manager.py b/api/oss/src/services/db_manager.py
index 9c48adf279..48be7d88d0 100644
--- a/api/oss/src/services/db_manager.py
+++ b/api/oss/src/services/db_manager.py
@@ -19,6 +19,7 @@
from oss.src.models import converters
from oss.src.services import user_service
from oss.src.utils.common import is_ee
+from oss.src.utils.env import env
from oss.src.dbs.postgres.shared.engine import engine
from oss.src.services.json_importer_helper import get_json
from oss.src.utils.helpers import get_slug_from_name_and_id
@@ -1071,10 +1072,12 @@ async def check_if_user_exists_and_create_organization(user_email: str):
)
if user is None and (total_users == 0):
- organization_name = user_email.split("@")[0]
- organization_db = await create_organization(name=organization_name)
+ organization_db = await create_organization(
+ name="Organization",
+ )
workspace_db = await create_workspace(
- name=organization_name, organization_id=str(organization_db.id)
+ name="Default",
+ organization_id=str(organization_db.id),
)
# update default project with organization and workspace ids
@@ -1082,7 +1085,7 @@ async def check_if_user_exists_and_create_organization(user_email: str):
values_to_update={
"organization_id": organization_db.id,
"workspace_id": workspace_db.id,
- "project_name": organization_name,
+ "project_name": "Default",
}
)
return organization_db
@@ -1302,7 +1305,7 @@ async def _assign_user_to_organization_oss(
await get_organization_owner(organization_id=organization_id)
except (NoResultFound, ValueError):
await update_organization(
- organization_id=organization_id, values_to_update={"owner": str(user_db.id)}
+ organization_id=organization_id, values_to_update={"owner_id": user_db.id}
)
# Get project belonging to organization
@@ -1339,28 +1342,52 @@ async def get_default_workspace_id_oss() -> str:
return str(workspaces[0].id)
-async def create_organization(name: str):
+async def create_organization(
+ name: str,
+ owner_id: Optional[uuid.UUID] = None,
+ created_by_id: Optional[uuid.UUID] = None,
+):
"""Create a new organization in the database.
Args:
name (str): The name of the organization
+ owner_id (Optional[uuid.UUID]): The UUID of the organization owner
+ created_by_id (Optional[uuid.UUID]): The UUID of the user who created the organization
Returns:
OrganizationDB: instance of organization
"""
async with engine.core_session() as session:
- organization_db = OrganizationDB(name=name)
+ # For bootstrap scenario, use a placeholder UUID if not provided
+ _owner_id = owner_id or uuid.uuid4()
+ _created_by_id = created_by_id or _owner_id
+
+ organization_db = OrganizationDB(
+ name=name,
+ flags={
+ "is_demo": False,
+ "is_personal": False,
+ "allow_email": env.auth.email_enabled,
+ "allow_social": env.auth.oidc_enabled,
+ "allow_sso": False,
+ "allow_root": False,
+ "domains_only": False,
+ "auto_join": False,
+ },
+ owner_id=_owner_id,
+ created_by_id=_created_by_id,
+ )
session.add(organization_db)
+ await session.commit()
+
log.info(
"[scopes] organization created",
organization_id=organization_db.id,
)
- await session.commit()
-
return organization_db
@@ -1385,14 +1412,14 @@ async def create_workspace(name: str, organization_id: str):
session.add(workspace_db)
+ await session.commit()
+
log.info(
"[scopes] workspace created",
organization_id=organization_id,
workspace_id=workspace_db.id,
)
- await session.commit()
-
return workspace_db
@@ -1413,6 +1440,15 @@ async def update_organization(organization_id: str, values_to_update: Dict[str,
if organization is None:
raise Exception(f"Organization with ID {organization_id} not found")
+ # Validate slug immutability: once set, cannot be changed
+ if "slug" in values_to_update:
+ new_slug = values_to_update["slug"]
+ if organization.slug is not None and new_slug != organization.slug:
+ raise ValueError(
+ f"Organization slug cannot be changed once set. "
+ f"Current slug: '{organization.slug}'"
+ )
+
for key, value in values_to_update.items():
if hasattr(organization, key):
setattr(organization, key, value)
@@ -1433,7 +1469,7 @@ async def create_or_update_default_project(values_to_update: Dict[str, Any]):
project = result.scalar()
if project is None:
- project = ProjectDB(project_name="Default Project", is_default=True)
+ project = ProjectDB(project_name="Default", is_default=True)
session.add(project)
@@ -1478,6 +1514,25 @@ async def get_organization_by_id(organization_id: str) -> OrganizationDB:
return organization
+async def get_organization_by_slug(organization_slug: str) -> OrganizationDB:
+ """
+ Retrieve an organization from the database by its slug.
+
+ Args:
+ organization_slug (str): The slug of the organization
+
+ Returns:
+ OrganizationDB: The organization object if found, None otherwise.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDB).filter_by(slug=organization_slug)
+ )
+ organization = result.scalar()
+ return organization
+
+
async def get_organization_owner(organization_id: str):
"""
Retrieve the owner of an organization from the database by its ID.
@@ -1497,7 +1552,39 @@ async def get_organization_owner(organization_id: str):
if organization is None:
raise NoResultFound(f"Organization with ID {organization_id} not found")
- return await get_user_with_id(user_id=str(organization.owner))
+ return await get_user_with_id(user_id=str(organization.owner_id))
+
+
+async def get_user_organizations(user_id: str) -> List[OrganizationDB]:
+ """
+ Retrieve all organizations that a user is a member of.
+
+ Args:
+ user_id (str): The ID of the user
+
+ Returns:
+ List[OrganizationDB]: List of organizations the user belongs to
+ """
+ # Import OrganizationMemberDB conditionally (EE only)
+ if is_ee():
+ from ee.src.models.db_models import OrganizationMemberDB
+
+ async with engine.core_session() as session:
+ # Query organizations through organization_members table
+ result = await session.execute(
+ select(OrganizationDB)
+ .join(
+ OrganizationMemberDB,
+ OrganizationDB.id == OrganizationMemberDB.organization_id,
+ )
+ .filter(OrganizationMemberDB.user_id == uuid.UUID(user_id))
+ )
+ organizations = result.scalars().all()
+ return list(organizations)
+ else:
+ # OSS mode: return empty list or implement simplified logic
+ # In OSS, users might only have one default organization
+ return []
async def get_workspace(workspace_id: str) -> WorkspaceDB:
@@ -1623,6 +1710,23 @@ async def get_user_with_id(user_id: str) -> UserDB:
return user
+async def update_user_username(user_id: str, username: str) -> UserDB:
+ """Update a user's username."""
+
+ async with engine.core_session() as session:
+ result = await session.execute(select(UserDB).filter_by(id=uuid.UUID(user_id)))
+ user = result.scalars().first()
+ if user is None:
+ log.error("Failed to get user with id for username update")
+ raise NoResultFound(f"User with id {user_id} not found")
+
+ user.username = username
+ user.updated_at = datetime.now(timezone.utc)
+ await session.commit()
+ await session.refresh(user)
+ return user
+
+
async def get_user_with_email(email: str):
"""
Retrieves a user from the database based on their email address.
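
Behaviourally, the new slug guard in `update_organization` makes the slug write-once; a small sketch of the expected outcomes:

```python
from oss.src.services.db_manager import update_organization


async def demo(org_id: str) -> None:
    # First write: slug is still None on the row, so this succeeds.
    await update_organization(org_id, {"slug": "acme"})

    # Re-sending the same value passes the guard (no change).
    await update_organization(org_id, {"slug": "acme"})

    # Changing an already-set slug raises ValueError.
    await update_organization(org_id, {"slug": "acme-inc"})
```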
diff --git a/api/oss/src/services/email_service.py b/api/oss/src/services/email_service.py
index 650de559d9..1fa88996e2 100644
--- a/api/oss/src/services/email_service.py
+++ b/api/oss/src/services/email_service.py
@@ -16,7 +16,10 @@
log.info("✓ SendGrid enabled")
else:
sg = None
- log.warn("✗ SendGrid disabled")
+ if env.sendgrid.api_key and not env.sendgrid.from_address:
+ log.warn("✗ SendGrid disabled: missing sender email address")
+ else:
+ log.warn("✗ SendGrid disabled")
def read_email_template(template_file_path):
diff --git a/api/oss/src/services/evaluators_service.py b/api/oss/src/services/evaluators_service.py
index fc676b2b48..f3bddaf66c 100644
--- a/api/oss/src/services/evaluators_service.py
+++ b/api/oss/src/services/evaluators_service.py
@@ -1,33 +1,30 @@
-import re
import json
+import re
import traceback
-from typing import Any, Dict, Union, List, Optional
+from typing import Any, Dict, List, Optional, Union
-import litellm
import httpx
+import litellm
+from agenta.sdk.managers.secrets import SecretsManager
from fastapi import HTTPException
from openai import AsyncOpenAI
-
-# COMMENTED OUT: autoevals dependency removed
-# from autoevals.ragas import Faithfulness, ContextRelevancy
-
-from oss.src.utils.logging import get_module_logger
-from oss.src.services.security import sandbox
-from oss.src.models.shared_models import Error, Result
from oss.src.models.api.evaluation_model import (
EvaluatorInputInterface,
- EvaluatorOutputInterface,
EvaluatorMappingInputInterface,
EvaluatorMappingOutputInterface,
+ EvaluatorOutputInterface,
)
+from oss.src.models.shared_models import Error, Result
+
+# COMMENTED OUT: autoevals dependency removed
+# from autoevals.ragas import Faithfulness, ContextRelevancy
+from oss.src.utils.logging import get_module_logger
from oss.src.utils.traces import (
- remove_trace_prefix,
process_distributed_trace_into_trace_tree,
get_field_value_from_trace_tree,
)
from agenta.sdk.contexts.running import RunningContext
-from agenta.sdk.managers.secrets import SecretsManager
from agenta.sdk.models.workflows import (
WorkflowServiceRequest,
WorkflowServiceRequestData,
@@ -261,7 +258,7 @@ async def auto_exact_match(
message=str(e),
),
)
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -360,6 +357,139 @@ async def field_match_test(input: EvaluatorInputInterface) -> EvaluatorOutputInt
return {"outputs": {"success": result}}
+def get_nested_value(obj: Any, path: str) -> Any:
+ """
+ Get value from nested object using resolve_any() with graceful None on failure.
+
+ Supports multiple path formats:
+ - Dot notation: "user.address.city", "items.0.name"
+ - JSON Path: "$.user.address.city", "$.items[0].name"
+ - JSON Pointer: "/user/address/city", "/items/0/name"
+
+ Args:
+ obj: The object to traverse (dict or nested structure)
+ path: Path expression in any supported format
+
+ Returns:
+ The value at the specified path, or None if path doesn't exist or resolution fails
+ """
+ if obj is None:
+ return None
+
+ try:
+ return resolve_any(path, obj)
+ except (KeyError, IndexError, ValueError, TypeError, ImportError):
+ return None
+
+
+async def auto_json_multi_field_match(
+ inputs: Dict[str, Any], # pylint: disable=unused-argument
+ output: Union[str, Dict[str, Any]],
+ data_point: Dict[str, Any],
+ app_params: Dict[str, Any], # pylint: disable=unused-argument
+ settings_values: Dict[str, Any],
+ lm_providers_keys: Dict[str, Any], # pylint: disable=unused-argument
+) -> Result:
+ """
+ Evaluator that compares multiple configured fields in expected JSON against LLM output JSON.
+ Each configured field becomes a separate score in the output.
+
+ Returns a Result with:
+ - type="object" containing one score per configured field plus overall score
+ - Each field score is 1.0 (match) or 0.0 (no match)
+ - Overall 'score' is the average of all field scores
+ """
+ try:
+ output = validate_string_output("json_multi_field_match", output)
+ correct_answer = get_correct_answer(data_point, settings_values)
+ eval_inputs = {"ground_truth": correct_answer, "prediction": output}
+ response = await json_multi_field_match(
+ input=EvaluatorInputInterface(
+ **{"inputs": eval_inputs, "settings": settings_values}
+ )
+ )
+ return Result(type="object", value=response["outputs"])
+ except ValueError as e:
+ return Result(
+ type="error",
+ value=None,
+ error=Error(
+ message=str(e),
+ ),
+ )
+ except Exception:
+ return Result(
+ type="error",
+ value=None,
+ error=Error(
+ message="Error during JSON Multi-Field Match evaluation",
+ stacktrace=str(traceback.format_exc()),
+ ),
+ )
+
+
+async def json_multi_field_match(
+ input: EvaluatorInputInterface,
+) -> EvaluatorOutputInterface:
+ """
+ Compare configured fields in expected JSON against LLM output JSON.
+ Each configured field becomes a separate score in the output.
+
+ Args:
+ input: EvaluatorInputInterface with:
+ - inputs.prediction: JSON string from LLM output
+ - inputs.ground_truth: JSON string from test data column
+ - settings.fields: List of field paths (strings) e.g., ["name", "email", "user.address.city"]
+
+ Returns:
+ EvaluatorOutputInterface with one score per configured field plus overall score
+ """
+ fields = input.settings.get("fields", [])
+
+ if not fields:
+ raise ValueError("No fields configured for comparison")
+
+ # Parse both JSON objects
+ prediction = input.inputs.get("prediction", "")
+ ground_truth = input.inputs.get("ground_truth", "")
+
+ try:
+ if isinstance(ground_truth, str):
+ expected = json.loads(ground_truth)
+ else:
+ expected = ground_truth
+ except json.JSONDecodeError as e:
+ raise ValueError(f"Invalid JSON in ground truth: {str(e)}")
+
+ try:
+ if isinstance(prediction, str):
+ actual = json.loads(prediction)
+ else:
+ actual = prediction
+ except json.JSONDecodeError as e:
+ raise ValueError(f"Invalid JSON in prediction: {str(e)}")
+
+ results: Dict[str, Any] = {}
+ matches = 0
+
+ for field_path in fields:
+ # Support nested fields with dot notation
+ expected_val = get_nested_value(expected, field_path)
+ actual_val = get_nested_value(actual, field_path)
+
+ # Exact match comparison (v1 - always exact)
+ match = expected_val == actual_val
+
+ results[field_path] = 1.0 if match else 0.0
+ if match:
+ matches += 1
+
+ # Aggregate score is the percentage of matching fields
+ results["aggregate_score"] = matches / len(fields) if fields else 0.0
+
+ return {"outputs": results}
+
+
async def auto_webhook_test(
inputs: Dict[str, Any],
output: Union[str, Dict[str, Any]],
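
A usage sketch of the new `json_multi_field_match` evaluator (assumes the dot-notation resolution provided by `resolve_any` is available; the expected output is noted in the comment):

```python
import asyncio
import json

from oss.src.models.api.evaluation_model import EvaluatorInputInterface
from oss.src.services.evaluators_service import json_multi_field_match


async def demo() -> None:
    result = await json_multi_field_match(
        input=EvaluatorInputInterface(
            inputs={
                "ground_truth": json.dumps({"name": "Ada", "user": {"city": "London"}}),
                "prediction": json.dumps({"name": "Ada", "user": {"city": "Paris"}}),
            },
            settings={"fields": ["name", "user.city"]},
        )
    )
    # One score per field plus the aggregate:
    # {"name": 1.0, "user.city": 0.0, "aggregate_score": 0.5}
    print(result["outputs"])


asyncio.run(demo())
```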
@@ -383,7 +513,7 @@ async def auto_webhook_test(
type="error",
value=None,
error=Error(
- message=f"[webhook evaluation] HTTP - {repr(e)}",
+ message=f"[webhook evaluator] HTTP - {repr(e)}",
stacktrace=traceback.format_exc(),
),
)
@@ -392,7 +522,7 @@ async def auto_webhook_test(
type="error",
value=None,
error=Error(
- message=f"[webhook evaluation] JSON - {repr(e)}",
+ message=f"[webhook evaluator] JSON - {repr(e)}",
stacktrace=traceback.format_exc(),
),
)
@@ -401,7 +531,7 @@ async def auto_webhook_test(
type="error",
value=None,
error=Error(
- message=f"[webhook evaluation] Exception - {repr(e)} ",
+ message=f"[webhook evaluator] Exception - {repr(e)} ",
stacktrace=traceback.format_exc(),
),
)
@@ -437,13 +567,13 @@ async def auto_custom_code_run(
"prediction": output,
"ground_truth": correct_answer,
}
- response = await custom_code_run(
+ response = await sdk_custom_code_run(
input=EvaluatorInputInterface(
**{"inputs": inputs, "settings": settings_values}
)
)
return Result(type="number", value=response["outputs"]["score"])
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -454,18 +584,6 @@ async def auto_custom_code_run(
)
-async def custom_code_run(input: EvaluatorInputInterface) -> EvaluatorOutputInterface:
- result = sandbox.execute_code_safely(
- app_params=input.inputs["app_config"],
- inputs=input.inputs,
- output=input.inputs["prediction"],
- correct_answer=input.inputs["ground_truth"],
- code=input.settings["code"],
- datapoint=input.inputs["ground_truth"],
- )
- return {"outputs": {"score": result}}
-
-
async def sdk_custom_code_run(
input: EvaluatorInputInterface,
) -> EvaluatorOutputInterface:
@@ -483,7 +601,7 @@ async def sdk_custom_code_run(
)
threshold = settings.get("threshold", 0.5)
- runtime = settings.get("runtime")
+ runtime = settings.get("runtime", "python")
workflow = sdk_auto_custom_code_run(
code=str(code),
@@ -504,6 +622,12 @@ async def sdk_custom_code_run(
)
response = await workflow.invoke(request=request)
+
+ # Check for error status and propagate it
+ if response.status and response.status.code and response.status.code >= 400:
+ error_message = response.status.message or "Custom code execution failed"
+ raise RuntimeError(error_message)
+
result = response.data.outputs if response.data else None
if isinstance(result, dict) and "score" in result:
@@ -560,7 +684,7 @@ async def auto_ai_critique(
)
)
return Result(type="number", value=response["outputs"]["score"])
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -571,9 +695,7 @@ async def auto_ai_critique(
)
-import json
-import re
-from typing import Any, Dict, Iterable, Tuple, Optional
+from typing import Any, Dict, Iterable, Tuple
try:
import jsonpath # ✅ use module API
@@ -841,7 +963,7 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac
if inputs and isinstance(inputs, dict) and correct_answer_key:
correct_answer = inputs[correct_answer_key]
- secrets = await SecretsManager.retrieve_secrets()
+ secrets, _, _ = await SecretsManager.retrieve_secrets()
openai_api_key = None # secrets.get("OPENAI_API_KEY")
anthropic_api_key = None # secrets.get("ANTHROPIC_API_KEY")
@@ -1025,7 +1147,7 @@ async def ai_critique(input: EvaluatorInputInterface) -> EvaluatorOutputInterfac
if inputs and isinstance(inputs, dict) and correct_answer_key:
correct_answer = inputs[correct_answer_key]
- secrets = await SecretsManager.retrieve_secrets()
+ secrets, _, _ = await SecretsManager.retrieve_secrets()
openai_api_key = None # secrets.get("OPENAI_API_KEY")
anthropic_api_key = None # secrets.get("ANTHROPIC_API_KEY")
@@ -1210,7 +1332,7 @@ async def auto_starts_with(
)
)
return Result(type="bool", value=response["outputs"]["success"])
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -1252,7 +1374,7 @@ async def auto_ends_with(
)
result = Result(type="bool", value=response["outputs"]["success"])
return result
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -1294,7 +1416,7 @@ async def auto_contains(
)
result = Result(type="bool", value=response["outputs"]["success"])
return result
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -1336,7 +1458,7 @@ async def auto_contains_any(
)
result = Result(type="bool", value=response["outputs"]["success"])
return result
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -1379,7 +1501,7 @@ async def auto_contains_all(
)
result = Result(type="bool", value=response["outputs"]["success"])
return result
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -1427,7 +1549,7 @@ async def auto_contains_json(
input=EvaluatorInputInterface(**{"inputs": {"prediction": output}})
)
return Result(type="bool", value=response["outputs"]["success"])
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -1445,7 +1567,7 @@ async def contains_json(input: EvaluatorInputInterface) -> EvaluatorOutputInterf
potential_json = str(input.inputs["prediction"])[start_index:end_index]
json.loads(potential_json)
contains_json = True
- except (ValueError, json.JSONDecodeError) as e:
+ except (ValueError, json.JSONDecodeError):
contains_json = False
return {"outputs": {"success": contains_json}}
@@ -1908,7 +2030,7 @@ async def auto_levenshtein_distance(
message=str(e),
),
)
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -1948,7 +2070,7 @@ async def auto_similarity_match(
message=str(e),
),
)
- except Exception as e: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
return Result(
type="error",
value=None,
@@ -2058,6 +2180,7 @@ async def auto_semantic_similarity(
"auto_exact_match": auto_exact_match,
"auto_regex_test": auto_regex_test,
"field_match_test": auto_field_match_test,
+ "json_multi_field_match": auto_json_multi_field_match,
"auto_webhook_test": auto_webhook_test,
"auto_custom_code_run": auto_custom_code_run,
"auto_ai_critique": auto_ai_critique,
@@ -2080,6 +2203,7 @@ async def auto_semantic_similarity(
"auto_exact_match": exact_match,
"auto_regex_test": regex_test,
"field_match_test": field_match_test,
+ "json_multi_field_match": json_multi_field_match,
"auto_webhook_test": webhook_test,
"auto_custom_code_run": sdk_custom_code_run,
"auto_ai_critique": ai_critique,
diff --git a/api/oss/src/services/organization_service.py b/api/oss/src/services/organization_service.py
index 53d0d35852..401e5e8072 100644
--- a/api/oss/src/services/organization_service.py
+++ b/api/oss/src/services/organization_service.py
@@ -119,7 +119,10 @@ async def send_invitation_email(
username_placeholder=user.username,
action_placeholder="invited you to join",
workspace_placeholder="their organization",
- call_to_action=f"""Click the link below to accept the invitation: Accept Invitation""",
+ call_to_action=(
+ "Click the link below to accept the invitation: "
+ f'Accept Invitation'
+ ),
)
if not env.sendgrid.from_address:
@@ -249,8 +252,13 @@ async def resend_user_organization_invite(
if existing_invitation:
invitation = existing_invitation
elif existing_role:
- # Create a new invitation
- invitation = await create_invitation("editor", project_id, payload.email)
+ # Create a new invitation with the previous role
+ invitation = await create_invitation(existing_role, project_id, payload.email)
+ else:
+ raise HTTPException(
+ status_code=404,
+ detail="No existing invitation found for the user",
+ )
# Get project by id
project_db = await db_manager.get_project_by_id(project_id=project_id)
diff --git a/api/oss/src/services/security/sandbox.py b/api/oss/src/services/security/sandbox.py
deleted file mode 100644
index 1102dac308..0000000000
--- a/api/oss/src/services/security/sandbox.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from typing import Union, Text, Dict, Any
-
-from RestrictedPython import safe_builtins, compile_restricted, utility_builtins
-from RestrictedPython.Eval import (
- default_guarded_getiter,
- default_guarded_getitem,
-)
-from RestrictedPython.Guards import (
- guarded_iter_unpack_sequence,
- full_write_guard,
-)
-
-
-def is_import_safe(python_code: Text) -> bool:
- """Checks if the imports in the python code contains a system-level import.
-
- Args:
- python_code (str): The Python code to be executed
-
- Returns:
- bool - module is secured or not
- """
-
- disallowed_imports = ["os", "subprocess", "threading", "multiprocessing"]
- for import_ in disallowed_imports:
- if import_ in python_code:
- return False
- return True
-
-
-def execute_code_safely(
- app_params: Dict[str, str],
- inputs: Dict[str, str],
- output: Union[str, Dict[str, Any]],
- correct_answer: str, # for backward compatibility reasons
- code: Text,
- datapoint: Dict[str, str],
-) -> Union[float, None]:
- """
- Execute the provided Python code safely using RestrictedPython.
-
- Args:
- - app_params (Dict[str, str]): The parameters of the app variant.
- - inputs (dict): Inputs to be used during code execution.
- - output (str): The output of the app variant after being called.
- - correct_answer (str): The correct answer (or target) of the app variant.
- - code (Text): The Python code to be executed.
- - datapoint (Dict[str, str]): The test datapoint.
-
- Returns:
- - (float): Result of the execution if successful. Should be between 0 and 1.
- - None if execution fails or result is not a float between 0 and 1.
- """
- # Define the available built-ins
- local_builtins = safe_builtins.copy()
-
- # Add the __import__ built-in function to the local builtins
- local_builtins["__import__"] = __import__
-
- # Define supported packages
- allowed_imports = [
- "math",
- "random",
- "datetime",
- "json",
- "httpx",
- "typing",
- ]
-
- # Create a dictionary to simulate allowed imports
- allowed_modules = {}
- for package_name in allowed_imports:
- allowed_modules[package_name] = __import__(package_name)
-
- # Add the allowed modules to the local built-ins
- local_builtins.update(allowed_modules)
- local_builtins.update(utility_builtins)
-
- # Define the environment for the code execution
- environment = {
- "_getiter_": default_guarded_getiter,
- "_getitem_": default_guarded_getitem,
- "_iter_unpack_sequence_": guarded_iter_unpack_sequence,
- "_write_": full_write_guard,
- "__builtins__": local_builtins,
- }
-
- # Compile the code in a restricted environment
- byte_code = compile_restricted(code, filename="", mode="exec")
-
- # Call the evaluation function, extract the result if it exists
- # and is a float between 0 and 1
- try:
- # Execute the code
- exec(byte_code, environment)
-
- # Call the evaluation function, extract the result
- result = environment["evaluate"](app_params, inputs, output, correct_answer)
-
- # Attempt to convert result to float
- if isinstance(result, (float, int, str)):
- try:
- result = float(result)
- except ValueError as e:
- raise ValueError(f"Result cannot be converted to float: {e}")
-
- if not isinstance(result, float):
- raise TypeError(f"Result is not a float after conversion: {type(result)}")
-
- return result
-
- except KeyError as e:
- raise KeyError(f"Missing expected key in environment: {e}")
-
- except SyntaxError as e:
- raise SyntaxError(f"Syntax error in provided code: {e}")
-
- except Exception as e:
- raise RuntimeError(f"Error during code execution: {e}")
diff --git a/api/oss/src/services/user_service.py b/api/oss/src/services/user_service.py
index e05662da68..d254510e72 100644
--- a/api/oss/src/services/user_service.py
+++ b/api/oss/src/services/user_service.py
@@ -1,5 +1,5 @@
from sqlalchemy.future import select
-from sqlalchemy.exc import NoResultFound
+from sqlalchemy.exc import NoResultFound, IntegrityError
from supertokens_python.recipe.emailpassword.asyncio import create_reset_password_link
from oss.src.utils.env import env
@@ -12,32 +12,87 @@
log = get_module_logger(__name__)
-async def create_new_user(payload: dict) -> UserDB:
+async def check_user_exists(email: str) -> bool:
"""
- This function creates a new user.
+ Check if a user with the given email already exists.
Args:
- payload (dict): The payload data to create the user.
+ email (str): The email to check.
Returns:
- UserDB: The created user object.
+ bool: True if user exists, False otherwise.
"""
+ user = await db_manager.get_user_with_email(email)
+ return user is not None
- async with engine.core_session() as session:
- user = UserDB(**payload)
- session.add(user)
+async def delete_user(user_id: str) -> None:
+ """
+ Delete a user by their ID.
- log.info(
- "[scopes] user created",
- user_id=user.id,
- )
+ Args:
+ user_id (str): The ID of the user to delete.
+ Raises:
+ NoResultFound: If user with the given ID is not found.
+ """
+ async with engine.core_session() as session:
+ result = await session.execute(select(UserDB).filter_by(id=user_id))
+ user = result.scalars().first()
+
+ if not user:
+ raise NoResultFound(f"User with id {user_id} not found.")
+
+ await session.delete(user)
await session.commit()
- await session.refresh(user)
- return user
+async def create_new_user(payload: dict) -> UserDB:
+ """
+ Create a new user or return existing user if already exists (idempotent).
+
+ This function is safe to call multiple times in parallel with the same email.
+ It implements check-before-create with error fallback to handle race conditions.
+
+ Args:
+ payload (dict): The payload data to create the user (must include 'email').
+
+ Returns:
+ UserDB: The created or existing user object.
+ """
+
+ # Check if user already exists (happy path optimization)
+ existing_user = await db_manager.get_user_with_email(payload["email"])
+ if existing_user:
+ return existing_user
+
+ # Attempt to create new user
+ try:
+ async with engine.core_session() as session:
+ user = UserDB(**payload)
+
+ session.add(user)
+
+ await session.commit()
+
+ await session.refresh(user)
+
+ log.info(
+ "[scopes] user created",
+ user_id=user.id,
+ )
+
+ return user
+
+ except IntegrityError:
+ # Race condition: another request created user between check and create
+ # Fetch and return the existing user
+ existing_user = await db_manager.get_user_with_email(payload["email"])
+ if existing_user:
+ return existing_user
+ else:
+ # Should never happen, but re-raise if user still doesn't exist
+ raise
async def update_user(user_uid: str, payload: UserUpdate) -> UserDB:
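
The idempotent `create_new_user` above is meant to survive concurrent sign-ins for the same email; a sketch of that behaviour (the payload fields shown are an assumption about what `UserDB` requires):

```python
import asyncio

from oss.src.services.user_service import create_new_user


async def demo() -> None:
    payload = {"email": "ada@example.com", "username": "ada"}  # assumed minimal payload

    # Two concurrent calls: one inserts, the other either sees the row up front
    # or catches the IntegrityError and re-fetches the existing user.
    first, second = await asyncio.gather(
        create_new_user(dict(payload)),
        create_new_user(dict(payload)),
    )
    assert first.id == second.id


asyncio.run(demo())
```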
diff --git a/api/oss/src/utils/caching.py b/api/oss/src/utils/caching.py
index b87b4a0ffe..543888e683 100644
--- a/api/oss/src/utils/caching.py
+++ b/api/oss/src/utils/caching.py
@@ -12,6 +12,7 @@
log = get_module_logger(__name__)
+AGENTA_LOCK_TTL = 15 # 15 seconds
AGENTA_CACHE_TTL = 5 * 60 # 5 minutes
AGENTA_CACHE_LOCAL_TTL = 60 # 60 seconds for local in-memory cache (Layer 1)
@@ -315,6 +316,10 @@ async def set_cache(
value: Optional[Any] = None,
ttl: Optional[int] = AGENTA_CACHE_TTL,
) -> Optional[bool]:
+ # Noop if caching is disabled
+ if not env.redis.cache_enabled:
+ return None
+
try:
cache_name = _pack(
namespace=namespace,
@@ -386,6 +391,10 @@ async def get_cache(
jitter: Optional[float] = AGENTA_CACHE_JITTER_SPREAD,
leakage: Optional[float] = AGENTA_CACHE_LEAKAGE_PROBABILITY,
) -> Optional[Any]:
+ # Noop if caching is disabled - always return cache miss
+ if not env.redis.cache_enabled:
+ return None
+
try:
cache_name = _pack(
namespace=namespace,
@@ -441,6 +450,10 @@ async def invalidate_cache(
project_id: Optional[str] = None,
user_id: Optional[str] = None,
) -> Optional[bool]:
+ # Noop if caching is disabled
+ if not env.redis.cache_enabled:
+ return None
+
try:
cache_name = None
@@ -535,3 +548,131 @@ async def invalidate_cache(
log.warn(e)
return None
+
+
+async def acquire_lock(
+ namespace: str,
+ key: Optional[Union[str, dict]] = None,
+ project_id: Optional[str] = None,
+ user_id: Optional[str] = None,
+ ttl: int = AGENTA_LOCK_TTL,
+) -> Optional[str]:
+ """Acquire a distributed lock using Redis SET NX (atomic check-and-set).
+
+ This prevents race conditions in distributed systems by ensuring only one
+ process can acquire the lock at a time.
+
+ Args:
+ namespace: Lock namespace (e.g., "account-creation", "task-processing")
+ key: Unique identifier for the lock (e.g., email, user_id, task_id)
+ project_id: Optional project scope
+ user_id: Optional user scope
+ ttl: Lock expiration time in seconds (default: AGENTA_LOCK_TTL = 15). Auto-releases after TTL.
+
+ Returns:
+ Lock key string if lock was acquired, None if lock is already held by another process.
+
+ Example:
+ lock_key = await acquire_lock(namespace="account-creation", key=email, ttl=10)
+ if not lock_key:
+ # Another process has the lock
+ return
+
+ try:
+ # Do work while holding the lock
+ await create_account(email)
+ finally:
+ # Always release the lock
+ await release_lock(namespace="account-creation", key=email)
+ """
+ try:
+ lock_key = _pack(
+ namespace=f"lock:{namespace}",
+ key=key,
+ project_id=project_id,
+ user_id=user_id,
+ )
+
+ # Atomic SET NX: Returns True if lock acquired, False if already held
+ acquired = await r.set(lock_key, "1", nx=True, ex=ttl)
+
+ if acquired:
+ if CACHE_DEBUG:
+ log.debug(
+ "[lock] ACQUIRED",
+ key=lock_key,
+ ttl=ttl,
+ )
+ return lock_key
+ else:
+ if CACHE_DEBUG:
+ log.debug(
+ "[lock] BLOCKED",
+ key=lock_key,
+ )
+ return None
+
+ except Exception as e:
+ log.error(
+ f"[lock] ACQUIRE ERROR: namespace={namespace} key={key} error={e}",
+ exc_info=True,
+ )
+ return None
+
+
+async def release_lock(
+ namespace: str,
+ key: Optional[Union[str, dict]] = None,
+ project_id: Optional[str] = None,
+ user_id: Optional[str] = None,
+) -> bool:
+ """Release a distributed lock acquired with acquire_lock().
+
+ Args:
+ namespace: Lock namespace (same as used in acquire_lock)
+ key: Lock key (same as used in acquire_lock)
+ project_id: Optional project ID (same as used in acquire_lock)
+ user_id: Optional user ID (same as used in acquire_lock)
+
+ Returns:
+ True if lock was released, False if already expired or on error
+
+ Example:
+ lock_acquired = await acquire_lock(namespace="account-creation", key=email)
+ if lock_acquired:
+ try:
+ # ... critical section ...
+ finally:
+ await release_lock(namespace="account-creation", key=email)
+ """
+ try:
+ lock_key = _pack(
+ namespace=f"lock:{namespace}",
+ key=key,
+ project_id=project_id,
+ user_id=user_id,
+ )
+
+ deleted = await r.delete(lock_key)
+
+ if deleted:
+ if CACHE_DEBUG:
+ log.debug(
+ "[lock] RELEASED",
+ key=lock_key,
+ )
+ return True
+ else:
+ if CACHE_DEBUG:
+ log.debug(
+ "[lock] ALREADY EXPIRED",
+ key=lock_key,
+ )
+ return False
+
+ except Exception as e:
+ log.error(
+ f"[lock] RELEASE ERROR: namespace={namespace} key={key} error={e}",
+ exc_info=True,
+ )
+ return False
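
A minimal usage sketch for the lock helpers added above: acquire with a TTL, do the work in a `try`, release in `finally` with the same namespace/key. The import path and the `provision_account` coroutine are assumptions for illustration only.

```python
from oss.src.utils.caching import acquire_lock, release_lock


async def create_account_once(email: str) -> bool:
    lock_key = await acquire_lock(namespace="account-creation", key=email, ttl=15)
    if not lock_key:
        # Another worker holds the lock; let it finish instead of duplicating work.
        return False
    try:
        await provision_account(email)  # hypothetical critical section
        return True
    finally:
        # Release by namespace/key, matching release_lock's signature.
        await release_lock(namespace="account-creation", key=email)
```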
diff --git a/api/oss/src/utils/env.py b/api/oss/src/utils/env.py
index d7e6fba7b7..7298f5a000 100644
--- a/api/oss/src/utils/env.py
+++ b/api/oss/src/utils/env.py
@@ -43,27 +43,92 @@ def validate_config(self) -> None:
class AuthConfig(BaseModel):
"""Authentication configuration - auto-detects enabled methods from env vars"""
- authn_email: str | None = os.getenv("AGENTA_AUTHN_EMAIL")
+ supertokens_email_disabled: bool = (
+ os.getenv("SUPERTOKENS_EMAIL_DISABLED") or "false"
+ ).lower() in _TRUTHY
google_oauth_client_id: str | None = os.getenv("GOOGLE_OAUTH_CLIENT_ID")
google_oauth_client_secret: str | None = os.getenv("GOOGLE_OAUTH_CLIENT_SECRET")
+ google_workspaces_oauth_client_id: str | None = os.getenv(
+ "GOOGLE_WORKSPACES_OAUTH_CLIENT_ID"
+ )
+ google_workspaces_oauth_client_secret: str | None = os.getenv(
+ "GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET"
+ )
+ google_workspaces_hd: str | None = os.getenv("GOOGLE_WORKSPACES_HD")
+
github_oauth_client_id: str | None = os.getenv("GITHUB_OAUTH_CLIENT_ID")
github_oauth_client_secret: str | None = os.getenv("GITHUB_OAUTH_CLIENT_SECRET")
+ facebook_oauth_client_id: str | None = os.getenv("FACEBOOK_OAUTH_CLIENT_ID")
+ facebook_oauth_client_secret: str | None = os.getenv("FACEBOOK_OAUTH_CLIENT_SECRET")
+
+ apple_oauth_client_id: str | None = os.getenv("APPLE_OAUTH_CLIENT_ID")
+ apple_oauth_client_secret: str | None = os.getenv("APPLE_OAUTH_CLIENT_SECRET")
+ apple_key_id: str | None = os.getenv("APPLE_KEY_ID")
+ apple_team_id: str | None = os.getenv("APPLE_TEAM_ID")
+ apple_private_key: str | None = os.getenv("APPLE_PRIVATE_KEY")
+
+ discord_oauth_client_id: str | None = os.getenv("DISCORD_OAUTH_CLIENT_ID")
+ discord_oauth_client_secret: str | None = os.getenv("DISCORD_OAUTH_CLIENT_SECRET")
+
+ twitter_oauth_client_id: str | None = os.getenv("TWITTER_OAUTH_CLIENT_ID")
+ twitter_oauth_client_secret: str | None = os.getenv("TWITTER_OAUTH_CLIENT_SECRET")
+
+ gitlab_oauth_client_id: str | None = os.getenv("GITLAB_OAUTH_CLIENT_ID")
+ gitlab_oauth_client_secret: str | None = os.getenv("GITLAB_OAUTH_CLIENT_SECRET")
+ gitlab_base_url: str | None = os.getenv("GITLAB_BASE_URL")
+
+ bitbucket_oauth_client_id: str | None = os.getenv("BITBUCKET_OAUTH_CLIENT_ID")
+ bitbucket_oauth_client_secret: str | None = os.getenv(
+ "BITBUCKET_OAUTH_CLIENT_SECRET"
+ )
+
+ linkedin_oauth_client_id: str | None = os.getenv("LINKEDIN_OAUTH_CLIENT_ID")
+ linkedin_oauth_client_secret: str | None = os.getenv("LINKEDIN_OAUTH_CLIENT_SECRET")
+
+ okta_oauth_client_id: str | None = os.getenv("OKTA_OAUTH_CLIENT_ID")
+ okta_oauth_client_secret: str | None = os.getenv("OKTA_OAUTH_CLIENT_SECRET")
+ okta_domain: str | None = os.getenv("OKTA_DOMAIN")
+
+ azure_ad_oauth_client_id: str | None = os.getenv(
+ "AZURE_AD_OAUTH_CLIENT_ID"
+ ) or os.getenv("ACTIVE_DIRECTORY_OAUTH_CLIENT_ID")
+ azure_ad_oauth_client_secret: str | None = os.getenv(
+ "AZURE_AD_OAUTH_CLIENT_SECRET"
+ ) or os.getenv("ACTIVE_DIRECTORY_OAUTH_CLIENT_SECRET")
+ azure_ad_directory_id: str | None = os.getenv("AZURE_AD_DIRECTORY_ID") or os.getenv(
+ "ACTIVE_DIRECTORY_DIRECTORY_ID"
+ )
+
+ boxy_saml_oauth_client_id: str | None = os.getenv("BOXY_SAML_OAUTH_CLIENT_ID")
+ boxy_saml_oauth_client_secret: str | None = os.getenv(
+ "BOXY_SAML_OAUTH_CLIENT_SECRET"
+ )
+ boxy_saml_url: str | None = os.getenv("BOXY_SAML_URL")
+
model_config = ConfigDict(extra="ignore")
def model_post_init(self, _):
- """Ensure at least one auth method is enabled; fallback to password email."""
- if not self.authn_email and not self.oidc_enabled:
- self.authn_email = "password"
+ """Keep config normalized without relying on deprecated AGENTA_AUTHN_EMAIL."""
+ return
@property
def email_method(self) -> str:
"""Returns email auth method: 'password', 'otp', or '' (disabled)"""
- if self.authn_email in ("password", "otp"):
- return self.authn_email
- return ""
+ if self.supertokens_email_disabled:
+ return ""
+
+ sendgrid_enabled = bool(
+ os.getenv("SENDGRID_API_KEY")
+ and (
+ os.getenv("SENDGRID_FROM_ADDRESS")
+ or os.getenv("AGENTA_AUTHN_EMAIL_FROM")
+ or os.getenv("AGENTA_SEND_EMAIL_FROM_ADDRESS")
+ )
+ )
+ return "otp" if sendgrid_enabled else "password"
@property
def email_enabled(self) -> bool:
@@ -75,15 +140,107 @@ def google_enabled(self) -> bool:
"""Google OAuth enabled if both credentials present"""
return bool(self.google_oauth_client_id and self.google_oauth_client_secret)
+ @property
+ def google_workspaces_enabled(self) -> bool:
+ """Google Workspaces OAuth enabled if both credentials present"""
+ return bool(
+ self.google_workspaces_oauth_client_id
+ and self.google_workspaces_oauth_client_secret
+ )
+
@property
def github_enabled(self) -> bool:
"""GitHub OAuth enabled if both credentials present"""
return bool(self.github_oauth_client_id and self.github_oauth_client_secret)
+ @property
+ def facebook_enabled(self) -> bool:
+ """Facebook OAuth enabled if both credentials present"""
+ return bool(self.facebook_oauth_client_id and self.facebook_oauth_client_secret)
+
+ @property
+ def apple_enabled(self) -> bool:
+ """Apple OAuth enabled if client ID present and secret or key data provided"""
+ return bool(
+ self.apple_oauth_client_id
+ and (
+ self.apple_oauth_client_secret
+ or (self.apple_key_id and self.apple_team_id and self.apple_private_key)
+ )
+ )
+
+ @property
+ def discord_enabled(self) -> bool:
+ """Discord OAuth enabled if both credentials present"""
+ return bool(self.discord_oauth_client_id and self.discord_oauth_client_secret)
+
+ @property
+ def twitter_enabled(self) -> bool:
+ """Twitter OAuth enabled if both credentials present"""
+ return bool(self.twitter_oauth_client_id and self.twitter_oauth_client_secret)
+
+ @property
+ def gitlab_enabled(self) -> bool:
+ """GitLab OAuth enabled if both credentials present"""
+ return bool(self.gitlab_oauth_client_id and self.gitlab_oauth_client_secret)
+
+ @property
+ def bitbucket_enabled(self) -> bool:
+ """Bitbucket OAuth enabled if both credentials present"""
+ return bool(
+ self.bitbucket_oauth_client_id and self.bitbucket_oauth_client_secret
+ )
+
+ @property
+ def linkedin_enabled(self) -> bool:
+ """LinkedIn OAuth enabled if both credentials present"""
+ return bool(self.linkedin_oauth_client_id and self.linkedin_oauth_client_secret)
+
+ @property
+ def okta_enabled(self) -> bool:
+ """Okta OAuth enabled if credentials and domain are present"""
+ return bool(
+ self.okta_oauth_client_id
+ and self.okta_oauth_client_secret
+ and self.okta_domain
+ )
+
+ @property
+ def azure_ad_enabled(self) -> bool:
+ """Azure AD OAuth enabled if credentials and directory ID are present"""
+ return bool(
+ self.azure_ad_oauth_client_id
+ and self.azure_ad_oauth_client_secret
+ and self.azure_ad_directory_id
+ )
+
+ @property
+ def boxy_saml_enabled(self) -> bool:
+ """BoxySAML OAuth enabled if credentials and Boxy URL are present"""
+ return bool(
+ self.boxy_saml_oauth_client_id
+ and self.boxy_saml_oauth_client_secret
+ and self.boxy_saml_url
+ )
+
@property
def oidc_enabled(self) -> bool:
"""Any OIDC provider enabled"""
- return self.google_enabled or self.github_enabled
+ return (
+ self.google_enabled
+ or self.google_workspaces_enabled
+ or self.github_enabled
+ or self.facebook_enabled
+ or self.apple_enabled
+ or self.discord_enabled
+ or self.twitter_enabled
+ or self.gitlab_enabled
+ or self.bitbucket_enabled
+ or self.linkedin_enabled
+ or self.okta_enabled
+ or self.azure_ad_enabled
+ or self.boxy_saml_enabled
+ )
@property
def any_enabled(self) -> bool:
@@ -96,17 +253,24 @@ def validate_config(self) -> None:
if not self.any_enabled:
raise ValueError(
"At least one authentication method must be configured:\n"
- " - AGENTA_AUTHN_EMAIL=password or AGENTA_AUTHN_EMAIL=otp\n"
- " - GOOGLE_OAUTH_CLIENT_ID + GOOGLE_OAUTH_CLIENT_SECRET\n"
- " - GITHUB_OAUTH_CLIENT_ID + GITHUB_OAUTH_CLIENT_SECRET\n"
+ " - SUPERTOKENS_EMAIL_DISABLED must be false (or unset) for email auth\n"
+ " - Any supported OAuth provider credentials, e.g.\n"
+ " GOOGLE_OAUTH_CLIENT_ID + GOOGLE_OAUTH_CLIENT_SECRET\n"
+ " GITHUB_OAUTH_CLIENT_ID + GITHUB_OAUTH_CLIENT_SECRET\n"
+ " FACEBOOK_OAUTH_CLIENT_ID + FACEBOOK_OAUTH_CLIENT_SECRET\n"
+ " APPLE_OAUTH_CLIENT_ID + APPLE_OAUTH_CLIENT_SECRET (or APPLE_KEY_ID/APPLE_TEAM_ID/APPLE_PRIVATE_KEY)\n"
+ " DISCORD_OAUTH_CLIENT_ID + DISCORD_OAUTH_CLIENT_SECRET\n"
+ " TWITTER_OAUTH_CLIENT_ID + TWITTER_OAUTH_CLIENT_SECRET\n"
+ " GITLAB_OAUTH_CLIENT_ID + GITLAB_OAUTH_CLIENT_SECRET\n"
+ " BITBUCKET_OAUTH_CLIENT_ID + BITBUCKET_OAUTH_CLIENT_SECRET\n"
+ " LINKEDIN_OAUTH_CLIENT_ID + LINKEDIN_OAUTH_CLIENT_SECRET\n"
+ " OKTA_OAUTH_CLIENT_ID + OKTA_OAUTH_CLIENT_SECRET + OKTA_DOMAIN\n"
+ " AZURE_AD_OAUTH_CLIENT_ID + AZURE_AD_OAUTH_CLIENT_SECRET + AZURE_AD_DIRECTORY_ID\n"
+ " BOXY_SAML_OAUTH_CLIENT_ID + BOXY_SAML_OAUTH_CLIENT_SECRET + BOXY_SAML_URL\n"
+ " GOOGLE_WORKSPACES_OAUTH_CLIENT_ID + GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET\n"
)
- # Email auth value must be valid
- if self.authn_email and self.authn_email not in ("password", "otp"):
- raise ValueError(
- f"Invalid AGENTA_AUTHN_EMAIL value: '{self.authn_email}'. "
- "Must be 'password', 'otp', or empty (disabled)."
- )
+ return
class PostHogConfig(BaseModel):
@@ -117,7 +281,10 @@ class PostHogConfig(BaseModel):
or os.getenv("POSTHOG_HOST")
or "https://alef.agenta.ai"
)
- api_key: str | None = os.getenv("POSTHOG_API_KEY")
+ api_key: str | None = (
+ os.getenv("POSTHOG_API_KEY")
+ or "phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7"
+ )
model_config = ConfigDict(extra="ignore")
@@ -165,6 +332,8 @@ class SendgridConfig(BaseModel):
from_address: str | None = (
os.getenv("SENDGRID_FROM_ADDRESS")
#
+ or os.getenv("AGENTA_AUTHN_EMAIL_FROM")
+ #
or os.getenv("AGENTA_SEND_EMAIL_FROM_ADDRESS")
)
@@ -172,8 +341,8 @@ class SendgridConfig(BaseModel):
@property
def enabled(self) -> bool:
- """SendGrid enabled if API key present"""
- return bool(self.api_key)
+ """SendGrid enabled only if API key and from address are present"""
+ return bool(self.api_key and self.from_address)
class CrispConfig(BaseModel):
@@ -315,6 +484,12 @@ class RedisConfig(BaseModel):
or "redis://redis-durable:6381/0"
)
+ # Cache control flag - defaults to true
+ cache_enabled: bool = os.getenv("AGENTA_CACHE_ENABLED", "true").lower() in (
+ "true",
+ "1",
+ )
+
model_config = ConfigDict(extra="ignore")
@property
@@ -328,17 +503,19 @@ class AgentaConfig(BaseModel):
license: str = _LICENSE
- api_url: str = os.getenv("AGENTA_API_URL") or "http://localhost/api"
web_url: str = os.getenv("AGENTA_WEB_URL") or "http://localhost"
services_url: str = os.getenv("AGENTA_SERVICES_URL") or "http://localhost/services"
+ api_url: str = os.getenv("AGENTA_API_URL") or "http://localhost/api"
- auth_key: str = os.getenv("AGENTA_AUTH_KEY") or ""
- crypt_key: str = os.getenv("AGENTA_CRYPT_KEY") or ""
+ auth_key: str = os.getenv("AGENTA_AUTH_KEY") or "replace-me"
+ crypt_key: str = os.getenv("AGENTA_CRYPT_KEY") or "replace-me"
runtime_prefix: str = os.getenv("AGENTA_RUNTIME_PREFIX") or ""
auto_migrations: bool = (
- os.getenv("AGENTA_AUTO_MIGRATIONS") or "true"
+ os.getenv("ALEMBIC_AUTO_MIGRATIONS")
+ or os.getenv("AGENTA_AUTO_MIGRATIONS")
+ or "true"
).lower() in _TRUTHY
demos: str = os.getenv("AGENTA_DEMOS") or ""
@@ -375,22 +552,9 @@ class PostgresConfig(BaseModel):
f"postgresql://username:password@postgres:5432/agenta_{_LICENSE}_supertokens"
)
- username: str = (
- os.getenv("POSTGRES_USERNAME")
- #
- or os.getenv("POSTGRES_USER")
- or "username"
- )
+ username: str = os.getenv("POSTGRES_USER") or "username"
password: str = os.getenv("POSTGRES_PASSWORD") or "password"
- username_admin: str = (
- os.getenv("POSTGRES_USERNAME_ADMIN")
- #
- or os.getenv("POSTGRES_USER_ADMIN")
- or "username"
- )
- password_admin: str = os.getenv("POSTGRES_PASSWORD_ADMIN") or "password"
-
model_config = ConfigDict(extra="ignore")
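
A condensed illustration of the enablement rule the new `AuthConfig` properties encode: a provider counts as enabled only when every required env var is present. `provider_enabled` is a hypothetical helper, and the Azure AD fallback to the `ACTIVE_DIRECTORY_*` variables is omitted for brevity.

```python
import os


def provider_enabled(*env_vars: str) -> bool:
    # All listed variables must be set and non-empty.
    return all(os.getenv(v) for v in env_vars)


# Mirrors okta_enabled and azure_ad_enabled above (simplified).
okta_enabled = provider_enabled(
    "OKTA_OAUTH_CLIENT_ID", "OKTA_OAUTH_CLIENT_SECRET", "OKTA_DOMAIN"
)
azure_ad_enabled = provider_enabled(
    "AZURE_AD_OAUTH_CLIENT_ID", "AZURE_AD_OAUTH_CLIENT_SECRET", "AZURE_AD_DIRECTORY_ID"
)
```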
diff --git a/api/oss/tests/legacy/old_tests/variants_main_router/conftest.py b/api/oss/tests/legacy/old_tests/variants_main_router/conftest.py
index 313ba8bd6a..ff2bb51216 100644
--- a/api/oss/tests/legacy/old_tests/variants_main_router/conftest.py
+++ b/api/oss/tests/legacy/old_tests/variants_main_router/conftest.py
@@ -65,11 +65,11 @@ async def get_second_user_object():
async def get_or_create_project_from_db():
async with engine.core_session() as session:
result = await session.execute(
- select(ProjectDB).filter_by(project_name="Default Project", is_default=True)
+ select(ProjectDB).filter_by(project_name="Default", is_default=True)
)
project = result.scalars().first()
if project is None:
- create_project = ProjectDB(project_name="Default Project", is_default=True)
+ create_project = ProjectDB(project_name="Default", is_default=True)
session.add(create_project)
await session.commit()
await session.refresh(create_project)
diff --git a/api/oss/tests/manual/annotations/crud.http b/api/oss/tests/manual/annotations/crud.http
index daf6295322..20fa119c6f 100644
--- a/api/oss/tests/manual/annotations/crud.http
+++ b/api/oss/tests/manual/annotations/crud.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/annotations
###
diff --git a/api/oss/tests/manual/auth/admin.http b/api/oss/tests/manual/auth/admin.http
index e31140d3b5..bac6786f20 100644
--- a/api/oss/tests/manual/auth/admin.http
+++ b/api/oss/tests/manual/auth/admin.http
@@ -1,6 +1,6 @@
@host = http://localhost
@base_url = {{host}}/api
-@access_token = change-me
+@access_token = replace-me
###
diff --git a/api/oss/tests/manual/evaluations/crud.http b/api/oss/tests/manual/evaluations/crud.http
index aa0ef43d36..a2687e9fd2 100644
--- a/api/oss/tests/manual/evaluations/crud.http
+++ b/api/oss/tests/manual/evaluations/crud.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/evaluations
###
diff --git a/api/oss/tests/manual/evaluations/live.http b/api/oss/tests/manual/evaluations/live.http
index 6a43280046..7d28c9d76a 100644
--- a/api/oss/tests/manual/evaluations/live.http
+++ b/api/oss/tests/manual/evaluations/live.http
@@ -1,4 +1,4 @@
-@auth_key = {{$dotenv.AGENTA_AUTH_KEY}} || change-me
+@auth_key = {{$dotenv.AGENTA_AUTH_KEY}} || replace-me
@api_url = {{$dotenv AGENTA_API_URL}}
@api_key = {{$dotenv AGENTA_API_KEY}}
diff --git a/api/oss/tests/manual/evaluators/crud.http b/api/oss/tests/manual/evaluators/crud.http
index 21b60c97b5..9efda040c4 100644
--- a/api/oss/tests/manual/evaluators/crud.http
+++ b/api/oss/tests/manual/evaluators/crud.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/evaluators
###
diff --git a/api/oss/tests/manual/folders/crud.http b/api/oss/tests/manual/folders/crud.http
index e443744f66..4c3d6486d3 100644
--- a/api/oss/tests/manual/folders/crud.http
+++ b/api/oss/tests/manual/folders/crud.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me-auth
+@token = replace-me
@api_url = {{host}}/api
@folders_url = {{api_url}}/folders
@apps_url = {{api_url}}/apps
diff --git a/api/oss/tests/manual/testsets/crud.http b/api/oss/tests/manual/testsets/crud.http
index 72f3149fcc..1b2d197c91 100644
--- a/api/oss/tests/manual/testsets/crud.http
+++ b/api/oss/tests/manual/testsets/crud.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/testsets
###
diff --git a/api/oss/tests/manual/testsets/testcase-inclusion.http b/api/oss/tests/manual/testsets/testcase-inclusion.http
index a8db7ac89f..9cba2e66c4 100644
--- a/api/oss/tests/manual/testsets/testcase-inclusion.http
+++ b/api/oss/tests/manual/testsets/testcase-inclusion.http
@@ -26,7 +26,7 @@
# ============================================================================
@host = http://localhost
-@token = change-me-auth
+@token = replace-me
@base_url = {{host}}/api/preview/testsets
@simple_base_url = {{host}}/api/preview/simple/testsets
diff --git a/api/oss/tests/manual/tracing/crud.http b/api/oss/tests/manual/tracing/crud.http
index e5580efa47..8d532b9541 100644
--- a/api/oss/tests/manual/tracing/crud.http
+++ b/api/oss/tests/manual/tracing/crud.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/00_user_id.http b/api/oss/tests/manual/tracing/filtering/00_user_id.http
index 6a39dbd828..c3dc743a6c 100644
--- a/api/oss/tests/manual/tracing/filtering/00_user_id.http
+++ b/api/oss/tests/manual/tracing/filtering/00_user_id.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/01_trace_id.http b/api/oss/tests/manual/tracing/filtering/01_trace_id.http
index 3f86d6575c..906cd2d59f 100644
--- a/api/oss/tests/manual/tracing/filtering/01_trace_id.http
+++ b/api/oss/tests/manual/tracing/filtering/01_trace_id.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/02_span_id.http b/api/oss/tests/manual/tracing/filtering/02_span_id.http
index 33973dba57..26ce14bc5c 100644
--- a/api/oss/tests/manual/tracing/filtering/02_span_id.http
+++ b/api/oss/tests/manual/tracing/filtering/02_span_id.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/03_parent_id.http b/api/oss/tests/manual/tracing/filtering/03_parent_id.http
index 5fa8782df0..29d8907115 100644
--- a/api/oss/tests/manual/tracing/filtering/03_parent_id.http
+++ b/api/oss/tests/manual/tracing/filtering/03_parent_id.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/04_span_kind.http b/api/oss/tests/manual/tracing/filtering/04_span_kind.http
index c4a757d7b5..6e406201d7 100644
--- a/api/oss/tests/manual/tracing/filtering/04_span_kind.http
+++ b/api/oss/tests/manual/tracing/filtering/04_span_kind.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/05_span_name.http b/api/oss/tests/manual/tracing/filtering/05_span_name.http
index 894c42e445..27c71516a3 100644
--- a/api/oss/tests/manual/tracing/filtering/05_span_name.http
+++ b/api/oss/tests/manual/tracing/filtering/05_span_name.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/06_start_time.http b/api/oss/tests/manual/tracing/filtering/06_start_time.http
index 1f20b6bc36..a399834fd4 100644
--- a/api/oss/tests/manual/tracing/filtering/06_start_time.http
+++ b/api/oss/tests/manual/tracing/filtering/06_start_time.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/07_end_time.http b/api/oss/tests/manual/tracing/filtering/07_end_time.http
index f59f4baeb1..c061382cd9 100644
--- a/api/oss/tests/manual/tracing/filtering/07_end_time.http
+++ b/api/oss/tests/manual/tracing/filtering/07_end_time.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/08_status_code.http b/api/oss/tests/manual/tracing/filtering/08_status_code.http
index 7a9860c7d7..2133400e0a 100644
--- a/api/oss/tests/manual/tracing/filtering/08_status_code.http
+++ b/api/oss/tests/manual/tracing/filtering/08_status_code.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/09_status_message.http b/api/oss/tests/manual/tracing/filtering/09_status_message.http
index 6f8d4f59c4..d603b37536 100644
--- a/api/oss/tests/manual/tracing/filtering/09_status_message.http
+++ b/api/oss/tests/manual/tracing/filtering/09_status_message.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/10_attributes.http b/api/oss/tests/manual/tracing/filtering/10_attributes.http
index fe4006a974..5f416b4e85 100644
--- a/api/oss/tests/manual/tracing/filtering/10_attributes.http
+++ b/api/oss/tests/manual/tracing/filtering/10_attributes.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/11_links.http b/api/oss/tests/manual/tracing/filtering/11_links.http
index 68fd7bec53..46ba66f4f1 100644
--- a/api/oss/tests/manual/tracing/filtering/11_links.http
+++ b/api/oss/tests/manual/tracing/filtering/11_links.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/tracing/filtering/12_references.http b/api/oss/tests/manual/tracing/filtering/12_references.http
index 271a374f44..37cb9451e8 100644
--- a/api/oss/tests/manual/tracing/filtering/12_references.http
+++ b/api/oss/tests/manual/tracing/filtering/12_references.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/tracing
###
diff --git a/api/oss/tests/manual/workflows/artifacts.http b/api/oss/tests/manual/workflows/artifacts.http
index d5e3f24693..bafd14e017 100644
--- a/api/oss/tests/manual/workflows/artifacts.http
+++ b/api/oss/tests/manual/workflows/artifacts.http
@@ -1,5 +1,5 @@
@host = http://localhost
-@token = change-me
+@token = replace-me
@base_url = {{host}}/api/preview/workflows
###
diff --git a/api/poetry.lock b/api/poetry.lock
index ce0474f070..ff8191c37a 100644
--- a/api/poetry.lock
+++ b/api/poetry.lock
@@ -2,36 +2,33 @@
[[package]]
name = "agenta"
-version = "0.72.1"
+version = "0.74.0"
description = "The SDK for agenta is an open-source LLMOps platform."
optional = false
python-versions = "<4.0,>=3.11"
groups = ["main"]
files = [
- {file = "agenta-0.72.1-py3-none-any.whl", hash = "sha256:d1b1dd36cc18e25f7b5ab20bb19c24bce0ca3eaeca32edabf539adf16f6c4c57"},
- {file = "agenta-0.72.1.tar.gz", hash = "sha256:c1e32fff97131f2466b7861f4a3ca1dcceea752a01ead9b063cbb0ba8bded4f8"},
+ {file = "agenta-0.74.0-py3-none-any.whl", hash = "sha256:a91385e9ab856f7bbaa36f4787a4b29ee0d50f957300b8512b5482095b6eff4f"},
+ {file = "agenta-0.74.0.tar.gz", hash = "sha256:3c109d26cb590b96ca92ea0d8a0406974620e4eb1defa64a90b3a2ea2d247b06"},
]
[package.dependencies]
-daytona = ">=0.121.0,<0.122.0"
-fastapi = ">=0.125"
+daytona = ">=0.128,<0.129"
+fastapi = ">=0.127"
httpx = ">=0.28,<0.29"
-importlib-metadata = ">=8,<9"
jinja2 = ">=3,<4"
litellm = ">=1,<2"
openai = ">=2,<3"
opentelemetry-api = ">=1,<2"
opentelemetry-exporter-otlp-proto-http = ">=1,<2"
-opentelemetry-instrumentation = ">=0.59b0,<0.60"
+opentelemetry-instrumentation = ">=0.60b1,<0.61"
opentelemetry-sdk = ">=1,<2"
orjson = ">=3,<4"
pydantic = ">=2,<3"
-python-dotenv = ">=1,<2"
python-jsonpath = ">=2,<3"
pyyaml = ">=6,<7"
restrictedpython = {version = ">=8,<9", markers = "python_version >= \"3.11\" and python_version < \"3.14\""}
structlog = ">=25,<26"
-toml = ">=0.10,<0.11"
[[package]]
name = "aiofiles"
@@ -59,132 +56,132 @@ files = [
[[package]]
name = "aiohttp"
-version = "3.13.2"
+version = "3.13.3"
description = "Async http client/server framework (asyncio)"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2372b15a5f62ed37789a6b383ff7344fc5b9f243999b0cd9b629d8bc5f5b4155"},
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7f8659a48995edee7229522984bd1009c1213929c769c2daa80b40fe49a180c"},
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:939ced4a7add92296b0ad38892ce62b98c619288a081170695c6babe4f50e636"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6315fb6977f1d0dd41a107c527fee2ed5ab0550b7d885bc15fee20ccb17891da"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e7352512f763f760baaed2637055c49134fd1d35b37c2dedfac35bfe5cf8725"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e09a0a06348a2dd73e7213353c90d709502d9786219f69b731f6caa0efeb46f5"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a09a6d073fb5789456545bdee2474d14395792faa0527887f2f4ec1a486a59d3"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b59d13c443f8e049d9e94099c7e412e34610f1f49be0f230ec656a10692a5802"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:20db2d67985d71ca033443a1ba2001c4b5693fe09b0e29f6d9358a99d4d62a8a"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:960c2fc686ba27b535f9fd2b52d87ecd7e4fd1cf877f6a5cba8afb5b4a8bd204"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6c00dbcf5f0d88796151e264a8eab23de2997c9303dd7c0bf622e23b24d3ce22"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fed38a5edb7945f4d1bcabe2fcd05db4f6ec7e0e82560088b754f7e08d93772d"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:b395bbca716c38bef3c764f187860e88c724b342c26275bc03e906142fc5964f"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:204ffff2426c25dfda401ba08da85f9c59525cdc42bda26660463dd1cbcfec6f"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:05c4dd3c48fb5f15db31f57eb35374cb0c09afdde532e7fb70a75aede0ed30f6"},
- {file = "aiohttp-3.13.2-cp310-cp310-win32.whl", hash = "sha256:e574a7d61cf10351d734bcddabbe15ede0eaa8a02070d85446875dc11189a251"},
- {file = "aiohttp-3.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:364f55663085d658b8462a1c3f17b2b84a5c2e1ba858e1b79bff7b2e24ad1514"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8"},
- {file = "aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec"},
- {file = "aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248"},
- {file = "aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e"},
- {file = "aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23"},
- {file = "aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254"},
- {file = "aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a"},
- {file = "aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940"},
- {file = "aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c"},
- {file = "aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734"},
- {file = "aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7fbdf5ad6084f1940ce88933de34b62358d0f4a0b6ec097362dcd3e5a65a4989"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7c3a50345635a02db61792c85bb86daffac05330f6473d524f1a4e3ef9d0046d"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e87dff73f46e969af38ab3f7cb75316a7c944e2e574ff7c933bc01b10def7f5"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2adebd4577724dcae085665f294cc57c8701ddd4d26140504db622b8d566d7aa"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e036a3a645fe92309ec34b918394bb377950cbb43039a97edae6c08db64b23e2"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:23ad365e30108c422d0b4428cf271156dd56790f6dd50d770b8e360e6c5ab2e6"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1f9b2c2d4b9d958b1f9ae0c984ec1dd6b6689e15c75045be8ccb4011426268ca"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a92cf4b9bea33e15ecbaa5c59921be0f23222608143d025c989924f7e3e0c07"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:070599407f4954021509193404c4ac53153525a19531051661440644728ba9a7"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:29562998ec66f988d49fb83c9b01694fa927186b781463f376c5845c121e4e0b"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4dd3db9d0f4ebca1d887d76f7cdbcd1116ac0d05a9221b9dad82c64a62578c4d"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d7bc4b7f9c4921eba72677cd9fedd2308f4a4ca3e12fab58935295ad9ea98700"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dacd50501cd017f8cccb328da0c90823511d70d24a323196826d923aad865901"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8b2f1414f6a1e0683f212ec80e813f4abef94c739fd090b66c9adf9d2a05feac"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04c3971421576ed24c191f610052bcb2f059e395bc2489dd99e397f9bc466329"},
- {file = "aiohttp-3.13.2-cp39-cp39-win32.whl", hash = "sha256:9f377d0a924e5cc94dc620bc6366fc3e889586a7f18b748901cf016c916e2084"},
- {file = "aiohttp-3.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:9c705601e16c03466cb72011bd1af55d68fa65b045356d8f96c216e5f6db0fa5"},
- {file = "aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca"},
+ {file = "aiohttp-3.13.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5a372fd5afd301b3a89582817fdcdb6c34124787c70dbcc616f259013e7eef7"},
+ {file = "aiohttp-3.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:147e422fd1223005c22b4fe080f5d93ced44460f5f9c105406b753612b587821"},
+ {file = "aiohttp-3.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:859bd3f2156e81dd01432f5849fc73e2243d4a487c4fd26609b1299534ee1845"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dca68018bf48c251ba17c72ed479f4dafe9dbd5a73707ad8d28a38d11f3d42af"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fee0c6bc7db1de362252affec009707a17478a00ec69f797d23ca256e36d5940"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c048058117fd649334d81b4b526e94bde3ccaddb20463a815ced6ecbb7d11160"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:215a685b6fbbfcf71dfe96e3eba7a6f58f10da1dfdf4889c7dd856abe430dca7"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2c184bb1fe2cbd2cefba613e9db29a5ab559323f994b6737e370d3da0ac455"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:75ca857eba4e20ce9f546cd59c7007b33906a4cd48f2ff6ccf1ccfc3b646f279"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81e97251d9298386c2b7dbeb490d3d1badbdc69107fb8c9299dd04eb39bddc0e"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c0e2d366af265797506f0283487223146af57815b388623f0357ef7eac9b209d"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4e239d501f73d6db1522599e14b9b321a7e3b1de66ce33d53a765d975e9f4808"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0db318f7a6f065d84cb1e02662c526294450b314a02bd9e2a8e67f0d8564ce40"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:bfc1cc2fe31a6026a8a88e4ecfb98d7f6b1fec150cfd708adbfd1d2f42257c29"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af71fff7bac6bb7508956696dce8f6eec2bbb045eceb40343944b1ae62b5ef11"},
+ {file = "aiohttp-3.13.3-cp310-cp310-win32.whl", hash = "sha256:37da61e244d1749798c151421602884db5270faf479cf0ef03af0ff68954c9dd"},
+ {file = "aiohttp-3.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:7e63f210bc1b57ef699035f2b4b6d9ce096b5914414a49b0997c839b2bd2223c"},
+ {file = "aiohttp-3.13.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b6073099fb654e0a068ae678b10feff95c5cae95bbfcbfa7af669d361a8aa6b"},
+ {file = "aiohttp-3.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cb93e166e6c28716c8c6aeb5f99dfb6d5ccf482d29fe9bf9a794110e6d0ab64"},
+ {file = "aiohttp-3.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e027cf2f6b641693a09f631759b4d9ce9165099d2b5d92af9bd4e197690eea"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b61b7169ababd7802f9568ed96142616a9118dd2be0d1866e920e77ec8fa92a"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:80dd4c21b0f6237676449c6baaa1039abae86b91636b6c91a7f8e61c87f89540"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65d2ccb7eabee90ce0503c17716fc77226be026dcc3e65cce859a30db715025b"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5b179331a481cb5529fca8b432d8d3c7001cb217513c94cd72d668d1248688a3"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d4c940f02f49483b18b079d1c27ab948721852b281f8b015c058100e9421dd1"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f9444f105664c4ce47a2a7171a2418bce5b7bae45fb610f4e2c36045d85911d3"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:694976222c711d1d00ba131904beb60534f93966562f64440d0c9d41b8cdb440"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f33ed1a2bf1997a36661874b017f5c4b760f41266341af36febaf271d179f6d7"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e636b3c5f61da31a92bf0d91da83e58fdfa96f178ba682f11d24f31944cdd28c"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5d2d94f1f5fcbe40838ac51a6ab5704a6f9ea42e72ceda48de5e6b898521da51"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2be0e9ccf23e8a94f6f0650ce06042cefc6ac703d0d7ab6c7a917289f2539ad4"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9af5e68ee47d6534d36791bbe9b646d2a7c7deb6fc24d7943628edfbb3581f29"},
+ {file = "aiohttp-3.13.3-cp311-cp311-win32.whl", hash = "sha256:a2212ad43c0833a873d0fb3c63fa1bacedd4cf6af2fee62bf4b739ceec3ab239"},
+ {file = "aiohttp-3.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:642f752c3eb117b105acbd87e2c143de710987e09860d674e068c4c2c441034f"},
+ {file = "aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c"},
+ {file = "aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168"},
+ {file = "aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a"},
+ {file = "aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046"},
+ {file = "aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57"},
+ {file = "aiohttp-3.13.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5dff64413671b0d3e7d5918ea490bdccb97a4ad29b3f311ed423200b2203e01c"},
+ {file = "aiohttp-3.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:87b9aab6d6ed88235aa2970294f496ff1a1f9adcd724d800e9b952395a80ffd9"},
+ {file = "aiohttp-3.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:425c126c0dc43861e22cb1c14ba4c8e45d09516d0a3ae0a3f7494b79f5f233a3"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f9120f7093c2a32d9647abcaf21e6ad275b4fbec5b55969f978b1a97c7c86bf"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:697753042d57f4bf7122cab985bf15d0cef23c770864580f5af4f52023a56bd6"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6de499a1a44e7de70735d0b39f67c8f25eb3d91eb3103be99ca0fa882cdd987d"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:37239e9f9a7ea9ac5bf6b92b0260b01f8a22281996da609206a84df860bc1261"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f76c1e3fe7d7c8afad7ed193f89a292e1999608170dcc9751a7462a87dfd5bc0"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fc290605db2a917f6e81b0e1e0796469871f5af381ce15c604a3c5c7e51cb730"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4021b51936308aeea0367b8f006dc999ca02bc118a0cc78c303f50a2ff6afb91"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:49a03727c1bba9a97d3e93c9f93ca03a57300f484b6e935463099841261195d3"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3d9908a48eb7416dc1f4524e69f1d32e5d90e3981e4e37eb0aa1cd18f9cfa2a4"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2712039939ec963c237286113c68dbad80a82a4281543f3abf766d9d73228998"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7bfdc049127717581866fa4708791220970ce291c23e28ccf3922c700740fdc0"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8057c98e0c8472d8846b9c79f56766bcc57e3e8ac7bfd510482332366c56c591"},
+ {file = "aiohttp-3.13.3-cp313-cp313-win32.whl", hash = "sha256:1449ceddcdbcf2e0446957863af03ebaaa03f94c090f945411b61269e2cb5daf"},
+ {file = "aiohttp-3.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:693781c45a4033d31d4187d2436f5ac701e7bbfe5df40d917736108c1cc7436e"},
+ {file = "aiohttp-3.13.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:ea37047c6b367fd4bd632bff8077449b8fa034b69e812a18e0132a00fae6e808"},
+ {file = "aiohttp-3.13.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6fc0e2337d1a4c3e6acafda6a78a39d4c14caea625124817420abceed36e2415"},
+ {file = "aiohttp-3.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c685f2d80bb67ca8c3837823ad76196b3694b0159d232206d1e461d3d434666f"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e377758516d262bde50c2584fc6c578af272559c409eecbdd2bae1601184d6"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:34749271508078b261c4abb1767d42b8d0c0cc9449c73a4df494777dc55f0687"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:82611aeec80eb144416956ec85b6ca45a64d76429c1ed46ae1b5f86c6e0c9a26"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fff83cfc93f18f215896e3a190e8e5cb413ce01553901aca925176e7568963a"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bbe7d4cecacb439e2e2a8a1a7b935c25b812af7a5fd26503a66dadf428e79ec1"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b928f30fe49574253644b1ca44b1b8adbd903aa0da4b9054a6c20fc7f4092a25"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7b5e8fe4de30df199155baaf64f2fcd604f4c678ed20910db8e2c66dc4b11603"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:8542f41a62bcc58fc7f11cf7c90e0ec324ce44950003feb70640fc2a9092c32a"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5e1d8c8b8f1d91cd08d8f4a3c2b067bfca6ec043d3ff36de0f3a715feeedf926"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:90455115e5da1c3c51ab619ac57f877da8fd6d73c05aacd125c5ae9819582aba"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:042e9e0bcb5fba81886c8b4fbb9a09d6b8a00245fd8d88e4d989c1f96c74164c"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2eb752b102b12a76ca02dff751a801f028b4ffbbc478840b473597fc91a9ed43"},
+ {file = "aiohttp-3.13.3-cp314-cp314-win32.whl", hash = "sha256:b556c85915d8efaed322bf1bdae9486aa0f3f764195a0fb6ee962e5c71ef5ce1"},
+ {file = "aiohttp-3.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9bf9f7a65e7aa20dd764151fb3d616c81088f91f8df39c3893a536e279b4b984"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:05861afbbec40650d8a07ea324367cb93e9e8cc7762e04dd4405df99fa65159c"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2fc82186fadc4a8316768d61f3722c230e2c1dcab4200d52d2ebdf2482e47592"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0add0900ff220d1d5c5ebbf99ed88b0c1bbf87aa7e4262300ed1376a6b13414f"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:568f416a4072fbfae453dcf9a99194bbb8bdeab718e08ee13dfa2ba0e4bebf29"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:add1da70de90a2569c5e15249ff76a631ccacfe198375eead4aadf3b8dc849dc"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b47b7ba335d2e9b1239fa571131a87e2d8ec96b333e68b2a305e7a98b0bae2"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3dd4dce1c718e38081c8f35f323209d4c1df7d4db4bab1b5c88a6b4d12b74587"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34bac00a67a812570d4a460447e1e9e06fae622946955f939051e7cc895cfab8"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a19884d2ee70b06d9204b2727a7b9f983d0c684c650254679e716b0b77920632"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ca7f2bb6ba8348a3614c7918cc4bb73268c5ac2a207576b7afea19d3d9f64"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:b0d95340658b9d2f11d9697f59b3814a9d3bb4b7a7c20b131df4bcef464037c0"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1e53262fd202e4b40b70c3aff944a8155059beedc8a89bba9dc1f9ef06a1b56"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:d60ac9663f44168038586cab2157e122e46bdef09e9368b37f2d82d354c23f72"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:90751b8eed69435bac9ff4e3d2f6b3af1f57e37ecb0fbeee59c0174c9e2d41df"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344"},
+ {file = "aiohttp-3.13.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31a83ea4aead760dfcb6962efb1d861db48c34379f2ff72db9ddddd4cda9ea2e"},
+ {file = "aiohttp-3.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:988a8c5e317544fdf0d39871559e67b6341065b87fceac641108c2096d5506b7"},
+ {file = "aiohttp-3.13.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b174f267b5cfb9a7dba9ee6859cecd234e9a681841eb85068059bc867fb8f02"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:947c26539750deeaee933b000fb6517cc770bbd064bad6033f1cff4803881e43"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9ebf57d09e131f5323464bd347135a88622d1c0976e88ce15b670e7ad57e4bd6"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4ae5b5a0e1926e504c81c5b84353e7a5516d8778fbbff00429fe7b05bb25cbce"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2ba0eea45eb5cc3172dbfc497c066f19c41bac70963ea1a67d51fc92e4cf9a80"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bae5c2ed2eae26cc382020edad80d01f36cb8e746da40b292e68fec40421dc6a"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a60e60746623925eab7d25823329941aee7242d559baa119ca2b253c88a7bd6"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e50a2e1404f063427c9d027378472316201a2290959a295169bcf25992d04558"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:9a9dc347e5a3dc7dfdbc1f82da0ef29e388ddb2ed281bfce9dd8248a313e62b7"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b46020d11d23fe16551466c77823df9cc2f2c1e63cc965daf67fa5eec6ca1877"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:69c56fbc1993fa17043e24a546959c0178fe2b5782405ad4559e6c13975c15e3"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b99281b0704c103d4e11e72a76f1b543d4946fea7dd10767e7e1b5f00d4e5704"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:40c5e40ecc29ba010656c18052b877a1c28f84344825efa106705e835c28530f"},
+ {file = "aiohttp-3.13.3-cp39-cp39-win32.whl", hash = "sha256:56339a36b9f1fc708260c76c87e593e2afb30d26de9ae1eb445b5e051b98a7a1"},
+ {file = "aiohttp-3.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:c6b8568a3bb5819a0ad087f16d40e5a3fb6099f39ea1d5625a3edc1e923fc538"},
+ {file = "aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88"},
]
[package.dependencies]
@@ -197,7 +194,7 @@ propcache = ">=0.2.0"
yarl = ">=1.17.0,<2.0"
[package.extras]
-speedups = ["Brotli", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi"]
+speedups = ["Brotli (>=1.2)", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi (>=1.2)"]
[[package]]
name = "aiohttp-retry"
@@ -292,14 +289,14 @@ files = [
[[package]]
name = "anyio"
-version = "4.12.0"
+version = "4.12.1"
description = "High-level concurrency and networking framework on top of asyncio or Trio"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb"},
- {file = "anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0"},
+ {file = "anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c"},
+ {file = "anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703"},
]
[package.dependencies]
@@ -460,14 +457,14 @@ files = [
[[package]]
name = "certifi"
-version = "2025.11.12"
+version = "2026.1.4"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
- {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"},
- {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"},
+ {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"},
+ {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"},
]
[[package]]
@@ -798,41 +795,41 @@ test-randomorder = ["pytest-randomly"]
[[package]]
name = "daytona"
-version = "0.121.0"
+version = "0.128.1"
description = "Python SDK for Daytona"
optional = false
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
- {file = "daytona-0.121.0-py3-none-any.whl", hash = "sha256:33b92a33729d8866606571cb6924c05d7d471dd281ca1440a5c2342e3f65a737"},
- {file = "daytona-0.121.0.tar.gz", hash = "sha256:7ef327645ea64c99319217a3491ea26878da2e3b9891f53e15878eaaf25f20ec"},
+ {file = "daytona-0.128.1-py3-none-any.whl", hash = "sha256:d00536a899d5bab299837d0ad7d59a8c9cb1e76d5618f01fd0106fc59fbeac69"},
+ {file = "daytona-0.128.1.tar.gz", hash = "sha256:4d9ab0cc2504b48223dbd2f4429b304b484292cdff3d942af8080bb9f359d6b1"},
]
[package.dependencies]
aiofiles = ">=24.1.0,<24.2.0"
-daytona-api-client = "0.121.0"
-daytona-api-client-async = "0.121.0"
-daytona-toolbox-api-client = "0.121.0"
-daytona-toolbox-api-client-async = "0.121.0"
+daytona-api-client = "0.128.1"
+daytona-api-client-async = "0.128.1"
+daytona-toolbox-api-client = "0.128.1"
+daytona-toolbox-api-client-async = "0.128.1"
Deprecated = ">=1.2.18,<2.0.0"
environs = ">=10.0.0,<15.0.0"
httpx = ">=0.28.0,<0.29.0"
multipart = ">=1.0.0,<2.0.0"
-obstore = ">=0.7.0,<0.8.0"
+obstore = ">=0.8.0,<0.9.0"
pydantic = ">=2.4.2,<3.0.0"
toml = ">=0.10.0,<0.11.0"
websockets = ">=15.0.0,<16.0.0"
[[package]]
name = "daytona-api-client"
-version = "0.121.0"
+version = "0.128.1"
description = "Daytona"
optional = false
python-versions = "<4.0,>=3.8"
groups = ["main"]
files = [
- {file = "daytona_api_client-0.121.0-py3-none-any.whl", hash = "sha256:01fa21e8366436a55ef41c1830a14e3587dbaaf707100403207df29930859693"},
- {file = "daytona_api_client-0.121.0.tar.gz", hash = "sha256:9af863df12ad561b4609363fde11d96bd1e3e867ec1d7e1858cb78b9783d1df8"},
+ {file = "daytona_api_client-0.128.1-py3-none-any.whl", hash = "sha256:eede1d93ec8995d7280e077127018508b94a488c6288363f50794128af56d339"},
+ {file = "daytona_api_client-0.128.1.tar.gz", hash = "sha256:e9db105bf5ea7ad4b55431e3bb7db1e3a8937557ffbca7dba6167bc5a6a63c96"},
]
[package.dependencies]
@@ -843,14 +840,14 @@ urllib3 = ">=2.1.0,<3.0.0"
[[package]]
name = "daytona-api-client-async"
-version = "0.121.0"
+version = "0.128.1"
description = "Daytona"
optional = false
python-versions = "<4.0,>=3.8"
groups = ["main"]
files = [
- {file = "daytona_api_client_async-0.121.0-py3-none-any.whl", hash = "sha256:de7c90046c2edb38f9010cd3379d703f6473ef706ea9c22e6300c110fb50f391"},
- {file = "daytona_api_client_async-0.121.0.tar.gz", hash = "sha256:81f93bd4b26d1c4cb76844a516ce2eeb2610342142360ffd7b658c7115fba4cf"},
+ {file = "daytona_api_client_async-0.128.1-py3-none-any.whl", hash = "sha256:c0fb378e0df95fe0ae125d9f60cebfd4badc08e089d18584bfa6158a0002893b"},
+ {file = "daytona_api_client_async-0.128.1.tar.gz", hash = "sha256:2fb7507cb4122ae2011aa1f52a38556c1ce9c137173648aa96ca227ef072eadd"},
]
[package.dependencies]
@@ -863,14 +860,14 @@ urllib3 = ">=2.1.0,<3.0.0"
[[package]]
name = "daytona-toolbox-api-client"
-version = "0.121.0"
+version = "0.128.1"
description = "Daytona Daemon API"
optional = false
python-versions = "<4.0,>=3.8"
groups = ["main"]
files = [
- {file = "daytona_toolbox_api_client-0.121.0-py3-none-any.whl", hash = "sha256:9d121959b446b85e5cdf00eaa15a2bdc2af8d61cae121bef50b940ebc73f5cad"},
- {file = "daytona_toolbox_api_client-0.121.0.tar.gz", hash = "sha256:b248652d2279562bc199943d240f6feb18c9f116040e778376961d5d9fb5e043"},
+ {file = "daytona_toolbox_api_client-0.128.1-py3-none-any.whl", hash = "sha256:dd22da7e7fc823802c657a35996f1f4824deeda1b248192504c67e6936a1a120"},
+ {file = "daytona_toolbox_api_client-0.128.1.tar.gz", hash = "sha256:869ee431f485ed535868a93154e29c10e46fb2c36a0a7af79020385830e23c8f"},
]
[package.dependencies]
@@ -881,14 +878,14 @@ urllib3 = ">=2.1.0,<3.0.0"
[[package]]
name = "daytona-toolbox-api-client-async"
-version = "0.121.0"
+version = "0.128.1"
description = "Daytona Daemon API"
optional = false
python-versions = "<4.0,>=3.8"
groups = ["main"]
files = [
- {file = "daytona_toolbox_api_client_async-0.121.0-py3-none-any.whl", hash = "sha256:b7e5a09ce06f4e1a918ad852e3b191bb9d6fdfb142d9c32d39d0a253b4c096fe"},
- {file = "daytona_toolbox_api_client_async-0.121.0.tar.gz", hash = "sha256:9829d79be0c78689f6164e5386d15f2bc11c357330d6223df80448b51e987696"},
+ {file = "daytona_toolbox_api_client_async-0.128.1-py3-none-any.whl", hash = "sha256:2ae4afb86f2d2568bd1df031764df0d41837e8ccab2200f2ed70fd61d18231d3"},
+ {file = "daytona_toolbox_api_client_async-0.128.1.tar.gz", hash = "sha256:d9ef0ec4d17fcc611e5c8d17ae300afb825b32bf8346fa6a2a8576d760ef0304"},
]
[package.dependencies]
@@ -929,6 +926,43 @@ files = [
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
]
+[[package]]
+name = "dnspython"
+version = "2.8.0"
+description = "DNS toolkit"
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+ {file = "dnspython-2.8.0-py3-none-any.whl", hash = "sha256:01d9bbc4a2d76bf0db7c1f729812ded6d912bd318d3b1cf81d30c0f845dbf3af"},
+ {file = "dnspython-2.8.0.tar.gz", hash = "sha256:181d3c6996452cb1189c4046c61599b84a5a86e099562ffde77d26984ff26d0f"},
+]
+
+[package.extras]
+dev = ["black (>=25.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "hypercorn (>=0.17.0)", "mypy (>=1.17)", "pylint (>=3)", "pytest (>=8.4)", "pytest-cov (>=6.2.0)", "quart-trio (>=0.12.0)", "sphinx (>=8.2.0)", "sphinx-rtd-theme (>=3.0.0)", "twine (>=6.1.0)", "wheel (>=0.45.0)"]
+dnssec = ["cryptography (>=45)"]
+doh = ["h2 (>=4.2.0)", "httpcore (>=1.0.0)", "httpx (>=0.28.0)"]
+doq = ["aioquic (>=1.2.0)"]
+idna = ["idna (>=3.10)"]
+trio = ["trio (>=0.30)"]
+wmi = ["wmi (>=1.5.1)"]
+
+[[package]]
+name = "email-validator"
+version = "2.3.0"
+description = "A robust email address syntax and deliverability validation library."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+ {file = "email_validator-2.3.0-py3-none-any.whl", hash = "sha256:80f13f623413e6b197ae73bb10bf4eb0908faf509ad8362c5edeb0be7fd450b4"},
+ {file = "email_validator-2.3.0.tar.gz", hash = "sha256:9fc05c37f2f6cf439ff414f8fc46d917929974a82244c20eb10231ba60c54426"},
+]
+
+[package.dependencies]
+dnspython = ">=2.0.0"
+idna = ">=2.0.0"
+
[[package]]
name = "environs"
version = "14.5.0"
@@ -982,14 +1016,14 @@ python-dateutil = ">=2.4"
[[package]]
name = "fastapi"
-version = "0.127.0"
+version = "0.128.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "fastapi-0.127.0-py3-none-any.whl", hash = "sha256:725aa2bb904e2eff8031557cf4b9b77459bfedd63cae8427634744fd199f6a49"},
- {file = "fastapi-0.127.0.tar.gz", hash = "sha256:5a9246e03dcd1fdb19f1396db30894867c1d630f5107dc167dcbc5ed1ea7d259"},
+ {file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
+ {file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
]
[package.dependencies]
@@ -1093,14 +1127,14 @@ files = [
[[package]]
name = "filelock"
-version = "3.20.1"
+version = "3.20.2"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a"},
- {file = "filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c"},
+ {file = "filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8"},
+ {file = "filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64"},
]
[[package]]
@@ -1642,14 +1676,14 @@ zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "huggingface-hub"
-version = "1.2.3"
+version = "1.2.4"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.9.0"
groups = ["main"]
files = [
- {file = "huggingface_hub-1.2.3-py3-none-any.whl", hash = "sha256:c9b7a91a9eedaa2149cdc12bdd8f5a11780e10de1f1024718becf9e41e5a4642"},
- {file = "huggingface_hub-1.2.3.tar.gz", hash = "sha256:4ba57f17004fd27bb176a6b7107df579865d4cde015112db59184c51f5602ba7"},
+ {file = "huggingface_hub-1.2.4-py3-none-any.whl", hash = "sha256:2db69b91877d9d34825f5cd2a63b94f259011a77dcf761b437bf510fbe9522e9"},
+ {file = "huggingface_hub-1.2.4.tar.gz", hash = "sha256:7a1d9ec4802e64372d1d152d69fb8e26d943f15a2289096fbc8e09e7b90c21a5"},
]
[package.dependencies]
@@ -1662,13 +1696,13 @@ pyyaml = ">=5.1"
shellingham = "*"
tqdm = ">=4.42.1"
typer-slim = "*"
-typing-extensions = ">=3.7.4.3"
+typing-extensions = ">=4.1.0"
[package.extras]
all = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
dev = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
-hf-xet = ["hf-xet (>=1.1.3,<2.0.0)"]
+hf-xet = ["hf-xet (>=1.2.0,<2.0.0)"]
mcp = ["mcp (>=1.8.0)"]
oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"]
quality = ["libcst (>=1.4.0)", "mypy (==1.15.0)", "ruff (>=0.9.0)", "ty"]
@@ -2068,19 +2102,19 @@ files = [
[[package]]
name = "marshmallow"
-version = "4.1.2"
+version = "4.2.0"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "marshmallow-4.1.2-py3-none-any.whl", hash = "sha256:a8cfa18bd8d0e5f7339e734edf84815fe8db1bdb57358c7ccc05472b746eeadc"},
- {file = "marshmallow-4.1.2.tar.gz", hash = "sha256:083f250643d2e75fd363f256aeb6b1af369a7513ad37647ce4a601f6966e3ba5"},
+ {file = "marshmallow-4.2.0-py3-none-any.whl", hash = "sha256:1dc369bd13a8708a9566d6f73d1db07d50142a7580f04fd81e1c29a4d2e10af4"},
+ {file = "marshmallow-4.2.0.tar.gz", hash = "sha256:908acabd5aa14741419d3678d3296bda6abe28a167b7dcd05969ceb8256943ac"},
]
[package.extras]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
-docs = ["autodocsumm (==0.2.14)", "furo (==2025.9.25)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.13.0)"]
+docs = ["autodocsumm (==0.2.14)", "furo (==2025.12.19)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.13.0)"]
tests = ["pytest", "simplejson"]
[[package]]
@@ -2299,89 +2333,102 @@ infinite-tracing = ["grpcio", "protobuf"]
[[package]]
name = "obstore"
-version = "0.7.3"
+version = "0.8.2"
description = ""
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "obstore-0.7.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:8c89b6205672490fb99e16159bb290a12d4d8e6f9b27904720faafd4fd8ae436"},
- {file = "obstore-0.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:26357df7b3824f431ced44e26fe334f686410cb5e8c218569759d6aa32ab7242"},
- {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca3380121cc5ce6d040698fcf126c1acab4a00282db5a6bc8e5026bba22fc43d"},
- {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1eca930fa0229f7fd5d881bc03deffca51e96ad754cbf256e4aa27ac7c50db6"},
- {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4b91fec58a65350303b643ce1da7a890fb2cc411c2a9d86672ad30febb196df"},
- {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4eba1c87af7002d95cce8c2c67fac814056938f16500880e1fb908a0e8c7a7f5"},
- {file = "obstore-0.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5e8ad65c5b481f168080db1c5290cf55ad7ab77b45fd467c4d25367db2a3ae"},
- {file = "obstore-0.7.3-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:b680dd856d238a892a14ef3115daee33e267502229cee248266a20e03dbe98d0"},
- {file = "obstore-0.7.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c3dccb74ebfec1f5517c2160503f30629b62685c78bbe15ad03492969fadd858"},
- {file = "obstore-0.7.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:cd614e53a00d22b2facfd1fb9b516fa210cd788ecce513dd532a8e65fa07d55d"},
- {file = "obstore-0.7.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:32841a2b4bef838412302e9a8612fc3ba1c51bd808b77b4854efe6b1f7a65f0d"},
- {file = "obstore-0.7.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a58f3952b43fb5f7b0f0f9f08272983e4dd50f83b16a05943f89581b0e6bff20"},
- {file = "obstore-0.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:8745e2437e79e073c3cf839454f803909540fa4f6cd9180c9ab4ce742c716c8b"},
- {file = "obstore-0.7.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:65ffe43fd63c9968172bed649fcaf6345b41a124be5d34f46adb94604e9ccef8"},
- {file = "obstore-0.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2947609a1fab1f9b808235a8088e7e99814fbaf3b6000833d760fd90f68fa7cd"},
- {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15409f75acc4e10f924fe118f7018607d6d96a72330ac4cc1663d36b7c6847b1"},
- {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5224d834bbe7a9f2592b130e4ddd86340fa172e5a3a51284e706f6515d95c036"},
- {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b1af6c1a33d98db9954f7ceab8eb5e543aea683a79a0ffd72b6c8d176834a9b"},
- {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:708c27c4e5e85799fe7a2d2ae443fbd96c2ad36b561c815a9b01b5333ab536ad"},
- {file = "obstore-0.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7da327920bef8bbd02445f33947487fe4e94fcb9e084c810108e88be57d0877b"},
- {file = "obstore-0.7.3-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:8f3b23a40ad374fe7a65fab4678a9978978ec83a597156a2a9d1dbeab433a469"},
- {file = "obstore-0.7.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9b3e7d0c7e85e4f67e479f7efab5dea26ceaace10897d639d38f77831ef0cdaf"},
- {file = "obstore-0.7.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:dfee24c5e9d5b7e0f43e4bbf8cc15069e5c60bfdb86873ce97c0eb487afa5da8"},
- {file = "obstore-0.7.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:99e187cee4a6e13605886b906b34fec7ae9902dd25b1e9aafae863a9d55c6e47"},
- {file = "obstore-0.7.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a5de3b0859512b9ddbf57ac34db96ad41fb85fc9597e422916044d1bf550427d"},
- {file = "obstore-0.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:35fdd1cd8856984de1b5a11fced83f6fd6623eb459736e57b9975400ff5baf5a"},
- {file = "obstore-0.7.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6cbe5dde68bf6ab5a88f3bb467ca8f123bcce3efc03e22fd8339688559d36199"},
- {file = "obstore-0.7.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6db23cbcb3aec10e09a31fd0883950cb9b7f77f4fcf1fb0e8a276e1d1961bf3"},
- {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00fde287770bdbdbb06379670d30c257b20e77a4a11b36f1e232b5bc6ef07b7a"},
- {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c420036356269666197f0704392c9495f255bb3ff9b667c69fb49bc65bd50dcd"},
- {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c28482626ca9481569ad16ba0c0c36947ce96e8147c64011dc0af6d58be8ff9c"},
- {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cead20055221337ddf218098afe8138f8624395b0cf2a730da72a4523c11b2f"},
- {file = "obstore-0.7.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c71017142a593022848f4af0ac1e39af1a56927981cc2c89542888edb206eb33"},
- {file = "obstore-0.7.3-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:8aebc2bf796a0d1525318a9ac69608a96d03abc621ca1e6d810e08a70bd695c1"},
- {file = "obstore-0.7.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c0ebf03969b81ee559c377c5ebca9dcdffbef0e6650d43659676aeaeb302a272"},
- {file = "obstore-0.7.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e0f5d97064ec35fdef3079f867afe6fa5e76ab2bb3e809855ab34a1aa34c9dcd"},
- {file = "obstore-0.7.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3a80541671646c5e49493de61361a1851c8c172cf28981b76aa4248a9f02f5b1"},
- {file = "obstore-0.7.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a5ce6385ad89afad106d05d37296f724ba10f8f4e57ab8ad7f4ecce0aa226d3d"},
- {file = "obstore-0.7.3-cp312-cp312-win_amd64.whl", hash = "sha256:632522ba63a44768977defc0a93fc5dd59ea0455bfd6926cd3121971306da4e5"},
- {file = "obstore-0.7.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:dcb71412dc8d2bd464b340d1f36d8c0ceb7894c01c2ceaaa5f2ac45376503fa2"},
- {file = "obstore-0.7.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6d486bb01438039d686401ce4207d82c02b8b639227baa5bdd578efdab388dea"},
- {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaaf0c9223b5592658c131ff32a0574be995c7e237f406266f9a68ea2266769"},
- {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8ae6cde734df3cc542c14152029170d9ae70ce50b957831ed71073113bd3d60"},
- {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30da82ae3bfdf24fa80af38967e323ae8da0bb7c36cce01f0dda7689faaf1272"},
- {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5daa9f912eac8cdf218161d34e13f38cbb594e934eaaf8a7c09dca5a394b231"},
- {file = "obstore-0.7.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef06cad4e8978d672357b328b4f61c48827b2b79d7eaf58b68ee31ac0e652b8"},
- {file = "obstore-0.7.3-cp313-cp313-manylinux_2_24_aarch64.whl", hash = "sha256:d34920539a94da2b87195787b80004960638dfd0aa2f4369fc9239e0a41470a8"},
- {file = "obstore-0.7.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcdaa779f376745ff493cce7f19cbbe8d75f68304bf1062e757ab60bd62de1"},
- {file = "obstore-0.7.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ae095f679e4796b8f6ef80ed3813ddd14a477ae219a0c059c23cf294f9288ded"},
- {file = "obstore-0.7.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6def59e79c19b8804743fec6407f542b387dc1630c2254412ae8bd3a0b98e7e4"},
- {file = "obstore-0.7.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f97797c42476ab19853ef4a161b903eaf96c2363a23b9e0187d66b0daee350cb"},
- {file = "obstore-0.7.3-cp313-cp313-win_amd64.whl", hash = "sha256:8f0ecc01b1444bc08ff98e368b80ea2c085a7783621075298e86d3aba96f8e27"},
- {file = "obstore-0.7.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b0a337b6d2b430040e752effdf9584b0d6adddef2ead2bbbc3c204957a2f69d2"},
- {file = "obstore-0.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:439874c31a78198211c45ebde0b3535650dc3585353be51b361bd017bc492090"},
- {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:360034e4b1fe84da59bc3b090798acdd1b4a8b75cc1e56d2656591c7cc8776f2"},
- {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44989c9be1156c8ad02522bcb0358e813fd71fa061e51c3331cc11f4b6d36525"},
- {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bf0b9c28b3149138ff3db0c2cfb3acb329d3a3bef02a3146edec6d2419b27ad"},
- {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98fd91e90442ff3bf8832c713189c81cd892299a8423fc5d8c4534e84db62643"},
- {file = "obstore-0.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eccae18d75d753129d58c080716cd91738fd1f913b7182eb5695f483d6cbd94"},
- {file = "obstore-0.7.3-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:bbe0488ca1573020af14ca585ddc5e5aa7593f8fc42ec5d1f53b83393ccaefa5"},
- {file = "obstore-0.7.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6765cef76ca62b13d4cfec4648fbf6048410d34c2e11455323d011d208977b89"},
- {file = "obstore-0.7.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:00f8d1211d247fc24c9f5d5614f2ed25872fe2c4af2e283f3e6cc85544a3dee5"},
- {file = "obstore-0.7.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ebc387320a00918c8afb5f2d76c07157003a661d60ff03763103278670bc75e3"},
- {file = "obstore-0.7.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8b526bdc5b5392ac55b3a45bf04f2eba3a33c132dfa04418e7ffba38763d7b5d"},
- {file = "obstore-0.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:1af6dfef86b37e74ff812bd70d8643619e16485559fcaee01b3f2442b70d4918"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:848eb12ed713f447a7b1f7de3f0bff570de99546f76c37e6315102f5bbdaf71c"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:091998d57331aa0e648a9dca0adebf6dc09eb53a4e6935c9c06625998120acc1"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed7c957d19a6a994e8c9198b1e58b31e0fc3748ca056e27f738a4ead789eb80b"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af8daa0568c89ce863986ccf14570c30d1dc817b51ed2146eecb76fddc82704e"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe42053413a35a964e88ea156af3253defac30bedd973797b55b8e230cc50fe4"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2faa2ac90672334cdaabbf930c82e91efa184928dc55b55bcbf84b152bc4df1"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49f20fdabd295a5a001569957c19a51615d288cd255fb80dcf966e2307ca0cec"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-manylinux_2_24_aarch64.whl", hash = "sha256:aa131d089565fb7a5225220fcdfe260e3b1fc6821c0a2eef2e3a23c5ba9c79bd"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:73df8b270b89a97ef9e87fc8e552d97d426bbfcb61c55097f5d452a7457ee9d5"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-musllinux_1_2_armv7l.whl", hash = "sha256:25cea5cf5a727800b14cf4d09fd2b799c28fb755cc04e5635e7fb36d413bf772"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:aae7fea048d7e73e5c206efef1627bff677455f6eed5c94a596906c4fcedc744"},
- {file = "obstore-0.7.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:b4ee1ee4f8846ae891f1715a19a8f89d16a00c9e8913bf60c9f3acf24d905de2"},
+ {file = "obstore-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:49104c0d72688c180af015b02c691fbb6cf6a45b03a9d71b84059ed92dbec704"},
+ {file = "obstore-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c49776abd416e4d80d003213522d82ad48ed3517bee27a6cf8ce0f0cf4e6337e"},
+ {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1636372b5e171a98369612d122ea20b955661daafa6519ed8322f4f0cb43ff74"},
+ {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2efed0d86ad4ebffcbe3d0c4d84f26c2c6b20287484a0a748499c169a8e1f2c4"},
+ {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00c5542616dc5608de82ab6f6820633c9dbab6ff048e770fb8a5fcd1d30cd656"},
+ {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d9df46aaf25ce80fff48c53382572adc67b6410611660b798024450281a3129"},
+ {file = "obstore-0.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ccf0f03a7fe453fb8640611c922bce19f021c6aaeee6ee44d6d8fb57db6be48"},
+ {file = "obstore-0.8.2-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:ddfbfadc88c5e9740b687ef0833384329a56cea07b34f44e1c4b00a0e97d94a9"},
+ {file = "obstore-0.8.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:53ad53bb16e64102f39559ec470efd78a5272b5e3b84c53aa0423993ac5575c1"},
+ {file = "obstore-0.8.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:b0b905b46354db0961ab818cad762b9c1ac154333ae5d341934c90635a6bd7ab"},
+ {file = "obstore-0.8.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fee235694406ebb2dc4178752cf5587f471d6662659b082e9786c716a0a9465c"},
+ {file = "obstore-0.8.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6c36faf7ace17dd0832aa454118a63ea21862e3d34f71b9297d0c788d00f4985"},
+ {file = "obstore-0.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:948a1db1d34f88cfc7ab7e0cccdcfd84cf3977365634599c95ba03b4ef80d1c4"},
+ {file = "obstore-0.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2edaa97687c191c5324bb939d72f6fe86a7aa8191c410f1648c14e8296d05c1c"},
+ {file = "obstore-0.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c4fb7ef8108f08d14edc8bec9e9a6a2e5c4d14eddb8819f5d0da498aff6e8888"},
+ {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fda8f658c0edf799ab1e264f9b12c7c184cd09a5272dc645d42e987810ff2772"},
+ {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87fe2bc15ce4051ecb56abd484feca323c2416628beb62c1c7b6712114564d6e"},
+ {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2482aa2562ab6a4ca40250b26bea33f8375b59898a9b5615fd412cab81098123"},
+ {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4153b928f5d2e9c6cb645e83668a53e0b42253d1e8bcb4e16571fc0a1434599a"},
+ {file = "obstore-0.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbfa9c38620cc191be98c8b5558c62071e495dc6b1cc724f38293ee439aa9f92"},
+ {file = "obstore-0.8.2-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:0822836eae8d52499f10daef17f26855b4c123119c6eb984aa4f2d525ec2678d"},
+ {file = "obstore-0.8.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8ef6435dfd586d83b4f778e7927a5d5b0d8b771e9ba914bc809a13d7805410e6"},
+ {file = "obstore-0.8.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:0f2cba91f4271ca95a932a51aa8dda1537160342b33f7836c75e1eb9d40621a2"},
+ {file = "obstore-0.8.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:23c876d603af0627627808d19a58d43eb5d8bfd02eecd29460bc9a58030fed55"},
+ {file = "obstore-0.8.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ff3c4b5d07629b70b9dee494cd6b94fff8465c3864752181a1cb81a77190fe42"},
+ {file = "obstore-0.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:aadb2cb72de7227d07f4570f82729625ffc77522fadca5cf13c3a37fbe8c8de9"},
+ {file = "obstore-0.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:bb70ce297a47392b1d9a3e310f18d59cd5ebbb9453428210fef02ed60e4d75d1"},
+ {file = "obstore-0.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1619bf618428abf1f607e0b219b2e230a966dcf697b717deccfa0983dd91f646"},
+ {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a4605c3ed7c9515aeb4c619b5f7f2c9986ed4a79fe6045e536b5e59b804b1476"},
+ {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce42670417876dd8668cbb8659e860e9725e5f26bbc86449fd259970e2dd9d18"},
+ {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4a3e893b2a06585f651c541c1972fe1e3bf999ae2a5fda052ee55eb7e6516f5"},
+ {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08462b32f95a9948ed56ed63e88406e2e5a4cae1fde198f9682e0fb8487100ed"},
+ {file = "obstore-0.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a0bf7763292a8fc47d01cd66e6f19002c5c6ad4b3ed4e6b2729f5e190fa8a0d"},
+ {file = "obstore-0.8.2-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:bcd47f8126cb192cbe86942b8f73b1c45a651ce7e14c9a82c5641dfbf8be7603"},
+ {file = "obstore-0.8.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:57eda9fd8c757c3b4fe36cf3918d7e589cc1286591295cc10b34122fa36dd3fd"},
+ {file = "obstore-0.8.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ea44442aad8992166baa69f5069750979e4c5d9ffce772e61565945eea5774b9"},
+ {file = "obstore-0.8.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:41496a3ab8527402db4142aaaf0d42df9d7d354b13ba10d9c33e0e48dd49dd96"},
+ {file = "obstore-0.8.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:43da209803f052df96c7c3cbec512d310982efd2407e4a435632841a51143170"},
+ {file = "obstore-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:1836f5dcd49f9f2950c75889ab5c51fb290d3ea93cdc39a514541e0be3af016e"},
+ {file = "obstore-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:212f033e53fe6e53d64957923c5c88949a400e9027f7038c705ec2e9038be563"},
+ {file = "obstore-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bee21fa4ba148d08fa90e47a96df11161661ed31e09c056a373cb2154b0f2852"},
+ {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4c66594b59832ff1ced4c72575d9beb8b5f9b4e404ac1150a42bfb226617fd50"},
+ {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:089f33af5c2fe132d00214a0c1f40601b28f23a38e24ef9f79fb0576f2730b74"},
+ {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d87f658dfd340d5d9ea2d86a7c90d44da77a0db9e00c034367dca335735110cf"},
+ {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e2e4fa92828c4fbc2d487f3da2d3588701a1b67d9f6ca3c97cc2afc912e9c63"},
+ {file = "obstore-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab440e89c5c37a8ec230857dd65147d4b923e0cada33297135d05e0f937d696a"},
+ {file = "obstore-0.8.2-cp313-cp313-manylinux_2_24_aarch64.whl", hash = "sha256:b9beed107c5c9cd995d4a73263861fcfbc414d58773ed65c14f80eb18258a932"},
+ {file = "obstore-0.8.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b75b4e7746292c785e31edcd5aadc8b758238372a19d4c5e394db5c305d7d175"},
+ {file = "obstore-0.8.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:f33e6c366869d05ab0b7f12efe63269e631c5450d95d6b4ba4c5faf63f69de70"},
+ {file = "obstore-0.8.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:12c885a9ce5ceb09d13cc186586c0c10b62597eff21b985f6ce8ff9dab963ad3"},
+ {file = "obstore-0.8.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4accc883b93349a81c9931e15dd318cc703b02bbef2805d964724c73d006d00e"},
+ {file = "obstore-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ec850adf9980e5788a826ccfd5819989724e2a2f712bfa3258e85966c8d9981e"},
+ {file = "obstore-0.8.2-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:1431e40e9bb4773a261e51b192ea6489d0799b9d4d7dbdf175cdf813eb8c0503"},
+ {file = "obstore-0.8.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ddb39d4da303f50b959da000aa42734f6da7ac0cc0be2d5a7838b62c97055bb9"},
+ {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e01f4e13783db453e17e005a4a3ceff09c41c262e44649ba169d253098c775e8"},
+ {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df0fc2d0bc17caff9b538564ddc26d7616f7e8b7c65b1a3c90b5048a8ad2e797"},
+ {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e439d06c99a140348f046c9f598ee349cc2dcd9105c15540a4b231f9cc48bbae"},
+ {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e37d9046669fcc59522d0faf1d105fcbfd09c84cccaaa1e809227d8e030f32c"},
+ {file = "obstore-0.8.2-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2646fdcc4bbe92dc2bb5bcdff15574da1211f5806c002b66d514cee2a23c7cb8"},
+ {file = "obstore-0.8.2-cp314-cp314-manylinux_2_24_aarch64.whl", hash = "sha256:e31a7d37675056d93dfc244605089dee67f5bba30f37c88436623c8c5ad9ba9d"},
+ {file = "obstore-0.8.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:656313dd8170dde0f0cd471433283337a63912e8e790a121f7cc7639c83e3816"},
+ {file = "obstore-0.8.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:329038c9645d6d1741e77fe1a53e28a14b1a5c1461cfe4086082ad39ebabf981"},
+ {file = "obstore-0.8.2-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:1e4df99b369790c97c752d126b286dc86484ea49bff5782843a265221406566f"},
+ {file = "obstore-0.8.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9e1c65c65e20cc990414a8a9af88209b1bbc0dd9521b5f6b0293c60e19439bb7"},
+ {file = "obstore-0.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2ca19d5310ba2736a3052d756e682cc1aafbcc4069e62c05b7222b7d8434b543"},
+ {file = "obstore-0.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e5f3df9b64c683e288fa1e47fac237c6a1e1021e7c8cadcc75f1bcb3098e824d"},
+ {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0cd293ace46ee175b50e21c0d8c94f606de6cd68f2f199877c55fe8837c585a5"},
+ {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5a39750feedf5b95b4f62bacaded0b95a53be047d9462d6b24dc8f8b6fc6ec8"},
+ {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb76517cca57f6ee9d74be18074a1c0f5ff0e62b4c6e1e0f893993dda93ebbfc"},
+ {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7cd653932bbb7afe611786388cdb403a4b19b13205e0e43d8b0e4890e0accfd0"},
+ {file = "obstore-0.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4952d69843bb78c73c9a81258f448003f74ff7b298a60899f015788db98a1cd1"},
+ {file = "obstore-0.8.2-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:2e3cd6d0822888b7e79c92c1258997289ebf0224598aad8f46ada17405666852"},
+ {file = "obstore-0.8.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:feb4a6e5a3f2d323b3f61356d4ef99dd3f430aaacdaf5607ced5f857d992d2d4"},
+ {file = "obstore-0.8.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:61e29fd6a27df284027c23dc49851dbeeacb2d40cb3d945bd3d6ec6cb0650450"},
+ {file = "obstore-0.8.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8f9e18ff6c32997bd9a9fd636a98439bcbd3f44f13bae350243eacfb75803161"},
+ {file = "obstore-0.8.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6ebc814302485d453b61df956c09662ebb33471684add5bbc321de7ba265b723"},
+ {file = "obstore-0.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:36478c16fd7c7f880f28ece352251eec1fc6f6b69dbf2b78cec9754eb80a4b41"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6ea04118980a9c22fc8581225ff4507b6a161baf8949d728d96e68326ebaab59"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5f33a7570b6001b54252260fbec18c3f6d21e25d3ec57e9b6c5e7330e8290eb2"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11fa78dfb749edcf5a041cd6db20eae95b3e8b09dfdd9b38d14939da40e7c115"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:872bc0921ff88305884546ba05e258ccd95672a03d77db123f0d0563fd3c000b"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72556a2fbf018edd921286283e5c7eec9f69a21c6d12516d8a44108eceaa526a"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75fa1abf21499dfcfb0328941a175f89a9aa58245bf00e3318fe928e4b10d297"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f54f72f30cd608c4399679781c884bf8a0e816c1977a2fac993bf5e1fb30609f"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-manylinux_2_24_aarch64.whl", hash = "sha256:b044ebf1bf7b8f7b0ca309375c1cd9e140be79e072ae8c70bbd5d9b2ad1f7678"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:b1326cd2288b64d6fe8857cc22d3a8003b802585fc0741eff2640a8dc35e8449"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-musllinux_1_2_armv7l.whl", hash = "sha256:ba6863230648a9b0e11502d2745d881cf74262720238bc0093c3eabd22a3b24c"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:887615da9eeefeb2df849d87c380e04877487aa29dbeb367efc3f17f667470d3"},
+ {file = "obstore-0.8.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4eec1fb32ffa4fb9fe9ad584611ff031927a5c22732b56075ee7204f0e35ebdf"},
+ {file = "obstore-0.8.2.tar.gz", hash = "sha256:a467bc4e97169e2ba749981b4fd0936015428d9b8f3fb83a5528536b1b6f377f"},
]
[package.dependencies]
@@ -2417,14 +2464,14 @@ voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]]
name = "opentelemetry-api"
-version = "1.38.0"
+version = "1.39.1"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582"},
- {file = "opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12"},
+ {file = "opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950"},
+ {file = "opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c"},
]
[package.dependencies]
@@ -2433,68 +2480,71 @@ typing-extensions = ">=4.5.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
-version = "1.38.0"
+version = "1.39.1"
description = "OpenTelemetry Protobuf encoding"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a"},
- {file = "opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c"},
+ {file = "opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde"},
+ {file = "opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464"},
]
[package.dependencies]
-opentelemetry-proto = "1.38.0"
+opentelemetry-proto = "1.39.1"
[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
-version = "1.38.0"
+version = "1.39.1"
description = "OpenTelemetry Collector Protobuf over HTTP Exporter"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b"},
- {file = "opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b"},
+ {file = "opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985"},
+ {file = "opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb"},
]
[package.dependencies]
googleapis-common-protos = ">=1.52,<2.0"
opentelemetry-api = ">=1.15,<2.0"
-opentelemetry-exporter-otlp-proto-common = "1.38.0"
-opentelemetry-proto = "1.38.0"
-opentelemetry-sdk = ">=1.38.0,<1.39.0"
+opentelemetry-exporter-otlp-proto-common = "1.39.1"
+opentelemetry-proto = "1.39.1"
+opentelemetry-sdk = ">=1.39.1,<1.40.0"
requests = ">=2.7,<3.0"
typing-extensions = ">=4.5.0"
+[package.extras]
+gcp-auth = ["opentelemetry-exporter-credential-provider-gcp (>=0.59b0)"]
+
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.59b0"
+version = "0.60b1"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation-0.59b0-py3-none-any.whl", hash = "sha256:44082cc8fe56b0186e87ee8f7c17c327c4c2ce93bdbe86496e600985d74368ee"},
- {file = "opentelemetry_instrumentation-0.59b0.tar.gz", hash = "sha256:6010f0faaacdaf7c4dff8aac84e226d23437b331dcda7e70367f6d73a7db1adc"},
+ {file = "opentelemetry_instrumentation-0.60b1-py3-none-any.whl", hash = "sha256:04480db952b48fb1ed0073f822f0ee26012b7be7c3eac1a3793122737c78632d"},
+ {file = "opentelemetry_instrumentation-0.60b1.tar.gz", hash = "sha256:57ddc7974c6eb35865af0426d1a17132b88b2ed8586897fee187fd5b8944bd6a"},
]
[package.dependencies]
opentelemetry-api = ">=1.4,<2.0"
-opentelemetry-semantic-conventions = "0.59b0"
+opentelemetry-semantic-conventions = "0.60b1"
packaging = ">=18.0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-proto"
-version = "1.38.0"
+version = "1.39.1"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18"},
- {file = "opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468"},
+ {file = "opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007"},
+ {file = "opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8"},
]
[package.dependencies]
@@ -2502,35 +2552,35 @@ protobuf = ">=5.0,<7.0"
[[package]]
name = "opentelemetry-sdk"
-version = "1.38.0"
+version = "1.39.1"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b"},
- {file = "opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe"},
+ {file = "opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c"},
+ {file = "opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6"},
]
[package.dependencies]
-opentelemetry-api = "1.38.0"
-opentelemetry-semantic-conventions = "0.59b0"
+opentelemetry-api = "1.39.1"
+opentelemetry-semantic-conventions = "0.60b1"
typing-extensions = ">=4.5.0"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.59b0"
+version = "0.60b1"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed"},
- {file = "opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0"},
+ {file = "opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb"},
+ {file = "opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953"},
]
[package.dependencies]
-opentelemetry-api = "1.38.0"
+opentelemetry-api = "1.39.1"
typing-extensions = ">=4.5.0"
[[package]]
@@ -2760,14 +2810,14 @@ files = [
[[package]]
name = "posthog"
-version = "7.4.2"
+version = "7.4.3"
description = "Integrate PostHog into any python application."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "posthog-7.4.2-py3-none-any.whl", hash = "sha256:36954f06f4adede905d97faeb24926a705a4d86f4a308506b15b41b661ef064c"},
- {file = "posthog-7.4.2.tar.gz", hash = "sha256:5953f31a21c5e2485ac57eb5d600a231a70118f884f438c0e8b493c30373c409"},
+ {file = "posthog-7.4.3-py3-none-any.whl", hash = "sha256:ae068f8954ee7a56d10ce35261580f1b8d99c6a2b6e878964eeacea1ec906b4a"},
+ {file = "posthog-7.4.3.tar.gz", hash = "sha256:02484a32c8bf44ab489dcef270ada46e5ce324021258c322f0d1b567c2d6f174"},
]
[package.dependencies]
@@ -3028,6 +3078,7 @@ files = [
[package.dependencies]
annotated-types = ">=0.6.0"
+email-validator = {version = ">=2.0.0", optional = true, markers = "extra == \"email\""}
pydantic-core = "2.41.5"
typing-extensions = ">=4.14.1"
typing-inspection = ">=0.4.2"
@@ -3345,14 +3396,14 @@ files = [
[[package]]
name = "python-jsonpath"
-version = "2.0.1"
+version = "2.0.2"
description = "JSONPath, JSON Pointer and JSON Patch for Python."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "python_jsonpath-2.0.1-py3-none-any.whl", hash = "sha256:ebd518b7c883acc5b976518d76b6c96288405edec7d9ef838641869c1e1a5eb7"},
- {file = "python_jsonpath-2.0.1.tar.gz", hash = "sha256:32a84ebb2dc0ec1b42a6e165b0f9174aef8310bad29154ad9aee31ac37cca18f"},
+ {file = "python_jsonpath-2.0.2-py3-none-any.whl", hash = "sha256:3f8ab612f815ce10c03bf0deaede87235f3381b109a60b4a22744069953627e3"},
+ {file = "python_jsonpath-2.0.2.tar.gz", hash = "sha256:41abb6660b3ee54d5ae77e4b0e901049fb1662ad90de241f038df47edc75ee60"},
]
[package.extras]
@@ -4220,14 +4271,14 @@ blobfile = ["blobfile (>=2)"]
[[package]]
name = "tldextract"
-version = "5.3.0"
+version = "5.3.1"
description = "Accurately separates a URL's subdomain, domain, and public suffix, using the Public Suffix List (PSL). By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well."
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "tldextract-5.3.0-py3-none-any.whl", hash = "sha256:f70f31d10b55c83993f55e91ecb7c5d84532a8972f22ec578ecfbe5ea2292db2"},
- {file = "tldextract-5.3.0.tar.gz", hash = "sha256:b3d2b70a1594a0ecfa6967d57251527d58e00bb5a91a74387baa0d87a0678609"},
+ {file = "tldextract-5.3.1-py3-none-any.whl", hash = "sha256:6bfe36d518de569c572062b788e16a659ccaceffc486d243af0484e8ecf432d9"},
+ {file = "tldextract-5.3.1.tar.gz", hash = "sha256:a72756ca170b2510315076383ea2993478f7da6f897eef1f4a5400735d5057fb"},
]
[package.dependencies]
@@ -4242,27 +4293,36 @@ testing = ["mypy", "pytest", "pytest-gitignore", "pytest-mock", "responses", "ru
[[package]]
name = "tokenizers"
-version = "0.22.1"
+version = "0.22.2"
description = ""
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73"},
- {file = "tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4"},
- {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879"},
- {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446"},
- {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a"},
- {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390"},
- {file = "tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82"},
- {file = "tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138"},
- {file = "tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9"},
+ {file = "tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c"},
+ {file = "tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67"},
+ {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4"},
+ {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a"},
+ {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a"},
+ {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5"},
+ {file = "tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92"},
+ {file = "tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48"},
+ {file = "tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc"},
+ {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:753d47ebd4542742ef9261d9da92cd545b2cacbb48349a1225466745bb866ec4"},
+ {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e10bf9113d209be7cd046d40fbabbaf3278ff6d18eb4da4c500443185dc1896c"},
+ {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64d94e84f6660764e64e7e0b22baa72f6cd942279fdbb21d46abd70d179f0195"},
+ {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f01a9c019878532f98927d2bacb79bbb404b43d3437455522a00a30718cdedb5"},
+ {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319f659ee992222f04e58f84cbf407cfa66a65fe3a8de44e8ad2bc53e7d99012"},
+ {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e50f8554d504f617d9e9d6e4c2c2884a12b388a97c5c77f0bc6cf4cd032feee"},
+ {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a62ba2c5faa2dd175aaeed7b15abf18d20266189fb3406c5d0550dd34dd5f37"},
+ {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143b999bdc46d10febb15cbffb4207ddd1f410e2c755857b5a0797961bbdc113"},
+ {file = "tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917"},
]
[package.dependencies]
@@ -4271,7 +4331,7 @@ huggingface-hub = ">=0.16.4,<2.0"
[package.extras]
dev = ["tokenizers[testing]"]
docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
-testing = ["black (==22.3)", "datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff"]
+testing = ["datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff", "ty"]
[[package]]
name = "toml"
@@ -4327,14 +4387,14 @@ requests = ">=2.0.0"
[[package]]
name = "typer-slim"
-version = "0.21.0"
+version = "0.21.1"
description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "typer_slim-0.21.0-py3-none-any.whl", hash = "sha256:92aee2188ac6fc2b2924bd75bb61a340b78bd8cd51fd9735533ce5a856812c8e"},
- {file = "typer_slim-0.21.0.tar.gz", hash = "sha256:f2dbd150cfa0fead2242e21fa9f654dfc64773763ddf07c6be9a49ad34f79557"},
+ {file = "typer_slim-0.21.1-py3-none-any.whl", hash = "sha256:6e6c31047f171ac93cc5a973c9e617dbc5ab2bddc4d0a3135dc161b4e2020e0d"},
+ {file = "typer_slim-0.21.1.tar.gz", hash = "sha256:73495dd08c2d0940d611c5a8c04e91c2a0a98600cbd4ee19192255a233b6dbfd"},
]
[package.dependencies]
@@ -4839,4 +4899,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = "^3.11"
-content-hash = "ef7e131d700fdd953b5104884661b7cfa251aff86e8f9e52ab3f4f34a8447af3"
+content-hash = "b92d54d17a02394a5732d27c113605af042bd47a4500511a0d639eedb74ee9c1"
diff --git a/api/pyproject.toml b/api/pyproject.toml
index a9bc733424..bd75182a82 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "api"
-version = "0.72.1"
+version = "0.76.0"
description = "Agenta API"
authors = [
{ name = "Mahmoud Mabrouk", email = "mahmoud@agenta.ai" },
@@ -22,7 +22,7 @@ agenta = ">=0.72.1"
# Core framework dependencies
fastapi = ">=0.127"
-pydantic = "^2"
+pydantic = { version = "^2", extras = ["email"] }
uvicorn = "^0.40"
gunicorn = "^23"
httpx = "^0.28"
@@ -52,9 +52,6 @@ taskiq-redis = "^1"
redis = "^7"
cachetools = "^6"
-# Sandbox
-restrictedpython = { version = "^8", python = ">=3.11,<3.14" }
-
# Integrations & external services
supertokens-python = "^0.29"
openai = "^2"
@@ -62,6 +59,7 @@ sendgrid = "^6"
stripe = "^14"
posthog = "^7"
newrelic = "^11"
+dnspython = "^2"
# Observability (limited - see comment below)
opentelemetry-proto = "^1"
diff --git a/api/test-auth.http b/api/test-auth.http
new file mode 100644
index 0000000000..145d262fd6
--- /dev/null
+++ b/api/test-auth.http
@@ -0,0 +1,409 @@
+### Auth Endpoints Testing
+### Base URL
+@baseUrl = http://localhost
+@apiBaseUrl = {{baseUrl}}/api/auth
+
+### NOTE: FastAPI app has root_path="/api" (line 150 in entrypoints/routers.py)
+### So auth router mounted at "/auth" becomes "/api/auth" in final URLs
+
+################################################################################
+# 1. Discover Authentication Methods (ALWAYS AVAILABLE)
+################################################################################
+# This endpoint works regardless of configuration
+# Returns available authentication methods based on:
+# - Environment variables (AGENTA_AUTHN_EMAIL, AGENTA_AUTHN_GOOGLE_ENABLED, etc.)
+# - Organization policies (EE only)
+# - User's existing identities
+
+POST {{apiBaseUrl}}/discover
+Content-Type: application/json
+
+{
+ "email": "jp@agenta.ai"
+}
+
+# Expected response (default config with no env vars):
+# {
+# "user_exists": false,
+# "primary_method": "email:password",
+# "methods": {
+# "email:password": true,
+# "email:otp": false,
+# "social:google": false,
+# "social:github": false,
+# "sso": false
+# }
+# }
+
+################################################################################
+# 2. EMAIL/PASSWORD AUTHENTICATION (DEFAULT)
+################################################################################
+# Available when: AGENTA_AUTHN_EMAIL is NOT set OR = "password" (default)
+# This is the DEFAULT authentication method when no env vars are configured
+
+### 2a. Sign Up (Email/Password)
+POST {{apiBaseUrl}}/signup
+Content-Type: application/json
+
+{
+ "formFields": [
+ {
+ "id": "email",
+ "value": "test@example.com"
+ },
+ {
+ "id": "password",
+ "value": "SecurePassword123!"
+ }
+ ]
+}
+
+### 2b. Sign In (Email/Password)
+POST {{apiBaseUrl}}/signin
+Content-Type: application/json
+
+{
+ "formFields": [
+ {
+ "id": "email",
+ "value": "test@example.com"
+ },
+ {
+ "id": "password",
+ "value": "SecurePassword123!"
+ }
+ ]
+}
+
+################################################################################
+# 3. EMAIL OTP AUTHENTICATION (PASSWORDLESS)
+################################################################################
+# Available when: AGENTA_AUTHN_EMAIL="otp"
+# NOTE: These endpoints return 404 if email OTP is NOT enabled!
+
+### 3a. Create OTP Code (Email OTP)
+POST {{apiBaseUrl}}/signinup/code
+Content-Type: application/json
+
+{
+ "email": "jp@agenta.ai"
+}
+
+# Response contains:
+# {
+# "status": "OK",
+# "deviceId": "...",
+# "preAuthSessionId": "...",
+# "flowType": "USER_INPUT_CODE"
+# }
+
+### 3b. Consume OTP Code (Email OTP)
+POST {{apiBaseUrl}}/signinup/code/consume
+Content-Type: application/json
+
+{
+ "preAuthSessionId": "your-pre-session-id",
+ "deviceId": "your-device-id",
+ "userInputCode": "599869"
+}
+
+################################################################################
+# 4. SOCIAL OAUTH AUTHENTICATION
+################################################################################
+# Available when:
+# - Google: AGENTA_AUTHN_GOOGLE_ENABLED=true + client ID/secret configured
+# - GitHub: AGENTA_AUTHN_GITHUB_ENABLED=true + client ID/secret configured
+
+### 4a. Google - Get Authorization URL
+GET {{apiBaseUrl}}/authorisationurl?thirdPartyId=google&redirectURIOnProviderDashboard={{baseUrl}}/auth/callback/google
+
+# Response:
+# {
+# "status": "OK",
+# "urlWithQueryParams": "https://accounts.google.com/o/oauth2/v2/auth?..."
+# }
+
+### 4b. GitHub - Get Authorization URL
+GET {{apiBaseUrl}}/authorisationurl?thirdPartyId=github&redirectURIOnProviderDashboard={{baseUrl}}/auth/callback/github
+
+### 4c. OAuth Callback (handled automatically by SuperTokens)
+# After user authorizes on provider:
+# Provider redirects to: {{baseUrl}}/auth/callback/google
+# SuperTokens handles the callback, creates session, and redirects to frontend
+
+################################################################################
+# 5. OIDC/SSO AUTHENTICATION (EE ONLY)
+################################################################################
+# Available when: EE edition + organization has configured OIDC providers
+# Requires organization_providers table with enabled providers
+
+### 5a. OIDC - Initiate Authorization
+GET {{apiBaseUrl}}/authorize/oidc?provider_id=PROVIDER_UUID&redirect=/dashboard
+
+# This endpoint:
+# 1. Validates provider access
+# 2. Redirects to SuperTokens OIDC flow
+# 3. SuperTokens handles OIDC exchange
+# 4. Creates session with user identities
+# 5. Redirects to specified redirect path
+
+### 5b. OIDC Callback (handled automatically by SuperTokens)
+# Provider redirects to: {{baseUrl}}/auth/callback/custom
+# SuperTokens processes OIDC callback and creates session
+
+################################################################################
+# 6. SESSION MANAGEMENT (ALWAYS AVAILABLE)
+################################################################################
+# These endpoints are available regardless of which auth method was used
+
+### 6a. Verify Session
+GET {{apiBaseUrl}}/session/verify
+Cookie: sAccessToken=YOUR_ACCESS_TOKEN; sRefreshToken=YOUR_REFRESH_TOKEN
+
+### 6b. Refresh Session
+POST {{apiBaseUrl}}/session/refresh
+Cookie: sRefreshToken=YOUR_REFRESH_TOKEN
+
+### 6c. Sign Out
+POST {{apiBaseUrl}}/signout
+Cookie: sAccessToken=YOUR_ACCESS_TOKEN
+
+### 6d. Get Session User Info
+GET {{apiBaseUrl}}/session
+Cookie: sAccessToken=YOUR_ACCESS_TOKEN
+
+################################################################################
+# 7. SUPERTOKENS DASHBOARD (ADMIN)
+################################################################################
+# Available when: Dashboard recipe is enabled (default)
+GET {{baseUrl}}/auth/dashboard
+
+
+################################################################################
+# CURL COMMANDS
+################################################################################
+
+### 1. Discover (custom endpoint - always works)
+# curl -X POST http://localhost/api/auth/discover \
+# -H "Content-Type: application/json" \
+# -d '{"email": "test@example.com"}'
+
+### 2a. Email/Password - Sign Up (DEFAULT - works without env vars)
+# curl -X POST http://localhost/api/auth/signup \
+# -H "Content-Type: application/json" \
+# -d '{
+# "formFields": [
+# {"id": "email", "value": "test@example.com"},
+# {"id": "password", "value": "SecurePassword123!"}
+# ]
+# }'
+
+### 2b. Email/Password - Sign In (DEFAULT)
+# curl -X POST http://localhost/api/auth/signin \
+# -H "Content-Type: application/json" \
+# -d '{
+# "formFields": [
+# {"id": "email", "value": "test@example.com"},
+# {"id": "password", "value": "SecurePassword123!"}
+# ]
+# }'
+
+### 3a. Email OTP - Create Code (only if AGENTA_AUTHN_EMAIL=otp)
+# curl -X POST http://localhost/api/auth/signinup/code \
+# -H "Content-Type: application/json" \
+# -d '{"email": "test@example.com"}'
+
+### 3b. Email OTP - Consume Code (only if AGENTA_AUTHN_EMAIL=otp)
+# curl -X POST http://localhost/api/auth/signinup/code/consume \
+# -H "Content-Type: application/json" \
+# -d '{
+# "preAuthSessionId": "SESSION_ID",
+# "deviceId": "DEVICE_ID",
+# "userInputCode": "123456"
+# }'
+
+### 4a. Google OAuth - Get URL (only if AGENTA_AUTHN_GOOGLE_ENABLED=true)
+# curl -X GET "http://localhost/api/auth/authorisationurl?thirdPartyId=google&redirectURIOnProviderDashboard=http://localhost/auth/callback/google"
+
+### 4b. GitHub OAuth - Get URL (only if AGENTA_AUTHN_GITHUB_ENABLED=true)
+# curl -X GET "http://localhost/api/auth/authorisationurl?thirdPartyId=github&redirectURIOnProviderDashboard=http://localhost/auth/callback/github"
+
+### 5. OIDC/SSO - Authorize (EE only, requires configured providers)
+# curl -X GET "http://localhost/api/auth/authorize/oidc?provider_id=UUID&redirect=/dashboard"
+
+### 6a. Session - Verify
+# curl -X GET http://localhost/api/auth/session/verify \
+# -H "Cookie: sAccessToken=YOUR_TOKEN"
+
+### 6b. Session - Refresh
+# curl -X POST http://localhost/api/auth/session/refresh \
+# -H "Cookie: sRefreshToken=YOUR_REFRESH_TOKEN"
+
+### 6c. Session - Sign Out
+# curl -X POST http://localhost/api/auth/signout \
+# -H "Cookie: sAccessToken=YOUR_TOKEN"
+
+
+################################################################################
+# AUTHENTICATION FLOW DOCUMENTATION
+################################################################################
+
+# ============================================================================
+# DEFAULT CONFIGURATION (No environment variables set)
+# ============================================================================
+# When AGENTA_AUTHN_EMAIL is NOT set or = "password":
+# - Email/Password authentication is ENABLED (default)
+# - Available endpoints: /discover, /signup, /signin, /signout, /session/*
+# - SuperTokens emailpassword recipe is initialized
+#
+# Expected discover response:
+# {
+# "primary_method": "email:password",
+# "methods": {
+# "email:password": true,
+# "email:otp": false,
+# "social:google": false,
+# "social:github": false,
+# "sso": false
+# }
+# }
+
+# ============================================================================
+# EMAIL OTP CONFIGURATION
+# ============================================================================
+# Set: AGENTA_AUTHN_EMAIL=otp
+# - Email OTP authentication is ENABLED
+# - Email/Password authentication is DISABLED
+# - Available endpoints: /discover, /signinup/code, /signinup/code/consume, /signout, /session/*
+# - SuperTokens passwordless recipe is initialized
+#
+# Expected discover response:
+# {
+# "primary_method": "email:otp",
+# "methods": {
+# "email:password": false,
+# "email:otp": true,
+# "social:google": false,
+# "social:github": false,
+# "sso": false
+# }
+# }
+
+# ============================================================================
+# SOCIAL OAUTH CONFIGURATION
+# ============================================================================
+# Set: AGENTA_AUTHN_GOOGLE_ENABLED=true
+# AGENTA_AUTHN_GOOGLE_CLIENT_ID=your_client_id
+# AGENTA_AUTHN_GOOGLE_CLIENT_SECRET=your_client_secret
+# - Google OAuth is ENABLED
+# - Available endpoints: /authorisationurl?thirdPartyId=google
+# - SuperTokens thirdparty recipe is initialized with Google provider
+#
+# Set: AGENTA_AUTHN_GITHUB_ENABLED=true
+# AGENTA_AUTHN_GITHUB_CLIENT_ID=your_client_id
+# AGENTA_AUTHN_GITHUB_CLIENT_SECRET=your_client_secret
+# - GitHub OAuth is ENABLED
+# - Available endpoints: /authorisationurl?thirdPartyId=github
+# - SuperTokens thirdparty recipe is initialized with GitHub provider
+
+# ============================================================================
+# OIDC/SSO CONFIGURATION (EE ONLY)
+# ============================================================================
+# Requires:
+# 1. EE edition enabled
+# 2. Organization with verified domain in organization_domains table
+# 3. OIDC provider configured in organization_providers table
+# 4. Provider must be enabled and linked to domain
+#
+# Available endpoints: /authorize/oidc?provider_id=UUID
+# SuperTokens thirdparty recipe handles OIDC exchange
+#
+# Expected discover response (when domain has SSO):
+# {
+# "primary_method": "sso",
+# "methods": {
+# "email:password": false,
+# "email:otp": false,
+# "social:google": false,
+# "social:github": false,
+# "sso": {
+# "available": true,
+# "required_by_some_orgs": false,
+# "providers": [
+# {
+# "slug": "okta",
+# "name": "ACME SSO",
+# "recommended": true
+# }
+# ]
+# }
+# }
+# }
+
+# ============================================================================
+# COMMON ERRORS
+# ============================================================================
+# 404 on /signup or /signin:
+# - Email/Password recipe is NOT enabled
+# - Check: AGENTA_AUTHN_EMAIL should be "password" or unset
+#
+# 404 on /signinup/code:
+# - Email OTP recipe is NOT enabled
+# - Check: AGENTA_AUTHN_EMAIL should be "otp"
+#
+# 404 on /authorisationurl?thirdPartyId=google:
+# - Google OAuth is NOT enabled
+# - Check: AGENTA_AUTHN_GOOGLE_ENABLED=true and credentials configured
+#
+# 404 on /authorize/oidc:
+# - OIDC endpoints are EE only
+# - Check: EE edition enabled and provider configured
+#
+# All methods false in discover response:
+# - No authentication methods are configured
+# - This should NOT happen - email:password is always default
+# - Check SuperTokens initialization in oss/src/__init__.py
+
+# ============================================================================
+# SUPERTOKENS RECIPE INITIALIZATION
+# ============================================================================
+# Location: api/oss/src/__init__.py
+#
+# Email/Password (default):
+# - Initialized when: env.auth.email_method == "password" OR not set
+# - Recipe: emailpassword.init()
+# - Endpoints: /signup, /signin
+#
+# Email OTP:
+# - Initialized when: env.auth.email_method == "otp"
+# - Recipe: passwordless.init()
+# - Endpoints: /signinup/code, /signinup/code/consume
+#
+# Third-Party (Social OAuth):
+# - Initialized when: env.auth.google_enabled OR env.auth.github_enabled
+# - Recipe: thirdparty.init()
+# - Endpoints: /authorisationurl, /callback/{provider}
+#
+# Session:
+# - Always initialized
+# - Recipe: session.init()
+# - Endpoints: /session/verify, /session/refresh
+#
+# Dashboard:
+# - Always initialized
+# - Recipe: dashboard.init()
+# - Endpoints: /dashboard
+
+# ============================================================================
+# SESSION PAYLOAD
+# ============================================================================
+# After successful authentication, SuperTokens creates a session with:
+# - User ID (from SuperTokens)
+# - Email
+# - Identities (list of authentication methods used)
+# Example: ["email:password"], ["email:otp"], ["social:google", "email:otp"]
+#
+# Session cookies:
+# - sAccessToken: Short-lived access token
+# - sRefreshToken: Long-lived refresh token
+# - Both are httpOnly, secure, sameSite=lax
diff --git a/docs/docs/evaluation/configure-evaluators/07-custom-evaluator.mdx b/docs/docs/evaluation/configure-evaluators/07-custom-evaluator.mdx
index 423b5239b9..e138fa8a29 100644
--- a/docs/docs/evaluation/configure-evaluators/07-custom-evaluator.mdx
+++ b/docs/docs/evaluation/configure-evaluators/07-custom-evaluator.mdx
@@ -2,11 +2,7 @@
title: "Custom Code Evaluator"
---
-Sometimes, the default evaluators in **Agenta** may not be sufficient for your specific use case. In such cases, you can create a custom evaluator to suit your specific needs. Custom evaluators are written in Python.
-
-:::info
-For the moment, there are limitation on the code that can be written in the custom evaluator. Our backend uses `RestrictedPython` to execute the code which limits the libraries that can be used.
-:::
+Sometimes, the default evaluators in **Agenta** may not be sufficient for your specific use case. In such cases, you can create a custom evaluator to suit your specific needs. Custom evaluators are written in Python, JavaScript, or TypeScript.
## Evaluation code
diff --git a/docs/docs/observability/integrations/02-langchain.mdx b/docs/docs/observability/integrations/02-langchain.mdx
index 8d365ce859..3f04a1e6a1 100644
--- a/docs/docs/observability/integrations/02-langchain.mdx
+++ b/docs/docs/observability/integrations/02-langchain.mdx
@@ -19,7 +19,7 @@ This guide shows you how to instrument LangChain applications using Agenta's obs
Install the required packages:
```bash
-pip install -U agenta openai opentelemetry-instrumentation-langchain langchain langchain_community
+pip install -U agenta openai opentelemetry-instrumentation-langchain langchain langchain-openai
```
## Configure Environment Variables
@@ -48,15 +48,17 @@ os.environ["AGENTA_HOST"] = "http://localhost"
## Code Example
+This example uses [LangChain Expression Language (LCEL)](https://python.langchain.com/docs/concepts/lcel/) to build a multi-step workflow that generates a joke and then translates it.
+
```python
# highlight-next-line
import agenta as ag
# highlight-next-line
from opentelemetry.instrumentation.langchain import LangchainInstrumentor
-from langchain.schema import SystemMessage, HumanMessage
-from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
-from langchain_community.chat_models import ChatOpenAI
-from langchain.chains import LLMChain, SequentialChain, TransformChain
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.runnables import RunnablePassthrough, RunnableLambda
+from langchain_openai import ChatOpenAI
# highlight-next-line
ag.init()
@@ -66,43 +68,39 @@ LangchainInstrumentor().instrument()
def langchain_app():
# Initialize the chat model
- chat = ChatOpenAI(temperature=0)
-
- # Define a transformation chain to create the prompt
- transform = TransformChain(
- input_variables=["subject"],
- output_variables=["prompt"],
- transform=lambda inputs: {"prompt": f"Tell me a joke about {inputs['subject']}."},
- )
-
- # Define the first LLM chain to generate a joke
- first_prompt_messages = [
- SystemMessage(content="You are a funny sarcastic nerd."),
- HumanMessage(content="{prompt}"),
- ]
- first_prompt_template = ChatPromptTemplate.from_messages(first_prompt_messages)
- first_chain = LLMChain(llm=chat, prompt=first_prompt_template, output_key="joke")
-
- # Define the second LLM chain to translate the joke
- second_prompt_messages = [
- SystemMessage(content="You are an Elf."),
- HumanMessagePromptTemplate.from_template(
- "Translate the joke below into Sindarin language:\n{joke}"
- ),
- ]
- second_prompt_template = ChatPromptTemplate.from_messages(second_prompt_messages)
- second_chain = LLMChain(llm=chat, prompt=second_prompt_template)
-
- # Chain everything together in a sequential workflow
- workflow = SequentialChain(
- chains=[transform, first_chain, second_chain],
- input_variables=["subject"],
+ llm = ChatOpenAI(temperature=0)
+
+ # Create prompt for joke generation
+ joke_prompt = ChatPromptTemplate.from_messages([
+ ("system", "You are a funny sarcastic nerd."),
+ ("human", "Tell me a joke about {subject}."),
+ ])
+
+ # Create prompt for translation
+ translate_prompt = ChatPromptTemplate.from_messages([
+ ("system", "You are an Elf."),
+ ("human", "Translate the joke below into Sindarin language:\n{joke}"),
+ ])
+
+ # Build the chain using LCEL (LangChain Expression Language)
+ # First chain: generate a joke
+ joke_chain = joke_prompt | llm | StrOutputParser()
+
+ # Second chain: translate the joke
+ translate_chain = translate_prompt | llm | StrOutputParser()
+
+ # Combine the chains: generate joke, then translate it
+ full_chain = (
+ {"subject": RunnablePassthrough()}
+ | RunnableLambda(lambda x: {"joke": joke_chain.invoke(x["subject"])})
+ | translate_chain
)
# Execute the workflow and print the result
- result = workflow({"subject": "OpenTelemetry"})
+ result = full_chain.invoke("OpenTelemetry")
print(result)
+
# Run the LangChain application
langchain_app()
```
@@ -111,6 +109,7 @@ langchain_app()
- **Initialize Agenta**: `ag.init()` sets up the Agenta SDK.
- **Instrument LangChain**: `LangchainInstrumentor().instrument()` instruments LangChain for tracing. This must be called **before** running your application to ensure all components are traced.
+- **LCEL Chains**: The pipe operator (`|`) chains components together. Each step's output becomes the next step's input, making it easy to compose complex workflows.
## Using Workflows
diff --git a/docs/docs/observability/trace-with-python-sdk/09-track-chat-sessions.mdx b/docs/docs/observability/trace-with-python-sdk/09-track-chat-sessions.mdx
index b710d7052b..67d182d485 100644
--- a/docs/docs/observability/trace-with-python-sdk/09-track-chat-sessions.mdx
+++ b/docs/docs/observability/trace-with-python-sdk/09-track-chat-sessions.mdx
@@ -5,6 +5,10 @@ description: "Learn how to track multi-turn conversations and chat sessions"
sidebar_position: 9
---
+:::info
+This guide covers tracking chat sessions with the Agenta Python SDK. For JavaScript/TypeScript or other OpenTelemetry-based clients, see the [session tracking with OpenTelemetry guide](/observability/trace-with-opentelemetry/session-tracking).
+:::
+
Chat applications often span multiple requests and traces. Session tracking groups related interactions together so you can analyze complete conversations and user journeys.
## What are sessions?
diff --git a/docs/docs/prompt-engineering/playground/02-custom-providers.mdx b/docs/docs/prompt-engineering/playground/02-custom-providers.mdx
index bbb65252e8..007233c221 100644
--- a/docs/docs/prompt-engineering/playground/02-custom-providers.mdx
+++ b/docs/docs/prompt-engineering/playground/02-custom-providers.mdx
@@ -166,6 +166,24 @@ Region: (e.g eu-central-1)
Model name: (e.g anthropic.claude-3-sonnet-20240229-v1:0)
```
+### Troubleshooting AWS Bedrock
+
+If a model works in the AWS console but fails in Agenta, the most common cause is that the model does not support on-demand throughput in your Region. In that case, you must invoke the model through an inference profile.
+
+You will usually see an error similar to:
+
+`Invocation of model ID with on-demand throughput isn't supported. Retry your request with the ID or ARN of an inference profile that contains this model.`
+
+To fix it:
+
+1. In AWS Bedrock, open the **Cross-region inference** page.
+2. Create or select an **inference profile** that includes your model.
+3. Copy the **Inference profile ID**. It looks like `eu.anthropic.claude-3-haiku-20240307-v1:0`.
+4. In Agenta, set **Model name** to the inference profile ID (not the base model ID).
+5. Keep your Bedrock **Region** set to a Region supported by your account and the model. If you are not sure, use the same Region you used to create the inference profile.
+
+If you see a Bedrock error about a malformed request and an extraneous key (for example, `textGenerationConfig`), verify that you selected the right model identifier. The inference profile ID is often the correct choice for cross-region inference models.
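+
+To sanity-check the inference profile outside Agenta, you can invoke it directly with the AWS CLI, assuming the CLI is installed and configured for the same account and Region. The Region, profile ID, and request body below are placeholders:
+
+```bash
+# Invoke the model through the inference profile (IDs and Region are placeholders)
+aws bedrock-runtime invoke-model \
+  --region eu-central-1 \
+  --model-id eu.anthropic.claude-3-haiku-20240307-v1:0 \
+  --content-type application/json \
+  --cli-binary-format raw-in-base64-out \
+  --body '{"anthropic_version":"bedrock-2023-05-31","max_tokens":50,"messages":[{"role":"user","content":[{"type":"text","text":"Hello"}]}]}' \
+  response.json
+```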
+
## Configuring OpenAI-Compatible Endpoints (e.g., Ollama)
diff --git a/docs/docs/self-host/01-quick-start.mdx b/docs/docs/self-host/01-quick-start.mdx
index 9c3b02cfe8..13ceb0c90f 100644
--- a/docs/docs/self-host/01-quick-start.mdx
+++ b/docs/docs/self-host/01-quick-start.mdx
@@ -69,7 +69,8 @@ If Agenta doesn't start properly, check these common issues:
docker logs agenta-oss-gh-api
```
4. SDK connectivity issues: If you're using the Agenta SDK from outside Docker to connect to your localhost Agenta instance and experiencing connection failures, ensure the `DOCKER_NETWORK_MODE` environment variable is unset (this is the default behavior).
-5. Lack of memory provided to docker: If you are experiencing the web container restarting and dying unexpectedly, the most likely cause is that you are running out of memory. You may need to increase the memory allocated to docker (desktop).
+5. Docker network layout: The Docker networks are defined in the compose files. See `hosting/docker-compose/oss/docker-compose.gh.yml` (OSS) or `hosting/docker-compose/ee/docker-compose.dev.yml` (EE) for the network names and service attachments; the commands after this list show how to inspect them on a running deployment.
+6. Lack of memory provided to Docker: If the web container keeps restarting or dying unexpectedly, the most likely cause is that Docker is running out of memory. You may need to increase the memory allocated to Docker Desktop.
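+
+To verify the networks on a running deployment, the standard Docker CLI is enough (the filter and name below are illustrative; Compose may prefix network names with the project name):
+
+```bash
+# List the networks created by the Agenta compose project
+docker network ls --filter name=agenta
+
+# Inspect a network to see which services are attached (use a name from the listing)
+docker network inspect <network-name>
+```
+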
:::info
To set up a development environment with features like hot-reloading, refer to our [Development Guide](/misc/contributing/development-mode).
@@ -78,5 +79,3 @@ To set up a development environment with features like hot-reloading, refer to o
Need help? Either:
- [Create a GitHub issue](https://github.com/Agenta-AI/agenta/issues/new/choose)
- Join our [Slack community](https://join.slack.com/t/agenta-hq/shared_invite/zt-37pnbp5s6-mbBrPL863d_oLB61GSNFjw) for quick support
-
-
diff --git a/docs/docs/self-host/02-configuration.mdx b/docs/docs/self-host/02-configuration.mdx
index 749debb58f..7eb5dac203 100644
--- a/docs/docs/self-host/02-configuration.mdx
+++ b/docs/docs/self-host/02-configuration.mdx
@@ -49,7 +49,7 @@ Optional Agenta-specific configurations:
| Variable | Description | Default |
|----------|-------------|---------|
-| `AGENTA_AUTO_MIGRATIONS` | Enable automatic database migrations | `true` |
+| `ALEMBIC_AUTO_MIGRATIONS` | Enable automatic database migrations (legacy: `AGENTA_AUTO_MIGRATIONS`) | `true` |
| `AGENTA_PRICING` | Enable pricing features | _(empty)_ |
| `AGENTA_DEMOS` | Enable demo applications | _(empty)_ |
| `AGENTA_RUNTIME_PREFIX` | Prefix for runtime containers | _(empty)_ |
@@ -60,6 +60,20 @@ Optional Agenta-specific configurations:
| `AGENTA_ALLOWED_DOMAINS` | Comma-separated list of email domains allowed to authenticate; when set, all other domains are rejected | _(empty)_ |
| `AGENTA_OTLP_MAX_BATCH_BYTES` | Max OTLP batch size before requests are rejected with 413 | `10485760` (10MB) |
+### Sandbox Runner (Custom Evaluators)
+
+Agenta executes custom evaluator code in a sandboxed environment. You can choose between local execution or remote execution using Daytona sandboxes.
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `AGENTA_SERVICES_SANDBOX_RUNNER` | Code execution backend for custom evaluators. Set to `local` or `daytona`. | `local` |
+| `DAYTONA_API_KEY` | Your Daytona API key. Required when using Daytona. Get one from https://app.daytona.io | _(empty)_ |
+| `DAYTONA_API_URL` | Daytona API endpoint. | `https://app.daytona.io/api` |
+| `DAYTONA_TARGET` | Daytona region for sandbox execution (e.g., `eu`, `us`). | Value of `AGENTA_REGION`, or `eu` |
+| `DAYTONA_SNAPSHOT` | Snapshot ID that defines the sandbox environment. Required when using Daytona. | _(empty)_ |
+
+**When to use Daytona**: Local execution runs evaluator code in the same process as the API server, which is simpler but offers no isolation. Daytona runs the code in isolated containers with their own dependencies. Use Daytona for production deployments or whenever you need stronger isolation.
+
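+As a minimal sketch, switching the sandbox runner to Daytona in a self-hosted deployment could look like this in your environment file (all values are placeholders):
+
+```bash
+# Run custom evaluators in Daytona sandboxes instead of locally (placeholder values)
+AGENTA_SERVICES_SANDBOX_RUNNER=daytona
+DAYTONA_API_KEY=your-daytona-api-key        # from https://app.daytona.io
+DAYTONA_API_URL=https://app.daytona.io/api
+DAYTONA_TARGET=eu                           # defaults to AGENTA_REGION, then "eu"
+DAYTONA_SNAPSHOT=your-snapshot-id           # snapshot defining the sandbox environment
+```
+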
### Third-party (Required)
Essential third-party service configurations:
diff --git a/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx b/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx
index 237fee4f68..638d413d86 100644
--- a/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx
+++ b/docs/docs/tutorials/cookbooks/02-observability_langchain.mdx
@@ -73,9 +73,7 @@ This Langchain RAG application:
```python
from langchain_openai import ChatOpenAI
-
import bs4
-from langchain import hub
from langchain_chroma import Chroma
from langchain_community.document_loaders import WebBaseLoader
from langchain_core.output_parsers import StrOutputParser
diff --git a/docs/package-lock.json b/docs/package-lock.json
index 4a5e1e5339..205fad92b4 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -7031,23 +7031,23 @@
}
},
"node_modules/body-parser": {
- "version": "1.20.3",
- "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
- "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
+ "version": "1.20.4",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz",
+ "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==",
"license": "MIT",
"dependencies": {
- "bytes": "3.1.2",
+ "bytes": "~3.1.2",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
- "destroy": "1.2.0",
- "http-errors": "2.0.0",
- "iconv-lite": "0.4.24",
- "on-finished": "2.4.1",
- "qs": "6.13.0",
- "raw-body": "2.5.2",
+ "destroy": "~1.2.0",
+ "http-errors": "~2.0.1",
+ "iconv-lite": "~0.4.24",
+ "on-finished": "~2.4.1",
+ "qs": "~6.14.0",
+ "raw-body": "~2.5.3",
"type-is": "~1.6.18",
- "unpipe": "1.0.0"
+ "unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8",
@@ -7072,6 +7072,26 @@
"ms": "2.0.0"
}
},
+ "node_modules/body-parser/node_modules/http-errors": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz",
+ "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==",
+ "license": "MIT",
+ "dependencies": {
+ "depd": "~2.0.0",
+ "inherits": "~2.0.4",
+ "setprototypeof": "~1.2.0",
+ "statuses": "~2.0.2",
+ "toidentifier": "~1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
"node_modules/body-parser/node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
@@ -7090,19 +7110,13 @@
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"license": "MIT"
},
- "node_modules/body-parser/node_modules/qs": {
- "version": "6.13.0",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
- "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
- "license": "BSD-3-Clause",
- "dependencies": {
- "side-channel": "^1.0.6"
- },
+ "node_modules/body-parser/node_modules/statuses": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
+ "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
+ "license": "MIT",
"engines": {
- "node": ">=0.6"
- },
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
+ "node": ">= 0.8"
}
},
"node_modules/bonjour-service": {
@@ -18510,9 +18524,9 @@
}
},
"node_modules/qs": {
- "version": "6.14.0",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz",
- "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==",
+ "version": "6.14.1",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz",
+ "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==",
"license": "BSD-3-Clause",
"dependencies": {
"side-channel": "^1.1.0"
@@ -18575,15 +18589,15 @@
}
},
"node_modules/raw-body": {
- "version": "2.5.2",
- "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
- "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
+ "version": "2.5.3",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz",
+ "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==",
"license": "MIT",
"dependencies": {
- "bytes": "3.1.2",
- "http-errors": "2.0.0",
- "iconv-lite": "0.4.24",
- "unpipe": "1.0.0"
+ "bytes": "~3.1.2",
+ "http-errors": "~2.0.1",
+ "iconv-lite": "~0.4.24",
+ "unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
@@ -18598,6 +18612,26 @@
"node": ">= 0.8"
}
},
+ "node_modules/raw-body/node_modules/http-errors": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz",
+ "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==",
+ "license": "MIT",
+ "dependencies": {
+ "depd": "~2.0.0",
+ "inherits": "~2.0.4",
+ "setprototypeof": "~1.2.0",
+ "statuses": "~2.0.2",
+ "toidentifier": "~1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
"node_modules/raw-body/node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
@@ -18610,6 +18644,15 @@
"node": ">=0.10.0"
}
},
+ "node_modules/raw-body/node_modules/statuses": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
+ "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
"node_modules/rc": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
diff --git a/examples/jupyter/integrations/google-adk-integration.ipynb b/examples/jupyter/integrations/google-adk-integration.ipynb
index f35064cc54..bf8c90e28d 100644
--- a/examples/jupyter/integrations/google-adk-integration.ipynb
+++ b/examples/jupyter/integrations/google-adk-integration.ipynb
@@ -77,10 +77,10 @@
"# Load configuration from environment\n",
"os.environ[\"AGENTA_API_KEY\"] = \"YOUR AGENTA API KEY\"\n",
"os.environ[\"AGENTA_HOST\"] = \"https://cloud.agenta.ai\"\n",
- "os.environ[\"GOOGLE_API_KEY\"] = \"YOUR GOOGLE API KEY\" # Required for Google ADK / Gemini\n",
+ "os.environ[\"GOOGLE_API_KEY\"] = \"YOUR GOOGLE API KEY\" # Required for Google ADK / Gemini\n",
"\n",
"# Initialize Agenta (uses AGENTA_* env vars)\n",
- "ag.init()\n"
+ "ag.init()"
]
},
{
@@ -135,6 +135,7 @@
"APP_NAME = \"weather_app\"\n",
"USER_ID = \"demo_user\"\n",
"\n",
+ "\n",
"def get_weather(city: str) -> dict:\n",
" \"\"\"Toy tool used to generate spans in our traces.\"\"\"\n",
" normalized = city.strip().lower()\n",
@@ -144,7 +145,7 @@
" \"report\": \"The weather in New York is sunny with a temperature of 25°C.\",\n",
" }\n",
" if normalized == \"london\":\n",
- " return{\n",
+ " return {\n",
" \"status\": \"success\",\n",
" \"report\": \"The weather in London is cloudy with a temperature of 18°C.\",\n",
" }\n",
@@ -154,6 +155,7 @@
" \"error_message\": f\"Weather information for '{city}' is not available.\",\n",
" }\n",
"\n",
+ "\n",
"weather_agent = Agent(\n",
" name=\"weather_agent\",\n",
" model=\"gemini-2.0-flash-exp\",\n",
@@ -163,7 +165,9 @@
")\n",
"\n",
"session_service = InMemorySessionService()\n",
- "weather_runner = Runner(agent=weather_agent , app_name=APP_NAME, session_service=session_service)\n"
+ "weather_runner = Runner(\n",
+ " agent=weather_agent, app_name=APP_NAME, session_service=session_service\n",
+ ")"
]
},
{
@@ -183,8 +187,9 @@
"from google.genai import types\n",
"import agenta as ag\n",
"\n",
+ "\n",
"@ag.instrument(spankind=\"workflow\")\n",
- "async def ask_weather(question: str, user_id: str = \"demo_user\")-> str:\n",
+ "async def ask_weather(question: str, user_id: str = \"demo_user\") -> str:\n",
" \"\"\"\n",
" Run a single weather question through the Google ADK agent.\n",
" This appears as a top-level span inside Agenta observability.\n",
@@ -201,13 +206,12 @@
" parts=[types.Part.from_text(text=question)],\n",
" )\n",
"\n",
- "\n",
" try:\n",
" events = weather_runner.run_async(\n",
" user_id=user_id,\n",
" session_id=session.id,\n",
" new_message=content,\n",
- " )\n",
+ " )\n",
"\n",
" final_text = \"\"\n",
" async for event in events:\n",
@@ -220,11 +224,9 @@
" # Basic handling for Gemini quota / resource exhaustion\n",
" msg = str(exc).lower()\n",
" if \"exhausted\" in msg:\n",
- " return (\n",
- " \"The model is temporarily exhausted or over quota. (Check you 'google gemini' subscription) \"\n",
- " )\n",
+ " return \"The model is temporarily exhausted or over quota. (Check your 'Google Gemini' subscription.)\"\n",
" # Re-raise all other errors so you still see real issues\n",
- " raise\n"
+ " raise"
]
},
{
@@ -240,12 +242,13 @@
"metadata": {},
"outputs": [],
"source": [
- "# Example usage \n",
- "async def main(): \n",
- " response = await ask_weather(\"What is the weather in New York?\") \n",
- " print(\"Response:\", response) \n",
+ "# Example usage\n",
+ "async def main():\n",
+ " response = await ask_weather(\"What is the weather in New York?\")\n",
+ " print(\"Response:\", response)\n",
"\n",
- "# Run the example \n",
+ "\n",
+ "# Run the example\n",
"await main()"
]
},
@@ -283,11 +286,11 @@
"# Example with custom span classification:\n",
"import agenta as ag\n",
"\n",
+ "\n",
"@ag.instrument(spankind=\"agent\")\n",
"def specialized_agent_function(input_data: str):\n",
" # Agent-specific logic implementation (placeholder)\n",
- " return input_data.upper()\n",
- " "
+ " return input_data.upper()"
]
},
{
diff --git a/examples/jupyter/observability/observability_langchain.ipynb b/examples/jupyter/observability/observability_langchain.ipynb
index dfc7d6ae09..3390878816 100644
--- a/examples/jupyter/observability/observability_langchain.ipynb
+++ b/examples/jupyter/observability/observability_langchain.ipynb
@@ -135,78 +135,8 @@
"cell_type": "code",
"execution_count": null,
"metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "'To save a new version of a prompt in Agenta, you need to create a variant, which acts like a branch in git for versioning. After making your changes, commit them to the variant. Finally, you can deploy the specific version of your variant to the desired environment.'"
- ]
- },
- "execution_count": 16,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "from langchain_openai import ChatOpenAI\n",
- "\n",
- "\n",
- "import bs4\n",
- "from langchain import hub\n",
- "from langchain_chroma import Chroma\n",
- "from langchain_community.document_loaders import WebBaseLoader\n",
- "from langchain_core.output_parsers import StrOutputParser\n",
- "from langchain_core.runnables import RunnablePassthrough\n",
- "from langchain_openai import OpenAIEmbeddings\n",
- "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
- "from langchain_core.prompts import ChatPromptTemplate\n",
- "\n",
- "prompt = \"\"\"\n",
- "You are an assistant for question-answering tasks.\n",
- "Use the following pieces of retrieved context to answer the question.\n",
- "If you don't know the answer, just say that you don't know.\n",
- "Use three sentences maximum and keep the answer concise and to the point.\n",
- "\n",
- "Question: {question} \n",
- "\n",
- "Context: {context} \n",
- "\n",
- "Answer:\n",
- "\"\"\"\n",
- "\n",
- "prompt_template = ChatPromptTemplate(\n",
- " [\n",
- " (\"human\", prompt),\n",
- " ]\n",
- ")\n",
- "\n",
- "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
- "\n",
- "loader = WebBaseLoader(\n",
- " web_paths=(\n",
- " \"https://agenta.ai/docs/prompt-engineering/managing-prompts-programatically/create-and-commit\",\n",
- " ),\n",
- " bs_kwargs=dict(parse_only=bs4.SoupStrainer(\"article\")), # Only parse the core\n",
- ")\n",
- "docs = loader.load()\n",
- "\n",
- "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
- "splits = text_splitter.split_documents(docs)\n",
- "vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n",
- "\n",
- "# Retrieve and generate using the relevant snippets of the blog.\n",
- "retriever = vectorstore.as_retriever()\n",
- "\n",
- "\n",
- "rag_chain = (\n",
- " {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
- " | prompt_template\n",
- " | llm\n",
- " | StrOutputParser()\n",
- ")\n",
- "\n",
- "rag_chain.invoke(\"How can I save a new version of a prompt in Agenta?\")"
- ]
+ "outputs": [],
+ "source": "from langchain_openai import ChatOpenAI\n\nimport bs4\nfrom langchain_chroma import Chroma\nfrom langchain_community.document_loaders import WebBaseLoader\nfrom langchain_core.output_parsers import StrOutputParser\nfrom langchain_core.runnables import RunnablePassthrough\nfrom langchain_openai import OpenAIEmbeddings\nfrom langchain_text_splitters import RecursiveCharacterTextSplitter\nfrom langchain_core.prompts import ChatPromptTemplate\n\nprompt = \"\"\"\nYou are an assistant for question-answering tasks.\nUse the following pieces of retrieved context to answer the question.\nIf you don't know the answer, just say that you don't know.\nUse three sentences maximum and keep the answer concise and to the point.\n\nQuestion: {question} \n\nContext: {context} \n\nAnswer:\n\"\"\"\n\nprompt_template = ChatPromptTemplate(\n [\n (\"human\", prompt),\n ]\n)\n\nllm = ChatOpenAI(model=\"gpt-4o-mini\")\n\nloader = WebBaseLoader(\n web_paths=(\n \"https://agenta.ai/docs/prompt-engineering/managing-prompts-programatically/create-and-commit\",\n ),\n bs_kwargs=dict(parse_only=bs4.SoupStrainer(\"article\")), # Only parse the core\n)\ndocs = loader.load()\n\ntext_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\nsplits = text_splitter.split_documents(docs)\nvectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n\n# Retrieve and generate using the relevant snippets of the blog.\nretriever = vectorstore.as_retriever()\n\n\nrag_chain = (\n {\"context\": retriever, \"question\": RunnablePassthrough()}\n | prompt_template\n | llm\n | StrOutputParser()\n)\n\nrag_chain.invoke(\"How can I save a new version of a prompt in Agenta?\")"
}
],
"metadata": {
@@ -230,4 +160,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/examples/python/evaluators/ag/store_internals.py b/examples/python/evaluators/ag/store_internals.py
index ce527657a1..b1559a3271 100644
--- a/examples/python/evaluators/ag/store_internals.py
+++ b/examples/python/evaluators/ag/store_internals.py
@@ -33,10 +33,12 @@ def evaluate(
return 1.0 if str(output).lower() == str(correct_answer).lower() else 0.0
# Store a simple hello world message in internals
- ag.tracing.store_internals({
- "message": "Hello World from evaluator internals!",
- "evaluator_name": "internals_demo",
- })
+ ag.tracing.store_internals(
+ {
+ "message": "Hello World from evaluator internals!",
+ "evaluator_name": "internals_demo",
+ }
+ )
# Perform actual evaluation
output_str = str(output).lower().strip()
@@ -47,11 +49,13 @@ def evaluate(
# Store evaluation details as internals
# These will be visible in the observability drawer
- ag.tracing.store_internals({
- "output_processed": output_str,
- "correct_answer_processed": correct_str,
- "exact_match": match,
- "score": score,
- })
+ ag.tracing.store_internals(
+ {
+ "output_processed": output_str,
+ "correct_answer_processed": correct_str,
+ "exact_match": match,
+ "score": score,
+ }
+ )
return score
diff --git a/examples/python/evaluators/basic/json_structure.py b/examples/python/evaluators/basic/json_structure.py
index fea48debe3..0e123463a7 100644
--- a/examples/python/evaluators/basic/json_structure.py
+++ b/examples/python/evaluators/basic/json_structure.py
@@ -13,7 +13,7 @@ def evaluate(
app_params: Dict[str, str],
inputs: Dict[str, str],
output: Union[str, Dict[str, Any]],
- correct_answer: str
+ correct_answer: str,
) -> float:
"""
Evaluator that validates JSON structure and required fields.
@@ -40,7 +40,7 @@ def evaluate(
return 0.0
# Get required fields
- required_fields = app_params.get('required_fields', '').split(',')
+ required_fields = app_params.get("required_fields", "").split(",")
required_fields = [f.strip() for f in required_fields if f.strip()]
if not required_fields:
diff --git a/examples/python/evaluators/basic/length_check.py b/examples/python/evaluators/basic/length_check.py
index e86e3177df..01cd8cf947 100644
--- a/examples/python/evaluators/basic/length_check.py
+++ b/examples/python/evaluators/basic/length_check.py
@@ -13,7 +13,7 @@ def evaluate(
app_params: Dict[str, str],
inputs: Dict[str, str],
output: Union[str, Dict[str, Any]],
- correct_answer: str
+ correct_answer: str,
) -> float:
"""
Evaluator that checks if output length is within expected range.
@@ -36,8 +36,8 @@ def evaluate(
output_str = str(output)
# Get length constraints from app_params
- min_length = int(app_params.get('min_length', 0))
- max_length = int(app_params.get('max_length', 10000))
+ min_length = int(app_params.get("min_length", 0))
+ max_length = int(app_params.get("max_length", 10000))
output_length = len(output_str)
diff --git a/examples/python/evaluators/basic/string_contains.py b/examples/python/evaluators/basic/string_contains.py
index b807c1e2da..aca84c3c82 100644
--- a/examples/python/evaluators/basic/string_contains.py
+++ b/examples/python/evaluators/basic/string_contains.py
@@ -13,7 +13,7 @@ def evaluate(
app_params: Dict[str, str],
inputs: Dict[str, str],
output: Union[str, Dict[str, Any]],
- correct_answer: str
+ correct_answer: str,
) -> float:
"""
Evaluator that checks if the output contains expected keywords.
diff --git a/examples/python/evaluators/basic/word_count.py b/examples/python/evaluators/basic/word_count.py
index 8dd4446656..346b88cfa7 100644
--- a/examples/python/evaluators/basic/word_count.py
+++ b/examples/python/evaluators/basic/word_count.py
@@ -13,7 +13,7 @@ def evaluate(
app_params: Dict[str, str],
inputs: Dict[str, str],
output: Union[str, Dict[str, Any]],
- correct_answer: str
+ correct_answer: str,
) -> float:
"""
Evaluator that checks word count is within target range.
@@ -31,7 +31,7 @@ def evaluate(
"""
# Convert output to string
if isinstance(output, dict):
- output_str = str(output.get('text', json.dumps(output)))
+ output_str = str(output.get("text", json.dumps(output)))
else:
output_str = str(output)
@@ -40,14 +40,14 @@ def evaluate(
word_count = len(words)
# Check target or range
- if 'target_words' in app_params:
- target = int(app_params['target_words'])
+ if "target_words" in app_params:
+ target = int(app_params["target_words"])
# Allow 10% variance
min_words = int(target * 0.9)
max_words = int(target * 1.1)
else:
- min_words = int(app_params.get('min_words', 0))
- max_words = int(app_params.get('max_words', 10000))
+ min_words = int(app_params.get("min_words", 0))
+ max_words = int(app_params.get("max_words", 10000))
if min_words <= word_count <= max_words:
return 1.0
diff --git a/examples/python/evaluators/numpy/dependency_check.py b/examples/python/evaluators/numpy/dependency_check.py
index 70bcdcf30e..46d8bfebcb 100644
--- a/examples/python/evaluators/numpy/dependency_check.py
+++ b/examples/python/evaluators/numpy/dependency_check.py
@@ -12,7 +12,7 @@ def evaluate(
app_params: Dict[str, str],
inputs: Dict[str, str],
output: Union[str, Dict[str, Any]],
- correct_answer: str
+ correct_answer: str,
) -> float:
"""
Tests if NumPy is available in the environment.
diff --git a/examples/python/evaluators/numpy/exact_match.py b/examples/python/evaluators/numpy/exact_match.py
index 254a6f0884..058bd6d9ec 100644
--- a/examples/python/evaluators/numpy/exact_match.py
+++ b/examples/python/evaluators/numpy/exact_match.py
@@ -13,7 +13,7 @@ def evaluate(
app_params: Dict[str, str],
inputs: Dict[str, str],
output: Union[str, Dict[str, Any]],
- correct_answer: str
+ correct_answer: str,
) -> float:
"""
Tests NumPy functionality by counting characters in strings.
diff --git a/examples/test_daytona_scripts.py b/examples/test_daytona_scripts.py
index 5b8d5b8568..aa7da58fb0 100644
--- a/examples/test_daytona_scripts.py
+++ b/examples/test_daytona_scripts.py
@@ -33,7 +33,13 @@ def _load_files() -> dict[str, list[Path]]:
for runtime, folder in BASIC_DIRS.items():
if not folder.exists():
continue
- pattern = "*.py" if runtime == "python" else "*.js" if runtime == "javascript" else "*.ts"
+ pattern = (
+ "*.py"
+ if runtime == "python"
+ else "*.js"
+ if runtime == "javascript"
+ else "*.ts"
+ )
candidates = sorted(folder.glob(pattern))
files[runtime] = [
path
@@ -57,9 +63,7 @@ def _wrap_js(code: str) -> str:
"const app_params = params.app_params;\n"
"const inputs = params.inputs;\n"
"const output = params.output;\n"
- "const correct_answer = params.correct_answer;\n"
- + code
- + "\n"
+ "const correct_answer = params.correct_answer;\n" + code + "\n"
"let result = evaluate(app_params, inputs, output, correct_answer);\n"
"result = Number(result);\n"
"if (!Number.isFinite(result)) { result = 0.0; }\n"
@@ -82,9 +86,7 @@ def _wrap_python(code: str) -> str:
"app_params = params['app_params']\n"
"inputs = params['inputs']\n"
"output = params['output']\n"
- "correct_answer = params['correct_answer']\n"
- + code
- + "\n"
+ "correct_answer = params['correct_answer']\n" + code + "\n"
"result = evaluate(app_params, inputs, output, correct_answer)\n"
"if isinstance(result, (float, int, str)):\n"
" try:\n"
diff --git a/hosting/docker-compose/ee/docker-compose.dev.yml b/hosting/docker-compose/ee/docker-compose.dev.yml
index 9bb435c188..9382983c04 100644
--- a/hosting/docker-compose/ee/docker-compose.dev.yml
+++ b/hosting/docker-compose/ee/docker-compose.dev.yml
@@ -1,78 +1,58 @@
name: agenta-ee-dev
services:
- # Build services - keep for caching but not strictly needed
.api:
+ # === IMAGE ================================================ #
image: agenta-ee-dev-api:latest
build:
context: ../../../api
dockerfile: ee/docker/Dockerfile.dev
+ # === EXECUTION ============================================ #
command: ["true"]
.web:
+ # === IMAGE ================================================ #
image: agenta-ee-dev-web:latest
build:
context: ../../../web
dockerfile: ee/docker/Dockerfile.dev
+ # === EXECUTION ============================================ #
command: ["true"]
web:
+ # === ACTIVATION =========================================== #
profiles:
- with-web
-
+ # === IMAGE ================================================ #
image: agenta-ee-dev-web:latest
-
+ # === EXECUTION ============================================ #
+ command: sh -c "pnpm dev-ee"
+ # === STORAGE ============================================== #
volumes:
- ../../../web/ee/src:/app/ee/src
- ../../../web/ee/public:/app/ee/public
- ../../../web/oss/src:/app/oss/src
- ../../../web/oss/public:/app/oss/public
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
-
- ports:
- - "3000:3000"
-
- restart: always
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
networks:
- agenta-network
-
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.agenta-web.rule=PathPrefix(`/`)"
- "traefik.http.routers.agenta-web.entrypoints=web"
- "traefik.http.services.agenta-web.loadbalancer.server.port=3000"
-
- command: sh -c "pnpm dev-ee"
+ # === LIFECYCLE ============================================ #
+ restart: always
api:
+ # === IMAGE ================================================ #
image: agenta-ee-dev-api:latest
-
- volumes:
- - ../../../api:/app
- - ../../../sdk:/sdk
-
- env_file:
- - ${ENV_FILE:-./.env.ee.dev}
-
- labels:
- - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
- - "traefik.http.routers.api.entrypoints=web"
- - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
- - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
- - "traefik.http.routers.api.middlewares=api-strip"
- - "traefik.http.services.api.loadbalancer.server.port=8000"
- - "traefik.http.routers.api.service=api"
-
- restart: always
-
- networks:
- - agenta-network
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
+ # === EXECUTION ============================================ #
command:
[
"uvicorn",
@@ -85,7 +65,23 @@ services:
"--root-path",
"/api",
]
-
+ # === STORAGE ============================================== #
+ volumes:
+ - ../../../api/ee:/app/ee
+ - ../../../api/oss:/app/oss
+ - ../../../api/entrypoints:/app/entrypoints
+ - ../../../sdk:/sdk
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
@@ -97,125 +93,173 @@ services:
condition: service_healthy
redis-durable:
condition: service_healthy
+ # === LABELS =============================================== #
+ labels:
+ - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
+ - "traefik.http.routers.api.entrypoints=web"
+ - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
+ - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
+ - "traefik.http.routers.api.middlewares=api-strip"
+ - "traefik.http.services.api.loadbalancer.server.port=8000"
+ - "traefik.http.routers.api.service=api"
+ # === LIFECYCLE ============================================ #
+ restart: always
worker-evaluations:
+ # === IMAGE ================================================ #
image: agenta-ee-dev-api:latest
-
+ # === EXECUTION ============================================ #
+ command: >
+ watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
+ python -m entrypoints.worker_evaluations
+ # === STORAGE ============================================== #
volumes:
- - ../../../api:/app
+ - ../../../api/ee:/app/ee
+ - ../../../api/oss:/app/oss
+ - ../../../api/entrypoints:/app/entrypoints
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
+ # === LIFECYCLE ============================================ #
restart: always
- networks:
- - agenta-network
-
- command: >
- watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
- python -m entrypoints.worker_evaluations
-
worker-tracing:
+ # === IMAGE ================================================ #
image: agenta-ee-dev-api:latest
-
+ # === EXECUTION ============================================ #
+ command: >
+ watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
+ python -m entrypoints.worker_tracing
+ # === STORAGE ============================================== #
volumes:
- - ../../../api:/app
+ - ../../../api/ee:/app/ee
+ - ../../../api/oss:/app/oss
+ - ../../../api/entrypoints:/app/entrypoints
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
+ # === LIFECYCLE ============================================ #
restart: always
- networks:
- - agenta-network
-
- command: >
- watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
- python -m entrypoints.worker_tracing
-
cron:
+ # === IMAGE ================================================ #
image: agenta-ee-dev-api:latest
-
+ # === EXECUTION ============================================ #
+ command: cron -f
+ # === STORAGE ============================================== #
volumes:
- ../../../api/ee/src/crons/meters.sh:/meters.sh
- ../../../api/oss/src/crons/queries.sh:/queries.sh
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
- postgres
- api
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
+ # === LIFECYCLE ============================================ #
restart: always
-
- networks:
- - agenta-network
-
- command: cron -f
alembic:
+ # === IMAGE ================================================ #
image: agenta-ee-dev-api:latest
-
+ # === EXECUTION ============================================ #
+ command: sh -c "python -m ee.databases.postgres.migrations.runner"
+ # === STORAGE ============================================== #
volumes:
- - ../../../api:/app
+ - ../../../api/ee:/app/ee
+ - ../../../api/oss:/app/oss
+ - ../../../api/entrypoints:/app/entrypoints
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
-
- networks:
- - agenta-network
-
- command: sh -c "python -m ee.databases.postgres.migrations.runner"
completion:
+ # === IMAGE ================================================ #
build:
context: ../../../services/completion
dockerfile: ee/docker/Dockerfile.dev
-
+ # === EXECUTION ============================================ #
+ command:
+ [
+ "uvicorn",
+ "oss.src.main:app",
+ "--host",
+ "0.0.0.0",
+ "--port",
+ "80",
+ "--reload",
+ "--root-path",
+ "/services/completion",
+ ]
+ # === STORAGE ============================================== #
volumes:
- ../../../services/completion:/app
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
extra_hosts:
- "host.docker.internal:host-gateway"
-
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.completion.rule=PathPrefix(`/services/completion/`)"
- "traefik.http.routers.completion.entrypoints=web"
@@ -224,12 +268,15 @@ services:
- "traefik.http.routers.completion.middlewares=completion-strip"
- "traefik.http.services.completion.loadbalancer.server.port=80"
- "traefik.http.routers.completion.service=completion"
-
+ # === LIFECYCLE ============================================ #
restart: always
- networks:
- - agenta-network
-
+ chat:
+ # === IMAGE ================================================ #
+ build:
+ context: ../../../services/chat
+ dockerfile: ee/docker/Dockerfile.dev
+ # === EXECUTION ============================================ #
command:
[
"uvicorn",
@@ -240,24 +287,23 @@ services:
"80",
"--reload",
"--root-path",
- "/services/completion",
+ "/services/chat",
]
-
- chat:
- build:
- context: ../../../services/chat
- dockerfile: ee/docker/Dockerfile.dev
-
+ # === STORAGE ============================================== #
volumes:
- ../../../services/chat:/app
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
extra_hosts:
- "host.docker.internal:host-gateway"
-
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.chat.rule=PathPrefix(`/services/chat/`)"
- "traefik.http.routers.chat.entrypoints=web"
@@ -266,46 +312,29 @@ services:
- "traefik.http.routers.chat.middlewares=chat-strip"
- "traefik.http.services.chat.loadbalancer.server.port=80"
- "traefik.http.routers.chat.service=chat"
-
+ # === LIFECYCLE ============================================ #
restart: always
- networks:
- - agenta-network
-
- command:
- [
- "uvicorn",
- "oss.src.main:app",
- "--host",
- "0.0.0.0",
- "--port",
- "80",
- "--reload",
- "--root-path",
- "/services/chat",
- ]
-
postgres:
+ # === IMAGE ================================================ #
image: postgres:16
-
+ # === STORAGE ============================================== #
+ volumes:
+ - postgres-data:/var/lib/postgresql/data/
+ - ../../../api/ee/databases/postgres/init-db-ee.sql:/docker-entrypoint-initdb.d/init-db.sql
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
environment:
POSTGRES_USER: ${POSTGRES_USER:-username}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
-
- ports:
- - "5432:5432"
-
- restart: always
-
+ # === NETWORK ============================================== #
networks:
- agenta-network
-
- volumes:
- - postgres-data:/var/lib/postgresql/data/
- - ../../../api/ee/databases/postgres/init-db-ee.sql:/docker-entrypoint-initdb.d/init-db.sql
-
+ ports:
+ - "${POSTGRES_PORT:-5432}:5432"
+ # === LIFECYCLE ============================================ #
+ restart: always
healthcheck:
test: ["CMD-SHELL", "pg_isready -U username -d agenta_ee_core"]
interval: 10s
@@ -313,8 +342,9 @@ services:
retries: 5
redis-volatile:
+ # === IMAGE ================================================ #
image: redis:8
-
+ # === EXECUTION ============================================ #
command: >
redis-server
--appendonly no
@@ -322,18 +352,14 @@ services:
--maxmemory 512mb
--maxmemory-policy volatile-lru
--port 6379
-
- ports:
- - "6379:6379"
-
- networks:
- - agenta-network
-
+ # === STORAGE ============================================== #
volumes:
- redis-volatile-data:/data
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "redis-cli", "-p", "6379", "ping"]
interval: 10s
@@ -342,8 +368,9 @@ services:
start_period: 5s
redis-durable:
+ # === IMAGE ================================================ #
image: redis:8
-
+ # === EXECUTION ============================================ #
command: >
redis-server
--appendonly yes
@@ -352,18 +379,14 @@ services:
--maxmemory 512mb
--maxmemory-policy noeviction
--port 6381
-
- ports:
- - "6381:6381"
-
- networks:
- - agenta-network
-
+ # === STORAGE ============================================== #
volumes:
- redis-durable-data:/data
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "redis-cli", "-p", "6381", "ping"]
interval: 10s
@@ -372,28 +395,28 @@ services:
start_period: 5s
traefik:
+ # === IMAGE ================================================ #
image: traefik:2
-
- command:
+ # === EXECUTION ============================================ #
+ command:
- --api.dashboard=true
- --api.insecure=true
- --providers.docker
+ - --providers.docker.constraints=Label(`com.docker.compose.project`,`${COMPOSE_PROJECT_NAME:-agenta-ee-dev}`)
- --entrypoints.web.address=:80
- --ping=true
- - --accesslog=true # Enable access logs for debugging
-
- ports:
- - "80:80" # ALB forwards to this port
- - "8080:8080" # Dashboard (optional, can be internal only)
-
+ - --accesslog=true
+ # === STORAGE ============================================== #
volumes:
- /var/run/docker.sock:/var/run/docker.sock
-
+ # === NETWORK ============================================== #
networks:
- agenta-network
-
+ ports:
+ - "${TRAEFIK_PORT:-80}:80"
+ - "${TRAEFIK_UI_PORT:-8080}:8080"
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "traefik", "healthcheck", "--ping"]
interval: 10s
@@ -402,28 +425,24 @@ services:
start_period: 10s
supertokens:
+ # === IMAGE ================================================ #
image: registry.supertokens.io/supertokens/supertokens-postgresql
-
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+ environment:
+ POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS:-postgresql://username:password@postgres:5432/agenta_ee_supertokens}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
alembic:
condition: service_completed_successfully
-
- ports:
- - "3567:3567"
-
- env_file:
- - ${ENV_FILE:-./.env.ee.dev}
-
- environment:
- POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS}
-
+ # === LIFECYCLE ============================================ #
restart: always
-
- networks:
- - agenta-network
-
healthcheck:
test: >
bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"'
@@ -432,23 +451,25 @@ services:
retries: 5
stripe:
+ # === IMAGE ================================================ #
image: stripe/stripe-cli:latest
-
- command: [
- listen,
- --forward-to,
- http://api:8000/billing/stripe/events/,
- --events,
- "customer.subscription.created,customer.subscription.deleted,invoice.updated,invoice.upcoming,invoice.payment_failed,invoice.payment_succeeded"
- ]
-
+ # === EXECUTION ============================================ #
+ command:
+ [
+ listen,
+ --forward-to,
+ http://api:8000/billing/stripe/events/,
+ --events,
+ "customer.subscription.created,customer.subscription.deleted,invoice.updated,invoice.upcoming,invoice.payment_failed,invoice.payment_succeeded",
+ ]
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.ee.dev}
-
- restart: always
-
+ # === NETWORK ============================================== #
networks:
- agenta-network
+ # === LIFECYCLE ============================================ #
+ restart: always
networks:
agenta-network:
diff --git a/hosting/docker-compose/ee/env.ee.dev.example b/hosting/docker-compose/ee/env.ee.dev.example
index c51bad5e19..77a442f622 100644
--- a/hosting/docker-compose/ee/env.ee.dev.example
+++ b/hosting/docker-compose/ee/env.ee.dev.example
@@ -1,93 +1,167 @@
-# First-party (required)
+# ============================================================================ #
+# License - https://agenta.ai/pricing
+# ============================================================================ #
AGENTA_LICENSE=ee
-AGENTA_STAGE=dev
-AGENTA_PROVIDER=local
-AGENTA_WEB_URL=http://localhost
-AGENTA_API_URL=http://localhost/api
-AGENTA_SERVICES_URL=http://localhost/services
-AGENTA_AUTH_KEY=change-me
-AGENTA_CRYPT_KEY=change-me
-AGENTA_API_IMAGE_NAME=agenta-api
-AGENTA_API_IMAGE_TAG=latest
-AGENTA_WEB_IMAGE_NAME=agenta-web
-AGENTA_WEB_IMAGE_TAG=latest
-AGENTA_SERVICES_COMPLETION_IMAGE_NAME=agenta-completion
-AGENTA_SERVICES_COMPLETION_IMAGE_TAG=latest
-AGENTA_SERVICES_CHAT_IMAGE_NAME=agenta-chat
-AGENTA_SERVICES_CHAT_IMAGE_TAG=latest
-
-# First-party (registry & service)
-DOCKER_NETWORK_MODE=bridge
-POSTGRES_USERNAME=username
-POSTGRES_PASSWORD=password
-
-# First-party (optional)
-AGENTA_AUTO_MIGRATIONS=true
-AGENTA_PRICING=
-AGENTA_DEMOS=
-AGENTA_RUNTIME_PREFIX=
-AGENTA_API_INTERNAL_URL=
-AGENTA_LITELLM_MOCK=
-POSTGRES_USERNAME_ADMIN=
-POSTGRES_PASSWORD_ADMIN=
-AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true
-AGENTA_OTLP_MAX_BATCH_BYTES=10485760
-
-# Third-party (required)
-TRAEFIK_DOMAIN=
-TRAEFIK_PROTOCOL=
-TRAEFIK_PORT=
-
-# Redis: set REDIS_URI for a single instance, or override with the split URIs below
-REDIS_URI=
-REDIS_URI_VOLATILE=
-REDIS_URI_DURABLE=
-
-POSTGRES_URI_SUPERTOKENS="postgresql://username:password@postgres:5432/agenta_ee_supertokens"
-POSTGRES_URI_CORE="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_core"
-POSTGRES_URI_TRACING="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_tracing"
-
-ALEMBIC_CFG_PATH_CORE=/app/ee/databases/postgres/migrations/core/alembic.ini
-ALEMBIC_CFG_PATH_TRACING=/app/ee/databases/postgres/migrations/tracing/alembic.ini
-
-SUPERTOKENS_CONNECTION_URI=http://supertokens:3567
-
-# Third-party (optional)
-AWS_ECR_URL=
-AWS_RDS_SECRET=
+# ============================================================================ #
+# Secrets - REPLACE ME IN PRODUCTION!
+# ============================================================================ #
+AGENTA_AUTH_KEY=replace-me
+AGENTA_CRYPT_KEY=replace-me
+
+# ============================================================================ #
+# Endpoints
+# ============================================================================ #
+# AGENTA_WEB_URL=http://localhost
+# AGENTA_API_URL=http://localhost/api
+# AGENTA_SERVICES_URL=http://localhost/services
+# AGENTA_API_INTERNAL_URL=
+
+# ============================================================================ #
+# Images
+# ============================================================================ #
+# AGENTA_WEB_IMAGE_NAME=agenta-web
+# AGENTA_WEB_IMAGE_TAG=latest
+# AGENTA_API_IMAGE_NAME=agenta-api
+# AGENTA_API_IMAGE_TAG=latest
+# AGENTA_COMPLETION_IMAGE_NAME=agenta-completion
+# AGENTA_COMPLETION_IMAGE_TAG=latest
+# AGENTA_CHAT_IMAGE_NAME=agenta-chat
+# AGENTA_CHAT_IMAGE_TAG=latest
+
+# ============================================================================ #
+# OTLP
+# ============================================================================ #
+# AGENTA_OTLP_MAX_BATCH_BYTES=10485760
+
+# ============================================================================ #
+# Proxy - LLM Providers
+# ============================================================================ #
+# OPENAI_API_KEY=
+# ANTHROPIC_API_KEY=
+# COHERE_API_KEY=
+# GROQ_API_KEY=
+# GEMINI_API_KEY=
+# MISTRAL_API_KEY=
+# ALEPHALPHA_API_KEY=
+# ANYSCALE_API_KEY=
+# DEEPINFRA_API_KEY=
+# OPENROUTER_API_KEY=
+# PERPLEXITYAI_API_KEY=
+# TOGETHERAI_API_KEY=
+
+# ============================================================================ #
+# Docker - Compose
+# ============================================================================ #
+# COMPOSE_PROJECT_NAME=agenta-ee-dev
+
+# ============================================================================ #
+# Network - Traefik
+# ============================================================================ #
+# TRAEFIK_PROTOCOL=http
+# TRAEFIK_DOMAIN=localhost
+# TRAEFIK_PORT=80
+# TRAEFIK_SSL_DIR=
+
+# ============================================================================ #
+# Network - Nginx
+# ============================================================================ #
+# NGINX_PORT=80
+
+# ============================================================================ #
+# Databases - Postgres
+# ============================================================================ #
+# POSTGRES_USER=username
+# POSTGRES_PASSWORD=password
+
+# POSTGRES_PORT=5432
+# POSTGRES_URI_CORE=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_core
+# POSTGRES_URI_TRACING=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_tracing
+# POSTGRES_URI_SUPERTOKENS=postgresql://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_supertokens
+
+# ============================================================================ #
+# Databases - Alembic (migrations)
+# ============================================================================ #
+# ALEMBIC_AUTO_MIGRATIONS=true
+# ALEMBIC_CFG_PATH_CORE=/app/{LICENSE}/databases/postgres/migrations/core/alembic.ini
+# ALEMBIC_CFG_PATH_TRACING=/app/{LICENSE}/databases/postgres/migrations/tracing/alembic.ini
+
+# ============================================================================ #
+# Databases - Redis
+# ============================================================================ #
+# REDIS_URI_VOLATILE=redis://localhost:6379/0
+# REDIS_URI_DURABLE=redis://localhost:6381/0
+
+# ============================================================================ #
+# Authentication - SuperTokens
+# ============================================================================ #
+# SUPERTOKENS_EMAIL_DISABLED=false
+
+# ============================================================================ #
+# Authentication - Email providers
+# ============================================================================ #
+# SENDGRID_API_KEY=
+# SENDGRID_FROM_ADDRESS=
+
+# ============================================================================ #
+# Authentication - OIDC providers
+# ============================================================================ #
+# GOOGLE_OAUTH_CLIENT_ID=
+# GOOGLE_OAUTH_CLIENT_SECRET=
+
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_ID=
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET=
+# GOOGLE_WORKSPACES_HD=
+
+# APPLE_OAUTH_CLIENT_ID=
+# APPLE_OAUTH_CLIENT_SECRET=
+# APPLE_KEY_ID=
+# APPLE_TEAM_ID=
+# APPLE_PRIVATE_KEY=
+
+# DISCORD_OAUTH_CLIENT_ID=
+# DISCORD_OAUTH_CLIENT_SECRET=
+
+# FACEBOOK_OAUTH_CLIENT_ID=
+# FACEBOOK_OAUTH_CLIENT_SECRET=
+
+# GITHUB_OAUTH_CLIENT_ID=
+# GITHUB_OAUTH_CLIENT_SECRET=
+
+# GITLAB_OAUTH_CLIENT_ID=
+# GITLAB_OAUTH_CLIENT_SECRET=
+# GITLAB_BASE_URL=
+
+# BITBUCKET_OAUTH_CLIENT_ID=
+# BITBUCKET_OAUTH_CLIENT_SECRET=
+
+# LINKEDIN_OAUTH_CLIENT_ID=
+# LINKEDIN_OAUTH_CLIENT_SECRET=
+
+# OKTA_OAUTH_CLIENT_ID=
+# OKTA_OAUTH_CLIENT_SECRET=
+# OKTA_DOMAIN=
+
+# AZURE_AD_OAUTH_CLIENT_ID=
+# AZURE_AD_OAUTH_CLIENT_SECRET=
+# AZURE_AD_DIRECTORY_ID=
+
+# BOXY_SAML_OAUTH_CLIENT_ID=
+# BOXY_SAML_OAUTH_CLIENT_SECRET=
+# BOXY_SAML_URL=
+
+# TWITTER_OAUTH_CLIENT_ID=
+# TWITTER_OAUTH_CLIENT_SECRET=
+
+# ============================================================================ #
+# Billing - Stripe [ee-only]
+# ============================================================================ #
+# STRIPE_API_KEY=
+# STRIPE_WEBHOOK_SECRET=
+# STRIPE_WEBHOOK_TARGET=
+# STRIPE_PRICING=
+
+# ============================================================================ #
+# Analytics - PostHog
+# ============================================================================ #
POSTHOG_API_KEY=phc_3urGRy5TL1HhaHnRYL0JSHxJxigRVackhphHtozUmdp
-
-GITHUB_OAUTH_CLIENT_ID=
-GITHUB_OAUTH_CLIENT_SECRET=
-GOOGLE_OAUTH_CLIENT_ID=
-GOOGLE_OAUTH_CLIENT_SECRET=
-
-SUPERTOKENS_API_KEY=replace-me
-
-NEW_RELIC_LICENSE_KEY=
-NRIA_LICENSE_KEY=
-
-LOOPS_API_KEY=
-
-SENDGRID_API_KEY=
-
-CRISP_WEBSITE_ID=
-
-STRIPE_API_KEY=
-STRIPE_WEBHOOK_SECRET=
-STRIPE_WEBHOOK_TARGET=
-
-# Third-party - LLM (optional)
-ALEPHALPHA_API_KEY=
-ANTHROPIC_API_KEY=
-ANYSCALE_API_KEY=
-COHERE_API_KEY=
-DEEPINFRA_API_KEY=
-GEMINI_API_KEY=
-GROQ_API_KEY=
-MISTRAL_API_KEY=
-OPENAI_API_KEY=
-OPENROUTER_API_KEY=
-PERPLEXITYAI_API_KEY=
-TOGETHERAI_API_KEY=
diff --git a/hosting/docker-compose/ee/env.ee.gh.example b/hosting/docker-compose/ee/env.ee.gh.example
index 7485edba05..77a442f622 100644
--- a/hosting/docker-compose/ee/env.ee.gh.example
+++ b/hosting/docker-compose/ee/env.ee.gh.example
@@ -1,86 +1,167 @@
-# First-party (required)
+# ============================================================================ #
+# License - https://agenta.ai/pricing
+# ============================================================================ #
AGENTA_LICENSE=ee
-AGENTA_STAGE=dev
-AGENTA_PROVIDER=local
-AGENTA_API_URL=http://localhost/api
-AGENTA_WEB_URL=http://localhost
-AGENTA_SERVICES_URL=http://localhost/services
-AGENTA_AUTH_KEY=change-me
-AGENTA_CRYPT_KEY=change-me
-AGENTA_API_IMAGE_NAME=agenta-api
-AGENTA_API_IMAGE_TAG=latest
-AGENTA_WEB_IMAGE_NAME=agenta-web
-AGENTA_WEB_IMAGE_TAG=latest
-AGENTA_SERVICES_COMPLETION_IMAGE_NAME=agenta-completion
-AGENTA_SERVICES_COMPLETION_IMAGE_TAG=latest
-AGENTA_SERVICES_CHAT_IMAGE_NAME=agenta-chat
-AGENTA_SERVICES_CHAT_IMAGE_TAG=latest
-
-# First-party (registry & service)
-DOCKER_NETWORK_MODE=bridge
-POSTGRES_PASSWORD=password
-POSTGRES_USERNAME=username
-
-# First-party (optional)
-AGENTA_AUTO_MIGRATIONS=true
-AGENTA_PRICING=
-AGENTA_DEMOS=
-AGENTA_RUNTIME_PREFIX=
-AGENTA_API_INTERNAL_URL=
-AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true
-AGENTA_OTLP_MAX_BATCH_BYTES=10485760
-
-# Third-party (required)
-TRAEFIK_DOMAIN=
-TRAEFIK_PROTOCOL=
-TRAEFIK_PORT=
-
-# Redis: set REDIS_URI for a single instance, or override with the split URIs below
-REDIS_URI=
-REDIS_URI_VOLATILE=
-REDIS_URI_DURABLE=
-
-POSTGRES_URI_SUPERTOKENS="postgresql://username:password@postgres:5432/agenta_ee_supertokens"
-POSTGRES_URI_CORE="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_core"
-POSTGRES_URI_TRACING="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_tracing"
-
-ALEMBIC_CFG_PATH_CORE=/app/ee/databases/postgres/migrations/core/alembic.ini
-ALEMBIC_CFG_PATH_TRACING=/app/ee/databases/postgres/migrations/tracing/alembic.ini
-
-SUPERTOKENS_API_KEY=replace-me
-SUPERTOKENS_CONNECTION_URI=http://supertokens:3567
-
-# Third-party (optional)
-POSTHOG_API_KEY=phc_3urGRy5TL1HhaHnRYL0JSHxJxigRVackhphHtozUmdp
-
-GITHUB_OAUTH_CLIENT_ID=
-GITHUB_OAUTH_CLIENT_SECRET=
-
-GOOGLE_OAUTH_CLIENT_ID=
-GOOGLE_OAUTH_CLIENT_SECRET=
-
-NEW_RELIC_LICENSE_KEY=
-NRIA_LICENSE_KEY=
-
-LOOPS_API_KEY=
-SENDGRID_API_KEY=
-
-CRISP_WEBSITE_ID=
-
-STRIPE_API_KEY=
-STRIPE_WEBHOOK_SECRET=
-
-# Third-party - LLM (optional)
-ALEPHALPHA_API_KEY=
-ANTHROPIC_API_KEY=
-ANYSCALE_API_KEY=
-COHERE_API_KEY=
-DEEPINFRA_API_KEY=
-GEMINI_API_KEY=
-GROQ_API_KEY=
-MISTRAL_API_KEY=
-OPENAI_API_KEY=
-OPENROUTER_API_KEY=
-PERPLEXITYAI_API_KEY=
-TOGETHERAI_API_KEY=
+# ============================================================================ #
+# Secrets - REPLACE ME IN PRODUCTION!
+# ============================================================================ #
+AGENTA_AUTH_KEY=replace-me
+AGENTA_CRYPT_KEY=replace-me
+
+# ============================================================================ #
+# Endpoints
+# ============================================================================ #
+# AGENTA_WEB_URL=http://localhost
+# AGENTA_API_URL=http://localhost/api
+# AGENTA_SERVICES_URL=http://localhost/services
+# AGENTA_API_INTERNAL_URL=
+
+# ============================================================================ #
+# Images
+# ============================================================================ #
+# AGENTA_WEB_IMAGE_NAME=agenta-web
+# AGENTA_WEB_IMAGE_TAG=latest
+# AGENTA_API_IMAGE_NAME=agenta-api
+# AGENTA_API_IMAGE_TAG=latest
+# AGENTA_COMPLETION_IMAGE_NAME=agenta-completion
+# AGENTA_COMPLETION_IMAGE_TAG=latest
+# AGENTA_CHAT_IMAGE_NAME=agenta-chat
+# AGENTA_CHAT_IMAGE_TAG=latest
+
+# ============================================================================ #
+# OTLP
+# ============================================================================ #
+# AGENTA_OTLP_MAX_BATCH_BYTES=10485760
+
+# ============================================================================ #
+# Proxy - LLM Providers
+# ============================================================================ #
+# OPENAI_API_KEY=
+# ANTHROPIC_API_KEY=
+# COHERE_API_KEY=
+# GROQ_API_KEY=
+# GEMINI_API_KEY=
+# MISTRAL_API_KEY=
+# ALEPHALPHA_API_KEY=
+# ANYSCALE_API_KEY=
+# DEEPINFRA_API_KEY=
+# OPENROUTER_API_KEY=
+# PERPLEXITYAI_API_KEY=
+# TOGETHERAI_API_KEY=
+
+# ============================================================================ #
+# Docker - Compose
+# ============================================================================ #
+# COMPOSE_PROJECT_NAME=agenta-ee-dev
+
+# ============================================================================ #
+# Network - Traefik
+# ============================================================================ #
+# TRAEFIK_PROTOCOL=http
+# TRAEFIK_DOMAIN=localhost
+# TRAEFIK_PORT=80
+# TRAEFIK_SSL_DIR=
+
+# ============================================================================ #
+# Network - Nginx
+# ============================================================================ #
+# NGINX_PORT=80
+
+# ============================================================================ #
+# Databases - Postgres
+# ============================================================================ #
+# POSTGRES_USER=username
+# POSTGRES_PASSWORD=password
+
+# POSTGRES_PORT=5432
+# POSTGRES_URI_CORE=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_core
+# POSTGRES_URI_TRACING=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_tracing
+# POSTGRES_URI_SUPERTOKENS=postgresql://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_supertokens
+
+# ============================================================================ #
+# Databases - Alembic (migrations)
+# ============================================================================ #
+# ALEMBIC_AUTO_MIGRATIONS=true
+# ALEMBIC_CFG_PATH_CORE=/app/{LICENSE}/databases/postgres/migrations/core/alembic.ini
+# ALEMBIC_CFG_PATH_TRACING=/app/{LICENSE}/databases/postgres/migrations/tracing/alembic.ini
+
+# ============================================================================ #
+# Databases - Redis
+# ============================================================================ #
+# REDIS_URI_VOLATILE=redis://localhost:6379/0
+# REDIS_URI_DURABLE=redis://localhost:6381/0
+
+# ============================================================================ #
+# Authentication - SuperTokens
+# ============================================================================ #
+# SUPERTOKENS_EMAIL_DISABLED=false
+
+# ============================================================================ #
+# Authentication - Email providers
+# ============================================================================ #
+# SENDGRID_API_KEY=
+# SENDGRID_FROM_ADDRESS=
+
+# ============================================================================ #
+# Authentication - OIDC providers
+# ============================================================================ #
+# GOOGLE_OAUTH_CLIENT_ID=
+# GOOGLE_OAUTH_CLIENT_SECRET=
+
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_ID=
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET=
+# GOOGLE_WORKSPACES_HD=
+
+# APPLE_OAUTH_CLIENT_ID=
+# APPLE_OAUTH_CLIENT_SECRET=
+# APPLE_KEY_ID=
+# APPLE_TEAM_ID=
+# APPLE_PRIVATE_KEY=
+
+# DISCORD_OAUTH_CLIENT_ID=
+# DISCORD_OAUTH_CLIENT_SECRET=
+
+# FACEBOOK_OAUTH_CLIENT_ID=
+# FACEBOOK_OAUTH_CLIENT_SECRET=
+
+# GITHUB_OAUTH_CLIENT_ID=
+# GITHUB_OAUTH_CLIENT_SECRET=
+
+# GITLAB_OAUTH_CLIENT_ID=
+# GITLAB_OAUTH_CLIENT_SECRET=
+# GITLAB_BASE_URL=
+
+# BITBUCKET_OAUTH_CLIENT_ID=
+# BITBUCKET_OAUTH_CLIENT_SECRET=
+
+# LINKEDIN_OAUTH_CLIENT_ID=
+# LINKEDIN_OAUTH_CLIENT_SECRET=
+
+# OKTA_OAUTH_CLIENT_ID=
+# OKTA_OAUTH_CLIENT_SECRET=
+# OKTA_DOMAIN=
+
+# AZURE_AD_OAUTH_CLIENT_ID=
+# AZURE_AD_OAUTH_CLIENT_SECRET=
+# AZURE_AD_DIRECTORY_ID=
+
+# BOXY_SAML_OAUTH_CLIENT_ID=
+# BOXY_SAML_OAUTH_CLIENT_SECRET=
+# BOXY_SAML_URL=
+
+# TWITTER_OAUTH_CLIENT_ID=
+# TWITTER_OAUTH_CLIENT_SECRET=
+
+# ============================================================================ #
+# Billing - Stripe [ee-only]
+# ============================================================================ #
+# STRIPE_API_KEY=
+# STRIPE_WEBHOOK_SECRET=
+# STRIPE_WEBHOOK_TARGET=
+# STRIPE_PRICING=
+
+# ============================================================================ #
+# Analytics - PostHog
+# ============================================================================ #
+POSTHOG_API_KEY=phc_3urGRy5TL1HhaHnRYL0JSHxJxigRVackhphHtozUmdp
diff --git a/hosting/docker-compose/oss/docker-compose.dev.yml b/hosting/docker-compose/oss/docker-compose.dev.yml
index c880ff2cc2..97d45c3527 100644
--- a/hosting/docker-compose/oss/docker-compose.dev.yml
+++ b/hosting/docker-compose/oss/docker-compose.dev.yml
@@ -1,61 +1,58 @@
name: agenta-oss-dev
services:
- web:
- profiles:
- - with-web
+ .api:
+ # === IMAGE ================================================ #
+ image: agenta-oss-dev-api:latest
+ build:
+ context: ../../../api
+ dockerfile: oss/docker/Dockerfile.dev
+ # === EXECUTION ============================================ #
+ command: ["true"]
+ .web:
+ # === IMAGE ================================================ #
+ image: agenta-oss-dev-web:latest
build:
context: ../../../web
dockerfile: oss/docker/Dockerfile.dev
-
+ # === EXECUTION ============================================ #
+ command: ["true"]
+
+ web:
+ # === ACTIVATION =========================================== #
+ profiles:
+ - with-web
+ # === IMAGE ================================================ #
+ image: agenta-oss-dev-web:latest
+ # === EXECUTION ============================================ #
+ command: sh -c "pnpm dev-oss"
+ # === STORAGE ============================================== #
volumes:
+ #
+ #
- ../../../web/oss/src:/app/oss/src
- ../../../web/oss/public:/app/oss/public
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
-
- ports:
- - "3000:3000"
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
networks:
- agenta-network
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.agenta-web.rule=PathPrefix(`/`)"
- "traefik.http.routers.agenta-web.entrypoints=web"
- "traefik.http.services.agenta-web.loadbalancer.server.port=3000"
-
- command: sh -c "pnpm dev-oss"
-
+ # === LIFECYCLE ============================================ #
restart: always
api:
- build:
- context: ../../../api
- dockerfile: oss/docker/Dockerfile.dev
-
- volumes:
- - ../../../api/entrypoints:/app/entrypoints
- - ../../../api/oss:/app/oss
- - ../../../sdk:/sdk
-
- env_file:
- - ${ENV_FILE:-./.env.oss.dev}
-
- labels:
- - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
- - "traefik.http.routers.api.entrypoints=web"
- - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
- - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
- - "traefik.http.routers.api.middlewares=api-strip"
- - "traefik.http.services.api.loadbalancer.server.port=8000"
- - "traefik.http.routers.api.service=api"
-
- networks:
- - agenta-network
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
+ # === IMAGE ================================================ #
+ image: agenta-oss-dev-api:latest
+ # === EXECUTION ============================================ #
command:
[
"uvicorn",
@@ -68,143 +65,201 @@ services:
"--root-path",
"/api",
]
-
+ # === STORAGE ============================================== #
+ volumes:
+ #
+ - ../../../api/oss:/app/oss
+ - ../../../api/entrypoints:/app/entrypoints
+ - ../../../sdk:/sdk
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.dev}
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
alembic:
condition: service_completed_successfully
+ supertokens:
+ condition: service_healthy
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
+ # === LABELS =============================================== #
+ labels:
+ - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
+ - "traefik.http.routers.api.entrypoints=web"
+ - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
+ - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
+ - "traefik.http.routers.api.middlewares=api-strip"
+ - "traefik.http.services.api.loadbalancer.server.port=8000"
+ - "traefik.http.routers.api.service=api"
+ # === LIFECYCLE ============================================ #
restart: always
worker-evaluations:
- build:
- context: ../../../api
- dockerfile: oss/docker/Dockerfile.dev
-
+ # === IMAGE ================================================ #
+ image: agenta-oss-dev-api:latest
+ # === EXECUTION ============================================ #
+ command: >
+ watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
+ python -m entrypoints.worker_evaluations
+ # === STORAGE ============================================== #
volumes:
- - ../../../api/entrypoints:/app/entrypoints
+ #
- ../../../api/oss:/app/oss
+ - ../../../api/entrypoints:/app/entrypoints
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
- networks:
- - agenta-network
+ # === LIFECYCLE ============================================ #
restart: always
+ worker-tracing:
+ # === IMAGE ================================================ #
+ image: agenta-oss-dev-api:latest
+ # === EXECUTION ============================================ #
command: >
watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
- python -m entrypoints.worker_evaluations
-
- worker-tracing:
- build:
- context: ../../../api
- dockerfile: oss/docker/Dockerfile.dev
-
+ python -m entrypoints.worker_tracing
+ # === STORAGE ============================================== #
volumes:
- - ../../../api/entrypoints:/app/entrypoints
+ #
- ../../../api/oss:/app/oss
+ - ../../../api/entrypoints:/app/entrypoints
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
- networks:
- - agenta-network
+ # === LIFECYCLE ============================================ #
restart: always
- command: >
- watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive --
- python -m entrypoints.worker_tracing
-
cron:
- build:
- context: ../../../api
- dockerfile: oss/docker/Dockerfile.dev
-
+ # === IMAGE ================================================ #
+ image: agenta-oss-dev-api:latest
+ # === EXECUTION ============================================ #
+ command: cron -f
+ # === STORAGE ============================================== #
volumes:
#
- ../../../api/oss/src/crons/queries.sh:/queries.sh
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
- postgres
- api
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
+ # === LIFECYCLE ============================================ #
restart: always
-
- networks:
- - agenta-network
-
- command: cron -f
alembic:
- build:
- context: ../../../api
- dockerfile: oss/docker/Dockerfile.dev
-
+ # === IMAGE ================================================ #
+ image: agenta-oss-dev-api:latest
+ # === EXECUTION ============================================ #
+ command: sh -c "python -m oss.databases.postgres.migrations.runner"
+ # === STORAGE ============================================== #
volumes:
- - ../../../api/routes.py:/app/routes.py
+ #
- ../../../api/oss:/app/oss
+ - ../../../api/entrypoints:/app/entrypoints
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
- networks:
- - agenta-network
-
- command: sh -c "python -m oss.databases.postgres.migrations.runner"
completion:
+ # === IMAGE ================================================ #
build:
context: ../../../services/completion
dockerfile: oss/docker/Dockerfile.dev
-
+ # === EXECUTION ============================================ #
+ command:
+ [
+ "uvicorn",
+ "oss.src.main:app",
+ "--host",
+ "0.0.0.0",
+ "--port",
+ "80",
+ "--reload",
+ "--root-path",
+ "/services/completion",
+ ]
+ # === STORAGE ============================================== #
volumes:
- ../../../services/completion:/app
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
networks:
- agenta-network
extra_hosts:
- "host.docker.internal:host-gateway"
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.completion.rule=PathPrefix(`/services/completion/`)"
- "traefik.http.routers.completion.entrypoints=web"
@@ -213,7 +268,15 @@ services:
- "traefik.http.routers.completion.middlewares=completion-strip"
- "traefik.http.services.completion.loadbalancer.server.port=80"
- "traefik.http.routers.completion.service=completion"
+ # === LIFECYCLE ============================================ #
+ restart: always
+ chat:
+ # === IMAGE ================================================ #
+ build:
+ context: ../../../services/chat
+ dockerfile: oss/docker/Dockerfile.dev
+ # === EXECUTION ============================================ #
command:
[
"uvicorn",
@@ -224,27 +287,23 @@ services:
"80",
"--reload",
"--root-path",
- "/services/completion",
+ "/services/chat",
]
-
- restart: always
-
- chat:
- build:
- context: ../../../services/chat
- dockerfile: oss/docker/Dockerfile.dev
-
+ # === STORAGE ============================================== #
volumes:
- ../../../services/chat:/app
- ../../../sdk:/sdk
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
-
+ environment:
+ DOCKER_NETWORK_MODE: ${DOCKER_NETWORK_MODE:-bridge}
+ # === NETWORK ============================================== #
networks:
- agenta-network
extra_hosts:
- "host.docker.internal:host-gateway"
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.chat.rule=PathPrefix(`/services/chat/`)"
- "traefik.http.routers.chat.entrypoints=web"
@@ -253,38 +312,29 @@ services:
- "traefik.http.routers.chat.middlewares=chat-strip"
- "traefik.http.services.chat.loadbalancer.server.port=80"
- "traefik.http.routers.chat.service=chat"
-
- command:
- [
- "uvicorn",
- "oss.src.main:app",
- "--host",
- "0.0.0.0",
- "--port",
- "80",
- "--reload",
- "--root-path",
- "/services/chat",
- ]
-
+ # === LIFECYCLE ============================================ #
restart: always
postgres:
+ # === IMAGE ================================================ #
image: postgres:16
-
- restart: always
- ports:
- - "5432:5432"
- networks:
- - agenta-network
+ # === STORAGE ============================================== #
+ volumes:
+ - postgres-data:/var/lib/postgresql/data/
+ - ../../../api/oss/databases/postgres/init-db-oss.sql:/docker-entrypoint-initdb.d/init-db.sql
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
environment:
POSTGRES_USER: ${POSTGRES_USER:-username}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
- volumes:
- - postgres-data:/var/lib/postgresql/data/
- - ../../../api/oss/databases/postgres/init-db-oss.sql:/docker-entrypoint-initdb.d/init-db.sql
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ ports:
+ - "${POSTGRES_PORT:-5432}:5432"
+ # === LIFECYCLE ============================================ #
+ restart: always
healthcheck:
test: ["CMD-SHELL", "pg_isready -U username -d agenta_oss_core"]
interval: 10s
@@ -292,8 +342,9 @@ services:
retries: 5
redis-volatile:
+ # === IMAGE ================================================ #
image: redis:8
-
+ # === EXECUTION ============================================ #
command: >
redis-server
--appendonly no
@@ -301,18 +352,14 @@ services:
--maxmemory 512mb
--maxmemory-policy volatile-lru
--port 6379
-
- ports:
- - "6379:6379"
-
- networks:
- - agenta-network
-
+ # === STORAGE ============================================== #
volumes:
- redis-volatile-data:/data
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "redis-cli", "-p", "6379", "ping"]
interval: 10s
@@ -321,8 +368,9 @@ services:
start_period: 5s
redis-durable:
+ # === IMAGE ================================================ #
image: redis:8
-
+ # === EXECUTION ============================================ #
command: >
redis-server
--appendonly yes
@@ -331,18 +379,14 @@ services:
--maxmemory 512mb
--maxmemory-policy noeviction
--port 6381
-
- ports:
- - "6381:6381"
-
- networks:
- - agenta-network
-
+ # === STORAGE ============================================== #
volumes:
- redis-durable-data:/data
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-network
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "redis-cli", "-p", "6381", "ping"]
interval: 10s
@@ -351,42 +395,78 @@ services:
start_period: 5s
traefik:
+ # === IMAGE ================================================ #
image: traefik:2
-
- command: --api.dashboard=true --api.insecure=true --providers.docker --entrypoints.web.address=:${TRAEFIK_PORT:-80}
- ports:
- - "${TRAEFIK_PORT:-80}:${TRAEFIK_PORT:-80}"
- - "${TRAEFIK_UI_PORT:-8080}:8080"
+ # === EXECUTION ============================================ #
+ command:
+ - --api.dashboard=true
+ - --api.insecure=true
+ - --providers.docker
+ - --providers.docker.constraints=Label(`com.docker.compose.project`,`${COMPOSE_PROJECT_NAME:-agenta-oss-dev}`)
+ - --entrypoints.web.address=:80
+ - --ping=true
+ - --accesslog=true
+ # === STORAGE ============================================== #
volumes:
- /var/run/docker.sock:/var/run/docker.sock
+ # === NETWORK ============================================== #
networks:
- agenta-network
+ ports:
+ - "${TRAEFIK_PORT:-80}:80"
+ - "${TRAEFIK_UI_PORT:-8080}:8080"
+ # === LIFECYCLE ============================================ #
restart: always
+ healthcheck:
+ test: ["CMD", "traefik", "healthcheck", "--ping"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
+ start_period: 10s
supertokens:
+ # === IMAGE ================================================ #
image: registry.supertokens.io/supertokens/supertokens-postgresql
-
- depends_on:
- postgres:
- condition: service_healthy
- ports:
- - 3567:3567
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.dev}
-
environment:
- POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS}
-
+ POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS:-postgresql://username:password@postgres:5432/agenta_oss_supertokens}
+ # === NETWORK ============================================== #
networks:
- agenta-network
+ # === ORCHESTRATION ======================================== #
+ depends_on:
+ postgres:
+ condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
+ # === LIFECYCLE ============================================ #
+ restart: always
healthcheck:
test: >
bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"'
interval: 10s
timeout: 5s
retries: 5
- restart: always
+
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #
networks:
agenta-network:
diff --git a/hosting/docker-compose/oss/docker-compose.gh.ssl.yml b/hosting/docker-compose/oss/docker-compose.gh.ssl.yml
index 5d5aa4ce61..c5618234be 100644
--- a/hosting/docker-compose/oss/docker-compose.gh.ssl.yml
+++ b/hosting/docker-compose/oss/docker-compose.gh.ssl.yml
@@ -2,52 +2,37 @@ name: agenta-gh-ssl
services:
web:
+ # === ACTIVATION =========================================== #
profiles:
- with-web
-
+ # === IMAGE ================================================ #
build:
context: ../../../web
dockerfile: oss/docker/Dockerfile.gh
-
+ # === EXECUTION ============================================ #
+ command: sh -c "node ./oss/server.js"
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ # === NETWORK ============================================== #
networks:
- agenta-gh-ssl-network
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.web.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/`)"
- "traefik.http.routers.web.entrypoints=web,web-secure"
- "traefik.http.services.web.loadbalancer.server.port=3000"
- "traefik.http.routers.web.tls=true"
- "traefik.http.routers.web.tls.certresolver=myResolver"
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
- command: sh -c "node ./oss/server.js"
+ # === LIFECYCLE ============================================ #
restart: always
api:
- build:
+ # === IMAGE ================================================ #
+ build:
context: ../../../api
dockerfile: oss/docker/Dockerfile.gh
-
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
-
- networks:
- - agenta-gh-ssl-network
- extra_hosts:
- - "host.docker.internal:host-gateway"
- labels:
- - "traefik.http.routers.api.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/api/`)"
- - "traefik.http.routers.api.entrypoints=web,web-secure"
- - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
- - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
- - "traefik.http.routers.api.middlewares=api-strip"
- - "traefik.http.services.api.loadbalancer.server.port=8000"
- - "traefik.http.routers.api.service=api"
- - "traefik.http.routers.api.tls=true"
- - "traefik.http.routers.api.tls.certresolver=myResolver"
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
- environment:
- - SCRIPT_NAME=/api
+ # === EXECUTION ============================================ #
command: >
newrelic-admin run-program gunicorn entrypoints.routers:app
--bind 0.0.0.0:8000
@@ -60,7 +45,20 @@ services:
--log-level info
--access-logfile -
--error-logfile -
-
+ # === STORAGE ============================================== #
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ environment:
+ - SCRIPT_NAME=/api
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-gh-ssl-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
@@ -70,128 +68,175 @@ services:
condition: service_healthy
redis-durable:
condition: service_healthy
+ # === LABELS =============================================== #
+ labels:
+ - "traefik.http.routers.api.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/api/`)"
+ - "traefik.http.routers.api.entrypoints=web,web-secure"
+ - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
+ - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
+ - "traefik.http.routers.api.middlewares=api-strip"
+ - "traefik.http.services.api.loadbalancer.server.port=8000"
+ - "traefik.http.routers.api.service=api"
+ - "traefik.http.routers.api.tls=true"
+ - "traefik.http.routers.api.tls.certresolver=myResolver"
+ # === LIFECYCLE ============================================ #
restart: always
worker-evaluations:
+ # === IMAGE ================================================ #
build:
context: ../../../api
dockerfile: oss/docker/Dockerfile.gh
-
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
-
- networks:
- - agenta-gh-ssl-network
- extra_hosts:
- - "host.docker.internal:host-gateway"
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
-
+ # === EXECUTION ============================================ #
command:
[
"newrelic-admin",
"run-program",
"python",
"-m",
- "entrypoints.worker_evaluations"
+ "entrypoints.worker_evaluations",
]
+ # === STORAGE ============================================== #
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-gh-ssl-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
+ # === LIFECYCLE ============================================ #
restart: always
worker-tracing:
+ # === IMAGE ================================================ #
build:
context: ../../../api
dockerfile: oss/docker/Dockerfile.gh
-
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
-
- networks:
- - agenta-gh-ssl-network
- extra_hosts:
- - "host.docker.internal:host-gateway"
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
-
+ # === EXECUTION ============================================ #
command:
[
"newrelic-admin",
"run-program",
"python",
"-m",
- "entrypoints.worker_tracing"
+ "entrypoints.worker_tracing",
]
+ # === STORAGE ============================================== #
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-gh-ssl-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
+ # === LIFECYCLE ============================================ #
restart: always
cron:
+ # === IMAGE ================================================ #
build:
context: ../../../api
dockerfile: oss/docker/Dockerfile.gh
-
+ # === EXECUTION ============================================ #
+ command: cron -f
+ # === STORAGE ============================================== #
volumes:
- /var/run/docker.sock:/var/run/docker.sock
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-gh-ssl-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
- postgres
- api
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
+ # === LIFECYCLE ============================================ #
restart: always
-
- networks:
- - agenta-gh-ssl-network
-
- command: cron -f
alembic:
- build:
+ # === IMAGE ================================================ #
+ build:
context: ../../../api
dockerfile: oss/docker/Dockerfile.gh
-
+ # === EXECUTION ============================================ #
+ command: sh -c "python -m oss.databases.postgres.migrations.runner"
+ # === STORAGE ============================================== #
volumes:
- /var/run/docker.sock:/var/run/docker.sock
-
- networks:
- - agenta-gh-ssl-network
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
-
- command: sh -c "python -m oss.databases.postgres.migrations.runner"
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-gh-ssl-network
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
completion:
+ # === IMAGE ================================================ #
build:
context: ../../../services/completion
dockerfile: oss/docker/Dockerfile.gh
-
+ # === EXECUTION ============================================ #
+ command: >
+ newrelic-admin run-program gunicorn oss.src.main:app
+ --bind 0.0.0.0:80
+ --worker-class uvicorn.workers.UvicornWorker
+ --workers 2
+ --max-requests 10000
+ --max-requests-jitter 1000
+ --timeout 60
+ --graceful-timeout 60
+ --log-level info
+ --access-logfile -
+ --error-logfile -
+ # === STORAGE ============================================== #
volumes:
- ../../../services/completion:/app
- ../../../sdk:/sdk
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ environment:
+ - SCRIPT_NAME=/services/completion
+ # === NETWORK ============================================== #
networks:
- agenta-gh-ssl-network
extra_hosts:
- "host.docker.internal:host-gateway"
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.completion.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/services/completion/`)"
- "traefik.http.routers.completion.entrypoints=web,web-secure"
@@ -202,11 +247,15 @@ services:
- "traefik.http.routers.completion.service=completion"
- "traefik.http.routers.completion.tls=true"
- "traefik.http.routers.completion.tls.certresolver=myResolver"
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
- environment:
- - SCRIPT_NAME=/services/completion
+ # === LIFECYCLE ============================================ #
+ restart: always
+ chat:
+ # === IMAGE ================================================ #
+ build:
+ context: ../../../services/chat
+ dockerfile: oss/docker/Dockerfile.gh
+ # === EXECUTION ============================================ #
command: >
newrelic-admin run-program gunicorn oss.src.main:app
--bind 0.0.0.0:80
@@ -219,22 +268,21 @@ services:
--log-level info
--access-logfile -
--error-logfile -
-
- restart: always
-
- chat:
- build:
- context: ../../../services/chat
- dockerfile: oss/docker/Dockerfile.gh
+ # === STORAGE ============================================== #
volumes:
- ../../../services/chat:/app
- ../../../sdk:/sdk
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
environment:
- SCRIPT_NAME=/services/chat
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-gh-ssl-network
extra_hosts:
- "host.docker.internal:host-gateway"
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.chat.rule=Host(`${TRAEFIK_DOMAIN}`) && PathPrefix(`/services/chat/`)"
- "traefik.http.routers.chat.entrypoints=web,web-secure"
@@ -245,51 +293,39 @@ services:
- "traefik.http.routers.chat.service=chat"
- "traefik.http.routers.chat.tls=true"
- "traefik.http.routers.chat.tls.certresolver=myResolver"
- networks:
- - agenta-gh-ssl-network
-
- command: >
- newrelic-admin run-program gunicorn oss.src.main:app
- --bind 0.0.0.0:80
- --worker-class uvicorn.workers.UvicornWorker
- --workers 2
- --max-requests 10000
- --max-requests-jitter 1000
- --timeout 60
- --graceful-timeout 60
- --log-level info
- --access-logfile -
- --error-logfile -
-
+ # === LIFECYCLE ============================================ #
restart: always
postgres:
+ # === IMAGE ================================================ #
image: postgres:16
-
+ # === STORAGE ============================================== #
volumes:
- postgres-data:/var/lib/postgresql/data/
- ../../../api/oss/databases/postgres/init-db-oss.sql:/docker-entrypoint-initdb.d/init-db.sql
-
- restart: always
- networks:
- - agenta-gh-ssl-network
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
environment:
POSTGRES_USER: ${POSTGRES_USER:-username}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-gh-ssl-network
ports:
- "${POSTGRES_PORT:-5432}:5432"
-
+ # === LIFECYCLE ============================================ #
+ restart: always
healthcheck:
- test: ["CMD-SHELL", "pg_isready -U postgres"]
+ test: ["CMD-SHELL", "pg_isready -U username -d agenta_oss_core"]
interval: 10s
timeout: 5s
retries: 5
redis-volatile:
+ # === IMAGE ================================================ #
image: redis:8
-
+ # === EXECUTION ============================================ #
command: >
redis-server
--appendonly no
@@ -297,21 +333,17 @@ services:
--maxmemory 512mb
--maxmemory-policy volatile-lru
--port 6379
-
- ports:
- - "6379:6379"
-
+ # === STORAGE ============================================== #
volumes:
- redis-volatile-data:/data
-
+ # === NETWORK ============================================== #
networks:
- agenta-gh-ssl-network
-
+ # === LABELS =============================================== #
labels:
- "traefik.enable=false"
-
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "redis-cli", "-p", "6379", "ping"]
interval: 10s
@@ -320,8 +352,9 @@ services:
start_period: 5s
redis-durable:
+ # === IMAGE ================================================ #
image: redis:8
-
+ # === EXECUTION ============================================ #
command: >
redis-server
--appendonly yes
@@ -330,21 +363,17 @@ services:
--maxmemory 512mb
--maxmemory-policy noeviction
--port 6381
-
- ports:
- - "6381:6381"
-
+ # === STORAGE ============================================== #
volumes:
- redis-durable-data:/data
-
+ # === NETWORK ============================================== #
networks:
- agenta-gh-ssl-network
-
+ # === LABELS =============================================== #
labels:
- "traefik.enable=false"
-
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "redis-cli", "-p", "6381", "ping"]
interval: 10s
@@ -353,38 +382,46 @@ services:
start_period: 5s
traefik:
+ # === IMAGE ================================================ #
image: traefik:2
+ # === STORAGE ============================================== #
volumes:
- ./ssl/traefik.yml:/traefik.yml
- /var/run/docker.sock:/var/run/docker.sock
- ${AGENTA_SSL_DIR:-/home/ubuntu/ssl_certificates}/acme.json:/acme.json
+ # === NETWORK ============================================== #
networks:
- agenta-gh-ssl-network
ports:
- - "${TRAEFIK_PORT:-80}:${TRAEFIK_PORT:-80}"
+ - "${TRAEFIK_PORT:-80}:80"
- "${TRAEFIK_UI_PORT:-8080}:8080"
- - "${TRAEFIK_HTTPS_PORT:-443}:${TRAEFIK_HTTPS_PORT:-443}"
+ - "${TRAEFIK_HTTPS_PORT:-443}:443"
+ # === LIFECYCLE ============================================ #
restart: always
supertokens:
+ # === IMAGE ================================================ #
image: registry.supertokens.io/supertokens/supertokens-postgresql
-
- depends_on:
- postgres:
- condition: service_healthy
- ports:
- - "${SUPERTOKENS_PORT:-3567}:3567"
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
+ # === NETWORK ============================================== #
networks:
- agenta-gh-ssl-network
+ # === ORCHESTRATION ======================================== #
+ depends_on:
+ postgres:
+ condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
+ # === LIFECYCLE ============================================ #
+ restart: always
healthcheck:
test: >
bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"'
interval: 10s
timeout: 5s
retries: 5
- restart: always
networks:
agenta-gh-ssl-network:
diff --git a/hosting/docker-compose/oss/docker-compose.gh.yml b/hosting/docker-compose/oss/docker-compose.gh.yml
index 14bc0f8db2..c75963160a 100644
--- a/hosting/docker-compose/oss/docker-compose.gh.yml
+++ b/hosting/docker-compose/oss/docker-compose.gh.yml
@@ -2,53 +2,37 @@ name: agenta-oss-gh
services:
web:
+ # === ACTIVATION =========================================== #
profiles:
- with-web
-
+ # === IMAGE ================================================ #
# build:
# context: ../../../web
# dockerfile: oss/docker/Dockerfile.gh
-
image: ghcr.io/agenta-ai/${AGENTA_WEB_IMAGE_NAME:-agenta-web}:${AGENTA_WEB_IMAGE_TAG:-latest}
-
+ # === EXECUTION ============================================ #
+ command: sh -c "node ./oss/server.js"
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
-
+ # === NETWORK ============================================== #
networks:
- agenta-oss-gh-network
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.web.rule=PathPrefix(`/`)"
- "traefik.http.routers.web.entrypoints=web"
- "traefik.http.services.web.loadbalancer.server.port=3000"
-
- command: sh -c "node ./oss/server.js"
+ # === LIFECYCLE ============================================ #
restart: always
api:
+ # === IMAGE ================================================ #
# build:
# context: ../../../api
# dockerfile: oss/docker/Dockerfile.gh
-
image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
- networks:
- - agenta-oss-gh-network
- extra_hosts:
- - "host.docker.internal:host-gateway"
- labels:
- - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
- - "traefik.http.routers.api.entrypoints=web"
- - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
- - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
- - "traefik.http.routers.api.middlewares=api-strip"
- - "traefik.http.services.api.loadbalancer.server.port=8000"
- - "traefik.http.routers.api.service=api"
-
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
- environment:
- - SCRIPT_NAME=/api
-
+ # === EXECUTION ============================================ #
command: >
newrelic-admin run-program gunicorn entrypoints.routers:app
--bind 0.0.0.0:8000
@@ -61,7 +45,17 @@ services:
--log-level info
--access-logfile -
--error-logfile -
-
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ environment:
+ - SCRIPT_NAME=/api
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
@@ -71,128 +65,162 @@ services:
condition: service_healthy
redis-durable:
condition: service_healthy
+ # === LABELS =============================================== #
+ labels:
+ - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
+ - "traefik.http.routers.api.entrypoints=web"
+ - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
+ - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
+ - "traefik.http.routers.api.middlewares=api-strip"
+ - "traefik.http.services.api.loadbalancer.server.port=8000"
+ - "traefik.http.routers.api.service=api"
+ # === LIFECYCLE ============================================ #
restart: always
worker-evaluations:
+ # === IMAGE ================================================ #
# build:
# context: ../../../api
# dockerfile: oss/docker/Dockerfile.gh
-
image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
- networks:
- - agenta-oss-gh-network
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
-
+ # === EXECUTION ============================================ #
command:
[
"newrelic-admin",
"run-program",
"python",
"-m",
- "entrypoints.worker_evaluations"
+ "entrypoints.worker_evaluations",
]
-
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
+ # === LIFECYCLE ============================================ #
restart: always
worker-tracing:
+ # === IMAGE ================================================ #
# build:
# context: ../../../api
# dockerfile: oss/docker/Dockerfile.gh
-
image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
- networks:
- - agenta-oss-gh-network
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
-
+ # === EXECUTION ============================================ #
command:
[
"newrelic-admin",
"run-program",
"python",
"-m",
- "entrypoints.worker_tracing"
+ "entrypoints.worker_tracing",
]
-
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
redis-volatile:
condition: service_healthy
redis-durable:
condition: service_healthy
+ # === LIFECYCLE ============================================ #
restart: always
cron:
+ # === IMAGE ================================================ #
# build:
# context: ../../../api
# dockerfile: oss/docker/Dockerfile.gh
-
image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
+ # === EXECUTION ============================================ #
+ command: cron -f
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ # === ORCHESTRATION ======================================== #
depends_on:
- postgres
- api
-
- extra_hosts:
- - "host.docker.internal:host-gateway"
-
+ # === LIFECYCLE ============================================ #
restart: always
-
- networks:
- - agenta-oss-gh-network
-
- command: cron -f
alembic:
+ # === IMAGE ================================================ #
# build:
# context: ../../../api
# dockerfile: oss/docker/Dockerfile.gh
-
image: ghcr.io/agenta-ai/${AGENTA_API_IMAGE_NAME:-agenta-api}:${AGENTA_API_IMAGE_TAG:-latest}
-
- networks:
- - agenta-oss-gh-network
-
+ # === EXECUTION ============================================ #
+ command: sh -c "python -m oss.databases.postgres.migrations.runner"
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
-
- command: sh -c "python -m oss.databases.postgres.migrations.runner"
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
+ # === ORCHESTRATION ======================================== #
depends_on:
postgres:
condition: service_healthy
completion:
+ # === IMAGE ================================================ #
# build:
# context: ../../../services/completion
# dockerfile: oss/docker/Dockerfile.gh
-
image: ghcr.io/agenta-ai/${AGENTA_COMPLETION_IMAGE_NAME:-agenta-completion}:${AGENTA_COMPLETION_IMAGE_TAG:-latest}
-
+ # === EXECUTION ============================================ #
+ command: >
+ newrelic-admin run-program gunicorn oss.src.main:app
+ --bind 0.0.0.0:80
+ --worker-class uvicorn.workers.UvicornWorker
+ --workers 2
+ --max-requests 10000
+ --max-requests-jitter 1000
+ --timeout 60
+ --graceful-timeout 60
+ --log-level info
+ --access-logfile -
+ --error-logfile -
+ # === CONFIGURATION ======================================== #
+ env_file:
+ - ${ENV_FILE:-./.env.oss.gh}
+ environment:
+ - SCRIPT_NAME=/services/completion
+ # === NETWORK ============================================== #
networks:
- agenta-oss-gh-network
extra_hosts:
- "host.docker.internal:host-gateway"
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.completion.rule=PathPrefix(`/services/completion/`)"
- "traefik.http.routers.completion.entrypoints=web"
@@ -201,12 +229,16 @@ services:
- "traefik.http.routers.completion.middlewares=completion-strip"
- "traefik.http.services.completion.loadbalancer.server.port=80"
- "traefik.http.routers.completion.service=completion"
- env_file:
- - ${ENV_FILE:-./.env.oss.gh}
- environment:
- - SCRIPT_NAME=/services/completion
-
+ # === LIFECYCLE ============================================ #
+ restart: always
+ chat:
+ # === IMAGE ================================================ #
+ # build:
+ # context: ../../../services/chat
+ # dockerfile: oss/docker/Dockerfile.gh
+ image: ghcr.io/agenta-ai/${AGENTA_CHAT_IMAGE_NAME:-agenta-chat}:${AGENTA_CHAT_IMAGE_TAG:-latest}
+ # === EXECUTION ============================================ #
command: >
newrelic-admin run-program gunicorn oss.src.main:app
--bind 0.0.0.0:80
@@ -219,22 +251,17 @@ services:
--log-level info
--access-logfile -
--error-logfile -
-
- restart: always
-
- chat:
- # build:
- # context: ../../../services/chat
- # dockerfile: oss/docker/Dockerfile.gh
-
- image: ghcr.io/agenta-ai/${AGENTA_CHAT_IMAGE_NAME:-agenta-chat}:${AGENTA_CHAT_IMAGE_TAG:-latest}
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
environment:
- SCRIPT_NAME=/services/chat
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
extra_hosts:
- "host.docker.internal:host-gateway"
+ # === LABELS =============================================== #
labels:
- "traefik.http.routers.chat.rule=PathPrefix(`/services/chat/`)"
- "traefik.http.routers.chat.entrypoints=web"
@@ -243,53 +270,39 @@ services:
- "traefik.http.routers.chat.middlewares=chat-strip"
- "traefik.http.services.chat.loadbalancer.server.port=80"
- "traefik.http.routers.chat.service=chat"
- networks:
- - agenta-oss-gh-network
-
- command: >
- newrelic-admin run-program gunicorn oss.src.main:app
- --bind 0.0.0.0:80
- --worker-class uvicorn.workers.UvicornWorker
- --workers 2
- --max-requests 10000
- --max-requests-jitter 1000
- --timeout 60
- --graceful-timeout 60
- --log-level info
- --access-logfile -
- --error-logfile -
-
+ # === LIFECYCLE ============================================ #
restart: always
postgres:
+ # === IMAGE ================================================ #
image: postgres:16
-
+ # === STORAGE ============================================== #
volumes:
- postgres-data:/var/lib/postgresql/data/
- ../../../api/oss/databases/postgres/init-db-oss.sql:/docker-entrypoint-initdb.d/init-db.sql
-
- restart: always
- networks:
- - agenta-oss-gh-network
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
environment:
POSTGRES_USER: ${POSTGRES_USER:-username}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
ports:
- "${POSTGRES_PORT:-5432}:5432"
-
+ # === LIFECYCLE ============================================ #
+ restart: always
healthcheck:
- test: [ "CMD-SHELL", "pg_isready -U username -d agenta_oss_core" ]
+ test: ["CMD-SHELL", "pg_isready -U username -d agenta_oss_core"]
interval: 10s
timeout: 5s
retries: 5
redis-volatile:
+ # === IMAGE ================================================ #
image: redis:8
-
+ # === EXECUTION ============================================ #
command: >
redis-server
--appendonly no
@@ -297,21 +310,17 @@ services:
--maxmemory 512mb
--maxmemory-policy volatile-lru
--port 6379
-
- ports:
- - "6379:6379"
-
+ # === STORAGE ============================================== #
volumes:
- redis-volatile-data:/data
-
+ # === NETWORK ============================================== #
networks:
- agenta-oss-gh-network
-
+ # === LABELS =============================================== #
labels:
- "traefik.enable=false"
-
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "redis-cli", "-p", "6379", "ping"]
interval: 10s
@@ -320,8 +329,9 @@ services:
start_period: 5s
redis-durable:
+ # === IMAGE ================================================ #
image: redis:8
-
+ # === EXECUTION ============================================ #
command: >
redis-server
--appendonly yes
@@ -330,21 +340,17 @@ services:
--maxmemory 512mb
--maxmemory-policy noeviction
--port 6381
-
- ports:
- - "6381:6381"
-
+ # === STORAGE ============================================== #
volumes:
- redis-durable-data:/data
-
+ # === NETWORK ============================================== #
networks:
- agenta-oss-gh-network
-
+ # === LABELS =============================================== #
labels:
- "traefik.enable=false"
-
+ # === LIFECYCLE ============================================ #
restart: always
-
healthcheck:
test: ["CMD", "redis-cli", "-p", "6381", "ping"]
interval: 10s
@@ -353,72 +359,80 @@ services:
start_period: 5s
traefik:
- image: traefik:2
+ # === ACTIVATION =========================================== #
profiles:
- with-traefik
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
- networks:
- - agenta-oss-gh-network
-
+ # === IMAGE ================================================ #
+ image: traefik:2
+ # === EXECUTION ============================================ #
command:
- --api.dashboard=true
- --providers.docker
- - --entrypoints.web.address=:${TRAEFIK_PORT:-80}
+ - --entrypoints.web.address=:80
+ # === STORAGE ============================================== #
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
ports:
- - "${TRAEFIK_PORT:-80}:${TRAEFIK_PORT:-80}"
+ - "${TRAEFIK_PORT:-80}:80"
- "${TRAEFIK_UI_PORT:-8080}:8080"
-
+ # === LIFECYCLE ============================================ #
restart: always
nginx:
- image: nginx:latest
+ # === ACTIVATION =========================================== #
profiles:
- with-nginx
+ # === IMAGE ================================================ #
+ image: nginx:latest
+ # === STORAGE ============================================== #
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
-
- networks:
- - agenta-oss-gh-network
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
-
+ # === NETWORK ============================================== #
+ networks:
+ - agenta-oss-gh-network
ports:
- "${NGINX_PORT:-80}:80"
- restart: always
-
+ # === ORCHESTRATION ======================================== #
depends_on:
- api
- web
+ # === LIFECYCLE ============================================ #
+ restart: always
supertokens:
+ # === IMAGE ================================================ #
image: registry.supertokens.io/supertokens/supertokens-postgresql
-
- depends_on:
- postgres:
- condition: service_healthy
- ports:
- - "${SUPERTOKENS_PORT:-3567}:3567"
-
+ # === CONFIGURATION ======================================== #
env_file:
- ${ENV_FILE:-./.env.oss.gh}
-
environment:
POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS}
-
+ # === NETWORK ============================================== #
networks:
- agenta-oss-gh-network
+ # === ORCHESTRATION ======================================== #
+ depends_on:
+ postgres:
+ condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
+ # === LIFECYCLE ============================================ #
+ restart: always
healthcheck:
test: >
bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"'
interval: 10s
timeout: 5s
retries: 5
- restart: always
networks:
agenta-oss-gh-network:
diff --git a/hosting/docker-compose/oss/docker-compose.otel.yml b/hosting/docker-compose/oss/docker-compose.otel.yml
index 19570f9353..28a820576e 100644
--- a/hosting/docker-compose/oss/docker-compose.otel.yml
+++ b/hosting/docker-compose/oss/docker-compose.otel.yml
@@ -1,11 +1,15 @@
services:
- otel-collector:
- image: otel/opentelemetry-collector-contrib
- volumes:
- - ./otel-collector-config.yml:/etc/otelcol-contrib/config.yaml
- environment:
- - AGENTA_OTLP_ENDPOINT=${AGENTA_OTLP_ENDPOINT}
- - AGENTA_API_KEY=${AGENTA_API_KEY}
- ports:
- - "4317:4317" # OTLP gRPC receiver
- - "4318:4318" # OTLP HTTP receiver
+ otel-collector:
+ # === IMAGE ================================================ #
+ image: otel/opentelemetry-collector-contrib
+ # === STORAGE ============================================== #
+ volumes:
+ - ./otel-collector-config.yml:/etc/otelcol-contrib/config.yaml
+ # === CONFIGURATION ======================================== #
+ environment:
+ - AGENTA_OTLP_ENDPOINT=${AGENTA_OTLP_ENDPOINT}
+ - AGENTA_API_KEY=${AGENTA_API_KEY}
+ # === NETWORK ============================================== #
+ ports:
+ - "4317:4317" # OTLP gRPC receiver
+ - "4318:4318" # OTLP HTTP receiver
diff --git a/hosting/docker-compose/oss/env.oss.dev.example b/hosting/docker-compose/oss/env.oss.dev.example
index 9840592f72..5d01703cfb 100644
--- a/hosting/docker-compose/oss/env.oss.dev.example
+++ b/hosting/docker-compose/oss/env.oss.dev.example
@@ -1,95 +1,159 @@
-# First-party (required)
+# ============================================================================ #
+# License - https://agenta.ai/pricing
+# ============================================================================ #
AGENTA_LICENSE=oss
-AGENTA_API_URL=http://localhost/api
-AGENTA_WEB_URL=http://localhost
-AGENTA_SERVICES_URL=http://localhost/services
+
+# ============================================================================ #
+# Secrets - REPLACE ME IN PRODUCTION!
+# ============================================================================ #
AGENTA_AUTH_KEY=replace-me
AGENTA_CRYPT_KEY=replace-me
-# First-party (registry & service)
-POSTGRES_PASSWORD=password
-POSTGRES_USERNAME=username
-
-# First-party (optional)
-AGENTA_AUTO_MIGRATIONS=true
-AGENTA_PRICING=
-AGENTA_DEMOS=
-DOCKER_NETWORK_MODE=bridge
-AGENTA_RUNTIME_PREFIX=
-AGENTA_SEND_EMAIL_FROM_ADDRESS=mail@example.com
-AGENTA_API_INTERNAL_URL=
-AGENTA_TELEMETRY_ENABLED=true
-AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true
-AGENTA_OTLP_MAX_BATCH_BYTES=10485760
-
-# Third-party (required)
-TRAEFIK_DOMAIN=localhost
-TRAEFIK_PORT=80
-TRAEFIK_PROTOCOL=http
-TRAEFIK_UI_PORT=8080
-TRAEFIK_HTTPS_PORT=443
-
-POSTGRES_URI_SUPERTOKENS=postgresql://username:password@postgres:5432/agenta_oss_supertokens
-POSTGRES_URI_CORE=postgresql+asyncpg://username:password@postgres:5432/agenta_oss_core
-POSTGRES_URI_TRACING=postgresql+asyncpg://username:password@postgres:5432/agenta_oss_tracing
-
-ALEMBIC_CFG_PATH_CORE=/app/oss/databases/postgres/migrations/core/alembic.ini
-ALEMBIC_CFG_PATH_TRACING=/app/oss/databases/postgres/migrations/tracing/alembic.ini
-
-SUPERTOKENS_CONNECTION_URI=http://supertokens:3567
-
+# ============================================================================ #
+# Endpoints
+# ============================================================================ #
+# AGENTA_WEB_URL=http://localhost
+# AGENTA_API_URL=http://localhost/api
+# AGENTA_SERVICES_URL=http://localhost/services
+# AGENTA_API_INTERNAL_URL=
+
+# ============================================================================ #
+# Images
+# ============================================================================ #
+# AGENTA_WEB_IMAGE_NAME=agenta-web
+# AGENTA_WEB_IMAGE_TAG=latest
+# AGENTA_API_IMAGE_NAME=agenta-api
+# AGENTA_API_IMAGE_TAG=latest
+# AGENTA_COMPLETION_IMAGE_NAME=agenta-completion
+# AGENTA_COMPLETION_IMAGE_TAG=latest
+# AGENTA_CHAT_IMAGE_NAME=agenta-chat
+# AGENTA_CHAT_IMAGE_TAG=latest
+
+# ============================================================================ #
+# OTLP
+# ============================================================================ #
+# AGENTA_OTLP_MAX_BATCH_BYTES=10485760
+
+# ============================================================================ #
+# Proxy - LLM Providers
+# ============================================================================ #
+# OPENAI_API_KEY=
+# ANTHROPIC_API_KEY=
+# COHERE_API_KEY=
+# GROQ_API_KEY=
+# GEMINI_API_KEY=
+# MISTRAL_API_KEY=
+# ALEPHALPHA_API_KEY=
+# ANYSCALE_API_KEY=
+# DEEPINFRA_API_KEY=
+# OPENROUTER_API_KEY=
+# PERPLEXITYAI_API_KEY=
+# TOGETHERAI_API_KEY=
+
+# ============================================================================ #
+# Docker - Compose
+# ============================================================================ #
+# COMPOSE_PROJECT_NAME=agenta-oss-dev
+
+# ============================================================================ #
+# Network - Traefik
+# ============================================================================ #
+# TRAEFIK_PROTOCOL=http
+# TRAEFIK_DOMAIN=localhost
+# TRAEFIK_PORT=80
+# TRAEFIK_SSL_DIR=
+
+# ============================================================================ #
+# Network - Nginx
+# ============================================================================ #
+# NGINX_PORT=80
+
+# ============================================================================ #
+# Databases - Postgres
+# ============================================================================ #
+# POSTGRES_USER=username
+# POSTGRES_PASSWORD=password
+
+# POSTGRES_PORT=5432
+# POSTGRES_URI_CORE=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_core
+# POSTGRES_URI_TRACING=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_tracing
+# POSTGRES_URI_SUPERTOKENS=postgresql://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_supertokens
+
+# ============================================================================ #
+# Databases - Alembic (migrations)
+# ============================================================================ #
+# ALEMBIC_AUTO_MIGRATIONS=true
+# ALEMBIC_CFG_PATH_CORE=/app/{LICENSE}/databases/postgres/migrations/core/alembic.ini
+# ALEMBIC_CFG_PATH_TRACING=/app/{LICENSE}/databases/postgres/migrations/tracing/alembic.ini
+
+# ============================================================================ #
+# Databases - Redis
+# ============================================================================ #
+# REDIS_URI_VOLATILE=redis://localhost:6379/0
+# REDIS_URI_DURABLE=redis://localhost:6381/0
+
+# ============================================================================ #
+# Authentication - SuperTokens
+# ============================================================================ #
+# SUPERTOKENS_EMAIL_DISABLED=false
+
+# ============================================================================ #
+# Authentication - Email providers
+# ============================================================================ #
+# SENDGRID_API_KEY=
+# SENDGRID_FROM_ADDRESS=
+
+# ============================================================================ #
+# Authentication - OIDC providers
+# ============================================================================ #
+# GOOGLE_OAUTH_CLIENT_ID=
+# GOOGLE_OAUTH_CLIENT_SECRET=
+
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_ID=
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET=
+# GOOGLE_WORKSPACES_HD=
+
+# APPLE_OAUTH_CLIENT_ID=
+# APPLE_OAUTH_CLIENT_SECRET=
+# APPLE_KEY_ID=
+# APPLE_TEAM_ID=
+# APPLE_PRIVATE_KEY=
+
+# DISCORD_OAUTH_CLIENT_ID=
+# DISCORD_OAUTH_CLIENT_SECRET=
+
+# FACEBOOK_OAUTH_CLIENT_ID=
+# FACEBOOK_OAUTH_CLIENT_SECRET=
+
+# GITHUB_OAUTH_CLIENT_ID=
+# GITHUB_OAUTH_CLIENT_SECRET=
+
+# GITLAB_OAUTH_CLIENT_ID=
+# GITLAB_OAUTH_CLIENT_SECRET=
+# GITLAB_BASE_URL=
+
+# BITBUCKET_OAUTH_CLIENT_ID=
+# BITBUCKET_OAUTH_CLIENT_SECRET=
+
+# LINKEDIN_OAUTH_CLIENT_ID=
+# LINKEDIN_OAUTH_CLIENT_SECRET=
+
+# OKTA_OAUTH_CLIENT_ID=
+# OKTA_OAUTH_CLIENT_SECRET=
+# OKTA_DOMAIN=
+
+# AZURE_AD_OAUTH_CLIENT_ID=
+# AZURE_AD_OAUTH_CLIENT_SECRET=
+# AZURE_AD_DIRECTORY_ID=
+
+# BOXY_SAML_OAUTH_CLIENT_ID=
+# BOXY_SAML_OAUTH_CLIENT_SECRET=
+# BOXY_SAML_URL=
+
+# TWITTER_OAUTH_CLIENT_ID=
+# TWITTER_OAUTH_CLIENT_SECRET=
+
+# ============================================================================ #
+# Analytics - PostHog
+# ============================================================================ #
POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7
-
-# Third-party (required for TLS/SSL)
-AGENTA_SSL_DIR=
-
-# Third-party (optional)
-NGINX_PORT=80
-SUPERTOKENS_API_KEY=
-# Redis: set REDIS_URI for a single instance, or override with the split URIs below
-REDIS_URI=
-REDIS_URI_VOLATILE=
-REDIS_URI_DURABLE=
-POSTGRES_PORT=
-SUPERTOKENS_PORT=
-
-GOOGLE_OAUTH_CLIENT_ID=
-GOOGLE_OAUTH_CLIENT_SECRET=
-GITHUB_OAUTH_CLIENT_ID=
-GITHUB_OAUTH_CLIENT_SECRET=
-
-NEW_RELIC_LICENSE_KEY=
-NRIA_LICENSE_KEY=
-
-LOOPS_API_KEY=
-
-SENDGRID_API_KEY=
-
-CRISP_WEBSITE_ID=
-
-STRIPE_API_KEY=
-STRIPE_WEBHOOK_SECRET=
-
-# Third-party — LLM (optional)
-ALEPHALPHA_API_KEY=
-ANTHROPIC_API_KEY=
-ANYSCALE_API_KEY=
-COHERE_API_KEY=
-DEEPINFRA_API_KEY=
-GEMINI_API_KEY=
-GROQ_API_KEY=
-MISTRAL_API_KEY=
-OPENAI_API_KEY=
-OPENROUTER_API_KEY=
-PERPLEXITYAI_API_KEY=
-TOGETHERAI_API_KEY=
-
-# Legacy (deprecated, to be removed)
-AGENTA_PORT=80
-BARE_DOMAIN_NAME=localhost
-DOMAIN_NAME=http://localhost
-WEBSITE_DOMAIN_NAME=http://localhost
-SERVICE_URL_TEMPLATE=http://localhost:80/services/{path}
-POSTGRES_DB=agenta_oss
-POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_oss
-ALEMBIC_CFG_PATH=/app/oss/databases/postgres/migrations/alembic.oss.ini
diff --git a/hosting/docker-compose/oss/env.oss.gh.example b/hosting/docker-compose/oss/env.oss.gh.example
index 9840592f72..5d01703cfb 100644
--- a/hosting/docker-compose/oss/env.oss.gh.example
+++ b/hosting/docker-compose/oss/env.oss.gh.example
@@ -1,95 +1,159 @@
-# First-party (required)
+# ============================================================================ #
+# License - https://agenta.ai/pricing
+# ============================================================================ #
AGENTA_LICENSE=oss
-AGENTA_API_URL=http://localhost/api
-AGENTA_WEB_URL=http://localhost
-AGENTA_SERVICES_URL=http://localhost/services
+
+# ============================================================================ #
+# Secrets - REPLACE ME IN PRODUCTION!
+# ============================================================================ #
AGENTA_AUTH_KEY=replace-me
AGENTA_CRYPT_KEY=replace-me
-# First-party (registry & service)
-POSTGRES_PASSWORD=password
-POSTGRES_USERNAME=username
-
-# First-party (optional)
-AGENTA_AUTO_MIGRATIONS=true
-AGENTA_PRICING=
-AGENTA_DEMOS=
-DOCKER_NETWORK_MODE=bridge
-AGENTA_RUNTIME_PREFIX=
-AGENTA_SEND_EMAIL_FROM_ADDRESS=mail@example.com
-AGENTA_API_INTERNAL_URL=
-AGENTA_TELEMETRY_ENABLED=true
-AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true
-AGENTA_OTLP_MAX_BATCH_BYTES=10485760
-
-# Third-party (required)
-TRAEFIK_DOMAIN=localhost
-TRAEFIK_PORT=80
-TRAEFIK_PROTOCOL=http
-TRAEFIK_UI_PORT=8080
-TRAEFIK_HTTPS_PORT=443
-
-POSTGRES_URI_SUPERTOKENS=postgresql://username:password@postgres:5432/agenta_oss_supertokens
-POSTGRES_URI_CORE=postgresql+asyncpg://username:password@postgres:5432/agenta_oss_core
-POSTGRES_URI_TRACING=postgresql+asyncpg://username:password@postgres:5432/agenta_oss_tracing
-
-ALEMBIC_CFG_PATH_CORE=/app/oss/databases/postgres/migrations/core/alembic.ini
-ALEMBIC_CFG_PATH_TRACING=/app/oss/databases/postgres/migrations/tracing/alembic.ini
-
-SUPERTOKENS_CONNECTION_URI=http://supertokens:3567
-
+# ============================================================================ #
+# Endpoints
+# ============================================================================ #
+# AGENTA_WEB_URL=http://localhost
+# AGENTA_API_URL=http://localhost/api
+# AGENTA_SERVICES_URL=http://localhost/services
+# AGENTA_API_INTERNAL_URL=
+
+# ============================================================================ #
+# Images
+# ============================================================================ #
+# AGENTA_WEB_IMAGE_NAME=agenta-web
+# AGENTA_WEB_IMAGE_TAG=latest
+# AGENTA_API_IMAGE_NAME=agenta-api
+# AGENTA_API_IMAGE_TAG=latest
+# AGENTA_COMPLETION_IMAGE_NAME=agenta-completion
+# AGENTA_COMPLETION_IMAGE_TAG=latest
+# AGENTA_CHAT_IMAGE_NAME=agenta-chat
+# AGENTA_CHAT_IMAGE_TAG=latest
+
+# ============================================================================ #
+# OTLP
+# ============================================================================ #
+# AGENTA_OTLP_MAX_BATCH_BYTES=10485760
+
+# ============================================================================ #
+# Proxy - LLM Providers
+# ============================================================================ #
+# OPENAI_API_KEY=
+# ANTHROPIC_API_KEY=
+# COHERE_API_KEY=
+# GROQ_API_KEY=
+# GEMINI_API_KEY=
+# MISTRAL_API_KEY=
+# ALEPHALPHA_API_KEY=
+# ANYSCALE_API_KEY=
+# DEEPINFRA_API_KEY=
+# OPENROUTER_API_KEY=
+# PERPLEXITYAI_API_KEY=
+# TOGETHERAI_API_KEY=
+
+# ============================================================================ #
+# Docker - Compose
+# ============================================================================ #
+# COMPOSE_PROJECT_NAME=agenta-oss-dev
+
+# ============================================================================ #
+# Network - Traefik
+# ============================================================================ #
+# TRAEFIK_PROTOCOL=http
+# TRAEFIK_DOMAIN=localhost
+# TRAEFIK_PORT=80
+# TRAEFIK_SSL_DIR=
+
+# ============================================================================ #
+# Network - Nginx
+# ============================================================================ #
+# NGINX_PORT=80
+
+# ============================================================================ #
+# Databases - Postgres
+# ============================================================================ #
+# POSTGRES_USER=username
+# POSTGRES_PASSWORD=password
+
+# POSTGRES_PORT=5432
+# POSTGRES_URI_CORE=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_core
+# POSTGRES_URI_TRACING=postgresql+asyncpg://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_tracing
+# POSTGRES_URI_SUPERTOKENS=postgresql://{USER}:{PASSWORD}@postgres:5432/agenta_{LICENSE}_supertokens
+
+# ============================================================================ #
+# Databases - Alembic (migrations)
+# ============================================================================ #
+# ALEMBIC_AUTO_MIGRATIONS=true
+# ALEMBIC_CFG_PATH_CORE=/app/{LICENSE}/databases/postgres/migrations/core/alembic.ini
+# ALEMBIC_CFG_PATH_TRACING=/app/{LICENSE}/databases/postgres/migrations/tracing/alembic.ini
+
+# ============================================================================ #
+# Databases - Redis
+# ============================================================================ #
+# REDIS_URI_VOLATILE=redis://localhost:6379/0
+# REDIS_URI_DURABLE=redis://localhost:6381/0
+
+# ============================================================================ #
+# Authentication - SuperTokens
+# ============================================================================ #
+# SUPERTOKENS_EMAIL_DISABLED=false
+
+# ============================================================================ #
+# Authentication - Email providers
+# ============================================================================ #
+# SENDGRID_API_KEY=
+# SENDGRID_FROM_ADDRESS=
+
+# ============================================================================ #
+# Authentication - OIDC providers
+# ============================================================================ #
+# GOOGLE_OAUTH_CLIENT_ID=
+# GOOGLE_OAUTH_CLIENT_SECRET=
+
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_ID=
+# GOOGLE_WORKSPACES_OAUTH_CLIENT_SECRET=
+# GOOGLE_WORKSPACES_HD=
+
+# APPLE_OAUTH_CLIENT_ID=
+# APPLE_OAUTH_CLIENT_SECRET=
+# APPLE_KEY_ID=
+# APPLE_TEAM_ID=
+# APPLE_PRIVATE_KEY=
+
+# DISCORD_OAUTH_CLIENT_ID=
+# DISCORD_OAUTH_CLIENT_SECRET=
+
+# FACEBOOK_OAUTH_CLIENT_ID=
+# FACEBOOK_OAUTH_CLIENT_SECRET=
+
+# GITHUB_OAUTH_CLIENT_ID=
+# GITHUB_OAUTH_CLIENT_SECRET=
+
+# GITLAB_OAUTH_CLIENT_ID=
+# GITLAB_OAUTH_CLIENT_SECRET=
+# GITLAB_BASE_URL=
+
+# BITBUCKET_OAUTH_CLIENT_ID=
+# BITBUCKET_OAUTH_CLIENT_SECRET=
+
+# LINKEDIN_OAUTH_CLIENT_ID=
+# LINKEDIN_OAUTH_CLIENT_SECRET=
+
+# OKTA_OAUTH_CLIENT_ID=
+# OKTA_OAUTH_CLIENT_SECRET=
+# OKTA_DOMAIN=
+
+# AZURE_AD_OAUTH_CLIENT_ID=
+# AZURE_AD_OAUTH_CLIENT_SECRET=
+# AZURE_AD_DIRECTORY_ID=
+
+# BOXY_SAML_OAUTH_CLIENT_ID=
+# BOXY_SAML_OAUTH_CLIENT_SECRET=
+# BOXY_SAML_URL=
+
+# TWITTER_OAUTH_CLIENT_ID=
+# TWITTER_OAUTH_CLIENT_SECRET=
+
+# ============================================================================ #
+# Analytics - PostHog
+# ============================================================================ #
POSTHOG_API_KEY=phc_hmVSxIjTW1REBHXgj2aw4HW9X6CXb6FzerBgP9XenC7
-
-# Third-party (required for TLS/SSL)
-AGENTA_SSL_DIR=
-
-# Third-party (optional)
-NGINX_PORT=80
-SUPERTOKENS_API_KEY=
-# Redis: set REDIS_URI for a single instance, or override with the split URIs below
-REDIS_URI=
-REDIS_URI_VOLATILE=
-REDIS_URI_DURABLE=
-POSTGRES_PORT=
-SUPERTOKENS_PORT=
-
-GOOGLE_OAUTH_CLIENT_ID=
-GOOGLE_OAUTH_CLIENT_SECRET=
-GITHUB_OAUTH_CLIENT_ID=
-GITHUB_OAUTH_CLIENT_SECRET=
-
-NEW_RELIC_LICENSE_KEY=
-NRIA_LICENSE_KEY=
-
-LOOPS_API_KEY=
-
-SENDGRID_API_KEY=
-
-CRISP_WEBSITE_ID=
-
-STRIPE_API_KEY=
-STRIPE_WEBHOOK_SECRET=
-
-# Third-party — LLM (optional)
-ALEPHALPHA_API_KEY=
-ANTHROPIC_API_KEY=
-ANYSCALE_API_KEY=
-COHERE_API_KEY=
-DEEPINFRA_API_KEY=
-GEMINI_API_KEY=
-GROQ_API_KEY=
-MISTRAL_API_KEY=
-OPENAI_API_KEY=
-OPENROUTER_API_KEY=
-PERPLEXITYAI_API_KEY=
-TOGETHERAI_API_KEY=
-
-# Legacy (deprecated, to be removed)
-AGENTA_PORT=80
-BARE_DOMAIN_NAME=localhost
-DOMAIN_NAME=http://localhost
-WEBSITE_DOMAIN_NAME=http://localhost
-SERVICE_URL_TEMPLATE=http://localhost:80/services/{path}
-POSTGRES_DB=agenta_oss
-POSTGRES_URI=postgresql+asyncpg://username:password@postgres:5432/agenta_oss
-ALEMBIC_CFG_PATH=/app/oss/databases/postgres/migrations/alembic.oss.ini
diff --git a/sdk/agenta/client/backend/types/organization.py b/sdk/agenta/client/backend/types/organization.py
index fef8794c5e..4b6f891ee0 100644
--- a/sdk/agenta/client/backend/types/organization.py
+++ b/sdk/agenta/client/backend/types/organization.py
@@ -8,9 +8,9 @@
class Organization(UniversalBaseModel):
id: str
- name: str
+ name: typing.Optional[str] = None
owner: str
- description: str
+ description: typing.Optional[str] = None
type: typing.Optional[str] = None
workspaces: typing.Optional[typing.List[str]] = None
diff --git a/sdk/agenta/sdk/agenta_init.py b/sdk/agenta/sdk/agenta_init.py
index 0cae1429c2..5b2efc82ea 100644
--- a/sdk/agenta/sdk/agenta_init.py
+++ b/sdk/agenta/sdk/agenta_init.py
@@ -123,9 +123,7 @@ def init(
)
if self.api_key is None:
- log.warning(
- "API key is required (in most cases). Please set AGENTA_API_KEY environment variable or pass api_key parameter in ag.init()."
- )
+ log.warning("Agenta - API key: missing")
log.info("Agenta - API URL: %s", self.api_url)
diff --git a/sdk/agenta/sdk/assets.py b/sdk/agenta/sdk/assets.py
index 4457ab357b..a584371842 100644
--- a/sdk/agenta/sdk/assets.py
+++ b/sdk/agenta/sdk/assets.py
@@ -1,3 +1,8 @@
+from typing import Dict, Optional, Tuple
+
+from litellm import cost_calculator
+
+
supported_llm_models = {
"anthropic": [
"anthropic/claude-sonnet-4-5",
@@ -206,6 +211,58 @@
providers_list = list(supported_llm_models.keys())
+
+def _get_model_costs(model: str) -> Optional[Tuple[float, float]]:
+ """
+ Get the input and output costs per 1M tokens for a model.
+
+ Uses litellm's cost_calculator (same as tracing/inline.py) for consistency.
+
+ Args:
+ model: The model name (e.g., "gpt-4o" or "anthropic/claude-3-opus-20240229")
+
+ Returns:
+ Tuple of (input_cost, output_cost) per 1M tokens, or None if not found.
+ """
+ try:
+ costs = cost_calculator.cost_per_token(
+ model=model,
+ prompt_tokens=1_000_000,
+ completion_tokens=1_000_000,
+ )
+ if costs:
+ input_cost, output_cost = costs
+ if input_cost > 0 or output_cost > 0:
+ return (input_cost, output_cost)
+ except Exception:
+ pass
+ return None
+
+
+def _build_model_metadata() -> Dict[str, Dict[str, Dict[str, float]]]:
+ """
+ Build metadata dictionary with costs for all supported models.
+
+ Returns:
+ Nested dict: {provider: {model: {"input": cost, "output": cost}}}
+ """
+ metadata: Dict[str, Dict[str, Dict[str, float]]] = {}
+
+ for provider, models in supported_llm_models.items():
+ metadata[provider] = {}
+ for model in models:
+ costs = _get_model_costs(model)
+ if costs:
+ metadata[provider][model] = {
+ "input": costs[0],
+ "output": costs[1],
+ }
+
+ return metadata
+
+
+model_metadata = _build_model_metadata()
+
model_to_provider_mapping = {
model: provider
for provider, models in supported_llm_models.items()
diff --git a/sdk/agenta/sdk/middleware/config.py b/sdk/agenta/sdk/middleware/config.py
index cda301ff07..98d960a411 100644
--- a/sdk/agenta/sdk/middleware/config.py
+++ b/sdk/agenta/sdk/middleware/config.py
@@ -224,6 +224,7 @@ async def _parse_variant_ref(
baggage.get("ag.refs.variant.slug")
# ALTERNATIVE
or request.query_params.get("variant_slug")
+ or body.get("variant_slug")
# LEGACY
or baggage.get("variant_slug")
or request.query_params.get("config")
@@ -234,6 +235,7 @@ async def _parse_variant_ref(
baggage.get("ag.refs.variant.version")
# ALTERNATIVE
or request.query_params.get("variant_version")
+ or body.get("variant_version")
# LEGACY
or baggage.get("variant_version")
)
@@ -244,7 +246,7 @@ async def _parse_variant_ref(
return Reference(
id=variant_id,
slug=variant_slug,
- version=variant_version,
+ version=str(variant_version) if variant_version is not None else None,
)
async def _parse_environment_ref(
diff --git a/sdk/agenta/sdk/types.py b/sdk/agenta/sdk/types.py
index 29e1fc8c9c..b3e634c574 100644
--- a/sdk/agenta/sdk/types.py
+++ b/sdk/agenta/sdk/types.py
@@ -8,7 +8,7 @@
from starlette.responses import StreamingResponse
-from agenta.sdk.assets import supported_llm_models
+from agenta.sdk.assets import supported_llm_models, model_metadata
from agenta.client.backend.types import AgentaNodesResponse, AgentaNodeDto
@@ -23,7 +23,11 @@ def MCField( # pylint: disable=invalid-name
) -> Field:
# Pydantic 2.12+ no longer allows post-creation mutation of field properties
if isinstance(choices, dict):
- json_extra = {"choices": choices, "x-parameter": "grouped_choice"}
+ json_extra = {
+ "choices": choices,
+ "x-parameter": "grouped_choice",
+ "x-model-metadata": model_metadata,
+ }
elif isinstance(choices, list):
json_extra = {"choices": choices, "x-parameter": "choice"}
else:
diff --git a/sdk/agenta/sdk/utils/lazy.py b/sdk/agenta/sdk/utils/lazy.py
index f208e9f8b6..b37b073297 100644
--- a/sdk/agenta/sdk/utils/lazy.py
+++ b/sdk/agenta/sdk/utils/lazy.py
@@ -10,9 +10,6 @@
from fastapi import APIRouter, Body, FastAPI, HTTPException, Request
from jinja2 import Template, TemplateError
from openai import AsyncOpenAI, OpenAIError
- from RestrictedPython import safe_builtins, compile_restricted, utility_builtins
- from RestrictedPython.Eval import default_guarded_getiter, default_guarded_getitem
- from RestrictedPython.Guards import guarded_iter_unpack_sequence, full_write_guard
from starlette.responses import Response as StarletteResponse, StreamingResponse
from jsonpath import JSONPointer
import jsonpath as jsonpath_module
@@ -50,19 +47,6 @@ def safe_load(self, *args: Any, **kwargs: Any) -> Any: ...
_openai_cached: Optional[Tuple[type["AsyncOpenAI"], type["OpenAIError"]]] = None
_openai_checked = False
-_restrictedpython_cached: Optional[
- Tuple[
- dict,
- Callable[..., Any],
- dict,
- Callable[..., Any],
- Callable[..., Any],
- Callable[..., Any],
- Callable[..., Any],
- ]
-] = None
-_restrictedpython_checked = False
-
_yaml_module: Optional[_YamlModule] = None
_yaml_checked = False
@@ -157,55 +141,6 @@ def _load_openai() -> Tuple[type["AsyncOpenAI"], type["OpenAIError"]]:
return _openai_cached
-def _load_restrictedpython() -> Tuple[
- dict,
- Callable[..., Any],
- dict,
- Callable[..., Any],
- Callable[..., Any],
- Callable[..., Any],
- Callable[..., Any],
-]:
- global _restrictedpython_cached, _restrictedpython_checked # pylint: disable=global-statement
-
- if _restrictedpython_checked:
- if _restrictedpython_cached is None:
- raise ImportError(
- "RestrictedPython is required for local sandbox execution. "
- "Install it with `pip install restrictedpython`."
- )
- return _restrictedpython_cached
-
- _restrictedpython_checked = True
- try:
- from RestrictedPython import safe_builtins, compile_restricted, utility_builtins
- from RestrictedPython.Eval import (
- default_guarded_getiter,
- default_guarded_getitem,
- )
- from RestrictedPython.Guards import (
- guarded_iter_unpack_sequence,
- full_write_guard,
- )
- except Exception as exc:
- _restrictedpython_cached = None
- raise ImportError(
- "RestrictedPython is required for local sandbox execution. "
- "Install it with `pip install restrictedpython`."
- ) from exc
-
- _restrictedpython_cached = (
- safe_builtins,
- compile_restricted,
- utility_builtins,
- default_guarded_getiter,
- default_guarded_getitem,
- guarded_iter_unpack_sequence,
- full_write_guard,
- )
- return _restrictedpython_cached
-
-
def _load_yaml() -> _YamlModule:
global _yaml_module, _yaml_checked # pylint: disable=global-statement
diff --git a/sdk/agenta/sdk/workflows/builtin.py b/sdk/agenta/sdk/workflows/builtin.py
index 96fe546f66..d4139e7675 100644
--- a/sdk/agenta/sdk/workflows/builtin.py
+++ b/sdk/agenta/sdk/workflows/builtin.py
@@ -166,11 +166,13 @@ def auto_custom_code_run(
#
correct_answer_key: Optional[str] = "correct_answer",
threshold: Optional[float] = 0.5,
+ runtime: Optional[str] = "python",
) -> Workflow:
parameters = dict(
code=code,
correct_answer_key=correct_answer_key,
threshold=threshold,
+ runtime=runtime,
)
return evaluator(
diff --git a/sdk/agenta/sdk/workflows/configurations.py b/sdk/agenta/sdk/workflows/configurations.py
index 9086047c53..42310b9368 100644
--- a/sdk/agenta/sdk/workflows/configurations.py
+++ b/sdk/agenta/sdk/workflows/configurations.py
@@ -5,6 +5,7 @@
auto_exact_match_v0_configuration = WorkflowServiceConfiguration()
auto_regex_test_v0_configuration = WorkflowServiceConfiguration()
field_match_test_v0_configuration = WorkflowServiceConfiguration()
+json_multi_field_match_v0_configuration = WorkflowServiceConfiguration()
auto_webhook_test_v0_configuration = WorkflowServiceConfiguration()
auto_custom_code_run_v0_configuration = WorkflowServiceConfiguration()
auto_ai_critique_v0_configuration = WorkflowServiceConfiguration()
diff --git a/sdk/agenta/sdk/workflows/handlers.py b/sdk/agenta/sdk/workflows/handlers.py
index 3ecd036459..fa95fa9654 100644
--- a/sdk/agenta/sdk/workflows/handlers.py
+++ b/sdk/agenta/sdk/workflows/handlers.py
@@ -1,14 +1,14 @@
-from typing import List, Any, Optional, Any, Dict, Union
-from json import dumps, loads
-import traceback
import json
-import re
import math
+import re
+import traceback
+from difflib import SequenceMatcher
+from json import dumps, loads
+from typing import Any, Dict, List, Optional, Union
import httpx
from pydantic import BaseModel, Field
-from difflib import SequenceMatcher
from agenta.sdk.utils.logging import get_module_logger
from agenta.sdk.utils.lazy import (
@@ -21,33 +21,30 @@
from agenta.sdk.litellm import mockllm
from agenta.sdk.types import PromptTemplate, Message
from agenta.sdk.managers.secrets import SecretsManager
-
from agenta.sdk.decorators.tracing import instrument
-
+from agenta.sdk.litellm.litellm import litellm_handler
from agenta.sdk.models.shared import Data
-from agenta.sdk.models.tracing import Trace
from agenta.sdk.workflows.sandbox import execute_code_safely
from agenta.sdk.workflows.templates import EVALUATOR_TEMPLATES
from agenta.sdk.workflows.errors import (
+ CustomCodeServerV0Error,
InvalidConfigurationParametersV0Error,
- MissingConfigurationParameterV0Error,
InvalidConfigurationParameterV0Error,
InvalidInputsV0Error,
- MissingInputV0Error,
InvalidInputV0Error,
InvalidOutputsV0Error,
- MissingOutputV0Error,
InvalidSecretsV0Error,
JSONDiffV0Error,
LevenshteinDistanceV0Error,
- SyntacticSimilarityV0Error,
+ MissingConfigurationParameterV0Error,
+ MissingInputV0Error,
+ PromptCompletionV0Error,
+ PromptFormattingV0Error,
+ RegexPatternV0Error,
SemanticSimilarityV0Error,
- WebhookServerV0Error,
+ SyntacticSimilarityV0Error,
WebhookClientV0Error,
- CustomCodeServerV0Error,
- RegexPatternV0Error,
- PromptFormattingV0Error,
- PromptCompletionV0Error,
+ WebhookServerV0Error,
)
log = get_module_logger(__name__)
@@ -58,7 +55,6 @@ def _configure_litellm():
litellm = _load_litellm()
if not litellm:
raise ImportError("litellm is required for completion handling.")
- from agenta.sdk.litellm.litellm import litellm_handler
litellm.logging = False
litellm.set_verbose = False
@@ -86,9 +82,7 @@ def _compute_similarity(embedding_1: List[float], embedding_2: List[float]) -> f
return dot / (norm1 * norm2)
-import json
-import re
-from typing import Any, Dict, Iterable, Tuple, Optional
+from typing import Any, Iterable, Tuple
# ========= Scheme detection =========
@@ -393,7 +387,7 @@ def auto_exact_match_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "correct_answer_key" in parameters:
+ if "correct_answer_key" not in parameters:
raise MissingConfigurationParameterV0Error(path="correct_answer_key")
correct_answer_key = str(parameters["correct_answer_key"])
@@ -401,7 +395,7 @@ def auto_exact_match_v0(
if inputs is None or not isinstance(inputs, dict):
raise InvalidInputsV0Error(expected="dict", got=inputs)
- if not correct_answer_key in inputs:
+ if correct_answer_key not in inputs:
raise MissingInputV0Error(path=correct_answer_key)
correct_answer = inputs[correct_answer_key]
@@ -438,7 +432,7 @@ def auto_regex_test_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "regex_pattern" in parameters:
+ if "regex_pattern" not in parameters:
raise MissingConfigurationParameterV0Error(path="regex_pattern")
regex_pattern = parameters["regex_pattern"]
@@ -496,12 +490,12 @@ def field_match_test_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "json_field" in parameters:
+ if "json_field" not in parameters:
raise MissingConfigurationParameterV0Error(path="json_field")
json_field = str(parameters["json_field"])
- if not "correct_answer_key" in parameters:
+ if "correct_answer_key" not in parameters:
raise MissingConfigurationParameterV0Error(path="correct_answer_key")
correct_answer_key = str(parameters["correct_answer_key"])
@@ -509,7 +503,7 @@ def field_match_test_v0(
if inputs is None or not isinstance(inputs, dict):
raise InvalidInputsV0Error(expected="dict", got=inputs)
- if not correct_answer_key in inputs:
+ if correct_answer_key not in inputs:
raise MissingInputV0Error(path=correct_answer_key)
correct_answer = inputs[correct_answer_key]
@@ -522,7 +516,7 @@ def field_match_test_v0(
if isinstance(outputs, str):
try:
outputs_dict = loads(outputs)
- except json.JSONDecodeError as e:
+ except json.JSONDecodeError:
# raise InvalidOutputsV0Error(expected="dict", got=outputs) from e
return {"success": False}
@@ -530,7 +524,7 @@ def field_match_test_v0(
# raise InvalidOutputsV0Error(expected=["dict", "str"], got=outputs)
return {"success": False}
- if not json_field in outputs_dict:
+ if json_field not in outputs_dict:
# raise MissingOutputV0Error(path=json_field)
return {"success": False}
@@ -541,6 +535,148 @@ def field_match_test_v0(
return {"success": success}
+def _get_nested_value(obj: Any, path: str) -> Any:
+ """
+ Get value from nested object using resolve_any() with graceful None on failure.
+
+ Supports multiple path formats:
+ - Dot notation: "user.address.city", "items.0.name"
+ - JSON Path: "$.user.address.city", "$.items[0].name"
+ - JSON Pointer: "/user/address/city", "/items/0/name"
+
+ Args:
+ obj: The object to traverse (dict or list)
+ path: Path expression in any supported format
+
+ Returns:
+ The value at the path, or None if path doesn't exist or resolution fails
+ """
+ if obj is None:
+ return None
+
+ try:
+ return resolve_any(path, obj)
+ except (KeyError, IndexError, ValueError, TypeError, ImportError):
+ return None
+
+
+@instrument(annotate=True)
+def json_multi_field_match_v0(
+ parameters: Optional[Data] = None,
+ inputs: Optional[Data] = None,
+ outputs: Optional[Union[Data, str]] = None,
+) -> Any:
+ """
+ Multi-field JSON match evaluator for comparing multiple fields between expected and actual JSON.
+
+ Each configured field becomes a separate score (0 or 1), and an aggregate_score reports
+ the fraction of matching fields. Useful for entity extraction validation.
+
+ Args:
+ inputs: Testcase data with ground truth JSON
+ outputs: Output from the workflow execution (expected to be JSON string or dict)
+ parameters: Configuration with:
+ - fields: List of field paths to compare (e.g., ["name", "user.address.city"])
+ - correct_answer_key: Key in inputs containing the expected JSON
+
+ Returns:
+ Dict with per-field scores and aggregate_score, e.g.:
+ {"name": 1.0, "email": 0.0, "aggregate_score": 0.5}
+ """
+ if parameters is None or not isinstance(parameters, dict):
+ raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
+
+ if "fields" not in parameters:
+ raise MissingConfigurationParameterV0Error(path="fields")
+
+ fields = parameters["fields"]
+
+ if not isinstance(fields, list) or len(fields) == 0:
+ raise InvalidConfigurationParameterV0Error(
+ path="fields",
+ expected="non-empty list",
+ got=fields,
+ )
+
+ if "correct_answer_key" not in parameters:
+ raise MissingConfigurationParameterV0Error(path="correct_answer_key")
+
+ correct_answer_key = str(parameters["correct_answer_key"])
+
+ if inputs is None or not isinstance(inputs, dict):
+ raise InvalidInputsV0Error(expected="dict", got=inputs)
+
+ if correct_answer_key not in inputs:
+ raise MissingInputV0Error(path=correct_answer_key)
+
+ correct_answer = inputs[correct_answer_key]
+
+ # Parse ground truth JSON
+ if isinstance(correct_answer, str):
+ try:
+ expected = json.loads(correct_answer)
+ except json.JSONDecodeError:
+ raise InvalidInputV0Error(
+ path=correct_answer_key,
+ expected="valid JSON string",
+ got=correct_answer,
+ )
+ elif isinstance(correct_answer, dict):
+ expected = correct_answer
+ else:
+ raise InvalidInputV0Error(
+ path=correct_answer_key,
+ expected=["dict", "str"],
+ got=correct_answer,
+ )
+
+ # Parse output JSON
+ if not isinstance(outputs, str) and not isinstance(outputs, dict):
+ # Return all zeros if output is invalid
+ results: Dict[str, Any] = {field: 0.0 for field in fields}
+ results["aggregate_score"] = 0.0
+ return results
+
+ if isinstance(outputs, str):
+ try:
+ actual = json.loads(outputs)
+ except json.JSONDecodeError:
+ # Return all zeros if output is not valid JSON
+ results = {field: 0.0 for field in fields}
+ results["aggregate_score"] = 0.0
+ return results
+ else:
+ actual = outputs
+
+ if not isinstance(actual, dict):
+ # Return all zeros if parsed output is not a dict
+ results = {field: 0.0 for field in fields}
+ results["aggregate_score"] = 0.0
+ return results
+
+ # --------------------------------------------------------------------------
+ # Compare each configured field
+ results = {}
+ matches = 0
+
+ for field_path in fields:
+ expected_val = _get_nested_value(expected, field_path)
+ actual_val = _get_nested_value(actual, field_path)
+
+ # Exact match comparison
+ match = expected_val == actual_val
+
+ results[field_path] = 1.0 if match else 0.0
+ if match:
+ matches += 1
+
+ # Aggregate score is the fraction of matching fields
+ results["aggregate_score"] = matches / len(fields) if fields else 0.0
+ # --------------------------------------------------------------------------
+
+ return results
+
+
@instrument(annotate=True)
async def auto_webhook_test_v0(
parameters: Optional[Data] = None,
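The new `json_multi_field_match_v0` handler above parses the ground-truth and output JSON, compares each configured field path, and emits one 0/1 score per field plus an aggregate. A self-contained sketch of the same idea, using plain dot-notation traversal instead of the SDK's `resolve_any()` (`get_by_dot_path` and `multi_field_match` are illustrative helpers, not SDK functions):

```python
# Self-contained sketch of the multi-field match idea above. It uses plain
# dot-notation traversal instead of the SDK's resolve_any(); get_by_dot_path
# and multi_field_match are illustrative helpers, not SDK functions.
import json
from typing import Any, Dict, List, Optional

def get_by_dot_path(obj: Any, path: str) -> Optional[Any]:
    """Walk a dict/list along a dot-separated path; return None on any miss."""
    current = obj
    for part in path.split("."):
        if isinstance(current, dict) and part in current:
            current = current[part]
        elif isinstance(current, list) and part.isdigit() and int(part) < len(current):
            current = current[int(part)]
        else:
            return None
    return current

def multi_field_match(expected_json: str, actual_json: str, fields: List[str]) -> Dict[str, float]:
    expected = json.loads(expected_json)
    actual = json.loads(actual_json)
    results: Dict[str, float] = {}
    matches = 0
    for field in fields:
        match = get_by_dot_path(expected, field) == get_by_dot_path(actual, field)
        results[field] = 1.0 if match else 0.0
        matches += int(match)
    results["aggregate_score"] = matches / len(fields) if fields else 0.0
    return results

print(multi_field_match(
    '{"name": "Ada", "email": "ada@example.com"}',
    '{"name": "Ada", "email": "ada@other.com"}',
    ["name", "email"],
))  # {'name': 1.0, 'email': 0.0, 'aggregate_score': 0.5}
```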
@@ -561,12 +697,12 @@ async def auto_webhook_test_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "webhook_url" in parameters:
+ if "webhook_url" not in parameters:
raise MissingConfigurationParameterV0Error(path="webhook_url")
webhook_url = str(parameters["webhook_url"])
- if not "correct_answer_key" in parameters:
+ if "correct_answer_key" not in parameters:
raise MissingConfigurationParameterV0Error(path="correct_answer_key")
correct_answer_key = str(parameters["correct_answer_key"])
@@ -574,7 +710,7 @@ async def auto_webhook_test_v0(
if inputs is None or not isinstance(inputs, dict):
raise InvalidInputsV0Error(expected="dict", got=inputs)
- if not correct_answer_key in inputs:
+ if correct_answer_key not in inputs:
raise MissingInputV0Error(path=correct_answer_key)
correct_answer = inputs[correct_answer_key]
@@ -666,12 +802,12 @@ async def auto_custom_code_run_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "code" in parameters:
+ if "code" not in parameters:
raise MissingConfigurationParameterV0Error(path="code")
code = str(parameters["code"])
- if not "correct_answer_key" in parameters:
+ if "correct_answer_key" not in parameters:
raise MissingConfigurationParameterV0Error(path="correct_answer_key")
correct_answer_key = str(parameters["correct_answer_key"])
@@ -679,7 +815,7 @@ async def auto_custom_code_run_v0(
if inputs is None or not isinstance(inputs, dict):
raise InvalidInputsV0Error(expected="dict", got=inputs)
- if not correct_answer_key in inputs:
+ if correct_answer_key not in inputs:
raise MissingInputV0Error(path=correct_answer_key)
correct_answer = inputs[correct_answer_key]
@@ -768,7 +904,7 @@ async def auto_ai_critique_v0(
correct_answer_key = parameters.get("correct_answer_key")
- if not "prompt_template" in parameters:
+ if "prompt_template" not in parameters:
raise MissingConfigurationParameterV0Error(path="prompt_template")
prompt_template = parameters.get("prompt_template")
@@ -799,7 +935,7 @@ async def auto_ai_critique_v0(
"json_schema" if template_version == "4" else "text"
)
- if not response_type in ["text", "json_object", "json_schema"]:
+ if response_type not in ["text", "json_object", "json_schema"]:
raise InvalidConfigurationParameterV0Error(
path="response_type",
expected=["text", "json_object", "json_schema"],
@@ -1004,7 +1140,7 @@ def auto_starts_with_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "prefix" in parameters:
+ if "prefix" not in parameters:
raise MissingConfigurationParameterV0Error(path="prefix")
prefix = parameters["prefix"]
@@ -1053,7 +1189,7 @@ def auto_ends_with_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "suffix" in parameters:
+ if "suffix" not in parameters:
raise MissingConfigurationParameterV0Error(path="suffix")
suffix = parameters["suffix"]
@@ -1102,7 +1238,7 @@ def auto_contains_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "substring" in parameters:
+ if "substring" not in parameters:
raise MissingConfigurationParameterV0Error(path="substring")
substring = parameters["substring"]
@@ -1151,7 +1287,7 @@ def auto_contains_any_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "substrings" in parameters:
+ if "substrings" not in parameters:
raise MissingConfigurationParameterV0Error(path="substrings")
substrings = parameters["substrings"]
@@ -1209,7 +1345,7 @@ def auto_contains_all_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "substrings" in parameters:
+ if "substrings" not in parameters:
raise MissingConfigurationParameterV0Error(path="substrings")
substrings = parameters["substrings"]
@@ -1309,7 +1445,7 @@ def auto_json_diff_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "correct_answer_key" in parameters:
+ if "correct_answer_key" not in parameters:
raise MissingConfigurationParameterV0Error(path="correct_answer_key")
correct_answer_key = str(parameters["correct_answer_key"])
@@ -1317,7 +1453,7 @@ def auto_json_diff_v0(
if inputs is None or not isinstance(inputs, dict):
raise InvalidInputsV0Error(expected="dict", got=inputs)
- if not correct_answer_key in inputs:
+ if correct_answer_key not in inputs:
raise MissingInputV0Error(path=correct_answer_key)
correct_answer = inputs[correct_answer_key]
@@ -1401,7 +1537,7 @@ def auto_levenshtein_distance_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "correct_answer_key" in parameters:
+ if "correct_answer_key" not in parameters:
raise MissingConfigurationParameterV0Error(path="correct_answer_key")
correct_answer_key = str(parameters["correct_answer_key"])
@@ -1411,7 +1547,7 @@ def auto_levenshtein_distance_v0(
if inputs is None or not isinstance(inputs, dict):
raise InvalidInputsV0Error(expected="dict", got=inputs)
- if not correct_answer_key in inputs:
+ if correct_answer_key not in inputs:
raise MissingInputV0Error(path=correct_answer_key)
correct_answer = inputs[correct_answer_key]
@@ -1506,7 +1642,7 @@ def auto_similarity_match_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "correct_answer_key" in parameters:
+ if "correct_answer_key" not in parameters:
raise MissingConfigurationParameterV0Error(path="correct_answer_key")
correct_answer_key = str(parameters["correct_answer_key"])
@@ -1516,7 +1652,7 @@ def auto_similarity_match_v0(
if inputs is None or not isinstance(inputs, dict):
raise InvalidInputsV0Error(expected="dict", got=inputs)
- if not correct_answer_key in inputs:
+ if correct_answer_key not in inputs:
raise MissingInputV0Error(path=correct_answer_key)
correct_answer = inputs[correct_answer_key]
@@ -1599,7 +1735,7 @@ async def auto_semantic_similarity_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "correct_answer_key" in parameters:
+ if "correct_answer_key" not in parameters:
raise MissingConfigurationParameterV0Error(path="correct_answer_key")
correct_answer_key = str(parameters["correct_answer_key"])
@@ -1612,7 +1748,7 @@ async def auto_semantic_similarity_v0(
if inputs is None or not isinstance(inputs, dict):
raise InvalidInputsV0Error(expected="dict", got=inputs)
- if not correct_answer_key in inputs:
+ if correct_answer_key not in inputs:
raise MissingInputV0Error(path=correct_answer_key)
correct_answer = inputs[correct_answer_key]
@@ -1715,7 +1851,7 @@ async def completion_v0(
if parameters is None or not isinstance(parameters, dict):
raise InvalidConfigurationParametersV0Error(expected="dict", got=parameters)
- if not "prompt" in parameters:
+ if "prompt" not in parameters:
raise MissingConfigurationParameterV0Error(path="prompt")
params: Dict[str, Any] = {**(parameters or {})}
diff --git a/sdk/agenta/sdk/workflows/interfaces.py b/sdk/agenta/sdk/workflows/interfaces.py
index 85334ab6cb..6c1e5edfbf 100644
--- a/sdk/agenta/sdk/workflows/interfaces.py
+++ b/sdk/agenta/sdk/workflows/interfaces.py
@@ -169,6 +169,53 @@
),
)
+json_multi_field_match_v0_interface = WorkflowServiceInterface(
+ uri="agenta:built-in:json_multi_field_match:v0",
+ schemas=dict( # type: ignore
+ parameters={
+ "type": "object",
+ "title": "JSON Multi-Field Match Parameters",
+ "description": "Settings for comparing multiple JSON fields against expected values from a ground truth column.",
+ "properties": {
+ "correct_answer_key": {
+ "type": "string",
+ "title": "Ground Truth Column",
+ "description": "Column in test data containing the JSON ground truth.",
+ "default": "correct_answer",
+ },
+ "fields": {
+ "type": "array",
+ "title": "Fields to Compare",
+ "description": "List of JSON field paths (dot notation) to compare. Each field becomes a separate score.",
+ "items": {"type": "string"},
+ "default": [],
+ },
+ },
+ "required": ["correct_answer_key", "fields"],
+ "additionalProperties": False,
+ },
+ inputs={
+ "type": "object",
+ "title": "JSON Multi-Field Match Inputs",
+ "description": "Testcase data including the JSON ground truth.",
+ },
+ outputs={
+ "type": "object",
+ "title": "JSON Multi-Field Match Outputs",
+ "description": "Per-field match scores and aggregate score. Each field produces a 0 or 1 output.",
+ "properties": {
+ "aggregate_score": {
+ "type": "number",
+ "title": "Aggregate Score",
+ "description": "Percentage of matched fields (0-1).",
+ },
+ },
+ "required": ["aggregate_score"],
+ "additionalProperties": True, # Allows dynamic field outputs
+ },
+ ),
+)
+
auto_webhook_test_v0_interface = WorkflowServiceInterface(
uri="agenta:built-in:auto_webhook_test:v0",
schemas=dict( # type: ignore
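For reference, a configuration payload that satisfies the `json_multi_field_match` parameters schema above might look like this (the field paths are illustrative):

```python
# Hedged example of a valid parameters object for the schema above.
parameters = {
    "correct_answer_key": "correct_answer",
    "fields": ["name", "user.address.city", "items.0.sku"],
}
# A run then emits one 0/1 score per configured field plus the aggregate, e.g.
# {"name": 1.0, "user.address.city": 1.0, "items.0.sku": 0.0, "aggregate_score": 0.67}
# (2 of 3 fields matched).
```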
diff --git a/sdk/agenta/sdk/workflows/runners/daytona.py b/sdk/agenta/sdk/workflows/runners/daytona.py
index eaf6fea1d2..a05a57db1b 100644
--- a/sdk/agenta/sdk/workflows/runners/daytona.py
+++ b/sdk/agenta/sdk/workflows/runners/daytona.py
@@ -1,6 +1,7 @@
import os
import json
-from typing import Any, Dict, Union, Optional, TYPE_CHECKING
+from contextlib import contextmanager
+from typing import Any, Dict, Generator, Union, Optional, TYPE_CHECKING
import agenta as ag
from agenta.sdk.workflows.runners.base import CodeRunner
@@ -15,6 +16,42 @@
log = get_module_logger(__name__)
+def _extract_error_message(error_text: str) -> str:
+ """Extract a clean error message from a Python traceback.
+
+ Given a full traceback string, extracts just the final error line
+ (e.g., "NameError: name 'foo' is not defined") instead of the full
+ noisy traceback with base64-encoded code.
+
+ Args:
+ error_text: Full error/traceback string
+
+ Returns:
+ Clean error message, or original text if extraction fails
+ """
+ if not error_text:
+ return "Unknown error"
+
+ lines = error_text.strip().split("\n")
+
+ # Look for common Python error patterns from the end
+ for line in reversed(lines):
+ line = line.strip()
+ # Match patterns like "NameError: ...", "ValueError: ...", etc.
+ if ": " in line and not line.startswith("File "):
+ # Check if it looks like an error line (ErrorType: message)
+ parts = line.split(": ", 1)
+ if parts[0].replace(".", "").replace("_", "").isalnum():
+ return line
+
+ # Fallback: return last non-empty line
+ for line in reversed(lines):
+ if line.strip():
+ return line.strip()
+
+ return error_text[:200] if len(error_text) > 200 else error_text
+
+
class DaytonaRunner(CodeRunner):
"""Remote code runner using Daytona sandbox for execution."""
@@ -186,6 +223,29 @@ def _create_sandbox(self, runtime: Optional[str] = None) -> Any:
except Exception as e:
raise RuntimeError(f"Failed to create sandbox from snapshot: {e}")
+ @contextmanager
+ def _sandbox_context(
+ self, runtime: Optional[str] = None
+ ) -> Generator["Sandbox", None, None]:
+ """Context manager for sandbox lifecycle.
+
+ Ensures sandbox is deleted even if an error occurs during execution.
+
+ Args:
+ runtime: Runtime environment (python, javascript, typescript), None = python
+
+ Yields:
+ Sandbox instance
+ """
+ sandbox = self._create_sandbox(runtime=runtime)
+ try:
+ yield sandbox
+ finally:
+ try:
+ sandbox.delete()
+ except Exception as e:
+ log.error("Failed to delete sandbox: %s", e)
+
def run(
self,
code: str,
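`_sandbox_context` guarantees the sandbox is deleted even when execution raises, by pairing creation with deletion in a `try`/`finally`. A minimal, generic sketch of that guarantee (the resource dict stands in for the real sandbox object):

```python
# Generic contextmanager sketch: cleanup in `finally` runs even if the body raises.
from contextlib import contextmanager
from typing import Dict, Generator

@contextmanager
def resource_context() -> Generator[Dict[str, bool], None, None]:
    resource = {"alive": True}      # stand-in for self._create_sandbox(...)
    try:
        yield resource
    finally:
        resource["alive"] = False   # stand-in for sandbox.delete()

try:
    with resource_context() as res:
        raise RuntimeError("execution failed")
except RuntimeError:
    pass

print(res["alive"])  # False: cleanup ran despite the error
```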
@@ -218,95 +278,98 @@ def run(
runtime = runtime or "python"
self._initialize_client()
- sandbox: Sandbox = self._create_sandbox(runtime=runtime)
- try:
- # Prepare all parameters as a single dict
- params = {
- "app_params": app_params,
- "inputs": inputs,
- "output": output,
- "correct_answer": correct_answer,
- }
- params_json = json.dumps(params)
-
- if not templates:
- raise RuntimeError("Missing evaluator templates for Daytona execution")
-
- template = templates.get(runtime)
- if template is None:
- raise RuntimeError(
- f"Missing evaluator template for runtime '{runtime}'"
+ with self._sandbox_context(runtime=runtime) as sandbox:
+ try:
+ # Prepare all parameters as a single dict
+ params = {
+ "app_params": app_params,
+ "inputs": inputs,
+ "output": output,
+ "correct_answer": correct_answer,
+ }
+ params_json = json.dumps(params)
+
+ if not templates:
+ raise RuntimeError(
+ "Missing evaluator templates for Daytona execution"
+ )
+
+ template = templates.get(runtime)
+ if template is None:
+ raise RuntimeError(
+ f"Missing evaluator template for runtime '{runtime}'"
+ )
+
+ # Wrap the user code with the necessary context and evaluation
+ wrapped_code = template.format(
+ params_json=params_json,
+ user_code=code,
)
- # Wrap the user code with the necessary context and evaluation
- wrapped_code = template.format(
- params_json=params_json,
- user_code=code,
- )
-
- # Execute the code in the Daytona sandbox
- response = sandbox.process.code_run(wrapped_code)
- response_stdout = response.result if hasattr(response, "result") else ""
- response_exit_code = getattr(response, "exit_code", 0)
- response_error = getattr(response, "error", None) or getattr(
- response, "stderr", None
- )
-
- sandbox.delete()
-
- if response_exit_code and response_exit_code != 0:
- error_details = response_error or response_stdout or "Unknown error"
- log.error(
- "Sandbox execution error (exit_code=%s): %s",
- response_exit_code,
- error_details,
- )
- raise RuntimeError(
- f"Sandbox execution failed (exit_code={response_exit_code}): "
- f"{error_details}"
+ # Execute the code in the Daytona sandbox
+ response = sandbox.process.code_run(wrapped_code)
+ response_stdout = response.result if hasattr(response, "result") else ""
+ response_exit_code = getattr(response, "exit_code", 0)
+ response_error = getattr(response, "error", None) or getattr(
+ response, "stderr", None
)
- # Parse the result from stdout
- output_lines = response_stdout.strip().split("\n")
- for line in reversed(output_lines):
- if not line.strip():
- continue
- try:
- result_obj = json.loads(line)
+ if response_exit_code and response_exit_code != 0:
+ raw_error = response_error or response_stdout or "Unknown error"
+ # Log full error for debugging
+ # log.warning(
+ # "Sandbox execution error (exit_code=%s): %s",
+ # response_exit_code,
+ # raw_error,
+ # )
+ # Extract clean error message for user display
+ clean_error = _extract_error_message(raw_error)
+ raise RuntimeError(clean_error)
+
+ # Parse the result from stdout
+ output_lines = response_stdout.strip().split("\n")
+ for line in reversed(output_lines):
+ if not line.strip():
+ continue
+ try:
+ result_obj = json.loads(line)
+ if isinstance(result_obj, dict) and "result" in result_obj:
+ result = result_obj["result"]
+ if isinstance(result, (float, int, type(None))):
+ return float(result) if result is not None else None
+ except json.JSONDecodeError:
+ continue
+
+ # Fallback: attempt to extract a JSON object containing "result"
+ for line in reversed(output_lines):
+ if "result" not in line:
+ continue
+ start = line.find("{")
+ end = line.rfind("}")
+ if start == -1 or end == -1 or end <= start:
+ continue
+ try:
+ result_obj = json.loads(line[start : end + 1])
+ except json.JSONDecodeError:
+ continue
if isinstance(result_obj, dict) and "result" in result_obj:
result = result_obj["result"]
if isinstance(result, (float, int, type(None))):
return float(result) if result is not None else None
- except json.JSONDecodeError:
- continue
-
- # Fallback: attempt to extract a JSON object containing "result"
- for line in reversed(output_lines):
- if "result" not in line:
- continue
- start = line.find("{")
- end = line.rfind("}")
- if start == -1 or end == -1 or end <= start:
- continue
- try:
- result_obj = json.loads(line[start : end + 1])
- except json.JSONDecodeError:
- continue
- if isinstance(result_obj, dict) and "result" in result_obj:
- result = result_obj["result"]
- if isinstance(result, (float, int, type(None))):
- return float(result) if result is not None else None
-
- log.error(
- "Evaluation output did not include JSON result: %s", response_stdout
- )
- raise ValueError("Could not parse evaluation result from Daytona output")
- except Exception as e:
- log.error(f"Error during Daytona code execution: {e}", exc_info=True)
- # print(f"Exception details: {type(e).__name__}: {e}")
- raise RuntimeError(f"Error during Daytona code execution: {e}")
+ # log.warning(
+ # "Evaluation output did not include JSON result: %s", response_stdout
+ # )
+ raise ValueError(
+ "Could not parse evaluation result from Daytona output"
+ )
+
+ except Exception as e:
+ # log.warning(
+ # f"Error during Daytona code execution:\n {e}", exc_info=True
+ # )
+ raise RuntimeError(e)
def cleanup(self) -> None:
"""Clean up Daytona client resources."""
diff --git a/sdk/agenta/sdk/workflows/runners/local.py b/sdk/agenta/sdk/workflows/runners/local.py
index 4e0aca20a2..f9309d8bba 100644
--- a/sdk/agenta/sdk/workflows/runners/local.py
+++ b/sdk/agenta/sdk/workflows/runners/local.py
@@ -1,7 +1,6 @@
from typing import Any, Dict, Union, Optional
from agenta.sdk.workflows.runners.base import CodeRunner
-from agenta.sdk.utils.lazy import _load_restrictedpython
class LocalRunner(CodeRunner):
diff --git a/sdk/agenta/sdk/workflows/runners/registry.py b/sdk/agenta/sdk/workflows/runners/registry.py
index 57c944e653..9984e9ad68 100644
--- a/sdk/agenta/sdk/workflows/runners/registry.py
+++ b/sdk/agenta/sdk/workflows/runners/registry.py
@@ -19,7 +19,7 @@ def get_runner() -> CodeRunner:
Registry to get the appropriate code runner based on environment configuration.
Uses AGENTA_SERVICES_SANDBOX_RUNNER environment variable:
- - "local" (default): Uses RestrictedPython for local execution
+ - "local" (default): Uses current container for local execution
- "daytona": Uses Daytona remote sandbox
Returns:
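A hedged sketch of the env-var driven selection described in this docstring; the stub classes stand in for the real `LocalRunner`/`DaytonaRunner`, whose constructor arguments are not shown here:

```python
# Illustrative only: stand-in classes; the real runners are imported elsewhere.
import os

class LocalRunnerStub:
    """Stand-in for LocalRunner: executes code in the current container."""

class DaytonaRunnerStub:
    """Stand-in for DaytonaRunner: executes code in a remote Daytona sandbox."""

def select_runner():
    kind = os.getenv("AGENTA_SERVICES_SANDBOX_RUNNER", "local").strip().lower()
    return DaytonaRunnerStub() if kind == "daytona" else LocalRunnerStub()

print(type(select_runner()).__name__)  # LocalRunnerStub unless the env var says "daytona"
```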
diff --git a/sdk/agenta/sdk/workflows/templates.py b/sdk/agenta/sdk/workflows/templates.py
index 7dc030c2f9..9e131d5ce3 100644
--- a/sdk/agenta/sdk/workflows/templates.py
+++ b/sdk/agenta/sdk/workflows/templates.py
@@ -48,7 +48,7 @@
// Ensure result is a number
result = Number(result);
if (!Number.isFinite(result)) {{
- result = 0.0;
+ result = null;
}}
// Print result for capture
@@ -71,7 +71,7 @@
// Ensure result is a number
result = Number(result);
if (!Number.isFinite(result)) {{
- result = 0.0;
+ result = null;
}}
// Print result for capture
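The template change above stops coercing a non-finite evaluator result to 0.0 and reports it as null instead, so a broken score no longer masquerades as a legitimate zero. A Python analog of that normalization (the helper name is illustrative):

```python
# Python analog of the JS template change: non-numeric or non-finite -> None.
import math

def normalize_result(value):
    try:
        number = float(value)
    except (TypeError, ValueError):
        return None
    return number if math.isfinite(number) else None

print(normalize_result("0.8"))          # 0.8
print(normalize_result("not a score"))  # None (previously coerced to 0.0)
print(normalize_result(float("nan")))   # None
```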
diff --git a/sdk/agenta/sdk/workflows/utils.py b/sdk/agenta/sdk/workflows/utils.py
index d86f499da4..2ecd57d219 100644
--- a/sdk/agenta/sdk/workflows/utils.py
+++ b/sdk/agenta/sdk/workflows/utils.py
@@ -9,6 +9,7 @@
auto_exact_match_v0,
auto_regex_test_v0,
field_match_test_v0,
+ json_multi_field_match_v0,
auto_webhook_test_v0,
auto_custom_code_run_v0,
auto_ai_critique_v0,
@@ -31,6 +32,7 @@
auto_exact_match_v0_interface,
auto_regex_test_v0_interface,
field_match_test_v0_interface,
+ json_multi_field_match_v0_interface,
auto_webhook_test_v0_interface,
auto_custom_code_run_v0_interface,
auto_ai_critique_v0_interface,
@@ -54,6 +56,7 @@
auto_exact_match_v0_configuration,
auto_regex_test_v0_configuration,
field_match_test_v0_configuration,
+ json_multi_field_match_v0_configuration,
auto_webhook_test_v0_configuration,
auto_custom_code_run_v0_configuration,
auto_ai_critique_v0_configuration,
@@ -78,6 +81,7 @@
auto_exact_match=dict(v0=auto_exact_match_v0_interface),
auto_regex_test=dict(v0=auto_regex_test_v0_interface),
field_match_test=dict(v0=field_match_test_v0_interface),
+ json_multi_field_match=dict(v0=json_multi_field_match_v0_interface),
auto_webhook_test=dict(v0=auto_webhook_test_v0_interface),
auto_custom_code_run=dict(v0=auto_custom_code_run_v0_interface),
auto_ai_critique=dict(v0=auto_ai_critique_v0_interface),
@@ -104,6 +108,7 @@
auto_exact_match=dict(v0=auto_exact_match_v0_configuration),
auto_regex_test=dict(v0=auto_regex_test_v0_configuration),
field_match_test=dict(v0=field_match_test_v0_configuration),
+ json_multi_field_match=dict(v0=json_multi_field_match_v0_configuration),
auto_webhook_test=dict(v0=auto_webhook_test_v0_configuration),
auto_custom_code_run=dict(v0=auto_custom_code_run_v0_configuration),
auto_ai_critique=dict(v0=auto_ai_critique_v0_configuration),
@@ -160,6 +165,7 @@
auto_exact_match=dict(v0=auto_exact_match_v0),
auto_regex_test=dict(v0=auto_regex_test_v0),
field_match_test=dict(v0=field_match_test_v0),
+ json_multi_field_match=dict(v0=json_multi_field_match_v0),
auto_webhook_test=dict(v0=auto_webhook_test_v0),
auto_custom_code_run=dict(v0=auto_custom_code_run_v0),
auto_ai_critique=dict(v0=auto_ai_critique_v0),
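The registration hunks above all extend the same nested name -> version -> object maps (interfaces, configurations, handlers). A minimal sketch of that lookup pattern, with placeholder entries rather than the SDK's real tables:

```python
# Illustrative registry: name -> version -> handler; not the SDK's real mapping.
HANDLERS = {
    "field_match_test": {"v0": lambda **kw: {"success": True}},
    "json_multi_field_match": {"v0": lambda **kw: {"aggregate_score": 1.0}},
}

def resolve(name: str, version: str = "v0"):
    try:
        return HANDLERS[name][version]
    except KeyError as exc:
        raise LookupError(f"No handler registered for {name}:{version}") from exc

print(resolve("json_multi_field_match")(parameters={}, inputs={}, outputs="{}"))
# -> {'aggregate_score': 1.0}
```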
diff --git a/sdk/poetry.lock b/sdk/poetry.lock
index 4317e7389c..069e7bed54 100644
--- a/sdk/poetry.lock
+++ b/sdk/poetry.lock
@@ -26,132 +26,132 @@ files = [
[[package]]
name = "aiohttp"
-version = "3.13.2"
+version = "3.13.3"
description = "Async http client/server framework (asyncio)"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2372b15a5f62ed37789a6b383ff7344fc5b9f243999b0cd9b629d8bc5f5b4155"},
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7f8659a48995edee7229522984bd1009c1213929c769c2daa80b40fe49a180c"},
- {file = "aiohttp-3.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:939ced4a7add92296b0ad38892ce62b98c619288a081170695c6babe4f50e636"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6315fb6977f1d0dd41a107c527fee2ed5ab0550b7d885bc15fee20ccb17891da"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e7352512f763f760baaed2637055c49134fd1d35b37c2dedfac35bfe5cf8725"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e09a0a06348a2dd73e7213353c90d709502d9786219f69b731f6caa0efeb46f5"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a09a6d073fb5789456545bdee2474d14395792faa0527887f2f4ec1a486a59d3"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b59d13c443f8e049d9e94099c7e412e34610f1f49be0f230ec656a10692a5802"},
- {file = "aiohttp-3.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:20db2d67985d71ca033443a1ba2001c4b5693fe09b0e29f6d9358a99d4d62a8a"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:960c2fc686ba27b535f9fd2b52d87ecd7e4fd1cf877f6a5cba8afb5b4a8bd204"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6c00dbcf5f0d88796151e264a8eab23de2997c9303dd7c0bf622e23b24d3ce22"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fed38a5edb7945f4d1bcabe2fcd05db4f6ec7e0e82560088b754f7e08d93772d"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:b395bbca716c38bef3c764f187860e88c724b342c26275bc03e906142fc5964f"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:204ffff2426c25dfda401ba08da85f9c59525cdc42bda26660463dd1cbcfec6f"},
- {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:05c4dd3c48fb5f15db31f57eb35374cb0c09afdde532e7fb70a75aede0ed30f6"},
- {file = "aiohttp-3.13.2-cp310-cp310-win32.whl", hash = "sha256:e574a7d61cf10351d734bcddabbe15ede0eaa8a02070d85446875dc11189a251"},
- {file = "aiohttp-3.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:364f55663085d658b8462a1c3f17b2b84a5c2e1ba858e1b79bff7b2e24ad1514"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb"},
- {file = "aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592"},
- {file = "aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782"},
- {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8"},
- {file = "aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec"},
- {file = "aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc"},
- {file = "aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e"},
- {file = "aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169"},
- {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248"},
- {file = "aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e"},
- {file = "aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742"},
- {file = "aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e"},
- {file = "aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476"},
- {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23"},
- {file = "aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254"},
- {file = "aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61"},
- {file = "aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011"},
- {file = "aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4"},
- {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a"},
- {file = "aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940"},
- {file = "aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd"},
- {file = "aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e"},
- {file = "aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be"},
- {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c"},
- {file = "aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734"},
- {file = "aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7fbdf5ad6084f1940ce88933de34b62358d0f4a0b6ec097362dcd3e5a65a4989"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7c3a50345635a02db61792c85bb86daffac05330f6473d524f1a4e3ef9d0046d"},
- {file = "aiohttp-3.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e87dff73f46e969af38ab3f7cb75316a7c944e2e574ff7c933bc01b10def7f5"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2adebd4577724dcae085665f294cc57c8701ddd4d26140504db622b8d566d7aa"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e036a3a645fe92309ec34b918394bb377950cbb43039a97edae6c08db64b23e2"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:23ad365e30108c422d0b4428cf271156dd56790f6dd50d770b8e360e6c5ab2e6"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1f9b2c2d4b9d958b1f9ae0c984ec1dd6b6689e15c75045be8ccb4011426268ca"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a92cf4b9bea33e15ecbaa5c59921be0f23222608143d025c989924f7e3e0c07"},
- {file = "aiohttp-3.13.2-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:070599407f4954021509193404c4ac53153525a19531051661440644728ba9a7"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:29562998ec66f988d49fb83c9b01694fa927186b781463f376c5845c121e4e0b"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4dd3db9d0f4ebca1d887d76f7cdbcd1116ac0d05a9221b9dad82c64a62578c4d"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d7bc4b7f9c4921eba72677cd9fedd2308f4a4ca3e12fab58935295ad9ea98700"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dacd50501cd017f8cccb328da0c90823511d70d24a323196826d923aad865901"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8b2f1414f6a1e0683f212ec80e813f4abef94c739fd090b66c9adf9d2a05feac"},
- {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04c3971421576ed24c191f610052bcb2f059e395bc2489dd99e397f9bc466329"},
- {file = "aiohttp-3.13.2-cp39-cp39-win32.whl", hash = "sha256:9f377d0a924e5cc94dc620bc6366fc3e889586a7f18b748901cf016c916e2084"},
- {file = "aiohttp-3.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:9c705601e16c03466cb72011bd1af55d68fa65b045356d8f96c216e5f6db0fa5"},
- {file = "aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca"},
+ {file = "aiohttp-3.13.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5a372fd5afd301b3a89582817fdcdb6c34124787c70dbcc616f259013e7eef7"},
+ {file = "aiohttp-3.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:147e422fd1223005c22b4fe080f5d93ced44460f5f9c105406b753612b587821"},
+ {file = "aiohttp-3.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:859bd3f2156e81dd01432f5849fc73e2243d4a487c4fd26609b1299534ee1845"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dca68018bf48c251ba17c72ed479f4dafe9dbd5a73707ad8d28a38d11f3d42af"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fee0c6bc7db1de362252affec009707a17478a00ec69f797d23ca256e36d5940"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c048058117fd649334d81b4b526e94bde3ccaddb20463a815ced6ecbb7d11160"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:215a685b6fbbfcf71dfe96e3eba7a6f58f10da1dfdf4889c7dd856abe430dca7"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2c184bb1fe2cbd2cefba613e9db29a5ab559323f994b6737e370d3da0ac455"},
+ {file = "aiohttp-3.13.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:75ca857eba4e20ce9f546cd59c7007b33906a4cd48f2ff6ccf1ccfc3b646f279"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81e97251d9298386c2b7dbeb490d3d1badbdc69107fb8c9299dd04eb39bddc0e"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c0e2d366af265797506f0283487223146af57815b388623f0357ef7eac9b209d"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4e239d501f73d6db1522599e14b9b321a7e3b1de66ce33d53a765d975e9f4808"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0db318f7a6f065d84cb1e02662c526294450b314a02bd9e2a8e67f0d8564ce40"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:bfc1cc2fe31a6026a8a88e4ecfb98d7f6b1fec150cfd708adbfd1d2f42257c29"},
+ {file = "aiohttp-3.13.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af71fff7bac6bb7508956696dce8f6eec2bbb045eceb40343944b1ae62b5ef11"},
+ {file = "aiohttp-3.13.3-cp310-cp310-win32.whl", hash = "sha256:37da61e244d1749798c151421602884db5270faf479cf0ef03af0ff68954c9dd"},
+ {file = "aiohttp-3.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:7e63f210bc1b57ef699035f2b4b6d9ce096b5914414a49b0997c839b2bd2223c"},
+ {file = "aiohttp-3.13.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b6073099fb654e0a068ae678b10feff95c5cae95bbfcbfa7af669d361a8aa6b"},
+ {file = "aiohttp-3.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cb93e166e6c28716c8c6aeb5f99dfb6d5ccf482d29fe9bf9a794110e6d0ab64"},
+ {file = "aiohttp-3.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e027cf2f6b641693a09f631759b4d9ce9165099d2b5d92af9bd4e197690eea"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b61b7169ababd7802f9568ed96142616a9118dd2be0d1866e920e77ec8fa92a"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:80dd4c21b0f6237676449c6baaa1039abae86b91636b6c91a7f8e61c87f89540"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65d2ccb7eabee90ce0503c17716fc77226be026dcc3e65cce859a30db715025b"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5b179331a481cb5529fca8b432d8d3c7001cb217513c94cd72d668d1248688a3"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d4c940f02f49483b18b079d1c27ab948721852b281f8b015c058100e9421dd1"},
+ {file = "aiohttp-3.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f9444f105664c4ce47a2a7171a2418bce5b7bae45fb610f4e2c36045d85911d3"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:694976222c711d1d00ba131904beb60534f93966562f64440d0c9d41b8cdb440"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f33ed1a2bf1997a36661874b017f5c4b760f41266341af36febaf271d179f6d7"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e636b3c5f61da31a92bf0d91da83e58fdfa96f178ba682f11d24f31944cdd28c"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5d2d94f1f5fcbe40838ac51a6ab5704a6f9ea42e72ceda48de5e6b898521da51"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2be0e9ccf23e8a94f6f0650ce06042cefc6ac703d0d7ab6c7a917289f2539ad4"},
+ {file = "aiohttp-3.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9af5e68ee47d6534d36791bbe9b646d2a7c7deb6fc24d7943628edfbb3581f29"},
+ {file = "aiohttp-3.13.3-cp311-cp311-win32.whl", hash = "sha256:a2212ad43c0833a873d0fb3c63fa1bacedd4cf6af2fee62bf4b739ceec3ab239"},
+ {file = "aiohttp-3.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:642f752c3eb117b105acbd87e2c143de710987e09860d674e068c4c2c441034f"},
+ {file = "aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c"},
+ {file = "aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168"},
+ {file = "aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc"},
+ {file = "aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce"},
+ {file = "aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a"},
+ {file = "aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046"},
+ {file = "aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57"},
+ {file = "aiohttp-3.13.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5dff64413671b0d3e7d5918ea490bdccb97a4ad29b3f311ed423200b2203e01c"},
+ {file = "aiohttp-3.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:87b9aab6d6ed88235aa2970294f496ff1a1f9adcd724d800e9b952395a80ffd9"},
+ {file = "aiohttp-3.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:425c126c0dc43861e22cb1c14ba4c8e45d09516d0a3ae0a3f7494b79f5f233a3"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f9120f7093c2a32d9647abcaf21e6ad275b4fbec5b55969f978b1a97c7c86bf"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:697753042d57f4bf7122cab985bf15d0cef23c770864580f5af4f52023a56bd6"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6de499a1a44e7de70735d0b39f67c8f25eb3d91eb3103be99ca0fa882cdd987d"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:37239e9f9a7ea9ac5bf6b92b0260b01f8a22281996da609206a84df860bc1261"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f76c1e3fe7d7c8afad7ed193f89a292e1999608170dcc9751a7462a87dfd5bc0"},
+ {file = "aiohttp-3.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fc290605db2a917f6e81b0e1e0796469871f5af381ce15c604a3c5c7e51cb730"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4021b51936308aeea0367b8f006dc999ca02bc118a0cc78c303f50a2ff6afb91"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:49a03727c1bba9a97d3e93c9f93ca03a57300f484b6e935463099841261195d3"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3d9908a48eb7416dc1f4524e69f1d32e5d90e3981e4e37eb0aa1cd18f9cfa2a4"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2712039939ec963c237286113c68dbad80a82a4281543f3abf766d9d73228998"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7bfdc049127717581866fa4708791220970ce291c23e28ccf3922c700740fdc0"},
+ {file = "aiohttp-3.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8057c98e0c8472d8846b9c79f56766bcc57e3e8ac7bfd510482332366c56c591"},
+ {file = "aiohttp-3.13.3-cp313-cp313-win32.whl", hash = "sha256:1449ceddcdbcf2e0446957863af03ebaaa03f94c090f945411b61269e2cb5daf"},
+ {file = "aiohttp-3.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:693781c45a4033d31d4187d2436f5ac701e7bbfe5df40d917736108c1cc7436e"},
+ {file = "aiohttp-3.13.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:ea37047c6b367fd4bd632bff8077449b8fa034b69e812a18e0132a00fae6e808"},
+ {file = "aiohttp-3.13.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6fc0e2337d1a4c3e6acafda6a78a39d4c14caea625124817420abceed36e2415"},
+ {file = "aiohttp-3.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c685f2d80bb67ca8c3837823ad76196b3694b0159d232206d1e461d3d434666f"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e377758516d262bde50c2584fc6c578af272559c409eecbdd2bae1601184d6"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:34749271508078b261c4abb1767d42b8d0c0cc9449c73a4df494777dc55f0687"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:82611aeec80eb144416956ec85b6ca45a64d76429c1ed46ae1b5f86c6e0c9a26"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fff83cfc93f18f215896e3a190e8e5cb413ce01553901aca925176e7568963a"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bbe7d4cecacb439e2e2a8a1a7b935c25b812af7a5fd26503a66dadf428e79ec1"},
+ {file = "aiohttp-3.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b928f30fe49574253644b1ca44b1b8adbd903aa0da4b9054a6c20fc7f4092a25"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7b5e8fe4de30df199155baaf64f2fcd604f4c678ed20910db8e2c66dc4b11603"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:8542f41a62bcc58fc7f11cf7c90e0ec324ce44950003feb70640fc2a9092c32a"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5e1d8c8b8f1d91cd08d8f4a3c2b067bfca6ec043d3ff36de0f3a715feeedf926"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:90455115e5da1c3c51ab619ac57f877da8fd6d73c05aacd125c5ae9819582aba"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:042e9e0bcb5fba81886c8b4fbb9a09d6b8a00245fd8d88e4d989c1f96c74164c"},
+ {file = "aiohttp-3.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2eb752b102b12a76ca02dff751a801f028b4ffbbc478840b473597fc91a9ed43"},
+ {file = "aiohttp-3.13.3-cp314-cp314-win32.whl", hash = "sha256:b556c85915d8efaed322bf1bdae9486aa0f3f764195a0fb6ee962e5c71ef5ce1"},
+ {file = "aiohttp-3.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9bf9f7a65e7aa20dd764151fb3d616c81088f91f8df39c3893a536e279b4b984"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:05861afbbec40650d8a07ea324367cb93e9e8cc7762e04dd4405df99fa65159c"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2fc82186fadc4a8316768d61f3722c230e2c1dcab4200d52d2ebdf2482e47592"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0add0900ff220d1d5c5ebbf99ed88b0c1bbf87aa7e4262300ed1376a6b13414f"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:568f416a4072fbfae453dcf9a99194bbb8bdeab718e08ee13dfa2ba0e4bebf29"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:add1da70de90a2569c5e15249ff76a631ccacfe198375eead4aadf3b8dc849dc"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b47b7ba335d2e9b1239fa571131a87e2d8ec96b333e68b2a305e7a98b0bae2"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3dd4dce1c718e38081c8f35f323209d4c1df7d4db4bab1b5c88a6b4d12b74587"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34bac00a67a812570d4a460447e1e9e06fae622946955f939051e7cc895cfab8"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a19884d2ee70b06d9204b2727a7b9f983d0c684c650254679e716b0b77920632"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ca7f2bb6ba8348a3614c7918cc4bb73268c5ac2a207576b7afea19d3d9f64"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:b0d95340658b9d2f11d9697f59b3814a9d3bb4b7a7c20b131df4bcef464037c0"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1e53262fd202e4b40b70c3aff944a8155059beedc8a89bba9dc1f9ef06a1b56"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:d60ac9663f44168038586cab2157e122e46bdef09e9368b37f2d82d354c23f72"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:90751b8eed69435bac9ff4e3d2f6b3af1f57e37ecb0fbeee59c0174c9e2d41df"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767"},
+ {file = "aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344"},
+ {file = "aiohttp-3.13.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31a83ea4aead760dfcb6962efb1d861db48c34379f2ff72db9ddddd4cda9ea2e"},
+ {file = "aiohttp-3.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:988a8c5e317544fdf0d39871559e67b6341065b87fceac641108c2096d5506b7"},
+ {file = "aiohttp-3.13.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b174f267b5cfb9a7dba9ee6859cecd234e9a681841eb85068059bc867fb8f02"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:947c26539750deeaee933b000fb6517cc770bbd064bad6033f1cff4803881e43"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9ebf57d09e131f5323464bd347135a88622d1c0976e88ce15b670e7ad57e4bd6"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4ae5b5a0e1926e504c81c5b84353e7a5516d8778fbbff00429fe7b05bb25cbce"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2ba0eea45eb5cc3172dbfc497c066f19c41bac70963ea1a67d51fc92e4cf9a80"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bae5c2ed2eae26cc382020edad80d01f36cb8e746da40b292e68fec40421dc6a"},
+ {file = "aiohttp-3.13.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a60e60746623925eab7d25823329941aee7242d559baa119ca2b253c88a7bd6"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e50a2e1404f063427c9d027378472316201a2290959a295169bcf25992d04558"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:9a9dc347e5a3dc7dfdbc1f82da0ef29e388ddb2ed281bfce9dd8248a313e62b7"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b46020d11d23fe16551466c77823df9cc2f2c1e63cc965daf67fa5eec6ca1877"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:69c56fbc1993fa17043e24a546959c0178fe2b5782405ad4559e6c13975c15e3"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b99281b0704c103d4e11e72a76f1b543d4946fea7dd10767e7e1b5f00d4e5704"},
+ {file = "aiohttp-3.13.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:40c5e40ecc29ba010656c18052b877a1c28f84344825efa106705e835c28530f"},
+ {file = "aiohttp-3.13.3-cp39-cp39-win32.whl", hash = "sha256:56339a36b9f1fc708260c76c87e593e2afb30d26de9ae1eb445b5e051b98a7a1"},
+ {file = "aiohttp-3.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:c6b8568a3bb5819a0ad087f16d40e5a3fb6099f39ea1d5625a3edc1e923fc538"},
+ {file = "aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88"},
]
[package.dependencies]
@@ -164,7 +164,7 @@ propcache = ">=0.2.0"
yarl = ">=1.17.0,<2.0"
[package.extras]
-speedups = ["Brotli", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi"]
+speedups = ["Brotli (>=1.2)", "aiodns (>=3.3.0)", "backports.zstd", "brotlicffi (>=1.2)"]
[[package]]
name = "aiohttp-retry"
@@ -223,14 +223,14 @@ files = [
[[package]]
name = "anyio"
-version = "4.12.0"
+version = "4.12.1"
description = "High-level concurrency and networking framework on top of asyncio or Trio"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb"},
- {file = "anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0"},
+ {file = "anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c"},
+ {file = "anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703"},
]
[package.dependencies]
@@ -266,18 +266,18 @@ files = [
[[package]]
name = "boto3"
-version = "1.42.16"
+version = "1.42.23"
description = "The AWS SDK for Python"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "boto3-1.42.16-py3-none-any.whl", hash = "sha256:37a43d42aebd06a8f93ee801ea1b7b5181ac42a30869ef403c9dadc160a748e5"},
- {file = "boto3-1.42.16.tar.gz", hash = "sha256:811391611db88c8a061f6e6fabbd7ca784ad9de04490a879f091cbaa9de7de74"},
+ {file = "boto3-1.42.23-py3-none-any.whl", hash = "sha256:2ed797bdb394b08550f6269babf0a31bbeb853684bb2cb67116620df0ed632dc"},
+ {file = "boto3-1.42.23.tar.gz", hash = "sha256:f681a8d43b46b3d8acf0be4f3894eb85e40e75945431d0dfe0542edda7025512"},
]
[package.dependencies]
-botocore = ">=1.42.16,<1.43.0"
+botocore = ">=1.42.23,<1.43.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.16.0,<0.17.0"
@@ -286,14 +286,14 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
-version = "1.42.16"
+version = "1.42.23"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "botocore-1.42.16-py3-none-any.whl", hash = "sha256:b1f584a0f8645c12e07bf6ec9c18e05221a789f2a9b2d3c6291deb42f8c1c542"},
- {file = "botocore-1.42.16.tar.gz", hash = "sha256:29ee8555cd5d5023350405387cedcf3fe1c7f02fcb8060bf9e01602487482c25"},
+ {file = "botocore-1.42.23-py3-none-any.whl", hash = "sha256:d5042e0252b81f25ca1152fff9ed25463bab2438fbc4530ba53d5390d00ca1b1"},
+ {file = "botocore-1.42.23.tar.gz", hash = "sha256:453ce449bd1021acd67e75c814aae1b132b1ab3ee0ecff248de863bf19e58be8"},
]
[package.dependencies]
@@ -306,14 +306,14 @@ crt = ["awscrt (==0.29.2)"]
[[package]]
name = "certifi"
-version = "2025.11.12"
+version = "2026.1.4"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.7"
groups = ["main", "dev"]
files = [
- {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"},
- {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"},
+ {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"},
+ {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"},
]
[[package]]
@@ -638,14 +638,14 @@ testing = ["hatch", "pre-commit", "pytest", "tox"]
[[package]]
name = "fastapi"
-version = "0.127.0"
+version = "0.128.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "fastapi-0.127.0-py3-none-any.whl", hash = "sha256:725aa2bb904e2eff8031557cf4b9b77459bfedd63cae8427634744fd199f6a49"},
- {file = "fastapi-0.127.0.tar.gz", hash = "sha256:5a9246e03dcd1fdb19f1396db30894867c1d630f5107dc167dcbc5ed1ea7d259"},
+ {file = "fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d"},
+ {file = "fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a"},
]
[package.dependencies]
@@ -749,14 +749,14 @@ files = [
[[package]]
name = "filelock"
-version = "3.20.1"
+version = "3.20.2"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a"},
- {file = "filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c"},
+ {file = "filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8"},
+ {file = "filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64"},
]
[[package]]
@@ -1201,14 +1201,14 @@ zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "huggingface-hub"
-version = "1.2.3"
+version = "1.2.4"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.9.0"
groups = ["main"]
files = [
- {file = "huggingface_hub-1.2.3-py3-none-any.whl", hash = "sha256:c9b7a91a9eedaa2149cdc12bdd8f5a11780e10de1f1024718becf9e41e5a4642"},
- {file = "huggingface_hub-1.2.3.tar.gz", hash = "sha256:4ba57f17004fd27bb176a6b7107df579865d4cde015112db59184c51f5602ba7"},
+ {file = "huggingface_hub-1.2.4-py3-none-any.whl", hash = "sha256:2db69b91877d9d34825f5cd2a63b94f259011a77dcf761b437bf510fbe9522e9"},
+ {file = "huggingface_hub-1.2.4.tar.gz", hash = "sha256:7a1d9ec4802e64372d1d152d69fb8e26d943f15a2289096fbc8e09e7b90c21a5"},
]
[package.dependencies]
@@ -1221,13 +1221,13 @@ pyyaml = ">=5.1"
shellingham = "*"
tqdm = ">=4.42.1"
typer-slim = "*"
-typing-extensions = ">=3.7.4.3"
+typing-extensions = ">=4.1.0"
[package.extras]
all = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
dev = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
-hf-xet = ["hf-xet (>=1.1.3,<2.0.0)"]
+hf-xet = ["hf-xet (>=1.2.0,<2.0.0)"]
mcp = ["mcp (>=1.8.0)"]
oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"]
quality = ["libcst (>=1.4.0)", "mypy (==1.15.0)", "ruff (>=0.9.0)", "ty"]
@@ -1467,89 +1467,89 @@ referencing = ">=0.31.0"
[[package]]
name = "librt"
-version = "0.7.5"
+version = "0.7.7"
description = "Mypyc runtime library"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
markers = "platform_python_implementation != \"PyPy\""
files = [
- {file = "librt-0.7.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:81056e01bba1394f1d92904ec61a4078f66df785316275edbaf51d90da8c6e26"},
- {file = "librt-0.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d7c72c8756eeb3aefb1b9e3dac7c37a4a25db63640cac0ab6fc18e91a0edf05a"},
- {file = "librt-0.7.5-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ddc4a16207f88f9597b397fc1f60781266d13b13de922ff61c206547a29e4bbd"},
- {file = "librt-0.7.5-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:63055d3dda433ebb314c9f1819942f16a19203c454508fdb2d167613f7017169"},
- {file = "librt-0.7.5-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f85f9b5db87b0f52e53c68ad2a0c5a53e00afa439bd54a1723742a2b1021276"},
- {file = "librt-0.7.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c566a4672564c5d54d8ab65cdaae5a87ee14c1564c1a2ddc7a9f5811c750f023"},
- {file = "librt-0.7.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fee15c2a190ef389f14928135c6fb2d25cd3fdb7887bfd9a7b444bbdc8c06b96"},
- {file = "librt-0.7.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:584cb3e605ec45ba350962cec853e17be0a25a772f21f09f1e422f7044ae2a7d"},
- {file = "librt-0.7.5-cp310-cp310-win32.whl", hash = "sha256:9c08527055fbb03c641c15bbc5b79dd2942fb6a3bd8dabf141dd7e97eeea4904"},
- {file = "librt-0.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:dd810f2d39c526c42ea205e0addad5dc08ef853c625387806a29d07f9d150d9b"},
- {file = "librt-0.7.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f952e1a78c480edee8fb43aa2bf2e84dcd46c917d44f8065b883079d3893e8fc"},
- {file = "librt-0.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75965c1f4efb7234ff52a58b729d245a21e87e4b6a26a0ec08052f02b16274e4"},
- {file = "librt-0.7.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:732e0aa0385b59a1b2545159e781c792cc58ce9c134249233a7c7250a44684c4"},
- {file = "librt-0.7.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cdde31759bd8888f3ef0eebda80394a48961328a17c264dce8cc35f4b9cde35d"},
- {file = "librt-0.7.5-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:df3146d52465b3b6397d25d513f428cb421c18df65b7378667bb5f1e3cc45805"},
- {file = "librt-0.7.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29c8d2fae11d4379ea207ba7fc69d43237e42cf8a9f90ec6e05993687e6d648b"},
- {file = "librt-0.7.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bb41f04046b4f22b1e7ba5ef513402cd2e3477ec610e5f92d38fe2bba383d419"},
- {file = "librt-0.7.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8bb7883c1e94ceb87c2bf81385266f032da09cd040e804cc002f2c9d6b842e2f"},
- {file = "librt-0.7.5-cp311-cp311-win32.whl", hash = "sha256:84d4a6b9efd6124f728558a18e79e7cc5c5d4efc09b2b846c910de7e564f5bad"},
- {file = "librt-0.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:ab4b0d3bee6f6ff7017e18e576ac7e41a06697d8dea4b8f3ab9e0c8e1300c409"},
- {file = "librt-0.7.5-cp311-cp311-win_arm64.whl", hash = "sha256:730be847daad773a3c898943cf67fb9845a3961d06fb79672ceb0a8cd8624cfa"},
- {file = "librt-0.7.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ba1077c562a046208a2dc6366227b3eeae8f2c2ab4b41eaf4fd2fa28cece4203"},
- {file = "librt-0.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:654fdc971c76348a73af5240d8e2529265b9a7ba6321e38dd5bae7b0d4ab3abe"},
- {file = "librt-0.7.5-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:6b7b58913d475911f6f33e8082f19dd9b120c4f4a5c911d07e395d67b81c6982"},
- {file = "librt-0.7.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8e0fd344bad57026a8f4ccfaf406486c2fc991838050c2fef156170edc3b775"},
- {file = "librt-0.7.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:46aa91813c267c3f60db75d56419b42c0c0b9748ec2c568a0e3588e543fb4233"},
- {file = "librt-0.7.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ddc0ab9dbc5f9ceaf2bf7a367bf01f2697660e908f6534800e88f43590b271db"},
- {file = "librt-0.7.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7a488908a470451338607650f1c064175094aedebf4a4fa37890682e30ce0b57"},
- {file = "librt-0.7.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e47fc52602ffc374e69bf1b76536dc99f7f6dd876bd786c8213eaa3598be030a"},
- {file = "librt-0.7.5-cp312-cp312-win32.whl", hash = "sha256:cda8b025875946ffff5a9a7590bf9acde3eb02cb6200f06a2d3e691ef3d9955b"},
- {file = "librt-0.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:b591c094afd0ffda820e931148c9e48dc31a556dc5b2b9b3cc552fa710d858e4"},
- {file = "librt-0.7.5-cp312-cp312-win_arm64.whl", hash = "sha256:532ddc6a8a6ca341b1cd7f4d999043e4c71a212b26fe9fd2e7f1e8bb4e873544"},
- {file = "librt-0.7.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b1795c4b2789b458fa290059062c2f5a297ddb28c31e704d27e161386469691a"},
- {file = "librt-0.7.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2fcbf2e135c11f721193aa5f42ba112bb1046afafbffd407cbc81d8d735c74d0"},
- {file = "librt-0.7.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c039bbf79a9a2498404d1ae7e29a6c175e63678d7a54013a97397c40aee026c5"},
- {file = "librt-0.7.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3919c9407faeeee35430ae135e3a78acd4ecaaaa73767529e2c15ca1d73ba325"},
- {file = "librt-0.7.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:26b46620e1e0e45af510d9848ea0915e7040605dd2ae94ebefb6c962cbb6f7ec"},
- {file = "librt-0.7.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9bbb8facc5375476d392990dd6a71f97e4cb42e2ac66f32e860f6e47299d5e89"},
- {file = "librt-0.7.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e9e9c988b5ffde7be02180f864cbd17c0b0c1231c235748912ab2afa05789c25"},
- {file = "librt-0.7.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:edf6b465306215b19dbe6c3fb63cf374a8f3e1ad77f3b4c16544b83033bbb67b"},
- {file = "librt-0.7.5-cp313-cp313-win32.whl", hash = "sha256:060bde69c3604f694bd8ae21a780fe8be46bb3dbb863642e8dfc75c931ca8eee"},
- {file = "librt-0.7.5-cp313-cp313-win_amd64.whl", hash = "sha256:a82d5a0ee43aeae2116d7292c77cc8038f4841830ade8aa922e098933b468b9e"},
- {file = "librt-0.7.5-cp313-cp313-win_arm64.whl", hash = "sha256:3c98a8d0ac9e2a7cb8ff8c53e5d6e8d82bfb2839abf144fdeaaa832f2a12aa45"},
- {file = "librt-0.7.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:9937574e6d842f359b8585903d04f5b4ab62277a091a93e02058158074dc52f2"},
- {file = "librt-0.7.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5cd3afd71e9bc146203b6c8141921e738364158d4aa7cdb9a874e2505163770f"},
- {file = "librt-0.7.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9cffa3ef0af29687455161cb446eff059bf27607f95163d6a37e27bcb37180f6"},
- {file = "librt-0.7.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:82f3f088482e2229387eadf8215c03f7726d56f69cce8c0c40f0795aebc9b361"},
- {file = "librt-0.7.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7aa33153a5bb0bac783d2c57885889b1162823384e8313d47800a0e10d0070e"},
- {file = "librt-0.7.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:265729b551a2dd329cc47b323a182fb7961af42abf21e913c9dd7d3331b2f3c2"},
- {file = "librt-0.7.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:168e04663e126416ba712114050f413ac306759a1791d87b7c11d4428ba75760"},
- {file = "librt-0.7.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:553dc58987d1d853adda8aeadf4db8e29749f0b11877afcc429a9ad892818ae2"},
- {file = "librt-0.7.5-cp314-cp314-win32.whl", hash = "sha256:263f4fae9eba277513357c871275b18d14de93fd49bf5e43dc60a97b81ad5eb8"},
- {file = "librt-0.7.5-cp314-cp314-win_amd64.whl", hash = "sha256:85f485b7471571e99fab4f44eeb327dc0e1f814ada575f3fa85e698417d8a54e"},
- {file = "librt-0.7.5-cp314-cp314-win_arm64.whl", hash = "sha256:49c596cd18e90e58b7caa4d7ca7606049c1802125fcff96b8af73fa5c3870e4d"},
- {file = "librt-0.7.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:54d2aef0b0f5056f130981ad45081b278602ff3657fe16c88529f5058038e802"},
- {file = "librt-0.7.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0b4791202296ad51ac09a3ff58eb49d9da8e3a4009167a6d76ac418a974e5fd4"},
- {file = "librt-0.7.5-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:6e860909fea75baef941ee6436e0453612505883b9d0d87924d4fda27865b9a2"},
- {file = "librt-0.7.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f02c4337bf271c4f06637f5ff254fad2238c0b8e32a3a480ebb2fc5e26f754a5"},
- {file = "librt-0.7.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7f51ffe59f4556243d3cc82d827bde74765f594fa3ceb80ec4de0c13ccd3416"},
- {file = "librt-0.7.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0b7f080ba30601dfa3e3deed3160352273e1b9bc92e652f51103c3e9298f7899"},
- {file = "librt-0.7.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:fb565b4219abc8ea2402e61c7ba648a62903831059ed3564fa1245cc245d58d7"},
- {file = "librt-0.7.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a3cfb15961e7333ea6ef033dc574af75153b5c230d5ad25fbcd55198f21e0cf"},
- {file = "librt-0.7.5-cp314-cp314t-win32.whl", hash = "sha256:118716de5ad6726332db1801bc90fa6d94194cd2e07c1a7822cebf12c496714d"},
- {file = "librt-0.7.5-cp314-cp314t-win_amd64.whl", hash = "sha256:3dd58f7ce20360c6ce0c04f7bd9081c7f9c19fc6129a3c705d0c5a35439f201d"},
- {file = "librt-0.7.5-cp314-cp314t-win_arm64.whl", hash = "sha256:08153ea537609d11f774d2bfe84af39d50d5c9ca3a4d061d946e0c9d8bce04a1"},
- {file = "librt-0.7.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:df2e210400b28e50994477ebf82f055698c79797b6ee47a1669d383ca33263e1"},
- {file = "librt-0.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d2cc7d187e8c6e9b7bdbefa9697ce897a704ea7a7ce844f2b4e0e2aa07ae51d3"},
- {file = "librt-0.7.5-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:39183abee670bc37b85f11e86c44a9cad1ed6efa48b580083e89ecee13dd9717"},
- {file = "librt-0.7.5-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:191cbd42660446d67cf7a95ac7bfa60f49b8b3b0417c64f216284a1d86fc9335"},
- {file = "librt-0.7.5-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ea1b60b86595a5dc1f57b44a801a1c4d8209c0a69518391d349973a4491408e6"},
- {file = "librt-0.7.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:af69d9e159575e877c7546d1ee817b4ae089aa221dd1117e20c24ad8dc8659c7"},
- {file = "librt-0.7.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0e2bf8f91093fac43e3eaebacf777f12fd539dce9ec5af3efc6d8424e96ccd49"},
- {file = "librt-0.7.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8dcae24de1bc9da93aa689cb6313c70e776d7cea2fcf26b9b6160fedfe6bd9af"},
- {file = "librt-0.7.5-cp39-cp39-win32.whl", hash = "sha256:cdb001a1a0e4f41e613bca2c0fc147fc8a7396f53fc94201cbfd8ec7cd69ca4b"},
- {file = "librt-0.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:a9eacbf983319b26b5f340a2e0cd47ac1ee4725a7f3a72fd0f15063c934b69d6"},
- {file = "librt-0.7.5.tar.gz", hash = "sha256:de4221a1181fa9c8c4b5f35506ed6f298948f44003d84d2a8b9885d7e01e6cfa"},
+ {file = "librt-0.7.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4836c5645f40fbdc275e5670819bde5ab5f2e882290d304e3c6ddab1576a6d0"},
+ {file = "librt-0.7.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae8aec43117a645a31e5f60e9e3a0797492e747823b9bda6972d521b436b4e8"},
+ {file = "librt-0.7.7-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:aea05f701ccd2a76b34f0daf47ca5068176ff553510b614770c90d76ac88df06"},
+ {file = "librt-0.7.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b16ccaeff0ed4355dfb76fe1ea7a5d6d03b5ad27f295f77ee0557bc20a72495"},
+ {file = "librt-0.7.7-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c48c7e150c095d5e3cea7452347ba26094be905d6099d24f9319a8b475fcd3e0"},
+ {file = "librt-0.7.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4dcee2f921a8632636d1c37f1bbdb8841d15666d119aa61e5399c5268e7ce02e"},
+ {file = "librt-0.7.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:14ef0f4ac3728ffd85bfc58e2f2f48fb4ef4fa871876f13a73a7381d10a9f77c"},
+ {file = "librt-0.7.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e4ab69fa37f8090f2d971a5d2bc606c7401170dbdae083c393d6cbf439cb45b8"},
+ {file = "librt-0.7.7-cp310-cp310-win32.whl", hash = "sha256:4bf3cc46d553693382d2abf5f5bd493d71bb0f50a7c0beab18aa13a5545c8900"},
+ {file = "librt-0.7.7-cp310-cp310-win_amd64.whl", hash = "sha256:f0c8fe5aeadd8a0e5b0598f8a6ee3533135ca50fd3f20f130f9d72baf5c6ac58"},
+ {file = "librt-0.7.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a487b71fbf8a9edb72a8c7a456dda0184642d99cd007bc819c0b7ab93676a8ee"},
+ {file = "librt-0.7.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f4d4efb218264ecf0f8516196c9e2d1a0679d9fb3bb15df1155a35220062eba8"},
+ {file = "librt-0.7.7-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b8bb331aad734b059c4b450cd0a225652f16889e286b2345af5e2c3c625c3d85"},
+ {file = "librt-0.7.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:467dbd7443bda08338fc8ad701ed38cef48194017554f4c798b0a237904b3f99"},
+ {file = "librt-0.7.7-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50d1d1ee813d2d1a3baf2873634ba506b263032418d16287c92ec1cc9c1a00cb"},
+ {file = "librt-0.7.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c7e5070cf3ec92d98f57574da0224f8c73faf1ddd6d8afa0b8c9f6e86997bc74"},
+ {file = "librt-0.7.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bdb9f3d865b2dafe7f9ad7f30ef563c80d0ddd2fdc8cc9b8e4f242f475e34d75"},
+ {file = "librt-0.7.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8185c8497d45164e256376f9da5aed2bb26ff636c798c9dabe313b90e9f25b28"},
+ {file = "librt-0.7.7-cp311-cp311-win32.whl", hash = "sha256:44d63ce643f34a903f09ff7ca355aae019a3730c7afd6a3c037d569beeb5d151"},
+ {file = "librt-0.7.7-cp311-cp311-win_amd64.whl", hash = "sha256:7d13cc340b3b82134f8038a2bfe7137093693dcad8ba5773da18f95ad6b77a8a"},
+ {file = "librt-0.7.7-cp311-cp311-win_arm64.whl", hash = "sha256:983de36b5a83fe9222f4f7dcd071f9b1ac6f3f17c0af0238dadfb8229588f890"},
+ {file = "librt-0.7.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a85a1fc4ed11ea0eb0a632459ce004a2d14afc085a50ae3463cd3dfe1ce43fc"},
+ {file = "librt-0.7.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c87654e29a35938baead1c4559858f346f4a2a7588574a14d784f300ffba0efd"},
+ {file = "librt-0.7.7-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c9faaebb1c6212c20afd8043cd6ed9de0a47d77f91a6b5b48f4e46ed470703fe"},
+ {file = "librt-0.7.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1908c3e5a5ef86b23391448b47759298f87f997c3bd153a770828f58c2bb4630"},
+ {file = "librt-0.7.7-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbc4900e95a98fc0729523be9d93a8fedebb026f32ed9ffc08acd82e3e181503"},
+ {file = "librt-0.7.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7ea4e1fbd253e5c68ea0fe63d08577f9d288a73f17d82f652ebc61fa48d878d"},
+ {file = "librt-0.7.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ef7699b7a5a244b1119f85c5bbc13f152cd38240cbb2baa19b769433bae98e50"},
+ {file = "librt-0.7.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:955c62571de0b181d9e9e0a0303c8bc90d47670a5eff54cf71bf5da61d1899cf"},
+ {file = "librt-0.7.7-cp312-cp312-win32.whl", hash = "sha256:1bcd79be209313b270b0e1a51c67ae1af28adad0e0c7e84c3ad4b5cb57aaa75b"},
+ {file = "librt-0.7.7-cp312-cp312-win_amd64.whl", hash = "sha256:4353ee891a1834567e0302d4bd5e60f531912179578c36f3d0430f8c5e16b456"},
+ {file = "librt-0.7.7-cp312-cp312-win_arm64.whl", hash = "sha256:a76f1d679beccccdf8c1958e732a1dfcd6e749f8821ee59d7bec009ac308c029"},
+ {file = "librt-0.7.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f4a0b0a3c86ba9193a8e23bb18f100d647bf192390ae195d84dfa0a10fb6244"},
+ {file = "librt-0.7.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5335890fea9f9e6c4fdf8683061b9ccdcbe47c6dc03ab8e9b68c10acf78be78d"},
+ {file = "librt-0.7.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b4346b1225be26def3ccc6c965751c74868f0578cbcba293c8ae9168483d811"},
+ {file = "librt-0.7.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a10b8eebdaca6e9fdbaf88b5aefc0e324b763a5f40b1266532590d5afb268a4c"},
+ {file = "librt-0.7.7-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:067be973d90d9e319e6eb4ee2a9b9307f0ecd648b8a9002fa237289a4a07a9e7"},
+ {file = "librt-0.7.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:23d2299ed007812cccc1ecef018db7d922733382561230de1f3954db28433977"},
+ {file = "librt-0.7.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6b6f8ea465524aa4c7420c7cc4ca7d46fe00981de8debc67b1cc2e9957bb5b9d"},
+ {file = "librt-0.7.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8df32a99cc46eb0ee90afd9ada113ae2cafe7e8d673686cf03ec53e49635439"},
+ {file = "librt-0.7.7-cp313-cp313-win32.whl", hash = "sha256:86f86b3b785487c7760247bcdac0b11aa8bf13245a13ed05206286135877564b"},
+ {file = "librt-0.7.7-cp313-cp313-win_amd64.whl", hash = "sha256:4862cb2c702b1f905c0503b72d9d4daf65a7fdf5a9e84560e563471e57a56949"},
+ {file = "librt-0.7.7-cp313-cp313-win_arm64.whl", hash = "sha256:0996c83b1cb43c00e8c87835a284f9057bc647abd42b5871e5f941d30010c832"},
+ {file = "librt-0.7.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:23daa1ab0512bafdd677eb1bfc9611d8ffbe2e328895671e64cb34166bc1b8c8"},
+ {file = "librt-0.7.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:558a9e5a6f3cc1e20b3168fb1dc802d0d8fa40731f6e9932dcc52bbcfbd37111"},
+ {file = "librt-0.7.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2567cb48dc03e5b246927ab35cbb343376e24501260a9b5e30b8e255dca0d1d2"},
+ {file = "librt-0.7.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6066c638cdf85ff92fc6f932d2d73c93a0e03492cdfa8778e6d58c489a3d7259"},
+ {file = "librt-0.7.7-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a609849aca463074c17de9cda173c276eb8fee9e441053529e7b9e249dc8b8ee"},
+ {file = "librt-0.7.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:add4e0a000858fe9bb39ed55f31085506a5c38363e6eb4a1e5943a10c2bfc3d1"},
+ {file = "librt-0.7.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:a3bfe73a32bd0bdb9a87d586b05a23c0a1729205d79df66dee65bb2e40d671ba"},
+ {file = "librt-0.7.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0ecce0544d3db91a40f8b57ae26928c02130a997b540f908cefd4d279d6c5848"},
+ {file = "librt-0.7.7-cp314-cp314-win32.whl", hash = "sha256:8f7a74cf3a80f0c3b0ec75b0c650b2f0a894a2cec57ef75f6f72c1e82cdac61d"},
+ {file = "librt-0.7.7-cp314-cp314-win_amd64.whl", hash = "sha256:3d1fe2e8df3268dd6734dba33ededae72ad5c3a859b9577bc00b715759c5aaab"},
+ {file = "librt-0.7.7-cp314-cp314-win_arm64.whl", hash = "sha256:2987cf827011907d3dfd109f1be0d61e173d68b1270107bb0e89f2fca7f2ed6b"},
+ {file = "librt-0.7.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8e92c8de62b40bfce91d5e12c6e8b15434da268979b1af1a6589463549d491e6"},
+ {file = "librt-0.7.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f683dcd49e2494a7535e30f779aa1ad6e3732a019d80abe1309ea91ccd3230e3"},
+ {file = "librt-0.7.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b15e5d17812d4d629ff576699954f74e2cc24a02a4fc401882dd94f81daba45"},
+ {file = "librt-0.7.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c084841b879c4d9b9fa34e5d5263994f21aea7fd9c6add29194dbb41a6210536"},
+ {file = "librt-0.7.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c8fb9966f84737115513fecbaf257f9553d067a7dd45a69c2c7e5339e6a8dc"},
+ {file = "librt-0.7.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9b5fb1ecb2c35362eab2dbd354fd1efa5a8440d3e73a68be11921042a0edc0ff"},
+ {file = "librt-0.7.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:d1454899909d63cc9199a89fcc4f81bdd9004aef577d4ffc022e600c412d57f3"},
+ {file = "librt-0.7.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7ef28f2e7a016b29792fe0a2dd04dec75725b32a1264e390c366103f834a9c3a"},
+ {file = "librt-0.7.7-cp314-cp314t-win32.whl", hash = "sha256:5e419e0db70991b6ba037b70c1d5bbe92b20ddf82f31ad01d77a347ed9781398"},
+ {file = "librt-0.7.7-cp314-cp314t-win_amd64.whl", hash = "sha256:d6b7d93657332c817b8d674ef6bf1ab7796b4f7ce05e420fd45bd258a72ac804"},
+ {file = "librt-0.7.7-cp314-cp314t-win_arm64.whl", hash = "sha256:142c2cd91794b79fd0ce113bd658993b7ede0fe93057668c2f98a45ca00b7e91"},
+ {file = "librt-0.7.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c8ffe3431d98cc043a14e88b21288b5ec7ee12cb01260e94385887f285ef9389"},
+ {file = "librt-0.7.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e40d20ae1722d6b8ea6acf4597e789604649dcd9c295eb7361a28225bc2e9e12"},
+ {file = "librt-0.7.7-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f2cb63c49bc96847c3bb8dca350970e4dcd19936f391cfdfd057dcb37c4fa97e"},
+ {file = "librt-0.7.7-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8f2f8dcf5ab9f80fb970c6fd780b398efb2f50c1962485eb8d3ab07788595a48"},
+ {file = "librt-0.7.7-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a1f5cc41a570269d1be7a676655875e3a53de4992a9fa38efb7983e97cf73d7c"},
+ {file = "librt-0.7.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ff1fb2dfef035549565a4124998fadcb7a3d4957131ddf004a56edeb029626b3"},
+ {file = "librt-0.7.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ab2a2a9cd7d044e1a11ca64a86ad3361d318176924bbe5152fbc69f99be20b8c"},
+ {file = "librt-0.7.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ad3fc2d859a709baf9dd9607bb72f599b1cfb8a39eafd41307d0c3c4766763cb"},
+ {file = "librt-0.7.7-cp39-cp39-win32.whl", hash = "sha256:f83c971eb9d2358b6a18da51dc0ae00556ac7c73104dde16e9e14c15aaf685ca"},
+ {file = "librt-0.7.7-cp39-cp39-win_amd64.whl", hash = "sha256:264720fc288c86039c091a4ad63419a5d7cabbf1c1c9933336a957ed2483e570"},
+ {file = "librt-0.7.7.tar.gz", hash = "sha256:81d957b069fed1890953c3b9c3895c7689960f233eea9a1d9607f71ce7f00b2c"},
]
[[package]]
@@ -1691,19 +1691,19 @@ files = [
[[package]]
name = "marshmallow"
-version = "4.1.2"
+version = "4.2.0"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "marshmallow-4.1.2-py3-none-any.whl", hash = "sha256:a8cfa18bd8d0e5f7339e734edf84815fe8db1bdb57358c7ccc05472b746eeadc"},
- {file = "marshmallow-4.1.2.tar.gz", hash = "sha256:083f250643d2e75fd363f256aeb6b1af369a7513ad37647ce4a601f6966e3ba5"},
+ {file = "marshmallow-4.2.0-py3-none-any.whl", hash = "sha256:1dc369bd13a8708a9566d6f73d1db07d50142a7580f04fd81e1c29a4d2e10af4"},
+ {file = "marshmallow-4.2.0.tar.gz", hash = "sha256:908acabd5aa14741419d3678d3296bda6abe28a167b7dcd05969ceb8256943ac"},
]
[package.extras]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
-docs = ["autodocsumm (==0.2.14)", "furo (==2025.9.25)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.13.0)"]
+docs = ["autodocsumm (==0.2.14)", "furo (==2025.12.19)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.13.0)"]
tests = ["pytest", "simplejson"]
[[package]]
@@ -2314,16 +2314,22 @@ files = [
[[package]]
name = "pathspec"
-version = "0.12.1"
+version = "1.0.1"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
- {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
+ {file = "pathspec-1.0.1-py3-none-any.whl", hash = "sha256:8870061f22c58e6d83463cfce9a7dd6eca0512c772c1001fb09ac64091816721"},
+ {file = "pathspec-1.0.1.tar.gz", hash = "sha256:e2769b508d0dd47b09af6ee2c75b2744a2cb1f474ae4b1494fd6a1b7a841613c"},
]
+[package.extras]
+hyperscan = ["hyperscan (>=0.7)"]
+optional = ["typing-extensions (>=4)"]
+re2 = ["google-re2 (>=1.1)"]
+tests = ["pytest (>=9)", "typing-extensions (>=4.15)"]
+
[[package]]
name = "pexpect"
version = "4.9.0"
@@ -2357,14 +2363,14 @@ testing = ["coverage", "pytest", "pytest-benchmark"]
[[package]]
name = "posthog"
-version = "7.4.2"
+version = "7.4.3"
description = "Integrate PostHog into any python application."
optional = false
python-versions = ">=3.10"
groups = ["dev"]
files = [
- {file = "posthog-7.4.2-py3-none-any.whl", hash = "sha256:36954f06f4adede905d97faeb24926a705a4d86f4a308506b15b41b661ef064c"},
- {file = "posthog-7.4.2.tar.gz", hash = "sha256:5953f31a21c5e2485ac57eb5d600a231a70118f884f438c0e8b493c30373c409"},
+ {file = "posthog-7.4.3-py3-none-any.whl", hash = "sha256:ae068f8954ee7a56d10ce35261580f1b8d99c6a2b6e878964eeacea1ec906b4a"},
+ {file = "posthog-7.4.3.tar.gz", hash = "sha256:02484a32c8bf44ab489dcef270ada46e5ce324021258c322f0d1b567c2d6f174"},
]
[package.dependencies]
@@ -2825,14 +2831,14 @@ cli = ["click (>=5.0)"]
[[package]]
name = "python-jsonpath"
-version = "2.0.1"
+version = "2.0.2"
description = "JSONPath, JSON Pointer and JSON Patch for Python."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "python_jsonpath-2.0.1-py3-none-any.whl", hash = "sha256:ebd518b7c883acc5b976518d76b6c96288405edec7d9ef838641869c1e1a5eb7"},
- {file = "python_jsonpath-2.0.1.tar.gz", hash = "sha256:32a84ebb2dc0ec1b42a6e165b0f9174aef8310bad29154ad9aee31ac37cca18f"},
+ {file = "python_jsonpath-2.0.2-py3-none-any.whl", hash = "sha256:3f8ab612f815ce10c03bf0deaede87235f3381b109a60b4a22744069953627e3"},
+ {file = "python_jsonpath-2.0.2.tar.gz", hash = "sha256:41abb6660b3ee54d5ae77e4b0e901049fb1662ad90de241f038df47edc75ee60"},
]
[package.extras]
@@ -3407,27 +3413,36 @@ blobfile = ["blobfile (>=2)"]
[[package]]
name = "tokenizers"
-version = "0.22.1"
+version = "0.22.2"
description = ""
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73"},
- {file = "tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f"},
- {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4"},
- {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879"},
- {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446"},
- {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a"},
- {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390"},
- {file = "tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82"},
- {file = "tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138"},
- {file = "tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9"},
+ {file = "tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c"},
+ {file = "tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b"},
+ {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67"},
+ {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4"},
+ {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a"},
+ {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a"},
+ {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5"},
+ {file = "tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92"},
+ {file = "tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48"},
+ {file = "tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc"},
+ {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:753d47ebd4542742ef9261d9da92cd545b2cacbb48349a1225466745bb866ec4"},
+ {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e10bf9113d209be7cd046d40fbabbaf3278ff6d18eb4da4c500443185dc1896c"},
+ {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64d94e84f6660764e64e7e0b22baa72f6cd942279fdbb21d46abd70d179f0195"},
+ {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f01a9c019878532f98927d2bacb79bbb404b43d3437455522a00a30718cdedb5"},
+ {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319f659ee992222f04e58f84cbf407cfa66a65fe3a8de44e8ad2bc53e7d99012"},
+ {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e50f8554d504f617d9e9d6e4c2c2884a12b388a97c5c77f0bc6cf4cd032feee"},
+ {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a62ba2c5faa2dd175aaeed7b15abf18d20266189fb3406c5d0550dd34dd5f37"},
+ {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143b999bdc46d10febb15cbffb4207ddd1f410e2c755857b5a0797961bbdc113"},
+ {file = "tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917"},
]
[package.dependencies]
@@ -3436,7 +3451,7 @@ huggingface-hub = ">=0.16.4,<2.0"
[package.extras]
dev = ["tokenizers[testing]"]
docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
-testing = ["black (==22.3)", "datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff"]
+testing = ["datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff", "ty"]
[[package]]
name = "toml"
@@ -3474,14 +3489,14 @@ telegram = ["requests"]
[[package]]
name = "typer-slim"
-version = "0.21.0"
+version = "0.21.1"
description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "typer_slim-0.21.0-py3-none-any.whl", hash = "sha256:92aee2188ac6fc2b2924bd75bb61a340b78bd8cd51fd9735533ce5a856812c8e"},
- {file = "typer_slim-0.21.0.tar.gz", hash = "sha256:f2dbd150cfa0fead2242e21fa9f654dfc64773763ddf07c6be9a49ad34f79557"},
+ {file = "typer_slim-0.21.1-py3-none-any.whl", hash = "sha256:6e6c31047f171ac93cc5a973c9e617dbc5ab2bddc4d0a3135dc161b4e2020e0d"},
+ {file = "typer_slim-0.21.1.tar.gz", hash = "sha256:73495dd08c2d0940d611c5a8c04e91c2a0a98600cbd4ee19192255a233b6dbfd"},
]
[package.dependencies]
diff --git a/sdk/pyproject.toml b/sdk/pyproject.toml
index 8577876238..d6e1c588e3 100644
--- a/sdk/pyproject.toml
+++ b/sdk/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.72.1"
+version = "0.76.0"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = [
diff --git a/web/ee/package.json b/web/ee/package.json
index c9f4581604..d5b52fc6c6 100644
--- a/web/ee/package.json
+++ b/web/ee/package.json
@@ -1,6 +1,6 @@
{
"name": "@agenta/ee",
- "version": "0.72.1",
+ "version": "0.76.0",
"private": true,
"engines": {
"node": ">=18"
@@ -26,8 +26,8 @@
"@lexical/code-shiki": "^0.38.2",
"@monaco-editor/react": "^4.7.0-rc.0",
"@phosphor-icons/react": "^2.1.10",
- "@tanstack/query-core": "^5.90.12",
- "@tanstack/react-query": "^5.90.12",
+ "@tanstack/query-core": "^5.90.16",
+ "@tanstack/react-query": "^5.90.16",
"@tremor/react": "^3.18.7",
"@types/js-yaml": "^4.0.9",
"@types/lodash": "^4.17.18",
@@ -37,7 +37,7 @@
"@types/react-window": "^1.8.8",
"@types/recharts": "^2.0.1",
"@types/uuid": "^10.0.0",
- "antd": "^6.1.0",
+ "antd": "^6.1.3",
"autoprefixer": "10.4.20",
"axios": "^1.12.2",
"classnames": "^2.3.2",
@@ -47,7 +47,7 @@
"dotenv": "^16.5.0",
"fast-deep-equal": "^3.1.3",
"immer": "^10.1.1",
- "jotai": "^2.16.0",
+ "jotai": "^2.16.1",
"jotai-eager": "^0.2.3",
"jotai-immer": "^0.4.1",
"jotai-scheduler": "^0.0.5",
diff --git a/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset/components/LoadEvaluatorPresetContent.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset/components/LoadEvaluatorPresetContent.tsx
index 5de2069511..12eabd7732 100644
--- a/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset/components/LoadEvaluatorPresetContent.tsx
+++ b/web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/components/modals/LoadEvaluatorPreset/components/LoadEvaluatorPresetContent.tsx
@@ -17,7 +17,7 @@ const LoadEvaluatorPresetContent = ({
const [searchTerm, setSearchTerm] = useState("")
const [format, setFormat] = useState<"yaml" | "json">("yaml")
- const filteredTestset = !searchTerm
+ const filteredPresets = !searchTerm
? settingsPresets
: settingsPresets.filter((preset: SettingsPreset) =>
preset.name.toLowerCase().includes(searchTerm.toLowerCase()),
@@ -45,7 +45,7 @@ const LoadEvaluatorPresetContent = ({