From bf46059b7c2a9ce435c7a431e45dc6035b3de3fc Mon Sep 17 00:00:00 2001
From: GitHub CI
Date: Tue, 14 Oct 2025 17:05:24 +0000
Subject: [PATCH 1/2] release/v0.58.0
---
.gitignore | 7 -
.gitleaks.toml | 32 +
.gitleaksignore | 196 ++
.pre-commit-config.yaml | 8 +-
CONTRIBUTING.md | 7 +-
LICENSE | 14 +-
README.md | 2 +-
SECURITY.md | 77 +-
api/ee/LICENSE | 37 +
.../credentials.json => api/ee/__init__.py | 0
api/ee/databases/__init__.py | 0
api/ee/databases/postgres/init-db-ee.sql | 39 +
.../databases/postgres/migrations/__init__.py | 0
.../postgres/migrations/core/README.md | 35 +
.../postgres/migrations/core/alembic.ini | 112 +
.../core/data_migrations/api_keys.py | 282 +++
.../core/data_migrations/applications.py | 124 +
.../migrations/core/data_migrations/demos.py | 576 +++++
.../core/data_migrations/evaluators.py | 195 ++
.../core/data_migrations/export_records.py | 175 ++
.../core/data_migrations/invitations.py | 192 ++
.../core/data_migrations/projects.py | 501 ++++
.../core/data_migrations/testsets.py | 191 ++
.../core/data_migrations/workspaces.py | 255 ++
.../databases/postgres/migrations/core/env.py | 126 +
.../postgres/migrations/core/script.py.mako | 26 +
...910d2fa9a4_migrate_old_testsets_to_new_.py | 32 +
...37ee784d_migrate_old_evaluators_to_new_.py | 32 +
.../postgres/migrations/core/utils.py | 196 ++
.../0698355c7641_add_tables_for_testsets.py | 388 +++
.../0698355c7642_add_table_for_testcases.py | 112 +
.../versions/0f086ebc2f83_extend_app_type.py | 58 +
.../core/versions/12f477990f1e_add_meters.py | 54 +
...et_user_id_column_in_db_entities_to_be_.py | 69 +
...n_to_add_default_project_and_membership.py | 40 +
...dded_the_app_type_column_to_the_app_db_.py | 59 +
...36752f9_update_secrets_data_schema_type.py | 64 +
.../30dcf07de96a_add_tables_for_queries.py | 403 ++++
...0c7_set_columns_in_api_key_table_to_be_.py | 61 +
.../3b5f5652f611_populate_runs_references.py | 77 +
.../425c68e8de6c_add_secrets_dbe_model.py | 53 +
...98_add_default_project_to_scoped_model_.py | 42 +
...54e81e9eed88_add_tables_for_evaluations.py | 514 ++++
.../5a71b3f140ab_fix_all_preview_schemas.py | 426 ++++
...4688d_add_commit_message_column_to_app_.py | 39 +
.../6965776e6940_add_subscriptions.py | 40 +
...r_organizations_to_organization_members.py | 63 +
.../versions/73a2d8cfaa3c_add_is_demo_flag.py | 30 +
.../versions/73a2d8cfaa3d_add_initial_demo.py | 36 +
...68410ab0_transfer_user_organization_to_.py | 35 +
.../7990f1e12f47_create_free_plans.py | 360 +++
...37a1_transfer_workspace_invitations_to_.py | 37 +
...add_hidden_column_to_app_variants_table.py | 35 +
.../8089ee7692d1_cleanup_preview_entities.py | 168 ++
.../versions/847972cfa14a_add_nodes_dbe.py | 121 +
.../8accbbea1d21_initial_migration.py | 1000 ++++++++
.../versions/91d3b4a8c27f_fix_ag_config.py | 61 +
.../9698355c7649_add_tables_for_workflows.py | 388 +++
.../9698355c7650_rename_metadata_to_meta.py | 51 +
...740b88_create_project_invitations_table.py | 60 +
...migrate_config_parameters_jsonb_to_json.py | 132 +
..._update_evaluators_names_with_app_name_.py | 35 +
...f15a7140ab_add_version_to_eval_entities.py | 107 +
...4_remove_app_id_from_evaluators_configs.py | 38 +
..._scope_project_id_to_db_models_entities.py | 348 +++
.../d5d4d6bf738f_add_evaluation_queues.py | 116 +
...reated_project_members_table_and_added_.py | 68 +
...b_add_modified_by_id_column_to_apps_db_.py | 31 +
.../fa07e07350bf_add_timestamp_to_metrics.py | 34 +
.../fd77265d65dc_fix_preview_entities.py | 232 ++
.../postgres/migrations/find_head.py | 48 +
.../databases/postgres/migrations/runner.py | 21 +
.../migrations/tracing/README copy.md | 35 +
.../postgres/migrations/tracing/__init__.py | 0
.../postgres/migrations/tracing/alembic.ini | 114 +
.../postgres/migrations/tracing/env.py | 100 +
.../migrations/tracing/script.py.mako | 26 +
.../postgres/migrations/tracing/utils.py | 188 ++
.../versions/58b1b61e5d6c_add_spans.py | 202 ++
.../versions/847972cfa14a_add_nodes.py | 121 +
.../versions/fd77265d65dc_fix_spans.py | 202 ++
api/ee/databases/postgres/migrations/utils.py | 313 +++
api/ee/docker/Dockerfile.dev | 44 +
api/ee/docker/Dockerfile.gh | 44 +
api/ee/src/__init__.py | 0
api/ee/src/apis/__init__.py | 0
api/ee/src/apis/fastapi/__init__.py | 0
api/ee/src/apis/fastapi/billing/__init__.py | 0
api/ee/src/apis/fastapi/billing/models.py | 0
api/ee/src/apis/fastapi/billing/router.py | 980 ++++++++
api/ee/src/core/__init__.py | 0
api/ee/src/core/entitlements/__init__.py | 0
api/ee/src/core/entitlements/service.py | 97 +
api/ee/src/core/entitlements/types.py | 277 +++
api/ee/src/core/meters/__init__.py | 0
api/ee/src/core/meters/interfaces.py | 88 +
api/ee/src/core/meters/service.py | 173 ++
api/ee/src/core/meters/types.py | 32 +
api/ee/src/core/subscriptions/__init__.py | 0
api/ee/src/core/subscriptions/interfaces.py | 56 +
api/ee/src/core/subscriptions/service.py | 271 +++
api/ee/src/core/subscriptions/types.py | 40 +
api/ee/src/crons/meters.sh | 17 +
api/ee/src/crons/meters.txt | 2 +
api/ee/src/crons/queries.sh | 24 +
api/ee/src/crons/queries.txt | 2 +
api/ee/src/dbs/__init__.py | 0
api/ee/src/dbs/postgres/__init__.py | 0
api/ee/src/dbs/postgres/meters/__init__.py | 0
api/ee/src/dbs/postgres/meters/dao.py | 290 +++
api/ee/src/dbs/postgres/meters/dbas.py | 29 +
api/ee/src/dbs/postgres/meters/dbes.py | 29 +
api/ee/src/dbs/postgres/shared/__init__.py | 0
.../dbs/postgres/subscriptions/__init__.py | 0
api/ee/src/dbs/postgres/subscriptions/dao.py | 84 +
api/ee/src/dbs/postgres/subscriptions/dbas.py | 19 +
api/ee/src/dbs/postgres/subscriptions/dbes.py | 24 +
.../dbs/postgres/subscriptions/mappings.py | 26 +
api/ee/src/main.py | 123 +
api/ee/src/models/api/api_models.py | 72 +
api/ee/src/models/api/organization_models.py | 33 +
api/ee/src/models/api/user_models.py | 9 +
api/ee/src/models/api/workspace_models.py | 58 +
api/ee/src/models/db_models.py | 518 ++++
.../src/models/extended/deprecated_models.py | 101 +
.../extended/deprecated_transfer_models.py | 347 +++
api/ee/src/models/shared_models.py | 200 ++
api/ee/src/routers/evaluation_router.py | 519 ++++
api/ee/src/routers/human_evaluation_router.py | 460 ++++
api/ee/src/routers/organization_router.py | 239 ++
api/ee/src/routers/workspace_router.py | 173 ++
api/ee/src/services/admin_manager.py | 404 ++++
api/ee/src/services/aggregation_service.py | 135 ++
api/ee/src/services/commoners.py | 179 ++
api/ee/src/services/converters.py | 321 +++
api/ee/src/services/db_manager.py | 35 +
api/ee/src/services/db_manager_ee.py | 2129 +++++++++++++++++
api/ee/src/services/email_helper.py | 51 +
api/ee/src/services/evaluation_service.py | 502 ++++
api/ee/src/services/llm_apps_service.py | 578 +++++
api/ee/src/services/organization_service.py | 121 +
api/ee/src/services/results_service.py | 116 +
api/ee/src/services/selectors.py | 125 +
api/ee/src/services/templates/send_email.html | 7 +
api/ee/src/services/utils.py | 21 +
api/ee/src/services/workspace_manager.py | 355 +++
api/ee/src/tasks/__init__.py | 0
api/ee/src/tasks/evaluations/__init__.py | 0
api/ee/src/tasks/evaluations/batch.py | 254 ++
api/ee/src/tasks/evaluations/legacy.py | 1391 +++++++++++
api/ee/src/tasks/evaluations/live.py | 771 ++++++
api/ee/src/utils/entitlements.py | 169 ++
api/ee/src/utils/permissions.py | 304 +++
api/ee/tests/__init__.py | 0
api/ee/tests/manual/billing.http | 52 +
api/ee/tests/manual/evaluations/live.http | 131 +
api/ee/tests/manual/evaluations/sdk/client.py | 32 +
.../manual/evaluations/sdk/definitions.py | 1818 ++++++++++++++
.../tests/manual/evaluations/sdk/entities.py | 447 ++++
.../tests/manual/evaluations/sdk/evaluate.py | 340 +++
.../manual/evaluations/sdk/evaluations.py | 208 ++
api/ee/tests/manual/evaluations/sdk/loop.py | 97 +
.../manual/evaluations/sdk/mock_entities.py | 90 +
.../tests/manual/evaluations/sdk/services.py | 375 +++
.../manual/evaluators/human-evaluator.http | 73 +
api/ee/tests/pytest/__init__.py | 0
api/ee/tests/requirements.txt | 1 +
api/oss/tests/manual/tracing/windowing.http | 2 +-
api/pyproject.toml | 2 +-
.../playground/02-adding-custom-providers.mdx | 4 +-
ee/LICENSE | 37 +
hooks/setup.sh | 47 +
hosting/docker-compose/ee/.dockerignore | 7 +
hosting/docker-compose/ee/LICENSE | 37 +
.../docker-compose/ee/docker-compose.dev.yml | 372 +++
hosting/docker-compose/ee/env.ee.dev.example | 91 +
hosting/docker-compose/ee/env.ee.gh.example | 80 +
...nv.oss.dev.example => env.oss.dev.example} | 0
....env.oss.gh.example => env.oss.gh.example} | 0
hosting/{ => old}/aws/agenta_instance.tf | 0
hosting/{ => old}/aws/agenta_instance_sg.tf | 0
hosting/{ => old}/aws/instance-setup.sh | 0
hosting/{ => old}/aws/main.tf | 0
hosting/{ => old}/gcp/agenta-instance.tf | 0
hosting/old/gcp/credentials.json | 0
hosting/{ => old}/gcp/main.tf | 0
sdk/pyproject.toml | 2 +-
sdk/tests/legacy/baggage/config.toml | 2 +-
.../legacy/debugging/simple-app/config.toml | 2 +-
services/chat/ee/LICENSE | 37 +
services/chat/ee/__init__.py | 0
services/chat/ee/docker/Dockerfile.gh | 18 +
services/completion/ee/LICENSE | 37 +
services/completion/ee/__init__.py | 0
services/completion/ee/docker/Dockerfile.gh | 18 +
web/ee/.gitignore | 37 +
web/ee/LICENSE | 37 +
web/ee/docker/Dockerfile.dev | 43 +
web/ee/docker/Dockerfile.gh | 43 +
web/ee/next.config.ts | 73 +
web/ee/package.json | 94 +
web/ee/postcss.config.mjs | 3 +
web/ee/public/assets/On-boarding.png | Bin 0 -> 6151628 bytes
web/ee/public/assets/On-boarding.webp | Bin 0 -> 422926 bytes
.../dark-complete-transparent-CROPPED.png | Bin 0 -> 52174 bytes
.../dark-complete-transparent_white_logo.png | Bin 0 -> 2971 bytes
web/ee/public/assets/dark-logo.svg | 1 +
web/ee/public/assets/fallback.png | Bin 0 -> 2404 bytes
web/ee/public/assets/favicon.ico | Bin 0 -> 1178 bytes
.../light-complete-transparent-CROPPED.png | Bin 0 -> 48798 bytes
web/ee/public/assets/light-logo.svg | 1 +
web/ee/public/assets/not-found.png | Bin 0 -> 8584 bytes
web/ee/public/assets/onboard-page-grids.svg | 81 +
web/ee/public/assets/rag-demo-app.webp | Bin 0 -> 11082 bytes
.../BillingPlanBanner/FreePlanBanner.tsx | 29 +
.../BillingPlanBanner/FreeTrialBanner.tsx | 33 +
.../DeleteEvaluationModal.tsx | 59 +
.../components/DeleteEvaluationModal/types.ts | 6 +
.../DeploymentHistory/DeploymentHistory.tsx | 347 +++
.../assets/AutoEvalRunSkeleton.tsx | 28 +
.../AutoEvalRun/assets/EvalNameTag.tsx | 270 +++
.../AutoEvalRun/assets/TagWithLink.tsx | 34 +
.../AutoEvalRun/assets/VariantTag.tsx | 262 ++
.../AutoEvalRun/assets/types.ts | 7 +
.../AutoEvalRun/assets/utils.ts | 52 +
.../AutoEvalRun/assets/variantUtils.ts | 170 ++
.../components/EvalRunCompareMenu/index.tsx | 269 +++
.../FocusDrawerContent/assets/RunOutput.tsx | 60 +
.../assets/RunTraceHeader.tsx | 79 +
.../assets/FocusDrawerContent/index.tsx | 905 +++++++
.../assets/FocusDrawerHeader/index.tsx | 142 ++
.../assets/FocusDrawerSidePanel/index.tsx | 164 ++
.../Skeletons/FocusDrawerContentSkeleton.tsx | 33 +
.../Skeletons/FocusDrawerHeaderSkeleton.tsx | 16 +
.../FocusDrawerSidePanelSkeleton.tsx | 15 +
.../components/EvalRunFocusDrawer/index.tsx | 68 +
.../assets/EvalRunHeaderSkeleton.tsx | 20 +
.../components/EvalRunHeader/index.tsx | 46 +
.../assets/EvalRunOverviewViewerSkeleton.tsx | 25 +
.../EvalRunOverviewViewer/index.tsx | 209 ++
.../EvalRunPromptConfigViewerSkeleton.tsx | 42 +
.../assets/PromptConfigCard.tsx | 633 +++++
.../EvalRunPromptConfigViewer/index.tsx | 152 ++
.../assets/EvalRunScoreTableSkeleton.tsx | 21 +
.../EvalRunScoreTable/assets/TraceMetrics.tsx | 49 +
.../EvalRunScoreTable/assets/constants.ts | 17 +
.../components/EvalRunScoreTable/index.tsx | 510 ++++
.../EvalRunSelectedEvaluations/index.tsx | 73 +
.../index.tsx | 332 +++
.../assets/EvalRunTestCaseViewerSkeleton.tsx | 77 +
.../EvalRunTestCaseViewer/index.tsx | 31 +
.../EvaluatorMetircsSpiderChart/index.tsx | 223 ++
.../EvaluatorMetircsSpiderChart/types.ts | 31 +
.../EvaluatorMetricsChart/assets/BarChart.tsx | 286 +++
.../assets/EvaluatorMetricsChartSkeleton.tsx | 20 +
.../assets/HistogramChart.tsx | 149 ++
.../EvaluatorMetricsChart/index.tsx | 299 +++
.../EvalRunDetails/AutoEvalRun/index.tsx | 51 +
.../HumanEvalRun/assets/annotationUtils.ts | 383 +++
.../HumanEvalRun/assets/helpers.ts | 252 ++
.../HumanEvalRun/assets/optimisticUtils.ts | 41 +
.../HumanEvalRun/assets/runnableSelectors.ts | 64 +
.../HumanEvalRun/assets/stepsMetricsUtils.ts | 180 ++
.../HumanEvalRun/assets/types.ts | 6 +
.../AnnotateScenarioButton/index.tsx | 96 +
.../AnnotateScenarioButton/types.ts | 14 +
.../EvalResultsView/EvaluatorMetricsCard.tsx | 81 +
.../components/EvalResultsView/index.tsx | 39 +
.../components/EvalRunBatchActions.tsx | 238 ++
.../components/EvalRunName/index.tsx | 78 +
.../components/EvalRunScenario/index.tsx | 53 +
.../components/EvalRunScenario/types.ts | 5 +
.../EvalRunScenarioCardBody.tsx | 151 ++
.../EvalRunScenarioCard/InvocationInputs.tsx | 110 +
.../InvocationResponse.tsx | 151 ++
.../EvalRunScenarioCard/InvocationRun.tsx | 20 +
.../EvalRunScenarioCard/assets/KeyValue.tsx | 59 +
.../EvalRunScenarioCard/assets/utils.tsx | 9 +
.../components/EvalRunScenarioCard/index.tsx | 73 +
.../components/EvalRunScenarioCard/types.ts | 29 +
.../EvalRunScenarioCardTitle/index.tsx | 22 +
.../EvalRunScenarioCardTitle/types.ts | 5 +
.../EvalRunScenarioCards.tsx | 75 +
.../EvalRunScenarioCards/assets/constants.ts | 3 +
.../components/EvalRunScenarioFilters.tsx | 48 +
.../assets/InstructionButton.tsx | 51 +
.../Modals/InstructionModal/index.tsx | 37 +
.../assets/RenameEvalButton.tsx | 60 +
.../assets/RenameEvalModalContent.tsx | 35 +
.../Modals/RenameEvalModal/index.tsx | 91 +
.../HumanEvalRun/components/Modals/types.d.ts | 31 +
.../RunEvalScenarioButton/index.tsx | 107 +
.../components/RunEvalScenarioButton/types.ts | 6 +
.../ScenarioAnnotationPanel/index.tsx | 302 +++
.../ScenarioAnnotationPanel/types.ts | 16 +
.../ScenarioLoadingIndicator.tsx | 23 +
.../assets/constants.ts | 7 +
.../components/SingleScenarioViewer/index.tsx | 130 +
.../components/SingleScenarioViewer/types.ts | 3 +
.../EvalRunDetails/HumanEvalRun/index.tsx | 54 +
.../src/components/EvalRunDetails/UrlSync.tsx | 127 +
.../assets/renderChatMessages.tsx | 132 +
.../components/ComparisonDataFetcher.tsx | 99 +
.../components/EvalResultsView/index.tsx | 80 +
.../EvalRunScenarioNavigator/index.tsx | 296 +++
.../EvalRunScenarioStatusTag/assets/index.tsx | 32 +
.../EvalRunScenarioStatusTag/index.tsx | 67 +
.../assets/constants.ts | 20 +
.../EvalRunScenariosViewSelector/index.tsx | 51 +
.../SaveDataModal/assets/SaveDataButton.tsx | 65 +
.../assets/SaveDataModalContent.tsx | 82 +
.../components/SaveDataModal/assets/types.ts | 34 +
.../components/SaveDataModal/index.tsx | 119 +
.../ComparisonScenarioTable.tsx | 173 ++
.../ScenarioTable.tsx | 68 +
.../assets/ActionCell.tsx | 82 +
.../assets/CellComponents.tsx | 354 +++
.../assets/ComparisonModeToggle.tsx | 237 ++
.../CollapsedAnnotationValueCell.tsx | 106 +
.../MetricCell/CollapsedMetricValueCell.tsx | 308 +++
.../MetricCell/CollapsedMetricsCell.tsx | 34 +
.../assets/MetricCell/MetricCell.tsx | 322 +++
.../assets/MetricCell/types.ts | 41 +
.../assets/StatusCell.tsx | 30 +
...VirtualizedScenarioTableAnnotateDrawer.tsx | 81 +
.../assets/constants.ts | 92 +
.../assets/dataSourceBuilder.ts | 394 +++
.../assets/flatDataSourceBuilder.ts | 8 +
.../VirtualizedScenarioTable/assets/types.ts | 18 +
.../VirtualizedScenarioTable/assets/utils.tsx | 453 ++++
.../useExpandableComparisonDataSource.tsx | 387 +++
.../hooks/useScrollToScenario.ts | 88 +
.../hooks/useTableDataSource.ts | 156 ++
.../VirtualizedScenarioTable/index.tsx | 23 +
.../VirtualizedScenarioTable/types.ts | 19 +
.../src/components/EvalRunDetails/index.tsx | 310 +++
.../EvalRunDetails/state/evalType.ts | 10 +
.../EvalRunDetails/state/focusScenarioAtom.ts | 89 +
.../EvalRunDetails/state/urlState.ts | 36 +
.../ABTestingEvaluationTable.tsx | 823 +++++++
.../SingleModelEvaluationTable.tsx | 752 ++++++
.../EvaluationTable/assets/styles.ts | 140 ++
.../components/ParamsFormWithRun.tsx | 148 ++
.../src/components/EvaluationTable/types.d.ts | 21 +
.../EvaluationCardView/EvaluationCard.tsx | 78 +
.../EvaluationChatResponse.tsx | 69 +
.../EvaluationCardView/EvaluationInputs.tsx | 50 +
.../EvaluationVariantCard.tsx | 105 +
.../EvaluationVotePanel.tsx | 405 ++++
.../EvaluationCardView/VariantAlphabet.tsx | 44 +
.../EvaluationCardView/assets/styles.ts | 108 +
.../Evaluations/EvaluationCardView/index.tsx | 504 ++++
.../Evaluations/EvaluationCardView/types.d.ts | 15 +
.../Evaluations/EvaluationErrorModal.tsx | 48 +
.../Evaluations/HumanEvaluationResult.tsx | 0
.../Evaluations/ShareEvaluationModal.tsx | 61 +
.../HumanEvaluationModal.tsx | 420 ++++
.../HumanEvaluationModal/assets/styles.ts | 105 +
.../HumanEvaluationModal/types.d.ts | 5 +
.../HumanEvaluations/AbTestingEvaluation.tsx | 551 +++++
.../SingleModelEvaluation.tsx | 228 ++
.../assets/EvaluationStatusCell.tsx | 147 ++
.../assets/LegacyEvalResultCell.tsx | 32 +
.../MetricDetailsPopover/assets/ChartAxis.tsx | 91 +
.../assets/ChartFrame.tsx | 71 +
.../assets/ResponsiveFrequencyChart.tsx | 463 ++++
.../assets/ResponsiveMetricChart.tsx | 634 +++++
.../MetricDetailsPopover/assets/chartUtils.ts | 11 +
.../MetricDetailsPopover/assets/utils.ts | 170 ++
.../assets/MetricDetailsPopover/index.tsx | 387 +++
.../assets/MetricDetailsPopover/types.ts | 36 +
.../SingleModelEvaluationHeader/index.tsx | 328 +++
.../assets/TableDropdownMenu/index.tsx | 138 ++
.../assets/TableDropdownMenu/types.ts | 16 +
.../HumanEvaluations/assets/styles.ts | 29 +
.../HumanEvaluations/assets/utils.tsx | 889 +++++++
.../src/components/HumanEvaluations/types.ts | 26 +
.../PostSignupForm/PostSignupForm.tsx | 370 +++
.../PostSignupForm/assets/styles.ts | 32 +
.../PostSignupForm/assets/types.d.ts | 8 +
.../PromptVersioningDrawer.tsx | 152 ++
.../SaveTestsetModal/SaveTestsetModal.tsx | 86 +
.../components/SaveTestsetModal/types.d.ts | 13 +
.../Scripts/assets/CloudScripts.tsx | 47 +
.../src/components/SidePanel/Subscription.tsx | 29 +
.../app-management/components/ApiKeyInput.tsx | 61 +
.../components/DemoApplicationsSection.tsx | 96 +
.../ObservabilityDashboardSection.tsx | 180 ++
.../EvaluationErrorModal.tsx | 77 +
.../EvaluationErrorPopover.tsx | 43 +
.../EvaluationErrorText.tsx | 19 +
.../pages/evaluations/EvaluationsView.tsx | 160 ++
.../FilterColumns/FilterColumns.tsx | 88 +
.../Components/AdvancedSettings.tsx | 112 +
.../Components/NewEvaluationModalContent.tsx | 294 +++
.../Components/SelectAppSection.tsx | 118 +
.../SelectEvaluatorSection.tsx | 360 +++
.../Components/SelectTestsetSection.tsx | 137 ++
.../Components/SelectVariantSection.tsx | 113 +
.../NewEvaluation/assets/TabLabel/index.tsx | 20 +
.../NewEvaluation/assets/TabLabel/types.ts | 6 +
.../NewEvaluation/assets/constants.ts | 7 +
.../NewEvaluation/assets/styles.ts | 80 +
.../pages/evaluations/NewEvaluation/index.tsx | 551 +++++
.../pages/evaluations/NewEvaluation/types.ts | 92 +
.../autoEvaluation/AutoEvaluation.tsx | 318 +++
.../ConfigureEvaluator/AdvancedSettings.tsx | 128 +
.../ConfigureEvaluator/DebugSection.tsx | 851 +++++++
.../ConfigureEvaluator/DynamicFormField.tsx | 152 ++
.../EvaluatorTestcaseModal.tsx | 174 ++
.../EvaluatorVariantModal.tsx | 158 ++
.../ConfigureEvaluator/Messages.tsx | 158 ++
.../ConfigureEvaluator/assets/styles.ts | 32 +
.../ConfigureEvaluator/index.tsx | 340 +++
.../ConfigureEvaluator/types.ts | 14 +
.../Evaluators/DeleteModal.tsx | 73 +
.../Evaluators/EvaluatorCard.tsx | 213 ++
.../Evaluators/EvaluatorList.tsx | 172 ++
.../EvaluatorsModal/Evaluators/index.tsx | 197 ++
.../EvaluatorsModal/EvaluatorsModal.tsx | 201 ++
.../NewEvaluator/NewEvaluatorCard.tsx | 114 +
.../NewEvaluator/NewEvaluatorList.tsx | 85 +
.../EvaluatorsModal/NewEvaluator/index.tsx | 142 ++
.../autoEvaluation/Filters/SearchFilter.tsx | 78 +
.../assets/AutoEvaluationHeader.tsx | 679 ++++++
.../autoEvaluation/assets/styles.ts | 8 +
.../autoEvaluation/assets/types.ts | 22 +
.../cellRenderers/StatusRenderer.tsx | 62 +
.../cellRenderers/cellRenderers.tsx | 270 +++
.../evaluationCompare/EvaluationCompare.tsx | 629 +++++
.../EvaluationScenarios.tsx | 474 ++++
.../src/components/pages/evaluations/utils.ts | 185 ++
.../observability/dashboard/widgetCard.tsx | 85 +
.../deployments/DeploymentHistoryModal.tsx | 415 ++++
.../deployments/DeploymentRevertModal.tsx | 79 +
.../overview/deployments/HistoryConfig.tsx | 112 +
.../observability/ObservabilityOverview.tsx | 135 ++
.../AutoRenewalCancelModalContent/index.tsx | 33 +
.../assets/constants.ts | 10 +
.../AutoRenewalCancelModal/assets/types.d.ts | 8 +
.../Modals/AutoRenewalCancelModal/index.tsx | 74 +
.../PricingModal/assets/PricingCard/index.tsx | 96 +
.../assets/PricingModalContent/index.tsx | 95 +
.../assets/PricingModalTitle/index.tsx | 17 +
.../assets/SubscriptionPlanDetails/index.tsx | 22 +
.../Modals/PricingModal/assets/constants.ts | 45 +
.../Modals/PricingModal/assets/types.d.ts | 28 +
.../Billing/Modals/PricingModal/index.tsx | 27 +
.../Billing/assets/UsageProgressBar/index.tsx | 33 +
.../pages/settings/Billing/assets/types.d.ts | 12 +
.../pages/settings/Billing/index.tsx | 177 ++
web/ee/src/contexts/RunIdContext.tsx | 40 +
web/ee/src/hooks/useCrispChat.ts | 43 +
web/ee/src/lib/helpers/evaluate.ts | 449 ++++
web/ee/src/lib/helpers/hashUtils.ts | 73 +
web/ee/src/lib/helpers/traceUtils.ts | 146 ++
.../lib/hooks/useEvalScenarioQueue/index.ts | 348 +++
.../useEvalScenarioQueue/responseQueue.ts | 48 +
.../assets/atoms/bulkFetch.ts | 96 +
.../assets/atoms/cache.ts | 6 +
.../assets/atoms/index.ts | 19 +
.../assets/atoms/migrationHelper.ts | 18 +
.../assets/atoms/progress.ts | 263 ++
.../assets/atoms/runScopedAtoms.ts | 105 +
.../assets/atoms/runScopedMetrics.ts | 546 +++++
.../assets/atoms/runScopedScenarios.ts | 376 +++
.../assets/atoms/store.ts | 74 +
.../assets/atoms/types.ts | 16 +
.../assets/atoms/utils.ts | 24 +
.../useEvaluationRunData/assets/constants.ts | 25 +
.../assets/helpers/buildRunIndex.ts | 124 +
.../helpers/fetchScenarioListViaWorker.ts | 48 +
.../assets/helpers/fetchScenarioViaWorker.ts | 184 ++
.../assets/helpers/scenarioFilters.ts | 66 +
.../assets/helpers/workerContext/index.ts | 145 ++
.../assets/helpers/workerContext/types.ts | 31 +
.../lib/hooks/useEvaluationRunData/index.ts | 272 +++
.../lib/hooks/useEvaluationRunData/types.ts | 141 ++
.../useEvalRunScenarioData.tsx | 43 +
.../useEvaluationRunMetrics/assets/utils.ts | 24 +
.../hooks/useEvaluationRunMetrics/index.ts | 112 +
.../hooks/useEvaluationRunMetrics/types.ts | 75 +
.../useEvaluationRunScenarioSteps/types.ts | 162 ++
.../hooks/useEvaluationRunScenarios/index.ts | 133 +
.../hooks/useEvaluationRunScenarios/types.ts | 24 +
web/ee/src/lib/hooks/useEvaluations.ts | 345 +++
.../lib/hooks/useInvocationResult/index.ts | 143 ++
.../lib/hooks/useInvocationResult/types.ts | 18 +
.../usePreviewEvaluations/assets/utils.ts | 396 +++
.../lib/hooks/usePreviewEvaluations/index.ts | 459 ++++
.../projectVariantConfigs.ts | 131 +
.../states/queryFilterAtoms.ts | 7 +
.../lib/hooks/usePreviewEvaluations/types.ts | 84 +
.../usePreviewRunningEvaluations/index.ts | 58 +
.../states/runningEvalAtom.ts | 10 +
.../src/lib/hooks/useRunMetricsMap/index.ts | 171 ++
web/ee/src/lib/metricColumnFactory.tsx | 112 +
web/ee/src/lib/metricSorter.ts | 19 +
web/ee/src/lib/metricUtils.ts | 278 +++
web/ee/src/lib/metrics/utils.ts | 93 +
web/ee/src/lib/tableUtils.ts | 36 +
web/ee/src/lib/types_ee.ts | 165 ++
.../src/lib/workers/evalRunner/bulkWorker.ts | 143 ++
.../workers/evalRunner/evalRunner.worker.ts | 259 ++
.../evalRunner/fetchRunMetrics.worker.ts | 151 ++
.../workers/evalRunner/fetchSteps.worker.ts | 75 +
.../lib/workers/evalRunner/pureEnrichment.ts | 610 +++++
.../workers/evalRunner/runMetricsWorker.ts | 78 +
.../workers/evalRunner/scenarioListWorker.ts | 116 +
web/ee/src/lib/workers/evalRunner/types.ts | 39 +
.../src/lib/workers/evalRunner/workerFetch.ts | 298 +++
web/ee/src/pages/_app.tsx | 11 +
web/ee/src/pages/_document.tsx | 3 +
web/ee/src/pages/auth/[[...path]].tsx | 3 +
.../pages/auth/callback/[[...callback]].tsx | 3 +
web/ee/src/pages/post-signup/index.tsx | 5 +
web/ee/src/pages/w/[workspace_id]/index.tsx | 3 +
.../apps/[app_id]/deployments/index.tsx | 3 +
.../apps/[app_id]/endpoints/index.tsx | 5 +
.../[evaluation_id]/index.tsx | 115 +
.../apps/[app_id]/evaluations/index.tsx | 7 +
.../results/[evaluation_id]/index.tsx | 7 +
.../evaluations/results/compare/index.tsx | 7 +
.../[evaluation_id]/index.tsx | 7 +
.../apps/[app_id]/overview/index.tsx | 3 +
.../apps/[app_id]/playground/index.tsx | 3 +
.../apps/[app_id]/traces/index.tsx | 3 +
.../apps/[app_id]/variants/index.tsx | 3 +
.../p/[project_id]/apps/index.tsx | 3 +
.../p/[project_id]/evaluations/index.tsx | 7 +
.../results/[evaluation_id]/index.tsx | 7 +
.../evaluations/results/compare/index.tsx | 7 +
.../[evaluation_id]/index.tsx | 7 +
.../p/[project_id]/observability/index.tsx | 3 +
.../p/[project_id]/settings/index.tsx | 3 +
.../p/[project_id]/share/index.tsx | 86 +
.../testsets/[testset_id]/index.tsx | 3 +
.../p/[project_id]/testsets/index.tsx | 3 +
web/ee/src/pages/w/[workspace_id]/p/index.tsx | 3 +
web/ee/src/pages/w/index.tsx | 3 +
web/ee/src/pages/workspaces/accept.tsx | 3 +
web/ee/src/services/billing/index.tsx | 58 +
web/ee/src/services/billing/types.d.ts | 45 +
.../src/services/evaluationRuns/api/index.ts | 332 +++
.../src/services/evaluationRuns/api/types.ts | 18 +
web/ee/src/services/evaluationRuns/utils.ts | 0
web/ee/src/services/evaluations/api/index.ts | 328 +++
.../src/services/evaluations/api_ee/index.ts | 44 +
.../src/services/evaluations/workerUtils.ts | 157 ++
.../services/human-evaluations/api/index.ts | 294 +++
.../hooks/useEvaluationResults.ts | 26 +
.../src/services/observability/api/helper.ts | 61 +
.../src/services/observability/api/index.ts | 168 ++
web/ee/src/services/observability/api/mock.ts | 148 ++
.../services/promptVersioning/api/index.ts | 41 +
.../runMetrics/api/assets/contants.ts | 18 +
web/ee/src/services/runMetrics/api/index.ts | 696 ++++++
web/ee/src/services/runMetrics/api/types.ts | 22 +
.../src/services/variantConfigs/api/index.ts | 77 +
web/ee/src/state/billing/atoms.ts | 239 ++
web/ee/src/state/billing/hooks.ts | 137 ++
web/ee/src/state/billing/index.ts | 23 +
web/ee/src/state/observability/dashboard.ts | 61 +
web/ee/src/state/observability/index.ts | 1 +
web/ee/src/state/url/focusDrawer.ts | 131 +
web/ee/tailwind.config.ts | 3 +
.../1-settings/api-keys-management.spec.ts | 4 +
web/ee/tests/1-settings/model-hub.spec.ts | 4 +
web/ee/tests/2-app/create.spec.ts | 5 +
web/ee/tests/3-playground/run-variant.spec.ts | 4 +
.../prompt-registry-flow.spec.ts | 4 +
web/ee/tests/5-testsset/testset.spec.ts | 4 +
.../tests/6-auto-evaluation/assets/README.md | 67 +
.../tests/6-auto-evaluation/assets/types.ts | 42 +
web/ee/tests/6-auto-evaluation/index.ts | 92 +
.../run-auto-evaluation.spec.ts | 4 +
web/ee/tests/6-auto-evaluation/tests.ts | 97 +
.../7-observability/observability.spec.ts | 4 +
.../tests/8-deployment/deploy-variant.spec.ts | 4 +
.../tests/9-human-annotation/assets/types.ts | 22 +
.../human-annotation.spec.ts | 4 +
web/ee/tests/9-human-annotation/index.ts | 181 ++
web/ee/tests/9-human-annotation/tests.ts | 244 ++
web/ee/tsconfig.json | 12 +
web/oss/package.json | 2 +-
web/package.json | 2 +-
586 files changed, 75721 insertions(+), 31 deletions(-)
create mode 100644 .gitleaks.toml
create mode 100644 .gitleaksignore
create mode 100644 api/ee/LICENSE
rename hosting/gcp/credentials.json => api/ee/__init__.py (100%)
create mode 100644 api/ee/databases/__init__.py
create mode 100644 api/ee/databases/postgres/init-db-ee.sql
create mode 100644 api/ee/databases/postgres/migrations/__init__.py
create mode 100644 api/ee/databases/postgres/migrations/core/README.md
create mode 100644 api/ee/databases/postgres/migrations/core/alembic.ini
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/api_keys.py
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/applications.py
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/demos.py
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/export_records.py
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/invitations.py
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/projects.py
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/testsets.py
create mode 100644 api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
create mode 100644 api/ee/databases/postgres/migrations/core/env.py
create mode 100644 api/ee/databases/postgres/migrations/core/script.py.mako
create mode 100644 api/ee/databases/postgres/migrations/core/temp/80910d2fa9a4_migrate_old_testsets_to_new_.py
create mode 100644 api/ee/databases/postgres/migrations/core/temp/bd7937ee784d_migrate_old_evaluators_to_new_.py
create mode 100644 api/ee/databases/postgres/migrations/core/utils.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/0698355c7641_add_tables_for_testsets.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/0698355c7642_add_table_for_testcases.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/0f086ebc2f83_extend_app_type.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/12f477990f1e_add_meters.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/154098b1e56c_set_user_id_column_in_db_entities_to_be_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/1c2d3e4f5a6b_workspaces_migration_to_add_default_project_and_membership.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/24f8bdb390ee_added_the_app_type_column_to_the_app_db_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/2a91436752f9_update_secrets_data_schema_type.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/30dcf07de96a_add_tables_for_queries.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/320a4a7ee0c7_set_columns_in_api_key_table_to_be_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/3b5f5652f611_populate_runs_references.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/425c68e8de6c_add_secrets_dbe_model.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/4d9a58ff8f98_add_default_project_to_scoped_model_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/54e81e9eed88_add_tables_for_evaluations.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/5a71b3f140ab_fix_all_preview_schemas.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/6161b674688d_add_commit_message_column_to_app_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/6965776e6940_add_subscriptions.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/6aafdfc2befb_rename_user_organizations_to_organization_members.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/73a2d8cfaa3c_add_is_demo_flag.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/73a2d8cfaa3d_add_initial_demo.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/770d68410ab0_transfer_user_organization_to_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/79b9acb137a1_transfer_workspace_invitations_to_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/7cc66fc40298_add_hidden_column_to_app_variants_table.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/8089ee7692d1_cleanup_preview_entities.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/847972cfa14a_add_nodes_dbe.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/8accbbea1d21_initial_migration.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/91d3b4a8c27f_fix_ag_config.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/9698355c7649_add_tables_for_workflows.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/9698355c7650_rename_metadata_to_meta.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/9b0e1a740b88_create_project_invitations_table.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/aa1b2c3d4e5f_migrate_config_parameters_jsonb_to_json.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/ad0987a77380_update_evaluators_names_with_app_name_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/b3f15a7140ab_add_version_to_eval_entities.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/b3f6bff547d4_remove_app_id_from_evaluators_configs.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/d0b8e05ca190_scope_project_id_to_db_models_entities.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/d5d4d6bf738f_add_evaluation_queues.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/e14e8689cd03_created_project_members_table_and_added_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/e9fa2135f3fb_add_modified_by_id_column_to_apps_db_.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/fa07e07350bf_add_timestamp_to_metrics.py
create mode 100644 api/ee/databases/postgres/migrations/core/versions/fd77265d65dc_fix_preview_entities.py
create mode 100644 api/ee/databases/postgres/migrations/find_head.py
create mode 100644 api/ee/databases/postgres/migrations/runner.py
create mode 100644 api/ee/databases/postgres/migrations/tracing/README copy.md
create mode 100644 api/ee/databases/postgres/migrations/tracing/__init__.py
create mode 100644 api/ee/databases/postgres/migrations/tracing/alembic.ini
create mode 100644 api/ee/databases/postgres/migrations/tracing/env.py
create mode 100644 api/ee/databases/postgres/migrations/tracing/script.py.mako
create mode 100644 api/ee/databases/postgres/migrations/tracing/utils.py
create mode 100644 api/ee/databases/postgres/migrations/tracing/versions/58b1b61e5d6c_add_spans.py
create mode 100644 api/ee/databases/postgres/migrations/tracing/versions/847972cfa14a_add_nodes.py
create mode 100644 api/ee/databases/postgres/migrations/tracing/versions/fd77265d65dc_fix_spans.py
create mode 100644 api/ee/databases/postgres/migrations/utils.py
create mode 100644 api/ee/docker/Dockerfile.dev
create mode 100644 api/ee/docker/Dockerfile.gh
create mode 100644 api/ee/src/__init__.py
create mode 100644 api/ee/src/apis/__init__.py
create mode 100644 api/ee/src/apis/fastapi/__init__.py
create mode 100644 api/ee/src/apis/fastapi/billing/__init__.py
create mode 100644 api/ee/src/apis/fastapi/billing/models.py
create mode 100644 api/ee/src/apis/fastapi/billing/router.py
create mode 100644 api/ee/src/core/__init__.py
create mode 100644 api/ee/src/core/entitlements/__init__.py
create mode 100644 api/ee/src/core/entitlements/service.py
create mode 100644 api/ee/src/core/entitlements/types.py
create mode 100644 api/ee/src/core/meters/__init__.py
create mode 100644 api/ee/src/core/meters/interfaces.py
create mode 100644 api/ee/src/core/meters/service.py
create mode 100644 api/ee/src/core/meters/types.py
create mode 100644 api/ee/src/core/subscriptions/__init__.py
create mode 100644 api/ee/src/core/subscriptions/interfaces.py
create mode 100644 api/ee/src/core/subscriptions/service.py
create mode 100644 api/ee/src/core/subscriptions/types.py
create mode 100644 api/ee/src/crons/meters.sh
create mode 100644 api/ee/src/crons/meters.txt
create mode 100644 api/ee/src/crons/queries.sh
create mode 100644 api/ee/src/crons/queries.txt
create mode 100644 api/ee/src/dbs/__init__.py
create mode 100644 api/ee/src/dbs/postgres/__init__.py
create mode 100644 api/ee/src/dbs/postgres/meters/__init__.py
create mode 100644 api/ee/src/dbs/postgres/meters/dao.py
create mode 100644 api/ee/src/dbs/postgres/meters/dbas.py
create mode 100644 api/ee/src/dbs/postgres/meters/dbes.py
create mode 100644 api/ee/src/dbs/postgres/shared/__init__.py
create mode 100644 api/ee/src/dbs/postgres/subscriptions/__init__.py
create mode 100644 api/ee/src/dbs/postgres/subscriptions/dao.py
create mode 100644 api/ee/src/dbs/postgres/subscriptions/dbas.py
create mode 100644 api/ee/src/dbs/postgres/subscriptions/dbes.py
create mode 100644 api/ee/src/dbs/postgres/subscriptions/mappings.py
create mode 100644 api/ee/src/main.py
create mode 100644 api/ee/src/models/api/api_models.py
create mode 100644 api/ee/src/models/api/organization_models.py
create mode 100644 api/ee/src/models/api/user_models.py
create mode 100644 api/ee/src/models/api/workspace_models.py
create mode 100644 api/ee/src/models/db_models.py
create mode 100644 api/ee/src/models/extended/deprecated_models.py
create mode 100644 api/ee/src/models/extended/deprecated_transfer_models.py
create mode 100644 api/ee/src/models/shared_models.py
create mode 100644 api/ee/src/routers/evaluation_router.py
create mode 100644 api/ee/src/routers/human_evaluation_router.py
create mode 100644 api/ee/src/routers/organization_router.py
create mode 100644 api/ee/src/routers/workspace_router.py
create mode 100644 api/ee/src/services/admin_manager.py
create mode 100644 api/ee/src/services/aggregation_service.py
create mode 100644 api/ee/src/services/commoners.py
create mode 100644 api/ee/src/services/converters.py
create mode 100644 api/ee/src/services/db_manager.py
create mode 100644 api/ee/src/services/db_manager_ee.py
create mode 100644 api/ee/src/services/email_helper.py
create mode 100644 api/ee/src/services/evaluation_service.py
create mode 100644 api/ee/src/services/llm_apps_service.py
create mode 100644 api/ee/src/services/organization_service.py
create mode 100644 api/ee/src/services/results_service.py
create mode 100644 api/ee/src/services/selectors.py
create mode 100644 api/ee/src/services/templates/send_email.html
create mode 100644 api/ee/src/services/utils.py
create mode 100644 api/ee/src/services/workspace_manager.py
create mode 100644 api/ee/src/tasks/__init__.py
create mode 100644 api/ee/src/tasks/evaluations/__init__.py
create mode 100644 api/ee/src/tasks/evaluations/batch.py
create mode 100644 api/ee/src/tasks/evaluations/legacy.py
create mode 100644 api/ee/src/tasks/evaluations/live.py
create mode 100644 api/ee/src/utils/entitlements.py
create mode 100644 api/ee/src/utils/permissions.py
create mode 100644 api/ee/tests/__init__.py
create mode 100644 api/ee/tests/manual/billing.http
create mode 100644 api/ee/tests/manual/evaluations/live.http
create mode 100644 api/ee/tests/manual/evaluations/sdk/client.py
create mode 100644 api/ee/tests/manual/evaluations/sdk/definitions.py
create mode 100644 api/ee/tests/manual/evaluations/sdk/entities.py
create mode 100644 api/ee/tests/manual/evaluations/sdk/evaluate.py
create mode 100644 api/ee/tests/manual/evaluations/sdk/evaluations.py
create mode 100644 api/ee/tests/manual/evaluations/sdk/loop.py
create mode 100644 api/ee/tests/manual/evaluations/sdk/mock_entities.py
create mode 100644 api/ee/tests/manual/evaluations/sdk/services.py
create mode 100644 api/ee/tests/manual/evaluators/human-evaluator.http
create mode 100644 api/ee/tests/pytest/__init__.py
create mode 100644 api/ee/tests/requirements.txt
create mode 100644 ee/LICENSE
create mode 100755 hooks/setup.sh
create mode 100644 hosting/docker-compose/ee/.dockerignore
create mode 100644 hosting/docker-compose/ee/LICENSE
create mode 100644 hosting/docker-compose/ee/docker-compose.dev.yml
create mode 100644 hosting/docker-compose/ee/env.ee.dev.example
create mode 100644 hosting/docker-compose/ee/env.ee.gh.example
rename hosting/docker-compose/oss/{.env.oss.dev.example => env.oss.dev.example} (100%)
rename hosting/docker-compose/oss/{.env.oss.gh.example => env.oss.gh.example} (100%)
rename hosting/{ => old}/aws/agenta_instance.tf (100%)
rename hosting/{ => old}/aws/agenta_instance_sg.tf (100%)
rename hosting/{ => old}/aws/instance-setup.sh (100%)
rename hosting/{ => old}/aws/main.tf (100%)
rename hosting/{ => old}/gcp/agenta-instance.tf (100%)
create mode 100644 hosting/old/gcp/credentials.json
rename hosting/{ => old}/gcp/main.tf (100%)
create mode 100644 services/chat/ee/LICENSE
create mode 100644 services/chat/ee/__init__.py
create mode 100644 services/chat/ee/docker/Dockerfile.gh
create mode 100644 services/completion/ee/LICENSE
create mode 100644 services/completion/ee/__init__.py
create mode 100644 services/completion/ee/docker/Dockerfile.gh
create mode 100644 web/ee/.gitignore
create mode 100644 web/ee/LICENSE
create mode 100644 web/ee/docker/Dockerfile.dev
create mode 100644 web/ee/docker/Dockerfile.gh
create mode 100644 web/ee/next.config.ts
create mode 100644 web/ee/package.json
create mode 100644 web/ee/postcss.config.mjs
create mode 100644 web/ee/public/assets/On-boarding.png
create mode 100644 web/ee/public/assets/On-boarding.webp
create mode 100644 web/ee/public/assets/dark-complete-transparent-CROPPED.png
create mode 100644 web/ee/public/assets/dark-complete-transparent_white_logo.png
create mode 100644 web/ee/public/assets/dark-logo.svg
create mode 100644 web/ee/public/assets/fallback.png
create mode 100644 web/ee/public/assets/favicon.ico
create mode 100644 web/ee/public/assets/light-complete-transparent-CROPPED.png
create mode 100644 web/ee/public/assets/light-logo.svg
create mode 100644 web/ee/public/assets/not-found.png
create mode 100644 web/ee/public/assets/onboard-page-grids.svg
create mode 100644 web/ee/public/assets/rag-demo-app.webp
create mode 100644 web/ee/src/components/Banners/BillingPlanBanner/FreePlanBanner.tsx
create mode 100644 web/ee/src/components/Banners/BillingPlanBanner/FreeTrialBanner.tsx
create mode 100644 web/ee/src/components/DeleteEvaluationModal/DeleteEvaluationModal.tsx
create mode 100644 web/ee/src/components/DeleteEvaluationModal/types.ts
create mode 100644 web/ee/src/components/DeploymentHistory/DeploymentHistory.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/AutoEvalRunSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/EvalNameTag.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/TagWithLink.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/utils.ts
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/variantUtils.ts
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunOutput.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/assets/RunTraceHeader.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerHeader/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerContentSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerHeaderSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/Skeletons/FocusDrawerSidePanelSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/assets/EvalRunHeaderSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunHeader/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunOverviewViewer/assets/EvalRunOverviewViewerSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunOverviewViewer/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/EvalRunPromptConfigViewerSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/EvalRunScoreTableSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/TraceMetrics.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/assets/constants.ts
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunScoreTable/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunSelectedEvaluations/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestCaseViewUtilityOptions/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestCaseViewer/assets/EvalRunTestCaseViewerSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunTestCaseViewer/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetircsSpiderChart/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/BarChart.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/EvaluatorMetricsChartSkeleton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/HistogramChart.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/AutoEvalRun/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/annotationUtils.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/helpers.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/optimisticUtils.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/runnableSelectors.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/stepsMetricsUtils.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/assets/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/AnnotateScenarioButton/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalResultsView/EvaluatorMetricsCard.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalResultsView/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunBatchActions.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunName/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenario/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/EvalRunScenarioCardBody.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationInputs.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationResponse.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/InvocationRun.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/KeyValue.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/assets/utils.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCard/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCardTitle/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/EvalRunScenarioCards.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioCards/assets/constants.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/EvalRunScenarioFilters.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/assets/InstructionButton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/InstructionModal/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalButton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/assets/RenameEvalModalContent.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/types.d.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/RunEvalScenarioButton/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioAnnotationPanel/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/ScenarioLoadingIndicator.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/ScenarioLoadingIndicator/assets/constants.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/components/SingleScenarioViewer/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/HumanEvalRun/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/UrlSync.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/assets/renderChatMessages.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/ComparisonDataFetcher.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/EvalResultsView/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/EvalRunScenarioNavigator/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/assets/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/EvalRunScenarioStatusTag/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/assets/constants.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/EvalRunScenariosViewSelector/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataButton.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/SaveDataModalContent.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/SaveDataModal/assets/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/SaveDataModal/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ComparisonScenarioTable.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ActionCell.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/ComparisonModeToggle.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedAnnotationValueCell.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricValueCell.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/CollapsedMetricsCell.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/MetricCell.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/MetricCell/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/StatusCell.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/VirtualizedScenarioTableAnnotateDrawer.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/flatDataSourceBuilder.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useScrollToScenario.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/types.ts
create mode 100644 web/ee/src/components/EvalRunDetails/index.tsx
create mode 100644 web/ee/src/components/EvalRunDetails/state/evalType.ts
create mode 100644 web/ee/src/components/EvalRunDetails/state/focusScenarioAtom.ts
create mode 100644 web/ee/src/components/EvalRunDetails/state/urlState.ts
create mode 100644 web/ee/src/components/EvaluationTable/ABTestingEvaluationTable.tsx
create mode 100644 web/ee/src/components/EvaluationTable/SingleModelEvaluationTable.tsx
create mode 100644 web/ee/src/components/EvaluationTable/assets/styles.ts
create mode 100644 web/ee/src/components/EvaluationTable/components/ParamsFormWithRun.tsx
create mode 100644 web/ee/src/components/EvaluationTable/types.d.ts
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/EvaluationCard.tsx
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/EvaluationChatResponse.tsx
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/EvaluationInputs.tsx
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/EvaluationVariantCard.tsx
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/EvaluationVotePanel.tsx
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/VariantAlphabet.tsx
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/assets/styles.ts
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/index.tsx
create mode 100644 web/ee/src/components/Evaluations/EvaluationCardView/types.d.ts
create mode 100644 web/ee/src/components/Evaluations/EvaluationErrorModal.tsx
create mode 100644 web/ee/src/components/Evaluations/HumanEvaluationResult.tsx
create mode 100644 web/ee/src/components/Evaluations/ShareEvaluationModal.tsx
create mode 100644 web/ee/src/components/HumanEvaluationModal/HumanEvaluationModal.tsx
create mode 100644 web/ee/src/components/HumanEvaluationModal/assets/styles.ts
create mode 100644 web/ee/src/components/HumanEvaluationModal/types.d.ts
create mode 100644 web/ee/src/components/HumanEvaluations/AbTestingEvaluation.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/SingleModelEvaluation.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/EvaluationStatusCell.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/LegacyEvalResultCell.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartAxis.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ChartFrame.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveFrequencyChart.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/ResponsiveMetricChart.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/chartUtils.ts
create mode 100644 web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/assets/utils.ts
create mode 100644 web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/index.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/MetricDetailsPopover/types.ts
create mode 100644 web/ee/src/components/HumanEvaluations/assets/SingleModelEvaluationHeader/index.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts
create mode 100644 web/ee/src/components/HumanEvaluations/assets/styles.ts
create mode 100644 web/ee/src/components/HumanEvaluations/assets/utils.tsx
create mode 100644 web/ee/src/components/HumanEvaluations/types.ts
create mode 100644 web/ee/src/components/PostSignupForm/PostSignupForm.tsx
create mode 100644 web/ee/src/components/PostSignupForm/assets/styles.ts
create mode 100644 web/ee/src/components/PostSignupForm/assets/types.d.ts
create mode 100644 web/ee/src/components/PromptVersioningDrawer/PromptVersioningDrawer.tsx
create mode 100644 web/ee/src/components/SaveTestsetModal/SaveTestsetModal.tsx
create mode 100644 web/ee/src/components/SaveTestsetModal/types.d.ts
create mode 100644 web/ee/src/components/Scripts/assets/CloudScripts.tsx
create mode 100644 web/ee/src/components/SidePanel/Subscription.tsx
create mode 100644 web/ee/src/components/pages/app-management/components/ApiKeyInput.tsx
create mode 100644 web/ee/src/components/pages/app-management/components/DemoApplicationsSection.tsx
create mode 100644 web/ee/src/components/pages/app-management/components/ObservabilityDashboardSection.tsx
create mode 100644 web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorModal.tsx
create mode 100644 web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorPopover.tsx
create mode 100644 web/ee/src/components/pages/evaluations/EvaluationErrorProps/EvaluationErrorText.tsx
create mode 100644 web/ee/src/components/pages/evaluations/EvaluationsView.tsx
create mode 100644 web/ee/src/components/pages/evaluations/FilterColumns/FilterColumns.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/Components/AdvancedSettings.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/Components/NewEvaluationModalContent.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectAppSection.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectEvaluatorSection/SelectEvaluatorSection.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectTestsetSection.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/Components/SelectVariantSection.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/index.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/assets/TabLabel/types.ts
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/assets/constants.ts
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/assets/styles.ts
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/index.tsx
create mode 100644 web/ee/src/components/pages/evaluations/NewEvaluation/types.ts
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/AdvancedSettings.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DebugSection.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/DynamicFormField.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorTestcaseModal.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/EvaluatorVariantModal.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/Messages.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/assets/styles.ts
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/index.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/ConfigureEvaluator/types.ts
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/DeleteModal.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorCard.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/EvaluatorList.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/Evaluators/index.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/EvaluatorsModal.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorCard.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/NewEvaluatorList.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/EvaluatorsModal/NewEvaluator/index.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/Filters/SearchFilter.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/assets/AutoEvaluationHeader.tsx
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/assets/styles.ts
create mode 100644 web/ee/src/components/pages/evaluations/autoEvaluation/assets/types.ts
create mode 100644 web/ee/src/components/pages/evaluations/cellRenderers/StatusRenderer.tsx
create mode 100644 web/ee/src/components/pages/evaluations/cellRenderers/cellRenderers.tsx
create mode 100644 web/ee/src/components/pages/evaluations/evaluationCompare/EvaluationCompare.tsx
create mode 100644 web/ee/src/components/pages/evaluations/evaluationScenarios/EvaluationScenarios.tsx
create mode 100644 web/ee/src/components/pages/evaluations/utils.ts
create mode 100644 web/ee/src/components/pages/observability/dashboard/widgetCard.tsx
create mode 100644 web/ee/src/components/pages/overview/deployments/DeploymentHistoryModal.tsx
create mode 100644 web/ee/src/components/pages/overview/deployments/DeploymentRevertModal.tsx
create mode 100644 web/ee/src/components/pages/overview/deployments/HistoryConfig.tsx
create mode 100644 web/ee/src/components/pages/overview/observability/ObservabilityOverview.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/AutoRenewalCancelModal/assets/AutoRenewalCancelModalContent/index.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/AutoRenewalCancelModal/assets/constants.ts
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/AutoRenewalCancelModal/assets/types.d.ts
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/AutoRenewalCancelModal/index.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/PricingModal/assets/PricingCard/index.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/PricingModal/assets/PricingModalContent/index.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/PricingModal/assets/PricingModalTitle/index.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/PricingModal/assets/SubscriptionPlanDetails/index.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/PricingModal/assets/constants.ts
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/PricingModal/assets/types.d.ts
create mode 100644 web/ee/src/components/pages/settings/Billing/Modals/PricingModal/index.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/assets/UsageProgressBar/index.tsx
create mode 100644 web/ee/src/components/pages/settings/Billing/assets/types.d.ts
create mode 100644 web/ee/src/components/pages/settings/Billing/index.tsx
create mode 100644 web/ee/src/contexts/RunIdContext.tsx
create mode 100644 web/ee/src/hooks/useCrispChat.ts
create mode 100644 web/ee/src/lib/helpers/evaluate.ts
create mode 100644 web/ee/src/lib/helpers/hashUtils.ts
create mode 100644 web/ee/src/lib/helpers/traceUtils.ts
create mode 100644 web/ee/src/lib/hooks/useEvalScenarioQueue/index.ts
create mode 100644 web/ee/src/lib/hooks/useEvalScenarioQueue/responseQueue.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/bulkFetch.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/cache.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/index.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/migrationHelper.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedAtoms.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedScenarios.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/store.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/types.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/utils.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/constants.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioListViaWorker.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/fetchScenarioViaWorker.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/scenarioFilters.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/index.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/workerContext/types.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/index.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/types.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunData/useEvalRunScenarioData.tsx
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunMetrics/assets/utils.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunMetrics/index.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunMetrics/types.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunScenarioSteps/types.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunScenarios/index.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluationRunScenarios/types.ts
create mode 100644 web/ee/src/lib/hooks/useEvaluations.ts
create mode 100644 web/ee/src/lib/hooks/useInvocationResult/index.ts
create mode 100644 web/ee/src/lib/hooks/useInvocationResult/types.ts
create mode 100644 web/ee/src/lib/hooks/usePreviewEvaluations/assets/utils.ts
create mode 100644 web/ee/src/lib/hooks/usePreviewEvaluations/index.ts
create mode 100644 web/ee/src/lib/hooks/usePreviewEvaluations/projectVariantConfigs.ts
create mode 100644 web/ee/src/lib/hooks/usePreviewEvaluations/states/queryFilterAtoms.ts
create mode 100644 web/ee/src/lib/hooks/usePreviewEvaluations/types.ts
create mode 100644 web/ee/src/lib/hooks/usePreviewRunningEvaluations/index.ts
create mode 100644 web/ee/src/lib/hooks/usePreviewRunningEvaluations/states/runningEvalAtom.ts
create mode 100644 web/ee/src/lib/hooks/useRunMetricsMap/index.ts
create mode 100644 web/ee/src/lib/metricColumnFactory.tsx
create mode 100644 web/ee/src/lib/metricSorter.ts
create mode 100644 web/ee/src/lib/metricUtils.ts
create mode 100644 web/ee/src/lib/metrics/utils.ts
create mode 100644 web/ee/src/lib/tableUtils.ts
create mode 100644 web/ee/src/lib/types_ee.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/bulkWorker.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/evalRunner.worker.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/fetchRunMetrics.worker.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/fetchSteps.worker.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/pureEnrichment.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/runMetricsWorker.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/scenarioListWorker.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/types.ts
create mode 100644 web/ee/src/lib/workers/evalRunner/workerFetch.ts
create mode 100644 web/ee/src/pages/_app.tsx
create mode 100644 web/ee/src/pages/_document.tsx
create mode 100644 web/ee/src/pages/auth/[[...path]].tsx
create mode 100644 web/ee/src/pages/auth/callback/[[...callback]].tsx
create mode 100644 web/ee/src/pages/post-signup/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/deployments/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/endpoints/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/human_a_b_testing/[evaluation_id]/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/[evaluation_id]/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/compare/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/single_model_test/[evaluation_id]/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/overview/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/playground/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/traces/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/variants/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluations/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/[evaluation_id]/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/compare/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluations/single_model_test/[evaluation_id]/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/observability/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/settings/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/share/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/testsets/[testset_id]/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/[project_id]/testsets/index.tsx
create mode 100644 web/ee/src/pages/w/[workspace_id]/p/index.tsx
create mode 100644 web/ee/src/pages/w/index.tsx
create mode 100644 web/ee/src/pages/workspaces/accept.tsx
create mode 100644 web/ee/src/services/billing/index.tsx
create mode 100644 web/ee/src/services/billing/types.d.ts
create mode 100644 web/ee/src/services/evaluationRuns/api/index.ts
create mode 100644 web/ee/src/services/evaluationRuns/api/types.ts
create mode 100644 web/ee/src/services/evaluationRuns/utils.ts
create mode 100644 web/ee/src/services/evaluations/api/index.ts
create mode 100644 web/ee/src/services/evaluations/api_ee/index.ts
create mode 100644 web/ee/src/services/evaluations/workerUtils.ts
create mode 100644 web/ee/src/services/human-evaluations/api/index.ts
create mode 100644 web/ee/src/services/human-evaluations/hooks/useEvaluationResults.ts
create mode 100644 web/ee/src/services/observability/api/helper.ts
create mode 100644 web/ee/src/services/observability/api/index.ts
create mode 100644 web/ee/src/services/observability/api/mock.ts
create mode 100644 web/ee/src/services/promptVersioning/api/index.ts
create mode 100644 web/ee/src/services/runMetrics/api/assets/contants.ts
create mode 100644 web/ee/src/services/runMetrics/api/index.ts
create mode 100644 web/ee/src/services/runMetrics/api/types.ts
create mode 100644 web/ee/src/services/variantConfigs/api/index.ts
create mode 100644 web/ee/src/state/billing/atoms.ts
create mode 100644 web/ee/src/state/billing/hooks.ts
create mode 100644 web/ee/src/state/billing/index.ts
create mode 100644 web/ee/src/state/observability/dashboard.ts
create mode 100644 web/ee/src/state/observability/index.ts
create mode 100644 web/ee/src/state/url/focusDrawer.ts
create mode 100644 web/ee/tailwind.config.ts
create mode 100644 web/ee/tests/1-settings/api-keys-management.spec.ts
create mode 100644 web/ee/tests/1-settings/model-hub.spec.ts
create mode 100644 web/ee/tests/2-app/create.spec.ts
create mode 100644 web/ee/tests/3-playground/run-variant.spec.ts
create mode 100644 web/ee/tests/4-prompt-registry/prompt-registry-flow.spec.ts
create mode 100644 web/ee/tests/5-testsset/testset.spec.ts
create mode 100644 web/ee/tests/6-auto-evaluation/assets/README.md
create mode 100644 web/ee/tests/6-auto-evaluation/assets/types.ts
create mode 100644 web/ee/tests/6-auto-evaluation/index.ts
create mode 100644 web/ee/tests/6-auto-evaluation/run-auto-evaluation.spec.ts
create mode 100644 web/ee/tests/6-auto-evaluation/tests.ts
create mode 100644 web/ee/tests/7-observability/observability.spec.ts
create mode 100644 web/ee/tests/8-deployment/deploy-variant.spec.ts
create mode 100644 web/ee/tests/9-human-annotation/assets/types.ts
create mode 100644 web/ee/tests/9-human-annotation/human-annotation.spec.ts
create mode 100644 web/ee/tests/9-human-annotation/index.ts
create mode 100644 web/ee/tests/9-human-annotation/tests.ts
create mode 100644 web/ee/tsconfig.json
diff --git a/.gitignore b/.gitignore
index a7cf771599..0c4b3800f5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,13 +21,6 @@ myenv/
**/error-*.log
-
-# hosting/
-! hosting/docker-compose/oss/.env.oss.dev.example
-! hosting/docker-compose/oss/.env.oss.gh.example
-! hosting/docker-compose/ee/.env.ee.dev.example
-! hosting/docker-compose/ee/.env.ee.gh.example
-
# examples/
examples/**/config.toml
examples/**/agenta.py
diff --git a/.gitleaks.toml b/.gitleaks.toml
new file mode 100644
index 0000000000..d1991dc726
--- /dev/null
+++ b/.gitleaks.toml
@@ -0,0 +1,32 @@
+title = "Agenta Gitleaks Configuration"
+version = 2
+
+[extend]
+useDefault = true
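+# Extends the built-in default gitleaks ruleset; the allowlist below only adds exceptions.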
+
+[allowlist]
+paths = [
+ # ---------------------------------------------------------------- PUBLIC DOCS
+ '''^website/docs/reference/api/.*\.mdx''',
+ '''^core/docs/docs/reference/api/.*\.mdx''',
+ '''^docs/docs/reference/api/.*\.mdx''',
+ '''^docs/.docusaurus/.*''',
+ # -------------------------------------------------------------- WEB ARTIFACTS
+ '''^.*/\.pnpm-store/.*''',
+ '''^.*/public/__env\.js$''',
+ '''^.*/\.next/.*''',
+ # -------------------------------------------------------------- ALL ENV FILES
+ '''^.*\.env.*$''',
+ # ----------------------------------------------------------------------------
+]
+regexes = [
+ # ------------------------------------------------------------ FALSE POSITIVES
+ '''is_completion=True''',
+ '''YOUR_API_KEY''',
+ '''_SECRET_KEY''',
+ # ----------------------------------------------------------------------------
+]
+
+# USEFUL GITLEAKS COMMANDS
+# gitleaks --config .gitleaks.toml --exit-code 1 --verbose git
+# gitleaks --config .gitleaks.toml --exit-code 1 --verbose detect --no-git
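+#
+# The pre-commit hook in .pre-commit-config.yaml runs the staged-only variant:
+# gitleaks --config .gitleaks.toml --exit-code 1 --verbose git --staged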
diff --git a/.gitleaksignore b/.gitleaksignore
new file mode 100644
index 0000000000..097b6b8358
--- /dev/null
+++ b/.gitleaksignore
@@ -0,0 +1,196 @@
+# LEGACY / REVOKED / BENIGN CREDENTIALS, FROM PAST COMMITS, FROM BEFORE CLEANUP
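+#
+# Each fingerprint below is [<commit-sha>:]<file-path>:<rule-id>:<line-number>;
+# entries without a commit SHA come from non-git (directory) scans.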
+docs/docusaurus.config.ts:generic-api-key:236
+api/oss/tests/manual/tracing/windowing.http:generic-api-key:3
+sdk/tests/legacy/baggage/config.toml:generic-api-key:4
+sdk/tests/legacy/debugging/simple-app/config.toml:generic-api-key:4
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/auth/admin.http:generic-api-key:3
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/annotations/crud.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/evaluators/crud.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/testsets/crud.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/02_span_id.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/03_parent_id.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/01_trace_id.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/07_end_time.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/06_start_time.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/05_span_name.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/04_span_kind.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/08_status_code.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/11_links.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/00_user_id.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/12_references.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/09_status_message.http:generic-api-key:2
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/filtering/10_attributes.http:generic-api-key:2
+854f1ca002740cd51252f81660701a3b6f9d6a8a:agenta-cli/debugging/simple-app/config.toml:generic-api-key:4
+51020488ce57a4b964c05cc0c41cecb4eb67692c:agenta-cli/debugging/simple-app/config.toml:generic-api-key:4
+6a4288ba3b4a2f95f24ed372bce7ac0679b5b868:agenta-cli/tests/observability_sdk/integrations/langchain/simple_chain_openinference.py:generic-api-key:12
+6a4288ba3b4a2f95f24ed372bce7ac0679b5b868:agenta-cli/tests/observability_sdk/integrations/langchain/simple_chain_openllmetery.py:generic-api-key:12
+6a4288ba3b4a2f95f24ed372bce7ac0679b5b868:agenta-cli/tests/observability_sdk/sanity_check/app_local.py:generic-api-key:8
+0f9c9ac3afcb8df950a743206715ab5ebe8808eb:agenta-cli/tests/observability_sdk/integrations/langchain/simple_chain_openllmetery.py:generic-api-key:12
+0f9c9ac3afcb8df950a743206715ab5ebe8808eb:agenta-cli/tests/observability_sdk/integrations/langchain/simple_chain_openinference.py:generic-api-key:12
+0f9c9ac3afcb8df950a743206715ab5ebe8808eb:agenta-cli/tests/observability_sdk/sanity_check/app_local.py:generic-api-key:8
+8cd7319eb87e87723a310555a820433f77ab01fd:agenta-cli/tests/observability_sdk/integrations/langchain/simple_chain_openinference.py:generic-api-key:11
+8cd7319eb87e87723a310555a820433f77ab01fd:agenta-cli/tests/observability_sdk/sanity_check/app_local.py:generic-api-key:7
+50c0e27be4960b5f06b5edbed6af912d79ea0f27:agenta-cli/tests/observability_sdk/integrations/langchain/simple_chain_openinference.py:generic-api-key:11
+50c0e27be4960b5f06b5edbed6af912d79ea0f27:agenta-cli/tests/observability_sdk/sanity_check/app_local.py:generic-api-key:7
+03b90aadcd58abd101454da5e3b925dde8e6cd43:agenta-cli/tests/observability_sdk/integrations/langchain/simple_chain.py:generic-api-key:11
+a1dbd3f3eafbe326a246a16fe70e02350cefdf2f:agenta-cli/tests/observability_sdk/integrations/langchain/simple_chain.py:generic-api-key:11
+86c2a2430e3ddbc544361343b7e9ea0152e53ab7:api/oss/tests/workflows/observability/test_otlp_ingestion.py:generic-api-key:21
+dc4370980d17ba1643a5c081670404f942ebfc57:agenta-cli/tests/management_sdk/manual_tests/apps_with_new_sdk/config.toml:generic-api-key:4
+850314eb630ca7fdcf756c7ffe36a6adad5cc845:agenta-cli/tests/management_sdk/manual_tests/apps_with_new_sdk/config.toml:generic-api-key:4
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/crud.http:generic-api-key:3
+3db7c34a8f206fb4a19e525ac5a964185d502c4a:api/oss/tests/manual/tracing/windowing.http:generic-api-key:3
+7ee494e8cdad4f54073be483e373e7a5bf273ea5:agenta-cli/tests/baggage/config.toml:generic-api-key:4
+a5e3197cd247c5468d8739ef9de811cd2a1cbc2f:agenta-cli/tests/baggage/config.toml:generic-api-key:4
+e84abaed2592e50d660d180d7fd373376b544f14:hosting/kubernetes/oss/secret.yml:kubernetes-secret-yaml:2
+e84abaed2592e50d660d180d7fd373376b544f14:hosting/kubernetes/oss/secret.yml:generic-api-key:12
+e84abaed2592e50d660d180d7fd373376b544f14:hosting/helm/oss/values.yaml:generic-api-key:88
+e84abaed2592e50d660d180d7fd373376b544f14:hosting/helm/oss/values.yaml:generic-api-key:89
+e84abaed2592e50d660d180d7fd373376b544f14:hosting/kubernetes/oss/secret.yml:generic-api-key:8
+e84abaed2592e50d660d180d7fd373376b544f14:hosting/kubernetes/oss/secret.yml:generic-api-key:9
+81a2b05aa4624cfc39587e5384bf7c106e547449:.github/workflows/frontend-test.yml:openai-api-key:27
+4857d8f04896e27d707e2967bb361eb1a0b129db:.github/workflows/frontend-test.yml:openai-api-key:27
+8465021df57fca629f14c269d3f37d18d6fdcd11:services/completion-new-sdk-prompt/docker-compose.yml:openai-api-key:10
+406e68077c51da204b1f63f193a2defe6031c966:agenta-web/cypress.config.ts:openai-api-key:10
+450a435754557bfa1d3d3e372f4b47e4eb63f93e:agenta-web/cypress.config.ts:openai-api-key:10
+066e345ad9ba7318fc59b191cf33af2e81634aa8:agenta-web/cypress/support/commands/evaluations.ts:openai-api-key:106
+3533b30e483378a8ecb900c603a3c54ffc9cc390:agenta-web/cypress/support/commands/evaluations.ts:openai-api-key:106
+41e5d327e87083f55850c6933611cdc79ea9d204:agenta-backend/agenta_backend/tests/variants_evaluators_router/conftest.py:openai-api-key:25
+9968400e3095fdc1fb219f45c0d73db13c6de499:agenta-backend/agenta_backend/tests/variants_evaluators_router/conftest.py:openai-api-key:25
+a8efa140a02295ef6accbd02bc7c4c4eeb75e435:agenta-backend/agenta_backend/tests/variants_evaluators_router/conftest.py:openai-api-key:17
+d343b2a5b12387fc6b99d508b5e776f7689736c1:agenta-backend/agenta_backend/tests/variants_evaluators_router/conftest.py:openai-api-key:17
+5f37c440e203cf56d7f08a8efdd7ca372c646beb:docs/docs/prompt-management/05-adding-custom-providers.mdx:generic-api-key:81
+73644b725b5409be78d1aeecf7f5ff6a24ab3643:docs/docusaurus.config.ts:generic-api-key:220
+41c85fef68f9f8c2e4576956369ef600223193c8:website/docusaurus.config.ts:generic-api-key:184
+179d78e547e2eb92737cdd0ba7fd3eeb1f4bc5ce:website/docusaurus.config.ts:generic-api-key:184
+faf49eadbd38fd6771c4687fea78528ad73741b6:api/oss/tests/manual/annotations/crud.http:generic-api-key:2
+f86dddabb759924689022d2451d97efe218848c9:api/oss/tests/manual/evaluations/crud.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/auth/admin.http:generic-api-key:3
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/annotations/crud.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/evaluators/crud.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/testsets/crud.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/00_user_id.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/02_span_id.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/03_parent_id.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/01_trace_id.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/04_span_kind.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/06_start_time.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/08_status_code.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/05_span_name.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/07_end_time.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/09_status_message.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/12_references.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/11_links.http:generic-api-key:2
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/filtering/10_attributes.http:generic-api-key:2
+4888444e93a8438334a9dfb81c7979500d0ab4bf:api/oss/tests/manual/testsets/crud.http:generic-api-key:2
+5289a1c740cff9dec0047e7dc05902edbc471649:api/oss/tests/manual/tracing/filtering/12_references.http:generic-api-key:2
+96b9056a6ff1160f11dcc302321d4a29a7f1b8dd:api/oss/tests/manual/workflows/artifacts.http:generic-api-key:2
+6a4af8b70816f18ae69056df96b54c622e7ef494:api/oss/tests/manual/feedback/crud.http:generic-api-key:2
+7b0eeb2ae0cfa80e9a79cee814ed10c9b57ee9d3:api/oss/tests/manual/annotators/crud.http:generic-api-key:2
+876ba2f78358d43cc6dafe518e38ef404b6462f0:api/oss/tests/manual/annotations/crud.http:generic-api-key:2
+05f8741ea3349096e60a1686c1cef3585a6d34d7:api/oss/tests/manual/tracing/filtering/00_user_id.http:generic-api-key:2
+f47bb6f3b65c50664b354f33081d131289fa47cd:api/oss/tests/manual/tracing/filtering/11_links.http:generic-api-key:2
+18bfd66e6bc309ada998457a32b9a4ca689015a2:api/oss/tests/manual/tracing/filtering/10_attributes.http:generic-api-key:2
+0dcfe02574a545c8400b1b5385d7662143ec2544:api/oss/tests/manual/tracing/filtering/08_status_code.http:generic-api-key:2
+260822ac28ec5c08f9b4c2b04e895d46fcbfb164:api/oss/tests/manual/tracing/filtering/09_status_message.http:generic-api-key:2
+c16ca8eca0a2743c541457a80f88fe0ec71151cb:api/oss/tests/manual/tracing/filtering_parent_id.http:generic-api-key:2
+fe9a8b1c7518160bc3c9f80eff3ba1076a2a5030:api/oss/tests/manual/tracing/filtering_end_time.http:generic-api-key:2
+f158907f559e92fb91672c696715a90aef5470ab:api/oss/tests/manual/tracing/filtering_span_id.http:generic-api-key:2
+fe9a8b1c7518160bc3c9f80eff3ba1076a2a5030:api/oss/tests/manual/tracing/filtering_start_time.http:generic-api-key:2
+86dda27e6458ea8f7e64bfb4a9f63946c8fc82ce:api/oss/tests/manual/tracing/filtering_trace_id.http:generic-api-key:2
+7afc6f26080c6c37219995089aed409e50ef6279:api/oss/tests/manual/tracing/filtering_span_name.http:generic-api-key:2
+71a49c35758dab163bbbe700f55f6f50e6bdf9a5:api/oss/tests/manual/tracing/filtering_span_kind.http:generic-api-key:2
+b14e377b19cc4f77db9d0a2b51f72b88b6f54c6c:api/oss/tests/manual/auth/admin.http:generic-api-key:3
+0c6acff0523bd4e594e43caf63c4342e319476b8:hosting/kubernetes/oss/secret.yml:kubernetes-secret-yaml:2
+97c08e2f4ad87c2aacf6760da60eb01ec8d5d329:cloud/tests/conftest.py:generic-api-key:114
+6526d232893d18496af47a05c7b99e7c0c1fe510:docs/docs/prompt-management/05-adding-custom-providers.mdx:generic-api-key:71
+0c6acff0523bd4e594e43caf63c4342e319476b8:hosting/kubernetes/oss/secret.yml:generic-api-key:12
+bf7e1824839cea10432731174549faeb9bad3545:hosting/helm/oss/values.yaml:generic-api-key:83
+bf7e1824839cea10432731174549faeb9bad3545:hosting/helm/oss/values.yaml:generic-api-key:84
+0c6acff0523bd4e594e43caf63c4342e319476b8:hosting/kubernetes/oss/secret.yml:generic-api-key:8
+0c6acff0523bd4e594e43caf63c4342e319476b8:hosting/kubernetes/oss/secret.yml:generic-api-key:9
+fd477298c83aa220b01c6704058382c1ded1fdca:core/agenta-cli/debugging/simple-app/config.toml:generic-api-key:4
+fd477298c83aa220b01c6704058382c1ded1fdca:core/agenta-cli/tests/baggage/config.toml:generic-api-key:4
+f92a341a7e45fc051a08da1fa619137a192c89ae:api/ee/tests/manual/tracing.http:generic-api-key:5
+eba1ed50e6846a323d456b6da510f42d8c8bbe9a:api/ee/tests/manual/billing.http:generic-api-key:4
+9d741648f9ec1719c6f7f0fcb16cbf116458916c:api/oss/tests/manual/annotations/crud.http:generic-api-key:3
+9d741648f9ec1719c6f7f0fcb16cbf116458916c:api/oss/tests/manual/tracing/crud.http:generic-api-key:3
+273d2f5a1b37ef9420c4e40303b8fc6233362571:api/ee/tests/manual/billing.http:generic-api-key:4
+c078d4b1395ea2856891424f82e80f4fe60d7136:api/ee/tests/manual/billing.http:generic-api-key:4
+2793a4b2f065d7b588fa6733b74f68c0748473a5:api/oss/tests/manual/tracing/crud.http:generic-api-key:3
+ef6f83612a7cfd552147b49928feb8a5d4429c0d:api/oss/tests/manual/tracing/filtering.http:generic-api-key:3
+b14e377b19cc4f77db9d0a2b51f72b88b6f54c6c:api/oss/tests/manual/annotations/crud.http:generic-api-key:3
+b14e377b19cc4f77db9d0a2b51f72b88b6f54c6c:api/oss/tests/manual/tracing/crud.http:generic-api-key:3
+19ccc3f1f292edca26e840428ebc6224cbaef78a:api/ee/tests/manual/annotations/crud.http:generic-api-key:3
+bf42b5eaa7e805a249f52d65a6882d6ade2828f3:api/ee/tests/manual/tracing/windowing.http:generic-api-key:5
+bb0c1b3fb0032b6dbebe659d745d5cb90aa306ce:api/ee/tests/manual/tracing.http:generic-api-key:5
+16622c30916fae1b284b1b7150e4b7c57413ad17:api/ee/tests/manual/evaluations/sdk/test.py:generic-api-key:16
+75ed5549eeb4685c5234c1ec577721920cc0ec9c:api/ee/tests/manual/evaluations/sdk/test.py:generic-api-key:16
+4e743f16edcb3ff4e13b1400b9ff8175b072a5e1:api/ee/tests/manual/evaluations/sdk/test.py:generic-api-key:16
+b587813ed56832b2df7fb7560775ee0b75f03674:api/ee/tests/manual/evaluations/sdk/test.py:generic-api-key:12
+3abc3f4d2051c4df2f64c6d88608bd9bf1ae265f:api/ee/tests/manual/evaluations/sdk/test.py:generic-api-key:9
+35442f703897393a3d2a5e9aa7a42985787bb24f:api/ee/tests/manual/evaluations/sdk/test.py:generic-api-key:16
+12f36507e801d41e2388889777c195557e7a6e5c:api/ee/tests/manual/evaluations/sdk/test_serve.py:generic-api-key:16
+12f36507e801d41e2388889777c195557e7a6e5c:api/ee/tests/manual/evaluations/sdk/test_handlers.py:generic-api-key:16
+e6d87a97aa4750ace564ac28eafda0123c21e017:api/oss/tests/workflows/observability/test_otlp_ingestion.py:generic-api-key:21
+fd477298c83aa220b01c6704058382c1ded1fdca:core/docs/docusaurus.config.ts:generic-api-key:232
+c8d8f465b61764195de460164e6c27e0fe4b2b9a:docs/docs/self-host/05-advanced-configuration.mdx:generic-api-key:37
+a268b8a81a700704e28d82c5cb9af31dde32146b:ee/docker/docker-compose.demo.prod.yml:generic-api-key:25
+56829b2eccdec425954243d0ce5e4fcac9d05e9c:ee/docker/docker-compose.cloud.dev.yml:generic-api-key:25
+91678b6a27c326e0002205f79fd8999a7591e38f:ee/docker/docker-compose.demo.prod.yml:generic-api-key:25
+91678b6a27c326e0002205f79fd8999a7591e38f:ee/docker/docker-compose.demo.dev.yml:generic-api-key:25
+56829b2eccdec425954243d0ce5e4fcac9d05e9c:ee/docker/docker-compose.cloud.dev.yml:generic-api-key:22
+ad6b459dfc5ac1e5c140fcf3e03e247ba31383ae:ee/docker/docker-compose.demo.prod.yml:generic-api-key:22
+594f33f5b7eb665edb38208666d27d6de6365946:ee/docker/docker-compose.demo.prod.yml:generic-api-key:73
+594f33f5b7eb665edb38208666d27d6de6365946:ee/docker/docker-compose.demo.dev.yml:generic-api-key:22
+f6ef6aa32d569ee025bdb3ce9f515521a4095494:cloud/agenta-backend/agenta_backend/cloud/__init__.py:generic-api-key:155
+b3e5fae0e270f2c92a65360123c980d725c5f226:ee/agenta-backend/agenta_backend/ee/__init__.py:generic-api-key:107
+c98a5da1a33d2c0986e3c66329eaa5237fbccf3d:hosting/docker-compose/ee/aws/docker-compose.oss.prod.yml:generic-api-key:76
+4ee55c08b2fed661eaf90876f96c329d7c7eeb6b:cloud/docker/docker-compose.oss.stage.yml:generic-api-key:46
+169c54d84f5d1931601550a9d3aa76874ef73ec5:cloud/docker/docker-compose.oss.stage.yml:generic-api-key:47
+f725cfc9247743bdc44f84edee109ff36193d741:cloud/docker/docker-compose.oss.stage.yml:generic-api-key:46
+cbd8ac00ecdc2c8124b989a274c6f835c09f8474:cloud/docker/docker-compose.oss.prod.yml:generic-api-key:46
+2fb6b0f94f8bf711255e0901c03787b73f3d650f:cloud/docker/docker-compose.oss.prod.yml:generic-api-key:46
+a5e7781869b2a4bf22dd5e22fd5e5ae2ec8d02ea:cloud/docker/newrelic-infra.yml:generic-api-key:1
+2fb6b0f94f8bf711255e0901c03787b73f3d650f:cloud/docker/docker-compose.oss.prod.yml:generic-api-key:171
+f6ef6aa32d569ee025bdb3ce9f515521a4095494:cloud/agenta-backend/agenta_backend/cloud/__init__.py:generic-api-key:144
+b3e5fae0e270f2c92a65360123c980d725c5f226:ee/agenta-backend/agenta_backend/ee/__init__.py:generic-api-key:96
+a268b8a81a700704e28d82c5cb9af31dde32146b:ee/docker/docker-compose.demo.prod.yml:generic-api-key:22
+9c55f5572904ae07b73f73ee365e833d0637633a:ee/docker/docker-compose.demo.prod.yml:generic-api-key:22
+ad74134f522cde71f860cb59b6363a8fdf0a64c6:ee/setup_agenta_web.sh:generic-api-key:26
+5a10aacebd0ed4f2e613eb9176e95836aea34f15:ee/setup_agenta_web.sh:generic-api-key:26
+637068dd09eff7b30b776061863027a9b9aa1deb:ee/setup_agenta_web.sh:generic-api-key:26
+590578c803d94d8ccb1a6ca977471f3d44b43fc3:hosting/helm/oss/templates/config/app-configmap.yaml:generic-api-key:45
+1d8f08b267675726441fcaaae24572bb635c5eac:api/oss/src/utils/env.py:generic-api-key:53
+55f27e52327062382beb299b162f94895268d766:web/oss/public/__ENV.js:generic-api-key:1
+c98a5da1a33d2c0986e3c66329eaa5237fbccf3d:hosting/docker-compose/ee/aws/docker-compose.oss.prod.yml:generic-api-key:73
+bf0cd42bffc2581b1df6f56fa6e4b20ff9b68c33:hosting/docker-compose/ee/aws/docker-compose.oss.aws.yml:generic-api-key:61
+52cd40cefd3121eea2e21205e8208712b093529a:core/hosting/docker-compose/ee/docker-compose.dev.yml:generic-api-key:18
+6efb8a0c9620a316cf81fe961b1407e93aa2efa7:core/hosting/docker-compose/oss/docker-compose.gh.yml:generic-api-key:17
+6efb8a0c9620a316cf81fe961b1407e93aa2efa7:core/hosting/docker-compose/oss/docker-compose.dev.yml:generic-api-key:17
+44d1669c1a53b3ca47e3689dda5500e6f742f525:core/hosting/docker-compose/ee/docker-compose.dev.yml:generic-api-key:18
+58a4230a8f2e63b2836f81bcf2341ba12003189e:core/hosting/docker-compose/oss/docker-compose.yml:generic-api-key:30
+fd477298c83aa220b01c6704058382c1ded1fdca:core/agenta-cli/agenta/cli/helper.py:generic-api-key:19
+fd477298c83aa220b01c6704058382c1ded1fdca:core/agenta-web/prod.gh.Dockerfile:generic-api-key:7
+fd477298c83aa220b01c6704058382c1ded1fdca:core/docker-compose.gh.yml:generic-api-key:26
+fd477298c83aa220b01c6704058382c1ded1fdca:core/docker-compose.gh.yml:generic-api-key:95
+fd477298c83aa220b01c6704058382c1ded1fdca:core/docker-compose.yml:generic-api-key:30
+fd477298c83aa220b01c6704058382c1ded1fdca:core/docker-compose.yml:generic-api-key:111
+fd477298c83aa220b01c6704058382c1ded1fdca:core/docker-compose.prod.yml:generic-api-key:102
+4ee55c08b2fed661eaf90876f96c329d7c7eeb6b:cloud/docker/docker-compose.oss.stage.yml:generic-api-key:43
+169c54d84f5d1931601550a9d3aa76874ef73ec5:cloud/docker/docker-compose.oss.stage.yml:generic-api-key:44
+9e7831e7500364776cb3e9eac41448907ef92dcd:cloud/docker/docker-compose.test.yml:generic-api-key:28
+9e7831e7500364776cb3e9eac41448907ef92dcd:cloud/docker/docker-compose.test.yml:generic-api-key:83
+fe72ad1a8d14e1f3bce547ef224fc75e3df8f4ff:cloud/docker/docker-compose.cloud.test.yml:generic-api-key:28
+fe72ad1a8d14e1f3bce547ef224fc75e3df8f4ff:cloud/docker/docker-compose.cloud.test.yml:generic-api-key:83
+6198bbc532d8e984ef94276b58a7ab8dc65a279f:cloud/docker/docker-compose.oss.stage.yml:generic-api-key:43
+4b4cdbdf4b8ad4a9342fdb939b7e30f88420fccd:cloud/docker/docker-compose.oss.prod.yml:generic-api-key:44
+a268b8a81a700704e28d82c5cb9af31dde32146b:ee/docker/docker-compose.demo.prod.yml:sendgrid-api-token:26
+11ad273f1039e9263cf8d2f61338a121d59b9cc7:ee/docker/docker-compose.cloud.prod.yml:sendgrid-api-token:22
+11ad273f1039e9263cf8d2f61338a121d59b9cc7:ee/docker/docker-compose.cloud.dev.yml:sendgrid-api-token:21
+11ad273f1039e9263cf8d2f61338a121d59b9cc7:ee/docker/docker-compose.demo.prod.yml:sendgrid-api-token:26
+11ad273f1039e9263cf8d2f61338a121d59b9cc7:ee/docker/docker-compose.demo.dev.yml:sendgrid-api-token:26
+b7bc21c67bbae3c06c372bc585c4917a80613a14:cloud/agenta-backend/agenta_backend/cloud/routers/payment_router.py:stripe-access-token:13
+d8b4af2ae8c1084dbdd30fca59aa84e8ece047db:examples/python/annotation-example.py:openai-api-key:19
+a268b8a81a700704e28d82c5cb9af31dde32146b:ee/docker/docker-compose.demo.prod.yml:openai-api-key:24
+02d9f665aed89e8d69e06acdc7d01d699ee5b0dd:ee/docker/docker-compose.demo.prod.yml:openai-api-key:24
+c8d8f465b61764195de460164e6c27e0fe4b2b9a:docs/docs/self-host/05-advanced-configuration.mdx:generic-api-key:46
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8363b88652..f498cd623a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,6 +3,12 @@ repos:
hooks:
- id: gitleaks-pre-commit
name: gitleaks git (staged only)
- entry: echo "Aloha"
+ entry: bash -c 'gitleaks --config .gitleaks.toml --exit-code 1 --verbose git --staged'
language: system
pass_filenames: false
+ - id: gitleaks-pre-push
+ name: gitleaks git (pre-push, scan diff)
+ entry: bash -c 'gitleaks --config .gitleaks.toml --exit-code 1 --verbose git --log-opts "$(git merge-base HEAD "origin/$(git rev-parse --abbrev-ref HEAD)" 2>/dev/null || git merge-base HEAD origin/main)..HEAD"'
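+      # Scans only the commits being pushed: from the merge-base with the
+      # upstream branch (falling back to origin/main when no upstream exists)
+      # up to HEAD.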
+ language: system
+ stages: [pre-push]
+ pass_filenames: false
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5cfd9357c1..5810643c7c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -40,4 +40,9 @@ We had many zombie issues and PRs (assigned but inactive) in the past. We want t
- An issue may only be assigned to one person for up to one week (three days for very simple issues). If the issue remains unsolved after a week, it will be unassigned and made available to others.
- Any pull request (PR) left inactive by the author for over a week will be closed. The author can reopen it if they wish to continue.
-We look forward to seeing your contributions to Agenta!
\ No newline at end of file
+We look forward to seeing your contributions to Agenta!
+
+## Contributor License Agreement
+If you want to contribute, you need to sign a Contributor License Agreement (CLA). We require this to avoid potential intellectual property problems in the future. Signing only takes a click. Here is how it works:
+
+After you open a PR, a bot will automatically comment asking you to sign the agreement. Click the link in the comment, log in with your GitHub account, and sign the agreement.
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
index 79b3725428..1fff9c4444 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,16 @@
-The MIT License
+Copyright (c) 2023–2025
+Agentatech UG (haftungsbeschränkt), doing business as “Agenta”
+
+Portions of this software are licensed as follows:
-Copyright (c) Agentatech UG (haftungsbeschränkt)
+- All content that resides under any "ee/" directory of this repository, if
+such directories exist, is licensed under the license defined in "ee/LICENSE".
+- All third party components incorporated into the Agenta Software are licensed
+under the original license provided by the owner of the applicable component.
+- Content outside of the above-mentioned directories or restrictions is
+available under the "MIT Expat" license as defined below.
+
+The MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 082ea25f29..6f9ef58382 100644
--- a/README.md
+++ b/README.md
@@ -266,4 +266,4 @@ This project follows the [all-contributors](https://github.com/all-contributors/
## Disabling Anonymized Tracking
-By default, Agenta automatically reports anonymized basic usage statistics. This helps us understand how Agenta is used and track its overall usage and growth. This data does not include any sensitive information. To disable anonymized telemetry set `AGENTA_TELEMETRY_ENABLED` to `false` in your `.env` file.
\ No newline at end of file
+By default, Agenta automatically reports anonymized basic usage statistics. This helps us understand how Agenta is used and track its overall usage and growth. This data does not include any sensitive information. To disable anonymized telemetry set `AGENTA_TELEMETRY_ENABLED` to `false` in your `.env` file.
diff --git a/SECURITY.md b/SECURITY.md
index f45e7a7624..fabf40c910 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -1,19 +1,76 @@
# Security Policy
+
## Reporting a Vulnerability
If you believe you have found a security vulnerability in any Agenta repository, please report it to us through coordinated disclosure.
-Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.
+**Do not** report security vulnerabilities via public GitHub issues, pull requests, or discussions.
+
+Instead, please send an email to **security@agenta.ai**.
+
+---
+
+## Information to Include
+
+Please include as much of the following as you can to help us reproduce and resolve the issue:
+
+- Type of issue (e.g., buffer overflow, SQL injection, cross-site scripting).
+- Full paths of source files related to the issue.
+- The location of the affected source code (tag, branch, commit SHA, or direct URL).
+- Any special configuration or environment required to reproduce.
+- Step-by-step instructions to reproduce.
+- Proof-of-concept or exploit code (if possible).
+- Expected vs actual behaviour and potential impact.
+- Your contact details and disclosure timeline preference.
+
+---
+
+## Our Process
+
+- **Acknowledgement**: We will acknowledge receipt within **3 business days**.
+- **Triage**: We aim to complete an initial triage within **7 calendar days** and will share severity and next steps.
+- **Remediation & Disclosure**: For critical vulnerabilities we aim to release a fix or mitigation within **30 days**. For other issues, typically within **90 days**. We will coordinate any public disclosure with you.
+- We will provide status updates as needed during remediation.
+
+---
+
+## Safe Harbor
+
+We respect and protect good-faith security research. We will not initiate legal action against you for good-faith testing conducted as part of coordinated disclosure, provided that you:
+
+- Do not access, modify, or exfiltrate data beyond what is necessary to demonstrate the issue.
+- Do not disrupt production services or attempt destructive actions.
+
+---
+
+## Scope Exclusions
+
+The following are **out of scope**:
+
+- Third-party services not operated by Agenta.
+- Physical security attacks or social engineering of personnel.
+- Low-risk informational issues without security impact (e.g., generic version banners).
+- Denial-of-service attacks (**we will not accept DoS testing against production**).
+
+---
+
+## Recognition & Credits
+
+If you report a valid vulnerability and want public recognition, tell us how you wish to be credited (full name, handle, company, or anonymous). Recognition is discretionary and will be coordinated with you.
+
+---
+
+## Emergency / Out-of-band
+
+If the security mailbox is unavailable and you need an immediate or urgent channel, contact our general line: **team@agenta.ai** (monitored during business hours). For truly critical emergencies, include “EMERGENCY / SECURITY” in the subject line.
-Instead, please send an email to team@agenta.ai.
+---
-Please include as much of the information listed below as you can to help us better understand and resolve the issue:
+## Contact Retention & Privacy
- The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting)
- Full paths of source file(s) related to the manifestation of the issue
- The location of the affected source code (tag/branch/commit or direct URL)
- Any special configuration required to reproduce the issue
- Step-by-step instructions to reproduce the issue
- Proof-of-concept or exploit code (if possible)
- Impact of the issue, including how an attacker might exploit the issue
+- Report metadata will be retained for incident tracking and compliance.
+- Personal data you provide will be handled according to our privacy policy.
+- We will only share reporter data internally on a need-to-know basis.
+---
diff --git a/api/ee/LICENSE b/api/ee/LICENSE
new file mode 100644
index 0000000000..ae7a2f38f4
--- /dev/null
+++ b/api/ee/LICENSE
@@ -0,0 +1,37 @@
+Agenta Enterprise License (the “Enterprise License”)
+Copyright (c) 2023–2025
+Agentatech UG (haftungsbeschränkt), doing business as “Agenta” (“Agenta”)
+
+With regard to the Agenta Software:
+
+This software and associated documentation files (the "Software") may only be
+used in production, if you (and any entity that you represent) have agreed to,
+and are in compliance with, the Agenta Subscription Terms of Service, available
+at https://agenta.ai/terms (the “Enterprise Terms”), or other
+agreement governing the use of the Software, as agreed by you and Agenta,
+and otherwise have a valid Agenta Enterprise License.
+
+Subject to the foregoing sentence, you are free to modify this Software and
+publish patches to the Software. You agree that Agenta and/or its licensors
+(as applicable) retain all right, title and interest in and to all such
+modifications and/or patches, and all such modifications and/or patches may
+only be used, copied, modified, displayed, distributed, or otherwise exploited
+with a valid Agenta Enterprise License. Notwithstanding the foregoing, you may
+copy and modify the Software for development and testing purposes, without
+requiring a subscription. You agree that Agenta and/or its licensors (as
+applicable) retain all right, title and interest in and to all such
+modifications. You are not granted any other rights beyond what is expressly
+stated herein. Subject to the foregoing, it is forbidden to copy, merge,
+publish, distribute, sublicense, and/or sell the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+For all third party components incorporated into the Agenta Software, those
+components are licensed under the original license provided by the owner of the
+applicable component.
diff --git a/hosting/gcp/credentials.json b/api/ee/__init__.py
similarity index 100%
rename from hosting/gcp/credentials.json
rename to api/ee/__init__.py
diff --git a/api/ee/databases/__init__.py b/api/ee/databases/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/databases/postgres/init-db-ee.sql b/api/ee/databases/postgres/init-db-ee.sql
new file mode 100644
index 0000000000..e949c33926
--- /dev/null
+++ b/api/ee/databases/postgres/init-db-ee.sql
@@ -0,0 +1,39 @@
+-- Ensure we are connected to the default postgres database before creating new databases
+\c postgres
+
+-- Create the 'username' role with a password if it doesn't exist
+SELECT 'CREATE ROLE username WITH LOGIN PASSWORD ''password'''
+WHERE NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'username')\gexec
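+
+-- NOTE: \gexec is a psql meta-command that executes each row of the preceding
+-- query result as a new SQL statement. PostgreSQL has no IF NOT EXISTS variant
+-- for CREATE ROLE or CREATE DATABASE, and CREATE DATABASE cannot run inside a
+-- transaction or DO block, so the SELECT ... \gexec pattern emulates
+-- conditional creation.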
+
+-- Create the 'agenta_ee_core' database if it doesn't exist
+SELECT 'CREATE DATABASE agenta_ee_core'
+WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'agenta_ee_core')\gexec
+
+-- Create the 'agenta_ee_tracing' database if it doesn't exist
+SELECT 'CREATE DATABASE agenta_ee_tracing'
+WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'agenta_ee_tracing')\gexec
+
+-- Create the 'agenta_ee_supertokens' database if it doesn't exist
+SELECT 'CREATE DATABASE agenta_ee_supertokens'
+WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'agenta_ee_supertokens')\gexec
+
+-- Grant necessary permissions to 'username' for all three databases
+GRANT ALL PRIVILEGES ON DATABASE agenta_ee_core TO username;
+GRANT ALL PRIVILEGES ON DATABASE agenta_ee_tracing TO username;
+GRANT ALL PRIVILEGES ON DATABASE agenta_ee_supertokens TO username;
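+
+-- Note: database-level GRANTs only cover CONNECT, CREATE, and TEMP on the
+-- database itself; schema-level grants are still required, hence the \c
+-- switches below (PostgreSQL 15+ no longer grants CREATE on the public
+-- schema to PUBLIC).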
+
+
+-- Switch to 'agenta_ee_core' and grant schema permissions
+\c agenta_ee_core
+GRANT ALL ON SCHEMA public TO username;
+
+-- Switch to 'agenta_ee_tracing' and grant schema permissions
+\c agenta_ee_tracing
+GRANT ALL ON SCHEMA public TO username;
+
+-- Switch to 'agenta_ee_supertokens' and grant schema permissions
+\c agenta_ee_supertokens
+GRANT ALL ON SCHEMA public TO username;
+
+-- Return to postgres
+\c postgres
diff --git a/api/ee/databases/postgres/migrations/__init__.py b/api/ee/databases/postgres/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/databases/postgres/migrations/core/README.md b/api/ee/databases/postgres/migrations/core/README.md
new file mode 100644
index 0000000000..8d8552e3c3
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/README.md
@@ -0,0 +1,35 @@
+# Migrations with Alembic
+
+Generic single-database configuration with an async dbapi.
+
+## Autogenerate Migrations
+
+One of Alembic's key features is its ability to auto-generate migration scripts. By analyzing the current database state and comparing it with the application's table metadata, Alembic can automatically generate the necessary migration scripts via the `--autogenerate` flag of the `alembic revision` command.
+
+Note that autogenerate sometimes does not detect all database changes, so it is always necessary to manually review (and correct if needed) the candidate migrations it produces.
+
+### Making Migrations
+
+To generate a migration after creating a new table schema or modifying an existing column, run the following command:
+
+```bash
+docker exec -e PYTHONPATH=/app -w /app/ee/databases/postgres/migrations/core agenta-ee-dev-api-1 alembic -c alembic.ini revision --autogenerate -m "migration message"
+```
+
+The above command creates a script containing the changes made to the database schema. Replace "migration message" with a message that clearly indicates what was changed (a sketch of a generated script follows the examples below). Here are some examples:
+
+- added username column in users table
+- renamed template_uri to template_repository_uri
+- etc
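+
+For illustration, here is a minimal sketch of the kind of revision script that
+autogenerate produces, matching the first example message above (the revision
+identifiers are hypothetical):
+
+```python
+"""added username column in users table
+
+Revision ID: 1a2b3c4d5e6f
+Revises: 0a1b2c3d4e5f
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+# Revision identifiers, used by Alembic (hypothetical values).
+revision = "1a2b3c4d5e6f"
+down_revision = "0a1b2c3d4e5f"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+    # Apply the schema change described in the migration message.
+    op.add_column("users", sa.Column("username", sa.String(), nullable=True))
+
+
+def downgrade() -> None:
+    # Revert the change so a downgrade restores the previous schema.
+    op.drop_column("users", "username")
+```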
+
+### Applying Migrations
+
+```bash
+docker exec -e PYTHONPATH=/app -w /app/ee/databases/postgres/migrations/core agenta-ee-dev-api-1 alembic -c alembic.ini upgrade head
+```
+
+The above command applies the generated migration script(s) to the database. If you'd like to revert the most recent migration, run the following command:
+
+```bash
+docker exec -e PYTHONPATH=/app -w /app/ee/databases/postgres/migrations/core agenta-ee-dev-api-1 alembic -c alembic.ini downgrade -1
+```
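+
+To step back to a specific revision instead of only the most recent one, pass that revision's identifier (as shown by `alembic history`) in place of `-1`.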
diff --git a/api/ee/databases/postgres/migrations/core/alembic.ini b/api/ee/databases/postgres/migrations/core/alembic.ini
new file mode 100644
index 0000000000..1888be8152
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/alembic.ini
@@ -0,0 +1,112 @@
+# A generic, single database configuration.
+
+[alembic]
+script_location = /app/ee/databases/postgres/migrations/core
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires the python>=3.9 or backports.zoneinfo library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to postgres/versions. When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:postgres/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts. See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/api_keys.py b/api/ee/databases/postgres/migrations/core/data_migrations/api_keys.py
new file mode 100644
index 0000000000..769b6b8157
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/api_keys.py
@@ -0,0 +1,282 @@
+import uuid
+import traceback
+from typing import Optional
+
+import click
+from sqlalchemy.future import select
+from sqlalchemy import Connection, update, func, or_, insert, delete
+
+from oss.src.models.db_models import APIKeyDB
+from ee.src.models.db_models import ProjectDB
+from ee.src.models.extended.deprecated_models import DeprecatedAPIKeyDB
+
+
+BATCH_SIZE = 200
+
+
+def get_project_id_from_workspace_id(
+ session: Connection, workspace_id: str
+) -> Optional[str]:
+ statement = select(ProjectDB).filter_by(
+ workspace_id=uuid.UUID(workspace_id), is_default=True
+ )
+ project = session.execute(statement).fetchone()
+ return str(project.id) if project is not None else None
+
+
+def get_workspace_id_from_project_id(
+ session: Connection, project_id: str
+) -> Optional[str]:
+ statement = select(ProjectDB).filter_by(id=uuid.UUID(project_id))
+ project = session.execute(statement).fetchone()
+ return str(project.workspace_id) if project is not None else None
+
+
+def update_api_key_to_make_use_of_project_id(session: Connection):
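+    """Copy deprecated API keys (keyed by user_id/workspace_id) into the new
+    project-scoped APIKeyDB table, then delete deprecated rows that still
+    have no project_id."""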
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+ SKIPPED_RECORDS = 0
+
+        # Count total rows where user_id and workspace_id are not NULL and project_id is NULL
+ stmt = (
+ select(func.count())
+ .select_from(DeprecatedAPIKeyDB)
+ .filter(
+ DeprecatedAPIKeyDB.user_id.isnot(None),
+ DeprecatedAPIKeyDB.workspace_id.isnot(None),
+ DeprecatedAPIKeyDB.project_id.is_(None),
+ )
+ )
+ result = session.execute(stmt).scalar()
+ TOTAL_API_KEYS_WITH_USER_AND_WORKSPACE_ID = result if result is not None else 0
+ print(
+ f"Total rows in api_keys table with user_id and workspace_id not been NULL is {TOTAL_API_KEYS_WITH_USER_AND_WORKSPACE_ID}"
+ )
+
+ while True:
+            # Fetch a batch of api_keys where user_id and workspace_id are not NULL
+ records = session.execute(
+ select(DeprecatedAPIKeyDB)
+ .filter(
+ or_(
+ DeprecatedAPIKeyDB.user_id.isnot(None),
+ DeprecatedAPIKeyDB.user_id != "None",
+ ),
+ or_(
+ DeprecatedAPIKeyDB.workspace_id != "None",
+ DeprecatedAPIKeyDB.workspace_id.isnot(None),
+ ),
+ DeprecatedAPIKeyDB.project_id.is_(None),
+ )
+ .offset(offset)
+ .limit(BATCH_SIZE)
+ ).fetchall()
+ batch_migrated = len(records)
+ if not records:
+ break
+
+ # Process and update records in the batch
+ for record in records:
+ print(
+ "Record (has workspace_id?, workspace id, user id, id, types [workspace_id & user_id]) --- ",
+ hasattr(record, "workspace_id"),
+ record.workspace_id,
+ record.user_id,
+ record.id,
+ type(record.workspace_id),
+ type(record.user_id),
+ )
+ if (
+ hasattr(record, "workspace_id")
+ and record.workspace_id
+ not in [
+ "None",
+ "",
+ ]
+ and record.user_id not in ["None", ""]
+ ):
+ project_id = get_project_id_from_workspace_id(
+ session=session, workspace_id=str(record.workspace_id)
+ )
+ if project_id is None:
+ SKIPPED_RECORDS += 1
+ print(
+ f"Could not retrieve project_id from workspace_id for APIKey with ID {str(record.id)}."
+ )
+
+ batch_migrated -= 1
+ print(
+ "Subtracting record from part of batch. Now, Skipping record..."
+ )
+ continue
+
+ # Add the new object to the session.
+ insert_statement = insert(APIKeyDB).values(
+ prefix=record.prefix,
+ hashed_key=record.hashed_key,
+ created_by_id=uuid.UUID(record.user_id),
+ project_id=uuid.UUID(project_id),
+ rate_limit=record.rate_limit,
+ hidden=record.hidden,
+ expiration_date=record.expiration_date,
+ created_at=record.created_at,
+ updated_at=record.updated_at,
+ )
+ session.execute(insert_statement)
+ else:
+ SKIPPED_RECORDS += 1
+ print(
+ f"No workspace_id found for APIKey with ID {str(record.id)}. Skipping record..."
+ )
+
+ batch_migrated -= 1
+ print(
+ "Subtracting record from part of batch. Now, Skipping record..."
+ )
+ continue
+
+ # Update migration progress tracking
+ TOTAL_MIGRATED += batch_migrated
+ offset += BATCH_SIZE
+ remaining_records = (
+ TOTAL_API_KEYS_WITH_USER_AND_WORKSPACE_ID - TOTAL_MIGRATED
+ )
+ click.echo(
+ click.style(
+ f"Processed {batch_migrated} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}.",
+ fg="yellow",
+ )
+ )
+
+ # Break if all records have been processed
+ if remaining_records <= 0:
+ break
+
+        # Count remaining deprecated rows whose project_id is still NULL
+ stmt = (
+ select(func.count())
+ .select_from(DeprecatedAPIKeyDB)
+ .filter(DeprecatedAPIKeyDB.project_id.is_(None))
+ )
+ result = session.execute(stmt).scalar()
+ TOTAL_API_KEYS_WITH_NO_USER_AND_WORKSPACE_ID = (
+ result if result is not None else 0
+ )
+ if TOTAL_API_KEYS_WITH_NO_USER_AND_WORKSPACE_ID >= 1:
+ session.execute(
+ delete(DeprecatedAPIKeyDB).where(
+ DeprecatedAPIKeyDB.project_id.is_(None)
+ )
+ )
+
+ print(
+ f"Total rows in api_keys table with user_id and workspace_id been NULL is {TOTAL_API_KEYS_WITH_NO_USER_AND_WORKSPACE_ID} and have been deleted."
+ )
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"ERROR updating api_keys to make use of project_id: {traceback.format_exc()}",
+ fg="red",
+ )
+ )
+ raise e
+
+
+def revert_api_key_to_make_use_of_workspace_id(session: Connection):
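+    """Restore user_id/workspace_id on deprecated API keys from their
+    project-scoped created_by_id/project_id columns."""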
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+ SKIPPED_RECORDS = 0
+
+        # Count total rows where created_by_id and project_id are not NULL and workspace_id is NULL
+ stmt = (
+ select(func.count())
+ .select_from(DeprecatedAPIKeyDB)
+ .filter(
+ DeprecatedAPIKeyDB.created_by_id.isnot(None),
+ DeprecatedAPIKeyDB.project_id.isnot(None),
+ DeprecatedAPIKeyDB.workspace_id.is_(None),
+ )
+ )
+ result = session.execute(stmt).scalar()
+ TOTAL_API_KEYS_WITH_USER_AND_PROJECT_ID = result if result is not None else 0
+ print(
+ f"Total rows in api_keys table with created_by_id and project_id not been NULL is {TOTAL_API_KEYS_WITH_USER_AND_PROJECT_ID}"
+ )
+
+ while True:
+            # Fetch a batch of api_keys where created_by_id and project_id are not NULL
+ records = session.execute(
+ select(DeprecatedAPIKeyDB)
+ .filter(
+ DeprecatedAPIKeyDB.created_by_id.isnot(None),
+ DeprecatedAPIKeyDB.project_id.isnot(None),
+ DeprecatedAPIKeyDB.workspace_id.is_(None),
+ )
+ .offset(offset)
+ .limit(BATCH_SIZE)
+ ).fetchall()
+
+            if not records:
+                break  # Exit if no more records to process
+
+ # Process and update records in the batch
+ for record in records:
+ workspace_id = get_workspace_id_from_project_id(
+ session=session, project_id=str(record.project_id)
+ )
+ if workspace_id is None:
+ SKIPPED_RECORDS += 1
+ print(
+ f"Could not retrieve workspace_id from project_id for APIKey with ID {str(record.id)}. Skipping record..."
+ )
+ continue
+
+ session.execute(
+ update(DeprecatedAPIKeyDB)
+ .where(DeprecatedAPIKeyDB.id == record.id)
+ .values(
+ user_id=str(record.created_by_id),
+ workspace_id=workspace_id,
+ )
+ )
+
+ # Update migration progress tracking
+ batch_migrated = len(records)
+ TOTAL_MIGRATED += batch_migrated
+ offset += BATCH_SIZE
+ remaining_records = TOTAL_API_KEYS_WITH_USER_AND_PROJECT_ID - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {batch_migrated} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}.",
+ fg="yellow",
+ )
+ )
+
+        # Count total rows where created_by_id and/or project_id is NULL
+ stmt = (
+ select(func.count())
+ .select_from(DeprecatedAPIKeyDB)
+ .filter(
+ or_(
+ DeprecatedAPIKeyDB.created_by_id.is_(None),
+ DeprecatedAPIKeyDB.project_id.is_(None),
+ ),
+ )
+ )
+ result = session.execute(stmt).scalar()
+ TOTAL_API_KEYS_WITH_NO_USER_AND_PROJECT_ID = result if result is not None else 0
+ print(
+ f"Total rows in api_keys table with created_by_id and project_id been NULL is {TOTAL_API_KEYS_WITH_NO_USER_AND_PROJECT_ID}"
+ )
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"ERROR reverting api_keys to make use of workspace_id: {traceback.format_exc()}",
+ fg="red",
+ )
+ )
+ raise e
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/applications.py b/api/ee/databases/postgres/migrations/core/data_migrations/applications.py
new file mode 100644
index 0000000000..95353642ec
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/applications.py
@@ -0,0 +1,124 @@
+import uuid
+import traceback
+from typing import Optional
+
+
+import click
+from sqlalchemy.future import select
+from sqlalchemy import delete, Connection, update, func
+
+from oss.src.models.deprecated_models import ( # type: ignore
+ DeprecatedEvaluatorConfigDBwApp as DeprecatedEvaluatorConfigDB,
+ DeprecatedAppDB,
+)
+
+
+BATCH_SIZE = 200
+
+
+def get_app_db(session: Connection, app_id: str) -> Optional[DeprecatedAppDB]:
+ query = session.execute(select(DeprecatedAppDB).filter_by(id=uuid.UUID(app_id)))
+ return query.fetchone() # type: ignore
+
+
+def update_evaluators_with_app_name(session: Connection):
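+    """Prefix each evaluator config's name with its app's name, then delete
+    configs whose app_id is NULL."""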
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+ SKIPPED_RECORDS = 0
+
+ # Count total rows with a non-null app_id
+ total_query = (
+ select(func.count())
+ .select_from(DeprecatedEvaluatorConfigDB)
+ .filter(DeprecatedEvaluatorConfigDB.app_id.isnot(None))
+ )
+ result = session.execute(total_query).scalar()
+ TOTAL_EVALUATOR_CONFIGS = result if result is not None else 0
+ print(
+ f"Total rows in evaluator_configs table with app_id: {TOTAL_EVALUATOR_CONFIGS}"
+ )
+
+ while True:
+ # Fetch a batch of evaluator_configs with non-null app_id
+ records = session.execute(
+ select(DeprecatedEvaluatorConfigDB)
+ .filter(DeprecatedEvaluatorConfigDB.app_id.isnot(None))
+ .offset(offset)
+ .limit(BATCH_SIZE)
+ ).fetchall()
+ if not records:
+ break
+
+ # Process and update records in the batch
+ for record in records:
+ if hasattr(record, "app_id") and record.app_id is not None:
+ evaluator_config_app = get_app_db(
+ session=session, app_id=str(record.app_id)
+ )
+ if evaluator_config_app is not None:
+ # Update the name with the app_name as a prefix
+ new_name = f"{record.name} ({evaluator_config_app.app_name})"
+ session.execute(
+ update(DeprecatedEvaluatorConfigDB)
+ .where(DeprecatedEvaluatorConfigDB.id == record.id)
+ .values(name=new_name)
+ )
+ else:
+ print(
+ f"Skipping... No application found for evaluator_config {str(record.id)}."
+ )
+ SKIPPED_RECORDS += 1
+ else:
+ print(
+ f"Skipping... evaluator_config {str(record.id)} have app_id that is NULL."
+ )
+ SKIPPED_RECORDS += 1
+
+ session.commit()
+
+ # Update progress tracking
+ batch_migrated = len(records)
+ TOTAL_MIGRATED += batch_migrated
+ offset += BATCH_SIZE
+ remaining_records = TOTAL_EVALUATOR_CONFIGS - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {batch_migrated} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}",
+ fg="yellow",
+ )
+ )
+
+ # Break if all records have been processed
+ if remaining_records <= 0:
+ break
+
+ # Delete deprecated evaluator configs with app_id as None
+ stmt = (
+ select(func.count())
+ .select_from(DeprecatedEvaluatorConfigDB)
+ .filter(DeprecatedEvaluatorConfigDB.app_id.is_(None))
+ )
+ result = session.execute(stmt).scalar()
+ TOTAL_EVALUATOR_CONFIGS_WITH_NO_APPID = result if result is not None else 0
+ print(
+ f"Total rows in evaluator_configs table with no app_id: {TOTAL_EVALUATOR_CONFIGS_WITH_NO_APPID}. Deleting these rows..."
+ )
+
+ session.execute(
+ delete(DeprecatedEvaluatorConfigDB).where(
+ DeprecatedEvaluatorConfigDB.app_id.is_(None)
+ )
+ )
+ session.commit()
+ print("Successfully deleted rows in evaluator_configs with no app_id.")
+
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"ERROR updating evaluator config names: {traceback.format_exc()}",
+ fg="red",
+ )
+ )
+ raise e
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/demos.py b/api/ee/databases/postgres/migrations/core/data_migrations/demos.py
new file mode 100644
index 0000000000..06e2403fd2
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/demos.py
@@ -0,0 +1,576 @@
+from os import getenv
+from uuid import UUID
+from json import loads
+from functools import wraps
+from traceback import format_exc
+from typing import List, Optional
+
+from click import echo, style
+from pydantic import BaseModel
+
+
+from sqlalchemy import Connection, delete, insert
+from sqlalchemy.future import select
+
+from oss.src.models.db_models import UserDB
+from ee.src.models.db_models import (
+ ProjectDB,
+ OrganizationMemberDB,
+ WorkspaceMemberDB,
+ ProjectMemberDB,
+)
+
+
+BATCH_SIZE = 100
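+# Env var expected to hold a JSON-encoded list of demo project IDs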
+DEMOS = "AGENTA_DEMOS"
+DEMO_ROLE = "viewer"
+OWNER_ROLE = "owner"
+
+
+class Demo(BaseModel):
+ organization_id: UUID
+ workspace_id: UUID
+ project_id: UUID
+
+
+class User(BaseModel):
+ user_id: UUID
+
+
+class Member(BaseModel):
+ user_id: UUID
+
+ organization_id: Optional[UUID] = None
+ workspace_id: Optional[UUID] = None
+ project_id: Optional[UUID] = None
+
+ role: Optional[str] = None
+
+
+def with_rollback():
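+    """On any exception, roll back the wrapped function's `session` keyword
+    argument, log the traceback, and re-raise. The wrapped function must
+    receive `session` as a keyword argument."""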
+ def decorator(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception as exc:
+ session = kwargs.get("session")
+
+ session.rollback()
+
+ log_error(format_exc())
+
+ raise exc
+
+ return wrapper
+
+ return decorator
+
+
+def log_info(message) -> None:
+ echo(style(f"{message}", fg="green"), color=True)
+
+
+def log_error(message) -> None:
+ echo(style(f"ERROR: {message}", fg="red"), color=True)
+
+
+def fetch_project(
+ session: Connection,
+ project_id: UUID,
+) -> ProjectDB:
+ result = session.execute(
+ select(
+ ProjectDB.id,
+ ProjectDB.workspace_id,
+ ProjectDB.organization_id,
+ ).where(
+ ProjectDB.id == project_id,
+ )
+ ).first()
+
+ project = ProjectDB(
+ id=result.id,
+ workspace_id=result.workspace_id,
+ organization_id=result.organization_id,
+ )
+
+ return project
+
+
+def list_all_demos(session: Connection) -> List[Demo]:
+ demos = []
+
+ try:
+ demo_project_ids = loads(getenv(DEMOS) or "[]")
+
+ for project_id in demo_project_ids:
+ project = fetch_project(
+ session,
+ project_id,
+ )
+
+ try:
+ demos.append(
+ Demo(
+ organization_id=project.organization_id,
+ workspace_id=project.workspace_id,
+ project_id=project_id,
+ )
+ )
+
+ except: # pylint: disable=bare-except
+ pass
+
+ except: # pylint: disable=bare-except
+ pass
+
+ return demos
+
+
+def list_all_users(
+ session: Connection,
+) -> List[User]:
+ user_ids = session.execute(select(UserDB.id)).scalars().all()
+
+ all_users = [User(user_id=user_id) for user_id in user_ids]
+
+ return all_users
+
+
+def fetch_organization_members(
+ session: Connection,
+ organization_id: UUID,
+) -> List[Member]:
+ result = session.execute(
+ select(
+ OrganizationMemberDB.user_id,
+ OrganizationMemberDB.organization_id,
+ ).where(
+ OrganizationMemberDB.organization_id == organization_id,
+ )
+ ).all()
+
+ organization_members = [
+ Member(
+ user_id=row.user_id,
+ organization_id=row.organization_id,
+ )
+ for row in result
+ ]
+
+ return organization_members
+
+
+def get_new_organization_members(
+ users: List[User],
+ members: List[Member],
+) -> List[Member]:
+ user_ids = {user.user_id for user in users}
+ member_user_ids = {member.user_id for member in members}
+
+ new_user_ids = user_ids - member_user_ids
+
+ new_members = [Member(user_id=user_id) for user_id in new_user_ids]
+
+ return new_members
+
+
+def add_new_members_to_organization(
+ session: Connection,
+ organization_id: UUID,
+ new_members: List[Member],
+) -> None:
+ for i in range(0, len(new_members), BATCH_SIZE):
+ batch = new_members[i : i + BATCH_SIZE]
+
+ values = [
+ {
+ "user_id": member.user_id,
+ "organization_id": organization_id,
+ }
+ for member in batch
+ ]
+
+ session.execute(insert(OrganizationMemberDB).values(values))
+
+
+def remove_all_members_from_organization(
+ session: Connection,
+ organization_id: UUID,
+) -> None:
+ session.execute(
+ delete(OrganizationMemberDB).where(
+ OrganizationMemberDB.organization_id == organization_id,
+ )
+ )
+
+
+def fetch_workspace_members(
+ session: Connection,
+ workspace_id: UUID,
+) -> List[Member]:
+ result = session.execute(
+ select(
+ WorkspaceMemberDB.user_id,
+ WorkspaceMemberDB.workspace_id,
+ WorkspaceMemberDB.role,
+ ).where(
+ WorkspaceMemberDB.workspace_id == workspace_id,
+ )
+ ).all()
+
+ members = [
+ Member(
+ user_id=row.user_id,
+ workspace_id=row.workspace_id,
+ role=row.role,
+ )
+ for row in result
+ ]
+
+ return members
+
+
+def get_faulty_workspace_members(
+ members: List[Member],
+) -> List[Member]:
+ member_user_ids = {
+ member.user_id
+ for member in members
+ if member.role not in [DEMO_ROLE, OWNER_ROLE]
+ }
+
+ new_members = [Member(user_id=user_id) for user_id in member_user_ids]
+
+ return new_members
+
+
+def remove_faulty_workspace_members(
+ session: Connection,
+ workspace_id: UUID,
+ faulty_members: List[Member],
+) -> None:
+ faulty_user_ids = [member.user_id for member in faulty_members]
+
+ for i in range(0, len(faulty_user_ids), BATCH_SIZE):
+ batch = faulty_user_ids[i : i + BATCH_SIZE]
+
+ session.execute(
+ delete(WorkspaceMemberDB)
+ .where(WorkspaceMemberDB.workspace_id == workspace_id)
+ .where(WorkspaceMemberDB.user_id.in_(batch))
+ )
+
+
+def get_new_workspace_members(
+ users: List[User],
+ members: List[Member],
+) -> List[Member]:
+ user_ids = {user.user_id for user in users}
+ member_user_ids = {
+ member.user_id for member in members if member.role in [DEMO_ROLE, OWNER_ROLE]
+ }
+
+ new_user_ids = user_ids - member_user_ids
+
+ new_members = [Member(user_id=user_id) for user_id in new_user_ids]
+
+ return new_members
+
+
+def add_new_members_to_workspace(
+ session: Connection,
+ workspace_id: UUID,
+ new_members: List[Member],
+) -> None:
+ for i in range(0, len(new_members), BATCH_SIZE):
+ batch = new_members[i : i + BATCH_SIZE]
+
+ values = [
+ {
+ "user_id": member.user_id,
+ "workspace_id": workspace_id,
+ "role": DEMO_ROLE,
+ }
+ for member in batch
+ ]
+
+ session.execute(insert(WorkspaceMemberDB).values(values))
+
+
+def remove_all_members_from_workspace(
+ session: Connection,
+ workspace_id: UUID,
+) -> None:
+ session.execute(
+ delete(WorkspaceMemberDB).where(
+ WorkspaceMemberDB.workspace_id == workspace_id,
+ )
+ )
+
+
+def fetch_project_members(
+ session: Connection,
+ project_id: UUID,
+) -> List[Member]:
+ result = session.execute(
+ select(
+ ProjectMemberDB.user_id,
+ ProjectMemberDB.project_id,
+ ProjectMemberDB.role,
+ ).where(
+ ProjectMemberDB.project_id == project_id,
+ )
+ ).all()
+
+ members = [
+ Member(
+ user_id=row.user_id,
+ project_id=row.project_id,
+ role=row.role,
+ )
+ for row in result
+ ]
+
+ return members
+
+
+def get_faulty_project_members(
+ members: List[Member],
+) -> List[Member]:
+ member_user_ids = {
+ member.user_id
+ for member in members
+ if member.role not in [DEMO_ROLE, OWNER_ROLE]
+ }
+
+ new_members = [Member(user_id=user_id) for user_id in member_user_ids]
+
+ return new_members
+
+
+def remove_faulty_project_members(
+ session: Connection,
+ project_id: UUID,
+ faulty_members: List[Member],
+) -> None:
+ faulty_user_ids = [member.user_id for member in faulty_members]
+
+ for i in range(0, len(faulty_user_ids), BATCH_SIZE):
+ batch = faulty_user_ids[i : i + BATCH_SIZE]
+
+ session.execute(
+ delete(ProjectMemberDB)
+ .where(ProjectMemberDB.project_id == project_id)
+ .where(ProjectMemberDB.user_id.in_(batch))
+ )
+
+
+def get_new_project_members(
+ users: List[User],
+ members: List[Member],
+) -> List[Member]:
+ user_ids = {user.user_id for user in users}
+ member_user_ids = {
+ member.user_id for member in members if member.role in [DEMO_ROLE, OWNER_ROLE]
+ }
+
+ new_user_ids = user_ids - member_user_ids
+
+ new_members = [Member(user_id=user_id) for user_id in new_user_ids]
+
+ return new_members
+
+
+def add_new_members_to_project(
+ session: Connection,
+ project_id: UUID,
+ new_members: List[Member],
+) -> None:
+ for i in range(0, len(new_members), BATCH_SIZE):
+ batch = new_members[i : i + BATCH_SIZE]
+
+ values = [
+ {
+ "user_id": member.user_id,
+ "project_id": project_id,
+ "role": DEMO_ROLE,
+ "is_demo": True,
+ }
+ for member in batch
+ ]
+
+ session.execute(insert(ProjectMemberDB).values(values))
+
+
+def remove_all_members_from_project(
+ session: Connection,
+ project_id: UUID,
+) -> None:
+ session.execute(
+ delete(ProjectMemberDB).where(
+ ProjectMemberDB.project_id == project_id,
+ )
+ )
+
+
+@with_rollback()
+def add_users_to_demos(session: Connection) -> None:
+ log_info("Populating demos.")
+
+ all_demos = list_all_demos(session)
+
+ log_info(f"Found {len(all_demos)} demos.")
+
+ all_users = list_all_users(session)
+
+ log_info(f"Found {len(all_users)} users.")
+
+ for i, demo in enumerate(all_demos):
+ log_info(f"Populating demo #{i}.")
+
+ # DEMO ORGANIZATIONS
+ organization_members = fetch_organization_members(
+ session,
+ demo.organization_id,
+ )
+
+ log_info(f"Found {len(organization_members)} organization members.")
+
+ new_organization_members = get_new_organization_members(
+ all_users,
+ organization_members,
+ )
+
+ log_info(f"Missing {len(new_organization_members)} organization members.")
+
+ add_new_members_to_organization(
+ session,
+ demo.organization_id,
+ new_organization_members,
+ )
+
+ log_info(f"Added {len(new_organization_members)} organization members.")
+ # ------------------
+
+ # DEMO WORKSPACES
+ workspace_members = fetch_workspace_members(
+ session,
+ demo.workspace_id,
+ )
+
+ log_info(f"Found {len(workspace_members)} workspace members.")
+
+ faulty_workspace_members = get_faulty_workspace_members(
+ workspace_members,
+ )
+
+ log_info(f"Found {len(faulty_workspace_members)} faulty workspace members.")
+
+ remove_faulty_workspace_members(
+ session,
+ demo.workspace_id,
+ faulty_workspace_members,
+ )
+
+ log_info(f"Removed {len(faulty_workspace_members)} faulty workspace members.")
+
+ new_workspace_members = get_new_workspace_members(
+ all_users,
+ workspace_members,
+ )
+
+ log_info(f"Missing {len(new_workspace_members)} workspace members.")
+
+ add_new_members_to_workspace(
+ session,
+ demo.workspace_id,
+ new_workspace_members,
+ )
+
+ log_info(f"Added {len(new_workspace_members)} workspace members.")
+ # ---------------
+
+ # DEMO PROJECTS
+ project_members = fetch_project_members(
+ session,
+ demo.project_id,
+ )
+
+ log_info(f"Found {len(project_members)} project members.")
+
+ faulty_project_members = get_faulty_project_members(
+ project_members,
+ )
+
+ log_info(f"Found {len(faulty_project_members)} faulty project members.")
+
+ remove_faulty_project_members(
+ session,
+ demo.project_id,
+ faulty_project_members,
+ )
+
+ log_info(f"Removed {len(faulty_project_members)} faulty project members.")
+
+ new_project_members = get_new_project_members(
+ all_users,
+ project_members,
+ )
+
+ log_info(f"Missing {len(new_project_members)} project members.")
+
+ add_new_members_to_project(
+ session,
+ demo.project_id,
+ new_project_members,
+ )
+
+ log_info(f"Added {len(new_project_members)} project members.")
+ # -------------
+
+ log_info(f"Done with demo #{i}.")
+
+ log_info("Done with demos.")
+
+
+@with_rollback()
+def remove_users_from_demos(session: Connection) -> None:
+ log_info("Cleaning up demos.")
+
+ all_demos = list_all_demos(session)
+
+ for i, demo in enumerate(all_demos):
+ log_info(f"Cleaning up demo #{i}.")
+
+ # DEMO PROJECTS
+ remove_all_members_from_project(
+ session,
+ demo.project_id,
+ )
+ # -------------
+
+ log_info("Removed project members.")
+
+ # DEMO WORKSPACES
+ remove_all_members_from_workspace(
+ session,
+ demo.workspace_id,
+ )
+ # ---------------
+
+ log_info("Removed workspace members.")
+
+ # DEMO ORGANIZATIONS
+ remove_all_members_from_organization(
+ session,
+ demo.organization_id,
+ )
+ # ------------------
+
+ log_info("Removed organization members.")
+
+ log_info(f"Done with demo #{i}.")
+
+ log_info("Done with demos.")
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py b/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py
new file mode 100644
index 0000000000..c6b82d338c
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/evaluators.py
@@ -0,0 +1,195 @@
+import uuid
+import asyncio
+import traceback
+from typing import Optional
+
+import click
+from sqlalchemy.future import select
+from sqlalchemy import func
+from sqlalchemy.ext.asyncio import AsyncConnection, create_async_engine
+
+
+from ee.src.models.db_models import WorkspaceMemberDB as WorkspaceMemberDBE
+from oss.src.models.db_models import ProjectDB as ProjectDBE
+from oss.src.dbs.postgres.workflows.dbes import (
+ WorkflowArtifactDBE,
+ WorkflowVariantDBE,
+ WorkflowRevisionDBE,
+)
+from oss.src.dbs.postgres.git.dao import GitDAO
+from oss.src.core.evaluators.service import SimpleEvaluatorsService, EvaluatorsService
+from oss.src.models.deprecated_models import (
+ DeprecatedAutoEvaluatorConfigDBwProject as DeprecatedEvaluatorConfigDBwProject,
+)
+from oss.src.core.workflows.service import WorkflowsService
+from oss.src.core.tracing.service import TracingService
+from oss.src.apis.fastapi.tracing.router import TracingRouter
+from oss.src.dbs.postgres.tracing.dao import TracingDAO
+
+
+# Define constants
+DEFAULT_BATCH_SIZE = 200
+
+# Initialize plug-ins for migration
+tracing_service = TracingService(
+ tracing_dao=TracingDAO(),
+)
+tracing = TracingRouter(
+ tracing_service=tracing_service,
+)
+evaluators_service = EvaluatorsService(
+ workflows_service=WorkflowsService(
+ workflows_dao=GitDAO(
+ ArtifactDBE=WorkflowArtifactDBE,
+ VariantDBE=WorkflowVariantDBE,
+ RevisionDBE=WorkflowRevisionDBE,
+ ),
+ )
+)
+simple_evaluators_service = SimpleEvaluatorsService(
+ evaluators_service=evaluators_service,
+)
+
+
+async def _fetch_project_owner(
+ *,
+ project_id: uuid.UUID,
+ connection: AsyncConnection,
+) -> Optional[uuid.UUID]:
+ """Fetch the owner user ID for a given project."""
+ workspace_owner_query = (
+ select(WorkspaceMemberDBE.user_id)
+ .select_from(WorkspaceMemberDBE, ProjectDBE)
+ .where(
+ WorkspaceMemberDBE.workspace_id == ProjectDBE.workspace_id,
+ WorkspaceMemberDBE.role == "owner",
+ ProjectDBE.id == project_id,
+ )
+ )
+ result = await connection.execute(workspace_owner_query)
+ owner = result.scalar_one_or_none()
+ return owner
+
+
+async def migration_old_evaluator_configs_to_new_evaluator_configs(
+ connection: AsyncConnection,
+):
+ """Migrate old evaluator configurations to new workflow-based system."""
+ try:
+ offset = 0
+ total_migrated = 0
+ skipped_records = 0
+
+ # Count total rows with a non-null project_id
+ total_query = (
+ select(func.count())
+ .select_from(DeprecatedEvaluatorConfigDBwProject)
+ .filter(DeprecatedEvaluatorConfigDBwProject.project_id.isnot(None))
+ )
+ result = await connection.execute(total_query)
+ total_rows = result.scalar()
+ total_evaluators = total_rows or 0
+
+ click.echo(
+ click.style(
+ f"Total rows in evaluator_configs with project_id: {total_evaluators}",
+ fg="yellow",
+ )
+ )
+
+ while offset < total_evaluators:
+ # STEP 1: Fetch evaluator configurations with non-null project_id
+ result = await connection.execute(
+ select(DeprecatedEvaluatorConfigDBwProject)
+ .filter(DeprecatedEvaluatorConfigDBwProject.project_id.isnot(None))
+ .offset(offset)
+ .limit(DEFAULT_BATCH_SIZE)
+ )
+ evaluator_configs_rows = result.fetchall()
+
+ if not evaluator_configs_rows:
+ break
+
+ # Process and transfer records to evaluator workflows
+ for old_evaluator in evaluator_configs_rows:
+ try:
+ # STEP 2: Get owner from project_id
+ owner = await _fetch_project_owner(
+ project_id=old_evaluator.project_id, # type: ignore
+ connection=connection,
+ )
+ if not owner:
+ skipped_records += 1
+ click.echo(
+ click.style(
+ f"Skipping record with ID {old_evaluator.id} due to missing owner in workspace member table",
+ fg="yellow",
+ )
+ )
+ continue
+
+ # STEP 3: Migrate records using transfer_* util function
+ new_evaluator = await simple_evaluators_service.transfer(
+ project_id=old_evaluator.project_id,
+ user_id=owner,
+ evaluator_id=old_evaluator.id,
+ )
+ if not new_evaluator:
+ skipped_records += 1
+ click.echo(
+ click.style(
+ f"Skipping record with ID {old_evaluator.id} due to old evaluator not existing in database table",
+ fg="yellow",
+ )
+ )
+ continue
+
+ except Exception as e:
+ click.echo(
+ click.style(
+ f"Failed to migrate evaluator {old_evaluator.id}: {str(e)}",
+ fg="red",
+ )
+ )
+ click.echo(click.style(traceback.format_exc(), fg="red"))
+ skipped_records += 1
+ continue
+
+ # Update progress tracking for current batch
+ batch_migrated = len(evaluator_configs_rows)
+ offset += DEFAULT_BATCH_SIZE
+ total_migrated += batch_migrated
+
+ click.echo(
+ click.style(
+ f"Processed {batch_migrated} records in this batch.",
+ fg="yellow",
+ )
+ )
+
+ # Update progress tracking for all batches
+ remaining_records = total_evaluators - total_migrated
+ click.echo(click.style(f"Total migrated: {total_migrated}", fg="yellow"))
+ click.echo(click.style(f"Skipped records: {skipped_records}", fg="yellow"))
+ click.echo(
+ click.style(f"Records left to migrate: {remaining_records}", fg="yellow")
+ )
+
+ except Exception as e:
+ click.echo(f"Error occurred: {e}")
+ click.echo(click.style(traceback.format_exc(), fg="red"))
+
+
+def run_migration(sqlalchemy_url: str):
+ import concurrent.futures
+
+ async def _start():
+        engine = create_async_engine(url=sqlalchemy_url)
+        async with engine.connect() as connection:
+ await migration_old_evaluator_configs_to_new_evaluator_configs(
+ connection=connection
+ )
+
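+    # Run the coroutine on a dedicated event loop in a worker thread, so this
+    # stays callable from synchronous migration code even when an event loop
+    # is already running in the calling thread.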
+ with concurrent.futures.ThreadPoolExecutor() as executor:
+ future = executor.submit(asyncio.run, _start())
+ future.result()
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/export_records.py b/api/ee/databases/postgres/migrations/core/data_migrations/export_records.py
new file mode 100644
index 0000000000..f6aa6e3a0d
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/export_records.py
@@ -0,0 +1,175 @@
+import traceback
+import click
+from sqlalchemy.future import select
+from sqlalchemy import Connection, insert, func
+from ee.src.models.db_models import OrganizationMemberDB # type: ignore
+from ee.src.models.extended.deprecated_models import UserOrganizationDB # type: ignore
+
+BATCH_SIZE = 200
+
+
+def transfer_records_from_user_organization_to_organization_members(
+ session: Connection,
+):
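+    """Copy rows from user_organizations into organization_members, skipping
+    pairs that already exist in the destination table."""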
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+
+ # Count total rows in user_organizations table
+ total_query = select(func.count()).select_from(UserOrganizationDB)
+ result = session.execute(total_query).scalar()
+ TOTAL_USERS_ORGANIZATIONS = result if result is not None else 0
+ print(f"Total rows in UserOrganizationDB table: {TOTAL_USERS_ORGANIZATIONS}")
+
+ while True:
+            # Fetch a batch of records from user_organizations
+ users_in_organizations = session.execute(
+ select(UserOrganizationDB).offset(offset).limit(BATCH_SIZE)
+ ).fetchall()
+
+ actual_batch_size = len(users_in_organizations)
+ if actual_batch_size == 0:
+ break
+
+ for user_organization in users_in_organizations:
+ # Check if the record already exists in OrganizationMemberDB
+ existing_record = session.execute(
+ select(OrganizationMemberDB).where(
+ OrganizationMemberDB.user_id == user_organization.user_id,
+ OrganizationMemberDB.organization_id
+ == user_organization.organization_id,
+ )
+ ).fetchone()
+ if existing_record:
+ # Log that a duplicate was found
+ click.echo(
+ click.style(
+ f"Duplicate record found for user_id {user_organization.user_id} and organization_id {user_organization.organization_id}. Skipping.",
+ fg="yellow",
+ )
+ )
+ continue # Skip inserting this record
+
+ # Insert a new record in OrganizationMemberDB
+ insert_statement = insert(OrganizationMemberDB).values(
+ user_id=user_organization.user_id,
+ organization_id=user_organization.organization_id,
+ )
+ session.execute(insert_statement)
+
+ # Commit the batch
+ session.commit()
+
+ # Update migration progress
+ TOTAL_MIGRATED += actual_batch_size
+ offset += actual_batch_size
+ remaining_records = TOTAL_USERS_ORGANIZATIONS - TOTAL_MIGRATED
+
+ click.echo(
+ click.style(
+ f"Processed {actual_batch_size} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}",
+ fg="yellow",
+ )
+ )
+
+ # Check if there are still remaining records
+ remaining_records_query = select(func.count()).select_from(UserOrganizationDB)
+ remaining_count = session.execute(remaining_records_query).scalar()
+ records_left_count = remaining_count if remaining_count is not None else 0
+ if records_left_count > 0:
+ click.echo(
+ click.style(
+ f"There are still {remaining_count} records left in UserOrganizationDB that were not migrated.",
+ fg="red",
+ )
+ )
+
+ click.echo(
+ click.style(
+ "\nSuccessfully migrated records and handled duplicates in user_organization table to organization_members.",
+ fg="green",
+ ),
+ color=True,
+ )
+ except Exception as e:
+ # Handle exceptions and rollback if necessary
+ session.rollback()
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while transferring records: {traceback.format_exc()}",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
+
+
+def transfer_records_from_organization_members_to_user_organization(
+ session: Connection,
+):
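+    """Copy rows from organization_members back into user_organizations
+    (no duplicate check is performed)."""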
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+
+ # Count total rows in OrganizationMemberDB
+ total_query = select(func.count()).select_from(OrganizationMemberDB)
+ result = session.execute(total_query).scalar()
+ TOTAL_ORGANIZATIONS_MEMBERS = result if result is not None else 0
+ print(
+ f"Total rows in OrganizationMemberDB table: {TOTAL_ORGANIZATIONS_MEMBERS}"
+ )
+
+ while True:
+ # Retrieve a batch of records from OrganizationMemberDB
+ members_in_organizations = session.execute(
+ select(OrganizationMemberDB).offset(offset).limit(BATCH_SIZE)
+ ).fetchall()
+ actual_batch_size = len(members_in_organizations)
+ if not members_in_organizations:
+ break
+
+ # Process each record in the current batch
+ for user_organization in members_in_organizations:
+ # Create a new record in UserOrganizationDB
+ insert_statement = insert(UserOrganizationDB).values(
+ user_id=user_organization.user_id,
+ organization_id=user_organization.organization_id,
+ )
+ session.execute(insert_statement)
+
+ # Commit the batch
+ session.commit()
+
+ # Update migration progress
+ TOTAL_MIGRATED += actual_batch_size
+ offset += actual_batch_size
+ remaining_records = TOTAL_ORGANIZATIONS_MEMBERS - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {actual_batch_size} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}",
+ fg="yellow",
+ )
+ )
+
+ # Break the loop if all records are migrated
+ if remaining_records <= 0:
+ break
+
+ click.echo(
+ click.style(
+ "\nSuccessfully migrated records in organization_members table to user_organizations table.",
+ fg="green",
+ ),
+ color=True,
+ )
+ except Exception as e:
+ # Handle exceptions and rollback if necessary
+ session.rollback()
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while transferring records from organization_members to user_organizations: {traceback.format_exc()}",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/invitations.py b/api/ee/databases/postgres/migrations/core/data_migrations/invitations.py
new file mode 100644
index 0000000000..802f2ef4fe
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/invitations.py
@@ -0,0 +1,192 @@
+import uuid
+import traceback
+
+import click
+from sqlalchemy.future import select
+from sqlalchemy.orm import joinedload
+from sqlalchemy import delete, Connection, insert, func
+
+from oss.src.models.db_models import UserDB, InvitationDB, ProjectDB
+from ee.src.models.extended.deprecated_models import OldInvitationDB
+
+
+BATCH_SIZE = 200
+
+
+def transfer_invitations_from_old_table_to_new_table(session: Connection):
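+    """Move workspace invitations into the project-scoped invitations table,
+    resolving the invited user by email and the target project from the
+    invitation's workspace_id; invitations without a match are skipped."""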
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+ SKIPPED_INVITATIONS = 0
+
+ # Count total rows in OldInvitationDB table
+ count_query = select(func.count()).select_from(OldInvitationDB)
+ result = session.execute(count_query).scalar()
+ TOTAL_INVITATIONS = result if result is not None else 0
+ print(f"Total rows in OldInvitationDB table is {TOTAL_INVITATIONS}")
+
+ while True:
+ # Retrieve a batch of old invitations
+ query = session.execute(
+ select(OldInvitationDB).offset(offset).limit(BATCH_SIZE)
+ )
+ old_invitations = query.fetchall()
+ actual_batch_size = len(old_invitations)
+ if not old_invitations:
+ break
+
+ for old_invitation in old_invitations:
+ user = session.execute(
+ select(UserDB).where(UserDB.email == old_invitation.email)
+ ).fetchone()
+
+ project = session.execute(
+ select(ProjectDB).where(
+ ProjectDB.workspace_id == uuid.UUID(old_invitation.workspace_id)
+ )
+ ).fetchone()
+ if user and project:
+ print(
+ f"Found user {user.username} in workspace invitation ({str(old_invitation.id)})"
+ )
+ print(
+ f"Found project {str(project.id)} that will be used to transfer workspace invitation into."
+ )
+ # Map fields from OldInvitationDB to InvitationDB
+ statement = insert(InvitationDB).values(
+ id=old_invitation.id,
+ token=old_invitation.token,
+ email=old_invitation.email,
+ used=old_invitation.used,
+ role=old_invitation.workspace_roles[0],
+ user_id=user.id,
+ project_id=project.id,
+ expiration_date=old_invitation.expiration_date,
+ )
+
+ # Add the new invitation to the session
+ session.execute(statement)
+
+ # Remove old invitation
+ session.execute(
+ delete(OldInvitationDB).where(
+ OldInvitationDB.id == old_invitation.id
+ )
+ )
+ else:
+ print(
+ f"Skipping unused workspace invitation {str(old_invitation.id)}. No matching user or project."
+ )
+ SKIPPED_INVITATIONS += 1
+
+ # Commit the changes for the current batch
+ session.commit()
+
+ # Update migration progress
+ TOTAL_MIGRATED += actual_batch_size
+ offset += actual_batch_size
+ remaining_records = TOTAL_INVITATIONS - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {actual_batch_size} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}",
+ fg="yellow",
+ )
+ )
+
+ # Stop the loop when all records have been processed
+ if remaining_records <= 0:
+ break
+
+ click.echo(
+ click.style(
+ f"\nSuccessfully transferred workspaces invitations to projects invitations table. Skipped {SKIPPED_INVITATIONS} records.",
+ fg="green",
+ ),
+ color=True,
+ )
+
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while transferring workspaces invitations: {traceback.format_exc()}",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
+
+
+def revert_invitations_transfer_from_new_table_to_old_table(session: Connection):
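+    """Move project-scoped invitations back into the old workspace
+    invitations table, deleting each migrated row from the new table."""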
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+
+ # Count total rows in invitations table
+ stmt = select(func.count()).select_from(InvitationDB)
+ result = session.execute(stmt).scalar()
+ TOTAL_INVITATIONS = result if result is not None else 0
+ print(f"Total rows in project_invitations table is {TOTAL_INVITATIONS}")
+
+ while True:
+ # Retrieve a batch of project invitations
+ project_invitations = session.execute(
+ select(InvitationDB)
+ .offset(offset)
+ .limit(BATCH_SIZE)
+ .options(joinedload(InvitationDB.project))
+ ).fetchall()
+ if not project_invitations:
+ break
+
+ for project_invitation in project_invitations:
+ # Map fields from InvitationDB to OldInvitationDB
+ statement = insert(OldInvitationDB).values(
+ id=project_invitation.id,
+ token=project_invitation.token,
+ email=project_invitation.email,
+ used=project_invitation.used,
+ organization_id=str(project_invitation.project.workspace_id),
+ workspace_id=str(project_invitation.project.workspace_id),
+ workspace_roles=[project_invitation.role],
+ expiration_date=project_invitation.expiration_date,
+ )
+ session.execute(statement)
+
+ # Remove previous invitation (that references project_id)
+ session.execute(
+ delete(InvitationDB).where(InvitationDB.id == project_invitation.id)
+ )
+
+ # Commit the changes for the current batch
+ session.commit()
+
+            # Update migration progress (migrated rows are deleted above, so
+            # the next batch is re-fetched from the start rather than by offset)
+            batch_migrated = len(project_invitations)
+            TOTAL_MIGRATED += batch_migrated
+            click.echo(
+                click.style(
+                    f"Processed {batch_migrated} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {TOTAL_INVITATIONS - TOTAL_MIGRATED}",
+ fg="yellow",
+ )
+ )
+
+ click.echo(
+ click.style(
+ "\nSuccessfully transferred projects invitations to the workspaces invitations table.",
+ fg="green",
+ ),
+ color=True,
+ )
+
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while transferring projects invitations: {traceback.format_exc()}",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/projects.py b/api/ee/databases/postgres/migrations/core/data_migrations/projects.py
new file mode 100644
index 0000000000..293b05f52a
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/projects.py
@@ -0,0 +1,501 @@
+import uuid
+import traceback
+from typing import Dict, Optional
+from collections import defaultdict
+
+import click
+from sqlalchemy.future import select
+from sqlalchemy import Connection, update, func, or_
+
+from ee.src.models.extended.deprecated_transfer_models import ( # type: ignore
+ ProjectDB,
+ AppDB,
+ AppVariantDB,
+ AppVariantRevisionsDB,
+ VariantBaseDB,
+ DeploymentDB,
+ AppEnvironmentDB,
+ AppEnvironmentRevisionDB,
+ EvaluationScenarioDB,
+ EvaluationDB,
+ EvaluatorConfigDB,
+ HumanEvaluationDB,
+ HumanEvaluationScenarioDB,
+ TestSetDB,
+)
+
+
+MODELS = [
+    AppDB,  # has workspace_id
+    AppVariantDB,  # has workspace_id
+    AppVariantRevisionsDB,  # has no workspace_id, but variant_id can be used to resolve it
+    VariantBaseDB,  # has workspace_id
+    DeploymentDB,  # has workspace_id
+    AppEnvironmentDB,  # has workspace_id
+    AppEnvironmentRevisionDB,  # has workspace_id
+    EvaluationScenarioDB,  # has workspace_id
+    EvaluationDB,  # has workspace_id
+    EvaluatorConfigDB,  # has workspace_id
+    HumanEvaluationDB,  # has workspace_id
+    HumanEvaluationScenarioDB,  # has workspace_id
+    TestSetDB,  # has workspace_id
+]
+
+
+def get_workspace_project_by_id(
+ session: Connection, workspace_id: str
+) -> Optional[str]:
+ workspace_project = session.execute(
+ select(ProjectDB).filter_by(
+ is_default=True, workspace_id=uuid.UUID(workspace_id)
+ )
+ ).fetchone()
+ return str(workspace_project.id) if workspace_project is not None else None
+
+
+def get_variant_by_id(session: Connection, variant_id: str) -> Optional[AppVariantDB]:
+ query = session.execute(select(AppVariantDB).filter_by(id=uuid.UUID(variant_id)))
+ return query.fetchone() # type: ignore
+
+
+def get_app_by_id(session: Connection, app_id: str) -> Optional[AppDB]:
+ query = session.execute(select(AppDB).filter_by(id=uuid.UUID(app_id)))
+ return query.fetchone() # type: ignore
+
+
+def get_evaluation_by_id(
+ session: Connection, evaluation_id: str
+) -> Optional[EvaluationDB]:
+ query = session.execute(select(EvaluationDB).filter_by(id=uuid.UUID(evaluation_id)))
+ return query.fetchone() # type: ignore
+
+
+def get_workspace_project_id(session: Connection, workspace_id: str) -> Optional[str]:
+ query = session.execute(
+ select(ProjectDB).filter_by(
+ workspace_id=uuid.UUID(workspace_id), is_default=True
+ )
+ )
+ workspace_project = query.fetchone()
+ return str(workspace_project.id) if workspace_project is not None else None
+
+
+def repair_evaluation_scenario_to_have_project_id(session: Connection):
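+    """Backfill project_id on evaluation_scenarios rows, deriving the
+    workspace from the linked variant or evaluation and mapping it to that
+    workspace's default project."""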
+ offset = 0
+ BATCH_SIZE = 200
+ TOTAL_MIGRATED = 0
+
+ # Count total rows for evaluation_scenarios with project_id = None
+ count_query = (
+ select(func.count())
+ .select_from(EvaluationScenarioDB)
+ .filter(EvaluationScenarioDB.project_id.is_(None))
+ )
+ result = session.execute(count_query).scalar()
+ TOTAL_ROWS_OF_TABLE = result if result is not None else 0
+ print(
+ f"\nTotal rows in {EvaluationScenarioDB.__tablename__} table with no workspace_id: {TOTAL_ROWS_OF_TABLE}. Repairing rows to make use of workspace_id from either variant_id or evaluation_id..."
+ )
+
+ while True:
+ # Fetch records where project_id is None
+ records = session.execute(
+ select(EvaluationScenarioDB)
+ .filter(
+ EvaluationScenarioDB.project_id.is_(None),
+ or_(
+ EvaluationScenarioDB.variant_id.isnot(None),
+ EvaluationScenarioDB.evaluation_id.isnot(None),
+ ),
+ )
+ .limit(BATCH_SIZE)
+ ).fetchall()
+
+        # If no more records are returned, break the loop
+        if not records:
+            break
+
+        batch_repaired = 0
+
+ # Update records with default project_id
+ for record in records:
+ workspace_id = None
+
+ if hasattr(record, "variant_id") and record.variant_id is not None:
+ variant = get_variant_by_id(
+ session=session, variant_id=str(record.variant_id)
+ )
+ if variant is None:
+ print(
+ f"ES {str(record.id)} did not return any variant to retrieve the workspace_id. Now, trying evaluation..."
+ )
+ else:
+ workspace_id = str(variant.workspace_id)
+
+ if (
+ workspace_id is None
+ and hasattr(record, "evaluation_id")
+ and record.evaluation_id is not None
+ ):
+ evaluation = get_evaluation_by_id(
+ session=session, evaluation_id=str(record.evaluation_id)
+ )
+ if evaluation is None:
+ print(
+ f"ES {str(record.id)} did not return any evaluation or variant to retrieve the workspace_id. Skipping record..."
+ )
+ continue # Skip this record as no valid workspace_id found
+
+ workspace_id = str(evaluation.workspace_id)
+
+            # Update the record's project_id if a valid workspace_id was found
+            if workspace_id is not None:
+                workspace_project_id = get_workspace_project_by_id(
+                    session=session, workspace_id=workspace_id
+                )
+                if workspace_project_id is None:
+                    print(
+                        f"No default project found for workspace {workspace_id}. Skipping record {str(record.id)}..."
+                    )
+                    continue
+
+                session.execute(
+                    update(EvaluationScenarioDB)
+                    .where(EvaluationScenarioDB.id == record.id)
+                    .values(project_id=uuid.UUID(workspace_project_id))
+                )
+                batch_repaired += 1
+            else:
+                print(
+                    f"Evaluation scenario {str(record.id)} has no usable variant_id ({record.variant_id}) or evaluation_id ({record.evaluation_id}) to derive a workspace from."
+                )
+
+ session.commit()
+
+ # Update migration progress
+ batch_migrated = len(records)
+ TOTAL_MIGRATED += batch_migrated
+ offset += batch_migrated
+ remaining_records = TOTAL_ROWS_OF_TABLE - TOTAL_MIGRATED
+        click.echo(
+            click.style(
+                f"Processed {batch_migrated} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}",
+                fg="yellow",
+            )
+        )
+
+        # Break once a pass repairs nothing, since remaining unrepairable rows
+        # would otherwise be re-fetched forever
+        if batch_repaired <= 0:
+            break
+
+    # Report malformed records that cannot be repaired
+ records_with_no_variant_and_workspace_count_query = (
+ select(func.count())
+ .select_from(EvaluationScenarioDB)
+ .filter(
+ EvaluationScenarioDB.project_id.is_(None),
+ EvaluationScenarioDB.evaluation_id.is_(None),
+ EvaluationScenarioDB.variant_id.is_(None),
+ )
+ )
+ result = session.execute(
+ records_with_no_variant_and_workspace_count_query
+ ).scalar()
+ UNREPAIRABLE_DATA = result if result is not None else 0
+ click.echo(
+ click.style(
+ f"Total malformed records with no variant_id & evaluation_id: {UNREPAIRABLE_DATA}",
+ fg="yellow",
+ )
+ )
+
+ # Final reporting
+ click.echo(
+ click.style(
+ f"Migration to repair evaluation_scenario to have project_id completed.",
+ fg="green",
+ )
+ )
+
+
+def repair_evaluator_configs_to_have_project_id(session: Connection):
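+    """Backfill project_id on evaluator_configs rows, deriving the workspace
+    from the linked app and mapping it to that workspace's default project."""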
+ offset = 0
+ BATCH_SIZE = 200
+ TOTAL_MIGRATED = 0
+ SKIPPED_RECORDS = 0
+
+    # Count total rows for evaluator_configs with project_id = None
+ count_query = (
+ select(func.count())
+ .select_from(EvaluatorConfigDB)
+ .filter(EvaluatorConfigDB.project_id.is_(None))
+ )
+ result = session.execute(count_query).scalar()
+ TOTAL_ROWS_OF_TABLE = result if result is not None else 0
+ print(
+ f"\nTotal rows in {EvaluatorConfigDB.__tablename__} table with no workspace_id: {TOTAL_ROWS_OF_TABLE}. Repairing rows to make use of workspace_id from app..."
+ )
+
+ while True:
+        # Fetch records where project_id is None
+        records = session.execute(
+            select(EvaluatorConfigDB)
+            .filter(EvaluatorConfigDB.project_id.is_(None))
+            .limit(BATCH_SIZE)
+        ).fetchall()
+        batch_repaired = 0
+
+ # Update records with default project_id
+ for record in records:
+ workspace_id = None
+
+ if hasattr(record, "app_id") and (
+ record.app_id is None or record.app_id == ""
+ ):
+ print(f"Evaluator config {str(record.id)} have no app_id. Skipping...")
+ SKIPPED_RECORDS += 1
+ continue
+
+ if hasattr(record, "app_id") and record.app_id is not None:
+ app_db = get_app_by_id(session=session, app_id=str(record.app_id))
+ if app_db is None:
+ print(
+ f"Evaluator config {str(record.id)} have an app_id, but no application was found with the ID. Skipping..."
+ )
+ SKIPPED_RECORDS += 1
+ continue
+
+ workspace_id = str(app_db.workspace_id)
+
+            # Update the record's project_id if a valid workspace_id was found
+            if workspace_id is not None:
+                workspace_project_id = get_workspace_project_by_id(
+                    session=session, workspace_id=workspace_id
+                )
+                if workspace_project_id is None:
+                    print(
+                        f"No default project found for workspace {workspace_id}. Skipping record {str(record.id)}..."
+                    )
+                    SKIPPED_RECORDS += 1
+                    continue
+
+                session.execute(
+                    update(EvaluatorConfigDB)
+                    .where(EvaluatorConfigDB.id == record.id)
+                    .values(project_id=uuid.UUID(workspace_project_id))
+                )
+                batch_repaired += 1
+            else:
+                print(
+                    f"Evaluator config {str(record.id)} did not find a workspace_id to make use of."
+                )
+
+ session.commit()
+
+ # Update migration progress
+ batch_migrated = len(records)
+ TOTAL_MIGRATED += batch_migrated
+ offset += batch_migrated
+ remaining_records = TOTAL_ROWS_OF_TABLE - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {batch_migrated} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}",
+ fg="yellow",
+ )
+ )
+
+        # Break once a pass repairs nothing, since remaining unrepairable rows
+        # would otherwise be re-fetched forever
+        if batch_repaired <= 0:
+            break
+
+ records_with_no_project_id = (
+ select(func.count())
+ .select_from(EvaluatorConfigDB)
+ .filter(EvaluatorConfigDB.project_id.is_(None))
+ )
+ result = session.execute(records_with_no_project_id).scalar()
+ TOTAL_ROWS_OF_RECORDS_WITH_NO_PROJECT_ID = result if result is not None else 0
+
+ # Final reporting
+ click.echo(
+ click.style(
+ f"Migration to repair evaluator_configs to have project_id completed. Total records with no project_id: {TOTAL_ROWS_OF_RECORDS_WITH_NO_PROJECT_ID}",
+ fg="green",
+ )
+ )
+
+
+def add_project_id_to_db_entities(session: Connection):
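+    """Backfill project_id on every model in MODELS, mapping each record's
+    workspace (held directly or resolved via its variant) to that workspace's
+    default project, then run the repair passes for evaluation_scenarios and
+    evaluator_configs."""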
+ try:
+ for model in MODELS:
+ offset = 0
+ BATCH_SIZE = 200
+ TOTAL_MIGRATED = 0
+ SKIPPED_RECORDS: Dict[str, int] = defaultdict(int)
+
+            def update_skipped_records_counter(model_tablename: str):
+                # defaultdict(int) starts missing keys at 0, so a plain
+                # increment is sufficient
+                SKIPPED_RECORDS[model_tablename] += 1
+
+ # Count total rows for tables with project_id = None
+ count_query = (
+ select(func.count())
+ .select_from(model)
+ .filter(model.project_id.is_(None))
+ )
+ result = session.execute(count_query).scalar()
+ TOTAL_ROWS_OF_TABLE = result if result is not None else 0
+ print(f"Total rows in {model.__tablename__} table is {TOTAL_ROWS_OF_TABLE}")
+
+ if hasattr(model, "workspace_id"):
+ query = select(model).filter(
+ model.project_id.is_(None), model.workspace_id.isnot(None)
+ )
+ else:
+ # this will only be applied for AppVariantRevisionsDB model
+ query = select(model).filter(model.project_id.is_(None))
+
+ while True:
+ # Fetch records where project_id is None and workspace_id is not None
+ records = session.execute(query.limit(BATCH_SIZE)).fetchall()
+ actual_batch_size = len(records)
+
+ # Add debugging logs for each batch
+ click.echo(
+ click.style(
+ f"Fetching {actual_batch_size} records starting from offset {offset} in {model.__tablename__}.",
+ fg="blue",
+ )
+ )
+
+ # Update records with default project_id
+ for record in records:
+ if hasattr(record, "workspace_id"):
+ workspace_project_id = get_workspace_project_id(
+ session=session, workspace_id=str(record.workspace_id)
+ )
+ elif (
+ hasattr(record, "variant_id") and record.variant_id is not None
+ ) and not hasattr(
+ record, "workspace_id"
+ ): # this will only be applied for AppVariantRevisionsDB model
+ variant = get_variant_by_id(
+ session=session, variant_id=str(record.variant_id)
+ )
+ if variant is not None:
+ workspace_project_id = get_workspace_project_id(
+ session=session, workspace_id=str(variant.workspace_id)
+ )
+ else:
+ print(
+ f"Skipping record... {str(record.id)} in {model.__tablename__} table did not return any variant {str(record.variant_id)}."
+ )
+ update_skipped_records_counter(
+ model_tablename=model.__tablename__
+ )
+ workspace_project_id = None
+ else:
+ print(
+ f"Skipping record... {str(record.id)} in {model.__tablename__} table due to no variant_id / workspace_id"
+ )
+ actual_batch_size -= 1 # remove malformed record from records
+ update_skipped_records_counter(
+ model_tablename=model.__tablename__
+ )
+ workspace_project_id = None
+
+ if workspace_project_id is not None:
+ # Update model record project_id field
+ session.execute(
+ update(model)
+ .where(model.id == record.id)
+ .values(project_id=uuid.UUID(workspace_project_id))
+ )
+
+ session.commit()
+
+ # Update migration progress
+ TOTAL_MIGRATED += actual_batch_size
+ offset += actual_batch_size
+ remaining_records = TOTAL_ROWS_OF_TABLE - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {actual_batch_size} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}",
+ fg="yellow",
+ )
+ )
+
+ # Stop the loop when all records have been processed
+ if actual_batch_size <= 0:
+ break
+
+ # Run migration to 'repair' evaluation_scenario to make use of workspace_id from either evalution or variant to get project_id
+ repair_evaluation_scenario_to_have_project_id(session=session)
+
+ # Run migration to 'repair' evaluator_configs to make use of workspace_id from app to get project_id
+ repair_evaluator_configs_to_have_project_id(session=session)
+
+ click.echo(
+ click.style(
+ f"Migration for adding project_id to all records listed in {[model.__tablename__ for model in MODELS]} tables are completed. Skipped records: {SKIPPED_RECORDS}",
+ fg="green",
+ )
+ )
+
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"ERROR adding project_id to db entities: {traceback.format_exc()}",
+ fg="red",
+ )
+ )
+ raise e
+
+
+def remove_project_id_from_db_entities(session: Connection):
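+    """Reverse of add_project_id_to_db_entities: reset project_id to NULL,
+    in batches, for every record of every model in MODELS."""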
+ try:
+ for model in MODELS:
+ offset = 0
+ BATCH_SIZE = 200
+ TOTAL_MIGRATED = 0
+
+ # Count total rows for tables where project_id is not None
+ count_query = (
+ select(func.count())
+ .select_from(model)
+ .where(model.project_id.isnot(None))
+ )
+ result = session.execute(count_query).scalar()
+ TOTAL_ROWS_OF_TABLE = result if result is not None else 0
+ print(f"Total rows in {model.__tablename__} table is {TOTAL_ROWS_OF_TABLE}")
+
+            while True:
+                # Retrieve records where project_id is not None. No offset is
+                # needed: each UPDATE below removes rows from this result set,
+                # so the next fetch returns whatever is left.
+                records = session.execute(
+                    select(model)
+                    .where(model.project_id.isnot(None))
+                    .limit(BATCH_SIZE)
+                ).fetchall()
+                actual_batch_size = len(records)
+                if not records:
+                    break
+
+                # Reset project_id via UPDATE statements; rows returned from a
+                # Connection are plain Row objects, so assigning to
+                # record.project_id would not persist anything.
+                for record in records:
+                    session.execute(
+                        update(model)
+                        .where(model.id == record.id)
+                        .values(project_id=None)
+                    )
+
+ session.commit()
+
+ # Update migration progress
+ TOTAL_MIGRATED += actual_batch_size
+ offset += actual_batch_size
+ remaining_records = TOTAL_ROWS_OF_TABLE - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {actual_batch_size} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records}",
+ fg="yellow",
+ )
+ )
+
+ # Stop the loop when all records have been processed
+ if remaining_records <= 0:
+ break
+
+ click.echo(
+ click.style(
+ f"Migration for removing project_id to all records listed in {[model.__tablename__ for model in MODELS]} tables are completed.",
+ fg="green",
+ )
+ )
+
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"ERROR removing project_id to db entities: {traceback.format_exc()}",
+ fg="red",
+ )
+ )
+ raise e
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py b/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py
new file mode 100644
index 0000000000..add9acf809
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/testsets.py
@@ -0,0 +1,191 @@
+import uuid
+import asyncio
+import traceback
+from typing import Optional
+
+import click
+from sqlalchemy.future import select
+from sqlalchemy import func
+from sqlalchemy.ext.asyncio import AsyncConnection, create_async_engine
+
+from ee.src.models.db_models import WorkspaceMemberDB as WorkspaceMemberDBE
+from oss.src.models.db_models import ProjectDB as ProjectDBE
+from oss.src.dbs.postgres.testcases.dbes import (
+ TestcaseBlobDBE,
+)
+from oss.src.dbs.postgres.blobs.dao import BlobsDAO
+from oss.src.dbs.postgres.testsets.dbes import (
+ TestsetArtifactDBE,
+ TestsetVariantDBE,
+ TestsetRevisionDBE,
+)
+from oss.src.dbs.postgres.git.dao import GitDAO
+from oss.src.core.testcases.service import TestcasesService
+from oss.src.models.deprecated_models import DeprecatedTestSetDB
+from oss.src.core.testsets.service import TestsetsService, SimpleTestsetsService
+
+
+# Define constants
+DEFAULT_BATCH_SIZE = 200
+
+# Initialize plug-ins for migration
+testcases_dao = BlobsDAO(
+ BlobDBE=TestcaseBlobDBE,
+)
+testsets_dao = GitDAO(
+ ArtifactDBE=TestsetArtifactDBE,
+ VariantDBE=TestsetVariantDBE,
+ RevisionDBE=TestsetRevisionDBE,
+)
+testcases_service = TestcasesService(
+ testcases_dao=testcases_dao,
+)
+testsets_service = TestsetsService(
+ testsets_dao=testsets_dao,
+ testcases_service=testcases_service,
+)
+simple_testsets_service = SimpleTestsetsService(
+ testsets_service=testsets_service,
+)
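+# The objects above form a small dependency chain: the DAOs provide blob and
+# git-style persistence, TestcasesService and TestsetsService wrap them, and
+# SimpleTestsetsService exposes the transfer() entry point used below.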
+
+
+async def _fetch_project_owner(
+ *,
+ project_id: uuid.UUID,
+ connection: AsyncConnection,
+) -> Optional[uuid.UUID]:
+ """Fetch the owner user ID for a given project."""
+ workspace_owner_query = (
+ select(WorkspaceMemberDBE.user_id)
+ .select_from(WorkspaceMemberDBE, ProjectDBE)
+ .where(
+ WorkspaceMemberDBE.workspace_id == ProjectDBE.workspace_id,
+ WorkspaceMemberDBE.role == "owner",
+ ProjectDBE.id == project_id,
+ )
+ )
+ result = await connection.execute(workspace_owner_query)
+ owner = result.scalar_one_or_none()
+ return owner
+
+
+async def migration_old_testsets_to_new_testsets(
+ connection: AsyncConnection,
+):
+ """Migrate old testsets to new testsets system."""
+ try:
+ offset = 0
+ total_migrated = 0
+ skipped_records = 0
+
+ # Count total rows with a non-null project_id
+ total_query = (
+ select(func.count())
+ .select_from(DeprecatedTestSetDB)
+ .filter(DeprecatedTestSetDB.project_id.isnot(None))
+ )
+ result = await connection.execute(total_query)
+ total_rows = result.scalar()
+ total_testsets = total_rows or 0
+
+ click.echo(
+ click.style(
+ f"Total rows in testsets with project_id: {total_testsets}",
+ fg="yellow",
+ )
+ )
+
+ while offset < total_testsets:
+            # STEP 1: Fetch testsets with a non-null project_id
+ result = await connection.execute(
+ select(DeprecatedTestSetDB)
+ .filter(DeprecatedTestSetDB.project_id.isnot(None))
+ .offset(offset)
+ .limit(DEFAULT_BATCH_SIZE)
+ )
+ testsets_rows = result.fetchall()
+
+ if not testsets_rows:
+ break
+
+ # Process and transfer records to testset workflows
+ for testset in testsets_rows:
+ try:
+ # STEP 2: Get owner from project_id
+ owner = await _fetch_project_owner(
+ project_id=testset.project_id, # type: ignore
+ connection=connection,
+ )
+ if not owner:
+ skipped_records += 1
+ click.echo(
+ click.style(
+ f"Skipping record with ID {testset.id} due to missing owner in workspace member table",
+ fg="yellow",
+ )
+ )
+ continue
+
+ # STEP 3: Migrate records using transfer_* util function
+ new_testset = await simple_testsets_service.transfer(
+ project_id=testset.project_id,
+ user_id=owner,
+ testset_id=testset.id,
+ )
+ if not new_testset:
+ skipped_records += 1
+ click.echo(
+ click.style(
+ f"Skipping record with ID {testset.id} due to old testset not existing in database table",
+ fg="yellow",
+ )
+ )
+ continue
+
+ except Exception as e:
+ click.echo(
+ click.style(
+ f"Failed to migrate testset {testset.id}: {str(e)}",
+ fg="red",
+ )
+ )
+ click.echo(click.style(traceback.format_exc(), fg="red"))
+ skipped_records += 1
+ continue
+
+ # Update progress tracking for current batch
+ batch_migrated = len(testsets_rows)
+ offset += DEFAULT_BATCH_SIZE
+ total_migrated += batch_migrated
+
+ click.echo(
+ click.style(
+ f"Processed {batch_migrated} records in this batch.",
+ fg="yellow",
+ )
+ )
+
+ # Update progress tracking for all batches
+ remaining_records = total_testsets - total_migrated
+ click.echo(click.style(f"Total migrated: {total_migrated}", fg="yellow"))
+ click.echo(click.style(f"Skipped records: {skipped_records}", fg="yellow"))
+ click.echo(
+ click.style(f"Records left to migrate: {remaining_records}", fg="yellow")
+ )
+
+ except Exception as e:
+ click.echo(f"Error occurred: {e}")
+ click.echo(click.style(traceback.format_exc(), fg="red"))
+
+
+def run_migration(sqlalchemy_url: str):
+ import concurrent.futures
+
+    async def _start():
+        engine = create_async_engine(url=sqlalchemy_url)
+        try:
+            async with engine.connect() as connection:
+                await migration_old_testsets_to_new_testsets(connection=connection)
+        finally:
+            await engine.dispose()
+
+    # Run the coroutine in a dedicated thread so asyncio.run() works even if
+    # the caller (e.g. Alembic's env.py) already has a running event loop.
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        future = executor.submit(asyncio.run, _start())
+        future.result()
diff --git a/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
new file mode 100644
index 0000000000..2c5a241acc
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/data_migrations/workspaces.py
@@ -0,0 +1,255 @@
+import traceback
+
+import click
+from sqlalchemy.future import select
+from sqlalchemy import delete, Connection, insert, func
+
+from ee.src.models.db_models import ProjectDB, WorkspaceDB
+from ee.src.models.db_models import (
+ WorkspaceMemberDB,
+ ProjectMemberDB,
+)
+
+BATCH_SIZE = 200
+
+
+def get_or_create_workspace_default_project(
+ session: Connection, workspace: WorkspaceDB
+) -> None:
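+    """Ensure the given workspace has a default project, creating one if none exists."""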
+ project = session.execute(
+ select(ProjectDB).filter_by(
+ is_default=True,
+ workspace_id=workspace.id,
+ )
+ ).fetchone()
+
+ if project is None:
+ statement = insert(ProjectDB).values(
+ project_name="Default Project",
+ is_default=True,
+ workspace_id=workspace.id,
+ organization_id=workspace.organization_id,
+ )
+ session.execute(statement)
+
+
+def create_default_project_for_workspaces(session: Connection):
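+    """Walk all workspaces in batches and create a default project for any
+    workspace that does not have one yet."""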
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+
+ # Count total rows in workspaces table
+ stmt = select(func.count()).select_from(WorkspaceDB)
+ result = session.execute(stmt).scalar()
+ TOTAL_WORKSPACES = result if result is not None else 0
+ print(f"Total rows in workspaces table is {TOTAL_WORKSPACES}")
+
+ while True:
+            # Retrieve the next batch of workspaces
+ workspaces = session.execute(
+ select(WorkspaceDB).offset(offset).limit(BATCH_SIZE)
+ ).fetchall()
+ actual_batch_size = len(workspaces)
+ if not workspaces:
+ break
+
+ for workspace in workspaces:
+ # Create a new default project for each workspace
+ get_or_create_workspace_default_project(
+ session=session, workspace=workspace # type: ignore
+ )
+
+ # Commit the changes for the current batch
+ session.commit()
+
+ # Update migration progress
+ TOTAL_MIGRATED += actual_batch_size
+ offset += actual_batch_size
+ remaining_records = TOTAL_WORKSPACES - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {offset} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records} ",
+ fg="yellow",
+ )
+ )
+
+ # Stop the loop when all records have been processed
+ if remaining_records <= 0:
+ break
+
+ click.echo(
+ click.style(
+ "\nSuccessfully created default projects for workspaces.",
+ fg="green",
+ ),
+ color=True,
+ )
+
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while creating default projects: {traceback.format_exc()}",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
+
+
+def create_default_project_memberships(session: Connection):
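+    """For every workspace member, create a membership in the default project
+    of that member's workspace, carrying the member's role over."""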
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+ SKIPPED_RECORDS = 0
+
+ # Count total rows in workspaces_members table
+ stmt = select(func.count()).select_from(WorkspaceMemberDB)
+ result = session.execute(stmt).scalar()
+ TOTAL_WORKSPACES_MEMBERS = result if result is not None else 0
+ print(f"Total rows in workspaces_members table is {TOTAL_WORKSPACES_MEMBERS}")
+
+ while True:
+ # Retrieve a batch of workspace members
+ workspace_members = session.execute(
+ select(WorkspaceMemberDB).offset(offset).limit(BATCH_SIZE)
+ ).fetchall()
+ actual_batch_size = len(workspace_members)
+ if not workspace_members:
+ break
+
+ for workspace_member in workspace_members:
+ # Find the default project for the member's workspace
+ project_query = session.execute(
+ select(ProjectDB)
+ .where(
+ ProjectDB.workspace_id == workspace_member.workspace_id,
+ ProjectDB.is_default == True,
+ )
+ .limit(1)
+ )
+ default_project = project_query.fetchone()
+ if default_project:
+ # Create a new project membership for each workspace member
+ statement = insert(ProjectMemberDB).values(
+ user_id=workspace_member.user_id,
+ project_id=getattr(default_project, "id"),
+ role=workspace_member.role,
+ )
+ session.execute(statement)
+ else:
+ print(
+ f"Skipping record... Did not find any default project for workspace {str(workspace_member.workspace_id)}"
+ )
+ SKIPPED_RECORDS += 1
+
+ # Commit the changes for the current batch
+ session.commit()
+
+ # Update migration progress
+ TOTAL_MIGRATED += actual_batch_size
+ offset += actual_batch_size
+ remaining_records = TOTAL_WORKSPACES_MEMBERS - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {offset} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records} ",
+ fg="yellow",
+ )
+ )
+
+ # Stop the loop when all records have been processed
+ if remaining_records <= 0:
+ break
+
+ click.echo(
+ click.style(
+ f"\nSuccessfully created default project memberships for workspace members. Skipped {SKIPPED_RECORDS} records.",
+ fg="green",
+ ),
+ color=True,
+ )
+
+ except Exception as e:
+ session.rollback()
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while creating project memberships: {traceback.format_exc()}",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
+
+
+def remove_default_projects_from_workspaces(session: Connection):
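+    """Downgrade path: delete all default projects and their associated
+    project memberships, in batches."""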
+ try:
+ offset = 0
+ TOTAL_MIGRATED = 0
+
+ # Count total rows in projects table
+ stmt = (
+ select(func.count())
+ .select_from(ProjectDB)
+ .where(ProjectDB.is_default == True)
+ )
+ result = session.execute(stmt).scalar()
+ TOTAL_PROJECTS = result if result is not None else 0
+ print(f"Total rows in projects table is {TOTAL_PROJECTS}")
+
+ while True:
+ # Retrieve a batch of workspaces with a default project
+ projects_to_delete = session.execute(
+ select(ProjectDB)
+ .where(ProjectDB.is_default == True)
+ .offset(offset)
+ .limit(BATCH_SIZE) # type: ignore
+ ).fetchall()
+ actual_batch_size = len(projects_to_delete)
+ if not projects_to_delete:
+ break
+
+ for project in projects_to_delete:
+                if project is not None:
+ # Remove associated project memberships
+ session.execute(
+ delete(ProjectMemberDB).where(
+ ProjectMemberDB.project_id == project.id
+ )
+ )
+
+ # Remove the default project itself
+ session.execute(delete(ProjectDB).where(ProjectDB.id == project.id))
+
+ # Update migration progress
+ TOTAL_MIGRATED += actual_batch_size
+ offset += actual_batch_size
+ remaining_records = TOTAL_PROJECTS - TOTAL_MIGRATED
+ click.echo(
+ click.style(
+ f"Processed {offset} records in this batch. Total records migrated: {TOTAL_MIGRATED}. Records left to migrate: {remaining_records} ",
+ fg="yellow",
+ )
+ )
+
+ # Stop the loop when all records have been processed
+ if remaining_records <= 0:
+ break
+
+ click.echo(
+ click.style(
+ "\nSuccessfully removed default projects and associated memberships from existing workspaces.",
+ fg="green",
+ ),
+ color=True,
+ )
+ except Exception as e:
+ # Handle exceptions and rollback if necessary
+ session.rollback()
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while removing default projects and memberships: {traceback.format_exc()}",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
diff --git a/api/ee/databases/postgres/migrations/core/env.py b/api/ee/databases/postgres/migrations/core/env.py
new file mode 100644
index 0000000000..e5e251f801
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/env.py
@@ -0,0 +1,126 @@
+import asyncio
+from logging.config import fileConfig
+
+from sqlalchemy.engine import Connection, create_engine
+from sqlalchemy.ext.asyncio import create_async_engine
+
+from alembic import context
+
+from oss.src.dbs.postgres.shared.engine import engine
+
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+config.set_main_option("sqlalchemy.url", engine.postgres_uri_core) # type: ignore
+
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+if config.config_file_name is not None:
+ fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+from oss.src.dbs.postgres.shared.base import Base
+
+import oss.src.dbs.postgres.secrets.dbes
+import oss.src.dbs.postgres.observability.dbes
+import oss.src.dbs.postgres.tracing.dbes
+import oss.src.dbs.postgres.testcases.dbes
+import oss.src.dbs.postgres.testsets.dbes
+import oss.src.dbs.postgres.queries.dbes
+import oss.src.dbs.postgres.workflows.dbes
+import oss.src.dbs.postgres.evaluations.dbes
+
+import ee.src.dbs.postgres.meters.dbes
+import ee.src.dbs.postgres.subscriptions.dbes
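+# The dbes imports above are needed only for their side effects: importing
+# each module registers its tables on Base.metadata so that Alembic's
+# autogenerate can detect them.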
+
+
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = Base.metadata
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+ """Run migrations in 'offline' mode.
+
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
+
+ Calls to context.execute() here emit the given string to the
+ script output.
+
+ """
+ connection = create_engine(
+ url=config.get_main_option("sqlalchemy.url"),
+ pool_size=10, # Maintain 10 connections in the pool
+ pool_timeout=43200, # Timeout of 12 hours
+ pool_recycle=43200, # Timeout of 12 hours
+ pool_pre_ping=True,
+ echo_pool=True,
+ pool_use_lifo=True,
+ )
+ context.configure(
+ connection=connection,
+ transaction_per_migration=True,
+ target_metadata=target_metadata,
+ literal_binds=True,
+ dialect_opts={"paramstyle": "named"},
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def do_run_migrations(connection: Connection) -> None:
+ context.configure(
+ transaction_per_migration=True,
+ connection=connection,
+ target_metadata=target_metadata,
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+async def run_async_migrations() -> None:
+ """In this scenario we need to create an Engine
+ and associate a connection with the context.
+
+ """
+
+ connectable = create_async_engine(
+ url=config.get_main_option("sqlalchemy.url"),
+ pool_size=10, # Maintain 10 connections in the pool
+ pool_timeout=43200, # Timeout of 12 hours
+ pool_recycle=43200, # Timeout of 12 hours
+ pool_pre_ping=True,
+ echo_pool=True,
+ pool_use_lifo=True,
+ )
+ async with connectable.connect() as connection:
+ await connection.run_sync(do_run_migrations)
+
+ await connectable.dispose()
+
+
+def run_migrations_online() -> None:
+ """Run migrations in 'online' mode."""
+
+ asyncio.run(run_async_migrations())
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/api/ee/databases/postgres/migrations/core/script.py.mako b/api/ee/databases/postgres/migrations/core/script.py.mako
new file mode 100644
index 0000000000..fbc4b07dce
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/script.py.mako
@@ -0,0 +1,26 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+ ${downgrades if downgrades else "pass"}
diff --git a/api/ee/databases/postgres/migrations/core/temp/80910d2fa9a4_migrate_old_testsets_to_new_.py b/api/ee/databases/postgres/migrations/core/temp/80910d2fa9a4_migrate_old_testsets_to_new_.py
new file mode 100644
index 0000000000..43be6c1579
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/temp/80910d2fa9a4_migrate_old_testsets_to_new_.py
@@ -0,0 +1,32 @@
+"""migrate old testsets to new testsets data structure
+
+Revision ID: 80910d2fa9a4
+Revises: ...
+Create Date: 2025-07-25 07:35:57.319449
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import context
+from ee.databases.postgres.migrations.core.data_migrations.testsets import (
+ run_migration,
+)
+
+# revision identifiers, used by Alembic.
+revision: str = "80910d2fa9a4"
+down_revision: Union[str, None] = "..."
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ run_migration(sqlalchemy_url=context.config.get_main_option("sqlalchemy.url"))
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ pass
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/temp/bd7937ee784d_migrate_old_evaluators_to_new_.py b/api/ee/databases/postgres/migrations/core/temp/bd7937ee784d_migrate_old_evaluators_to_new_.py
new file mode 100644
index 0000000000..da71b370bb
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/temp/bd7937ee784d_migrate_old_evaluators_to_new_.py
@@ -0,0 +1,32 @@
+"""migrate old evaluators to new evaluators data structure
+
+Revision ID: bd7937ee784d
+Revises: ...
+Create Date: 2025-07-25 07:35:57.319449
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import context
+from ee.databases.postgres.migrations.core.data_migrations.evaluators import (
+ run_migration,
+)
+
+# revision identifiers, used by Alembic.
+revision: str = "bd7937ee784d"
+down_revision: Union[str, None] = "..."
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ run_migration(sqlalchemy_url=context.config.get_main_option("sqlalchemy.url"))
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ pass
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/utils.py b/api/ee/databases/postgres/migrations/core/utils.py
new file mode 100644
index 0000000000..206e46db64
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/utils.py
@@ -0,0 +1,196 @@
+import asyncio
+import logging
+import traceback
+
+import click
+import asyncpg
+from alembic import command
+from sqlalchemy import Engine
+from alembic.config import Config
+from sqlalchemy import inspect, text
+from alembic.script import ScriptDirectory
+from sqlalchemy.exc import ProgrammingError
+from sqlalchemy.ext.asyncio import create_async_engine, AsyncEngine
+
+from oss.src.utils.env import env
+
+
+# Initialize logger
+logger = logging.getLogger("alembic.env")
+
+# Initialize alembic config
+alembic_cfg = Config(env.ALEMBIC_CFG_PATH_CORE)
+script = ScriptDirectory.from_config(alembic_cfg)
+
+logger.info("license: ee")
+logger.info("migrations: entities")
+logger.info("ALEMBIC_CFG_PATH_CORE: %s", env.ALEMBIC_CFG_PATH_CORE)
+logger.info("alembic_cfg: %s", alembic_cfg)
+logger.info("script: %s", script)
+
+
+def is_initial_setup(engine) -> bool:
+ """
+ Check if the database is in its initial state by verifying the existence of required tables.
+
+ This function inspects the current state of the database and determines if it needs initial setup by checking for the presence of a predefined set of required tables.
+
+ Args:
+ engine (sqlalchemy.engine.base.Engine): The SQLAlchemy engine used to connect to the database.
+
+ Returns:
+ bool: True if the database is in its initial state (i.e., not all required tables exist), False otherwise.
+ """
+
+ inspector = inspect(engine)
+ required_tables = [
+ "users",
+ "app_db",
+ "deployments",
+ "bases",
+ "app_variants",
+ "ids_mapping",
+    ]  # NOTE: The tables here were picked at random. Listing every table in the
+    # database would not change the behaviour of this function, so it is best
+    # to leave things as they are.
+ existing_tables = inspector.get_table_names()
+
+ # Check if all required tables exist in the database
+ all_tables_exist = all(table in existing_tables for table in required_tables)
+
+ return not all_tables_exist
+
+
+async def get_current_migration_head_from_db(engine: AsyncEngine):
+ """
+ Checks the alembic_version table to get the current migration head that has been applied.
+
+ Args:
+        engine (AsyncEngine): The async engine that connects to the SQLAlchemy pool
+
+ Returns:
+        the revision currently applied to the database, or the sentinel string "alembic_version" if the tracking table does not exist yet
+ """
+
+ async with engine.connect() as connection:
+ try:
+ result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore
+ except (asyncpg.exceptions.UndefinedTableError, ProgrammingError):
+            # Note: If the alembic_version table does not exist, the query raises an
+            # UndefinedTableError. We suppress the error and return the sentinel
+            # string "alembic_version" so the caller can report a pending migration
+            # and make Alembic start tracking migration changes.
+            # --------------------------------------------------------------------------------------
+            # This exception can occur for both first-time and returning users.
+ return "alembic_version"
+
+ migration_heads = [row[0] for row in result.fetchall()]
+ assert (
+ len(migration_heads) == 1
+ ), "There can only be one migration head stored in the database."
+ return migration_heads[0]
+
+
+async def get_pending_migration_head():
+ """
+    Gets the migration head that has not yet been applied.
+
+    Returns:
+        a list with the pending migration head (plus the "alembic_version" sentinel on a fresh database); empty if up to date
+ """
+
+ pending_migration_head = []
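+    # The list stays empty when everything is applied; it gains the script
+    # head when a migration is pending, plus "alembic_version" when the
+    # tracking table itself does not exist yet (fresh database).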
+
+ engine = create_async_engine(url=env.POSTGRES_URI_CORE)
+ try:
+ current_migration_script_head = script.get_current_head()
+ migration_head_from_db = await get_current_migration_head_from_db(engine=engine)
+
+ if current_migration_script_head != migration_head_from_db:
+ pending_migration_head.append(current_migration_script_head)
+ if "alembic_version" == migration_head_from_db:
+ pending_migration_head.append("alembic_version")
+ finally:
+ await engine.dispose()
+
+ return pending_migration_head
+
+
+def run_alembic_migration():
+ """
+ Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users.
+ """
+
+ try:
+ pending_migration_head = asyncio.run(get_pending_migration_head())
+        FIRST_TIME_USER = "alembic_version" in pending_migration_head
+
+ if FIRST_TIME_USER or env.AGENTA_AUTO_MIGRATIONS:
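+            # Apply every pending revision up to the latest script head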
+ command.upgrade(alembic_cfg, "head")
+ click.echo(
+ click.style(
+ "\nMigration applied successfully. The container will now exit.",
+ fg="green",
+ ),
+ color=True,
+ )
+ else:
+ click.echo(
+ click.style(
+ "\nAll migrations are up-to-date. The container will now exit.",
+ fg="yellow",
+ ),
+ color=True,
+ )
+ except Exception as e:
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while applying migration: {traceback.format_exc()}\nThe container will now exit.",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
+
+
+async def check_for_new_migrations():
+ """
+    Checks for new migrations and notifies the user.
+ """
+
+ pending_migration_head = await get_pending_migration_head()
+    if pending_migration_head:
+ click.echo(
+ click.style(
+ f"\nWe have detected that there are pending database migrations {pending_migration_head} that need to be applied to keep the application up to date. To ensure the application functions correctly with the latest updates, please follow the guide here => https://docs.agenta.ai/self-host/migration/applying-schema-migration\n",
+ fg="yellow",
+ ),
+ color=True,
+ )
+ return
+
+
+def unique_constraint_exists(
+ engine: Engine, table_name: str, constraint_name: str
+) -> bool:
+ """
+ The function checks if a unique constraint with a specific name exists on a table in a PostgreSQL
+ database.
+
+ Args:
+ - engine (Engine): instance of a database engine that represents a connection to a database.
+ - table_name (str): name of the table to check the existence of the unique constraint.
+ - constraint_name (str): name of the unique constraint to check for existence.
+
+ Returns:
+ - returns a boolean value indicating whether a unique constraint with the specified `constraint_name` exists in the table.
+ """
+
+    with engine.connect() as conn:
+        # Use bound parameters instead of f-string interpolation to avoid
+        # SQL injection and quoting issues.
+        result = conn.execute(
+            text(
+                """
+                SELECT conname FROM pg_constraint
+                WHERE conname = :constraint_name
+                  AND conrelid = CAST(:table_name AS regclass);
+                """
+            ),
+            {"constraint_name": constraint_name, "table_name": table_name},
+        )
+        return result.fetchone() is not None
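+
+
+# A hypothetical usage sketch inside a migration (names are illustrative):
+#
+#     if not unique_constraint_exists(engine, "app_db", "uq_app_db_slug"):
+#         op.create_unique_constraint("uq_app_db_slug", "app_db", ["slug"])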
diff --git a/api/ee/databases/postgres/migrations/core/versions/0698355c7641_add_tables_for_testsets.py b/api/ee/databases/postgres/migrations/core/versions/0698355c7641_add_tables_for_testsets.py
new file mode 100644
index 0000000000..c0b8756dec
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/0698355c7641_add_tables_for_testsets.py
@@ -0,0 +1,388 @@
+"""add tables for testsets (artifacts, variants, & revisions)
+
+Revision ID: 0698355c7641
+Revises: 9698355c7649
+Create Date: 2025-04-24 07:27:45.801481
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "0698355c7641"
+down_revision: Union[str, None] = "9698355c7649"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # - ARTIFACTS --------------------------------------------------------------
+
+ op.create_table(
+ "testset_artifacts",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "metadata",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_testset_artifacts_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+ # - VARIANTS ---------------------------------------------------------------
+
+ op.create_table(
+ "testset_variants",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "artifact_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "metadata",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "artifact_id"],
+ ["testset_artifacts.project_id", "testset_artifacts.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_testset_variants_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ sa.Index(
+ "ix_testset_variants_project_id_artifact_id",
+ "project_id",
+ "artifact_id",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+ # - REVISIONS --------------------------------------------------------------
+
+ op.create_table(
+ "testset_revisions",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "artifact_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "variant_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "version",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "metadata",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "message",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "author",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "date",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "artifact_id"],
+ ["testset_artifacts.project_id", "testset_artifacts.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "variant_id"],
+ ["testset_variants.project_id", "testset_variants.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_testset_revisions_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ sa.Index(
+ "ix_testset_revisions_project_id_artifact_id",
+ "project_id",
+ "artifact_id",
+ ),
+ sa.Index(
+ "ix_testset_revisions_project_id_variant_id",
+ "project_id",
+ "variant_id",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # - REVISIONS --------------------------------------------------------------
+
+ op.drop_table("testset_revisions")
+
+ # --------------------------------------------------------------------------
+
+ # - VARIANTS ---------------------------------------------------------------
+
+ op.drop_table("testset_variants")
+
+ # --------------------------------------------------------------------------
+
+ # - ARTIFACTS --------------------------------------------------------------
+
+ op.drop_table("testset_artifacts")
+
+ # --------------------------------------------------------------------------
diff --git a/api/ee/databases/postgres/migrations/core/versions/0698355c7642_add_table_for_testcases.py b/api/ee/databases/postgres/migrations/core/versions/0698355c7642_add_table_for_testcases.py
new file mode 100644
index 0000000000..c7a98fc712
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/0698355c7642_add_table_for_testcases.py
@@ -0,0 +1,112 @@
+"""add tables for testcases (blobs)
+
+Revision ID: 0698355c7642
+Revises: 0698355c7641
+Create Date: 2025-04-24 07:27:45.801481
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "0698355c7642"
+down_revision: Union[str, None] = "0698355c7641"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # - BLOBS ------------------------------------------------------------------
+
+ op.create_table(
+ "testcase_blobs",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "set_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "set_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "set_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "set_id"],
+ ["testset_artifacts.project_id", "testset_artifacts.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_testcase_blobs_project_id_blob_slug",
+ "project_id",
+ "slug",
+ ),
+ sa.Index(
+ "ix_testcase_blobs_project_id_set_id",
+ "project_id",
+ "set_id",
+ ),
+ sa.Index(
+ "ix_testcase_blobs_project_id_set_id_id",
+ "project_id",
+ "set_id",
+ "id",
+ ),
+ sa.Index(
+ "ix_testcase_blobs_project_id_set_id_slug",
+ "project_id",
+ "set_id",
+ "slug",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # - BLOBS ------------------------------------------------------------------
+
+ op.drop_table("testcase_blobs")
+
+ # --------------------------------------------------------------------------
diff --git a/api/ee/databases/postgres/migrations/core/versions/0f086ebc2f83_extend_app_type.py b/api/ee/databases/postgres/migrations/core/versions/0f086ebc2f83_extend_app_type.py
new file mode 100644
index 0000000000..dd76961a2f
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/0f086ebc2f83_extend_app_type.py
@@ -0,0 +1,58 @@
+"""Extend app_type
+
+Revision ID: 0f086ebc2f83
+Revises: 425c68e8de6c
+Create Date: 2025-01-08 10:24:00
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "0f086ebc2f83"
+down_revision: Union[str, None] = "425c68e8de6c"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade():
+ # Define the new enum
+ temp_enum = sa.Enum(
+ "CHAT_TEMPLATE",
+ "COMPLETION_TEMPLATE",
+ "CHAT_SERVICE",
+ "COMPLETION_SERVICE",
+ "CUSTOM",
+ name="app_type_enum",
+ )
+ temp_enum.create(op.get_bind(), checkfirst=True)
+
+ # Update the column to use the new enum
+ op.execute(
+ "ALTER TABLE app_db ALTER COLUMN app_type TYPE app_type_enum USING app_type::text::app_type_enum"
+ )
+
+ # Drop the old enum
+ op.execute("DROP TYPE app_enumtype")
+
+
+def downgrade():
+ # Define the old enum
+ temp_enum = sa.Enum(
+ "CHAT_TEMPLATE",
+ "COMPLETION_TEMPLATE",
+ "CUSTOM",
+ name="app_enumtype",
+ )
+ temp_enum.create(op.get_bind(), checkfirst=True)
+
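+    # NOTE: the cast below fails if any rows still hold the values added in
+    # upgrade() (CHAT_SERVICE or COMPLETION_SERVICE); such rows would need to
+    # be remapped to one of the old values first.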
+ # Update the column to use the old enum
+ op.execute(
+ "ALTER TABLE app_db ALTER COLUMN app_type TYPE app_enumtype USING app_type::text::app_enumtype"
+ )
+
+ # Drop the new enum
+ op.execute("DROP TYPE app_type_enum")
diff --git a/api/ee/databases/postgres/migrations/core/versions/12f477990f1e_add_meters.py b/api/ee/databases/postgres/migrations/core/versions/12f477990f1e_add_meters.py
new file mode 100644
index 0000000000..2e5c4ef580
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/12f477990f1e_add_meters.py
@@ -0,0 +1,54 @@
+"""add meters
+
+Revision ID: 12f477990f1e
+Revises: 6965776e6940
+Create Date: 2025-01-25 16:51:06.233811
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+from sqlalchemy.sql import func
+
+# revision identifiers, used by Alembic.
+revision: str = "12f477990f1e"
+down_revision: Union[str, None] = "6965776e6940"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "meters",
+ sa.Column(
+ "key",
+ sa.Enum(
+ "USERS",
+ "APPLICATIONS",
+ "EVALUATIONS",
+ "TRACES",
+ name="meters_type",
+ ),
+ nullable=False,
+ ),
+ sa.Column("value", sa.BigInteger(), nullable=False),
+ sa.Column("synced", sa.BigInteger(), nullable=False),
+ sa.Column("organization_id", sa.UUID(), nullable=False),
+ sa.Column("year", sa.SmallInteger(), nullable=True, server_default="0"),
+ sa.Column("month", sa.SmallInteger(), nullable=True, server_default="0"),
+ sa.PrimaryKeyConstraint("organization_id", "key", "year", "month"),
+ sa.ForeignKeyConstraint(["organization_id"], ["subscriptions.organization_id"]),
+ )
+ op.create_index("idx_synced_value", "meters", ["synced", "value"], unique=False)
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index("idx_synced_value", table_name="meters")
+ op.drop_table("meters")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/154098b1e56c_set_user_id_column_in_db_entities_to_be_.py b/api/ee/databases/postgres/migrations/core/versions/154098b1e56c_set_user_id_column_in_db_entities_to_be_.py
new file mode 100644
index 0000000000..411101fa4d
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/154098b1e56c_set_user_id_column_in_db_entities_to_be_.py
@@ -0,0 +1,69 @@
+"""Set user_id column in db entities to be optional --- prep for project_id scoping
+
+Revision ID: 154098b1e56c
+Revises: ad0987a77380
+Create Date: 2024-09-17 06:44:31.061378
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "154098b1e56c"
+down_revision: Union[str, None] = "ad0987a77380"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column("docker_images", "user_id", existing_type=sa.UUID, nullable=True)
+ op.alter_column("app_db", "user_id", existing_type=sa.UUID, nullable=True)
+ op.alter_column("deployments", "user_id", existing_type=sa.UUID, nullable=True)
+ op.alter_column("bases", "user_id", existing_type=sa.UUID, nullable=True)
+ op.alter_column("app_variants", "user_id", existing_type=sa.UUID, nullable=True)
+ op.alter_column("environments", "user_id", existing_type=sa.UUID, nullable=True)
+ op.alter_column("testsets", "user_id", existing_type=sa.UUID, nullable=True)
+ op.alter_column(
+ "evaluators_configs", "user_id", existing_type=sa.UUID, nullable=True
+ )
+ op.alter_column(
+ "human_evaluations", "user_id", existing_type=sa.UUID, nullable=True
+ )
+ op.alter_column(
+ "human_evaluations_scenarios", "user_id", existing_type=sa.UUID, nullable=True
+ )
+ op.alter_column("evaluations", "user_id", existing_type=sa.UUID, nullable=True)
+ op.alter_column(
+ "evaluation_scenarios", "user_id", existing_type=sa.UUID, nullable=True
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column("docker_images", "user_id", existing_type=sa.UUID, nullable=False)
+ op.alter_column("app_db", "user_id", existing_type=sa.UUID, nullable=False)
+ op.alter_column("deployments", "user_id", existing_type=sa.UUID, nullable=False)
+ op.alter_column("bases", "user_id", existing_type=sa.UUID, nullable=False)
+ op.alter_column("app_variants", "user_id", existing_type=sa.UUID, nullable=False)
+ op.alter_column("environments", "user_id", existing_type=sa.UUID, nullable=False)
+ op.alter_column("testsets", "user_id", existing_type=sa.UUID, nullable=False)
+ op.alter_column(
+ "evaluators_configs", "user_id", existing_type=sa.UUID, nullable=False
+ )
+ op.alter_column(
+ "human_evaluations", "user_id", existing_type=sa.UUID, nullable=False
+ )
+ op.alter_column(
+ "human_evaluations_scenarios", "user_id", existing_type=sa.UUID, nullable=False
+ )
+ op.alter_column("evaluations", "user_id", existing_type=sa.UUID, nullable=False)
+ op.alter_column(
+ "evaluation_scenarios", "user_id", existing_type=sa.UUID, nullable=False
+ )
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/1c2d3e4f5a6b_workspaces_migration_to_add_default_project_and_membership.py b/api/ee/databases/postgres/migrations/core/versions/1c2d3e4f5a6b_workspaces_migration_to_add_default_project_and_membership.py
new file mode 100644
index 0000000000..2e52bcfdc9
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/1c2d3e4f5a6b_workspaces_migration_to_add_default_project_and_membership.py
@@ -0,0 +1,40 @@
+"""workspaces migration to add default project and memberships
+
+Revision ID: 1c2d3e4f5a6b
+Revises: 6aafdfc2befb
+Create Date: 2024-09-03 08:05:58.870573
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import context
+import sqlalchemy as sa
+
+from ee.databases.postgres.migrations.core.data_migrations.workspaces import (
+ create_default_project_for_workspaces,
+ create_default_project_memberships,
+ remove_default_projects_from_workspaces,
+)
+
+
+# revision identifiers, used by Alembic.
+revision: str = "1c2d3e4f5a6b"
+down_revision: Union[str, None] = "6aafdfc2befb"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### custom migration ###
+ connection = context.get_bind() # get database connect from alembic context
+ create_default_project_for_workspaces(session=connection)
+ create_default_project_memberships(session=connection)
+ # ### end custom migration ###
+
+
+def downgrade() -> None:
+ # ### custom migration ###
+ connection = context.get_bind() # get database connect from alembic context
+ remove_default_projects_from_workspaces(session=connection)
+ # ### end custom migration ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/24f8bdb390ee_added_the_app_type_column_to_the_app_db_.py b/api/ee/databases/postgres/migrations/core/versions/24f8bdb390ee_added_the_app_type_column_to_the_app_db_.py
new file mode 100644
index 0000000000..de300ce7fa
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/24f8bdb390ee_added_the_app_type_column_to_the_app_db_.py
@@ -0,0 +1,59 @@
+"""Added the 'app_type' column to the 'app_db' table
+
+Revision ID: 24f8bdb390ee
+Revises: 847972cfa14a
+Create Date: 2024-09-09 07:32:45.053125
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "24f8bdb390ee"
+down_revision: Union[str, None] = "847972cfa14a"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+
+ # Create the enum type first
+ app_enumtype = sa.Enum(
+ "CHAT_TEMPLATE",
+ "COMPLETION_TEMPLATE",
+ "CUSTOM",
+ name="app_enumtype",
+ )
+ app_enumtype.create(op.get_bind(), checkfirst=True)
+
+ # Then add the column using the enum type
+ op.add_column(
+ "app_db",
+ sa.Column(
+ "app_type",
+ app_enumtype,
+ nullable=True,
+ ),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+
+ # Drop the column first
+ op.drop_column("app_db", "app_type")
+
+ # Then drop the enum type
+ app_enumtype = sa.Enum(
+ "CHAT_TEMPLATE",
+ "COMPLETION_TEMPLATE",
+ "CUSTOM",
+ name="app_enumtype",
+ )
+ app_enumtype.drop(op.get_bind(), checkfirst=True)
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/2a91436752f9_update_secrets_data_schema_type.py b/api/ee/databases/postgres/migrations/core/versions/2a91436752f9_update_secrets_data_schema_type.py
new file mode 100644
index 0000000000..460986b788
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/2a91436752f9_update_secrets_data_schema_type.py
@@ -0,0 +1,64 @@
+"""update secrets data schema type
+
+Revision ID: 2a91436752f9
+Revises: 0f086ebc2f83
+Create Date: 2025-02-10 10:38:31.555604
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import context, op
+
+from oss.databases.postgres.migrations.core.data_migrations.secrets import (
+ rename_and_update_secrets_data_schema,
+ revert_rename_and_update_secrets_data_schema,
+)
+
+
+# revision identifiers, used by Alembic.
+revision: str = "2a91436752f9"
+down_revision: Union[str, None] = "0f086ebc2f83"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands to do data migration for secrets ###
+ connection = context.get_bind()
+
+ # Define the new enum
+ secret_kinds = sa.Enum("PROVIDER_KEY", "CUSTOM_PROVIDER", name="secretkind_enum")
+ secret_kinds.create(bind=connection, checkfirst=True)
+
+ # Update the column to make use of the new enum
+ op.execute(
+ "ALTER TABLE secrets ALTER COLUMN kind TYPE secretkind_enum USING kind::text::secretkind_enum"
+ )
+
+ # Drop the old enum
+ op.execute("DROP TYPE IF EXISTS secretkind")
+
+ rename_and_update_secrets_data_schema(session=connection)
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands to do data migration for secrets ###
+ connection = context.get_bind()
+
+ # Define the new enum
+ secret_kinds = sa.Enum("PROVIDER_KEY", name="secretkind")
+ secret_kinds.create(bind=connection, checkfirst=True)
+
+ # Update the column to make use of the new enum
+ op.execute(
+ "ALTER TABLE secrets ALTER COLUMN kind TYPE secretkind USING kind::text::secretkind"
+ )
+
+ # Drop the old enum
+ op.execute("DROP TYPE IF EXISTS secretkind_enum")
+
+ revert_rename_and_update_secrets_data_schema(session=connection)
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/30dcf07de96a_add_tables_for_queries.py b/api/ee/databases/postgres/migrations/core/versions/30dcf07de96a_add_tables_for_queries.py
new file mode 100644
index 0000000000..735a859ce0
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/30dcf07de96a_add_tables_for_queries.py
@@ -0,0 +1,403 @@
+"""add tables for queries (artifacts, variants, & revisions)
+
+Revision ID: 30dcf07de96a
+Revises: aa1b2c3d4e5f
+Create Date: 2025-07-30 14:55:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "30dcf07de96a"
+down_revision: Union[str, None] = "aa1b2c3d4e5f"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # - ARTIFACTS --------------------------------------------------------------
+
+ op.create_table(
+ "query_artifacts",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_query_artifacts_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+ # - VARIANTS ---------------------------------------------------------------
+
+ op.create_table(
+ "query_variants",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "artifact_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "artifact_id"],
+ ["query_artifacts.project_id", "query_artifacts.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_query_variants_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ sa.Index(
+ "ix_query_variants_project_id_artifact_id",
+ "project_id",
+ "artifact_id",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+ # - REVISIONS --------------------------------------------------------------
+
+ op.create_table(
+ "query_revisions",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "artifact_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "variant_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "version",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "message",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "author",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "date",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "artifact_id"],
+ ["query_artifacts.project_id", "query_artifacts.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "variant_id"],
+ ["query_variants.project_id", "query_variants.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_query_revisions_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ sa.Index(
+ "ix_query_revisions_project_id_artifact_id",
+ "project_id",
+ "artifact_id",
+ ),
+ sa.Index(
+ "ix_query_revisions_project_id_variant_id",
+ "project_id",
+ "variant_id",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # - REVISIONS --------------------------------------------------------------
+
+ op.drop_table("query_revisions")
+
+ # --------------------------------------------------------------------------
+
+ # - VARIANTS ---------------------------------------------------------------
+
+ op.drop_table("query_variants")
+
+ # --------------------------------------------------------------------------
+
+ # - ARTIFACTS --------------------------------------------------------------
+
+ op.drop_table("query_artifacts")
+
+ # --------------------------------------------------------------------------
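+
+# Illustrative note (not part of the migration): the composite foreign keys
+# above tie every variant and revision to an artifact in the *same* project,
+# so cross-project references are rejected at the database level, e.g.:
+#
+#     INSERT INTO query_variants (project_id, artifact_id, ...)
+#     VALUES (:project_a, :artifact_from_project_b, ...);
+#     -- fails: (project_id, artifact_id) must exist in query_artifacts
+#
+# downgrade() drops the tables in reverse dependency order (revisions,
+# variants, artifacts) so no composite FK is left dangling mid-way.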
diff --git a/api/ee/databases/postgres/migrations/core/versions/320a4a7ee0c7_set_columns_in_api_key_table_to_be_.py b/api/ee/databases/postgres/migrations/core/versions/320a4a7ee0c7_set_columns_in_api_key_table_to_be_.py
new file mode 100644
index 0000000000..463285cacb
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/320a4a7ee0c7_set_columns_in_api_key_table_to_be_.py
@@ -0,0 +1,61 @@
+"""set columns in api_key table to be nullable -- prep for access control
+
+Revision ID: 320a4a7ee0c7
+Revises: b3f6bff547d4
+Create Date: 2024-10-22 10:57:36.983190
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op, context
+
+from ee.databases.postgres.migrations.core.data_migrations.api_keys import (
+ update_api_key_to_make_use_of_project_id,
+ revert_api_key_to_make_use_of_workspace_id,
+)
+
+
+# revision identifiers, used by Alembic.
+revision: str = "320a4a7ee0c7"
+down_revision: Union[str, None] = "b3f6bff547d4"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ connection = context.get_bind()
+ op.alter_column("api_keys", "user_id", nullable=True)
+ op.alter_column("api_keys", "workspace_id", nullable=True)
+ op.add_column("api_keys", sa.Column("project_id", sa.UUID(), nullable=True))
+ op.add_column("api_keys", sa.Column("created_by_id", sa.UUID(), nullable=True))
+ # ================== Custom data migration ====================== #
+ update_api_key_to_make_use_of_project_id(session=connection)
+ # ================== Custom data migration ====================== #
+ op.drop_column("api_keys", "user_id")
+ op.drop_column("api_keys", "workspace_id")
+ op.alter_column("api_keys", "created_by_id", nullable=False)
+ op.alter_column("api_keys", "project_id", nullable=False)
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ connection = context.get_bind()
+ inspector = sa.inspect(connection)
+ columns = [column["name"] for column in inspector.get_columns("api_keys")]
+ if "user_id" not in columns:
+ op.add_column("api_keys", sa.Column("user_id", sa.String(), nullable=True))
+
+ if "workspace_id" not in columns:
+ op.add_column("api_keys", sa.Column("workspace_id", sa.String(), nullable=True))
+ # ================== Custom data migration ====================== #
+ revert_api_key_to_make_use_of_workspace_id(session=connection)
+ # ================== Custom data migration ====================== #
+ op.drop_column("api_keys", "created_by_id")
+ op.drop_column("api_keys", "project_id")
+ op.alter_column("api_keys", "user_id", nullable=False)
+ op.alter_column("api_keys", "workspace_id", nullable=False)
+ # ### end Alembic commands ###
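+
+# Illustrative sketch (assumption, not the actual helper): before the new
+# columns are made NOT NULL, update_api_key_to_make_use_of_project_id is
+# expected to backfill them roughly along these lines:
+#
+#     UPDATE api_keys AS k
+#     SET project_id    = p.id,
+#         created_by_id = k.user_id::uuid
+#     FROM projects AS p
+#     WHERE p.workspace_id = k.workspace_id::uuid;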
diff --git a/api/ee/databases/postgres/migrations/core/versions/3b5f5652f611_populate_runs_references.py b/api/ee/databases/postgres/migrations/core/versions/3b5f5652f611_populate_runs_references.py
new file mode 100644
index 0000000000..bb43067ccb
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/3b5f5652f611_populate_runs_references.py
@@ -0,0 +1,77 @@
+"""Populate runs references
+
+Revision ID: 3b5f5652f611
+Revises: b3f15a7140ab
+Create Date: 2025-10-07 12:00:00
+"""
+
+from typing import Sequence, Union
+from alembic import op
+import sqlalchemy as sa
+import json
+
+# revision identifiers, used by Alembic.
+revision: str = "3b5f5652f611"
+down_revision: Union[str, None] = "b3f15a7140ab"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ conn = op.get_bind()
+
+ rows = conn.execute(
+ sa.text('SELECT id, data, "references" FROM evaluation_runs')
+ ).fetchall()
+
+ for run_id, data, existing_refs in rows:
+ if existing_refs not in (None, [], {}):
+ continue
+ if not data or "steps" not in data:
+ continue
+
+ refs_out = []
+ seen = set()
+
+ for step in data.get("steps", []):
+ refs = step.get("references", {})
+ if not isinstance(refs, dict):
+ continue
+
+ for key, ref in refs.items():
+ if not isinstance(ref, dict):
+ continue
+
+ entry = {"key": key}
+
+ if ref.get("id") is not None:
+ entry["id"] = ref["id"]
+ if ref.get("slug") is not None:
+ entry["slug"] = ref["slug"]
+ if ref.get("version") is not None:
+ entry["version"] = ref["version"]
+
+ dedup_key = (
+ entry.get("id"),
+ entry["key"],
+ entry.get("slug"),
+ entry.get("version"),
+ )
+ if dedup_key in seen:
+ continue
+ seen.add(dedup_key)
+
+ refs_out.append(entry)
+
+ if refs_out:
+ conn.execute(
+ sa.text(
+ 'UPDATE evaluation_runs SET "references" = :refs WHERE id = :id'
+ ),
+ {"refs": json.dumps(refs_out), "id": run_id},
+ )
+
+
+def downgrade() -> None:
+ conn = op.get_bind()
+ conn.execute(sa.text('UPDATE evaluation_runs SET "references" = NULL'))
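+
+# Illustrative example (not executed): for a run whose data column holds
+#
+#     {"steps": [{"references": {"testset": {"id": "…", "version": "1"}}}]}
+#
+# the loop above flattens and de-duplicates that into
+#
+#     [{"key": "testset", "id": "…", "version": "1"}]
+#
+# and stores it in the top-level "references" column; runs that already have
+# references are left untouched, and the downgrade simply clears the column.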
diff --git a/api/ee/databases/postgres/migrations/core/versions/425c68e8de6c_add_secrets_dbe_model.py b/api/ee/databases/postgres/migrations/core/versions/425c68e8de6c_add_secrets_dbe_model.py
new file mode 100644
index 0000000000..b58d9cc9ce
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/425c68e8de6c_add_secrets_dbe_model.py
@@ -0,0 +1,53 @@
+"""add secrets dbe model
+
+Revision ID: 425c68e8de6c
+Revises: 73a2d8cfaa3d
+Create Date: 2024-12-05 10:30:54.986714
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+from oss.src.dbs.postgres.secrets.custom_fields import PGPString
+
+# revision identifiers, used by Alembic.
+revision: str = "425c68e8de6c"
+down_revision: Union[str, None] = "73a2d8cfaa3d"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.execute("CREATE EXTENSION IF NOT EXISTS pgcrypto;")
+ op.create_table(
+ "secrets",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("kind", sa.Enum("PROVIDER_KEY", name="secretkind"), nullable=True),
+ sa.Column("data", PGPString(), nullable=True),
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column("updated_at", sa.TIMESTAMP(timezone=True), nullable=True),
+ sa.Column("updated_by_id", sa.UUID(), nullable=True),
+ sa.Column("name", sa.String(), nullable=True),
+ sa.Column("description", sa.String(), nullable=True),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table("secrets")
+ op.execute("DROP TYPE IF EXISTS secretkind;")
+ op.execute("DROP EXTENSION IF EXISTS pgcrypto;")
+ # ### end Alembic commands ###
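+
+# Illustrative note: PGPString (imported above) is assumed to build on
+# pgcrypto's symmetric PGP functions, which is why the extension is created
+# first. A sketch of the underlying SQL, assuming a passphrase setting:
+#
+#     INSERT INTO secrets (data) VALUES (pgp_sym_encrypt(:plaintext, :key));
+#     SELECT pgp_sym_decrypt(data::bytea, :key) FROM secrets;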
diff --git a/api/ee/databases/postgres/migrations/core/versions/4d9a58ff8f98_add_default_project_to_scoped_model_.py b/api/ee/databases/postgres/migrations/core/versions/4d9a58ff8f98_add_default_project_to_scoped_model_.py
new file mode 100644
index 0000000000..22b9387d66
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/4d9a58ff8f98_add_default_project_to_scoped_model_.py
@@ -0,0 +1,42 @@
+"""add default project to scoped model entities
+
+Revision ID: 4d9a58ff8f98
+Revises: d0b8e05ca190
+Create Date: 2024-09-17 07:16:57.740642
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import context
+
+from ee.databases.postgres.migrations.core.data_migrations.projects import (
+ add_project_id_to_db_entities,
+ remove_project_id_from_db_entities,
+ repair_evaluation_scenario_to_have_project_id,
+ repair_evaluator_configs_to_have_project_id,
+)
+
+
+# revision identifiers, used by Alembic.
+revision: str = "4d9a58ff8f98"
+down_revision: Union[str, None] = "d0b8e05ca190"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### custom command ###
+    connection = context.get_bind()  # get database connection from alembic context
+    add_project_id_to_db_entities(session=connection)
+    repair_evaluation_scenario_to_have_project_id(session=connection)
+    repair_evaluator_configs_to_have_project_id(session=connection)
+ # ### end custom command ###
+
+
+def downgrade() -> None:
+ # ### custom command ###
+    connection = context.get_bind()  # get database connection from alembic context
+ remove_project_id_from_db_entities(session=connection)
+ # ### end custom command ###
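+
+# Illustrative sketch (assumption, not the actual helpers): the data
+# migrations above are expected to stamp each scoped row with its project,
+# conceptually something like:
+#
+#     UPDATE <scoped_table>
+#     SET project_id = <default project for the row's workspace>
+#     WHERE project_id IS NULL;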
diff --git a/api/ee/databases/postgres/migrations/core/versions/54e81e9eed88_add_tables_for_evaluations.py b/api/ee/databases/postgres/migrations/core/versions/54e81e9eed88_add_tables_for_evaluations.py
new file mode 100644
index 0000000000..f8549687ce
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/54e81e9eed88_add_tables_for_evaluations.py
@@ -0,0 +1,514 @@
+"""add tables for evaluations
+
+Revision ID: 54e81e9eed88
+Revises: 9698355c7650
+Create Date: 2025-04-24 07:27:45.801481
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "54e81e9eed88"
+down_revision: Union[str, None] = "9698355c7650"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.rename_table(
+ "evaluation_aggregated_results",
+ "auto_evaluation_aggregated_results",
+ )
+ op.rename_table(
+ "evaluation_evaluator_configs",
+ "auto_evaluation_evaluator_configs",
+ )
+ op.rename_table(
+ "evaluation_scenario_results",
+ "auto_evaluation_scenario_results",
+ )
+ op.rename_table(
+ "evaluation_scenarios",
+ "auto_evaluation_scenarios",
+ )
+ op.rename_table(
+ "evaluations",
+ "auto_evaluations",
+ )
+ op.rename_table(
+ "evaluators_configs",
+ "auto_evaluator_configs",
+ )
+
+ op.create_table(
+ "evaluation_runs",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "status",
+ sa.VARCHAR,
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_evaluation_runs_project_id",
+ "project_id",
+ ),
+ )
+
+ op.create_table(
+ "evaluation_scenarios",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "status",
+ sa.VARCHAR,
+ nullable=False,
+ ),
+ sa.Column(
+ "run_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "run_id"],
+ ["evaluation_runs.project_id", "evaluation_runs.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_evaluation_scenarios_project_id",
+ "project_id",
+ ),
+ sa.Index(
+ "ix_evaluation_scenarios_run_id",
+ "run_id",
+ ),
+ )
+
+ op.create_table(
+ "evaluation_steps",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "status",
+ sa.VARCHAR,
+ nullable=False,
+ ),
+ sa.Column(
+ "timestamp",
+ sa.TIMESTAMP(timezone=True),
+ nullable=False,
+ ),
+ sa.Column(
+ "key",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "repeat_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "retry_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "hash_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "trace_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "testcase_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "error",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "scenario_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "run_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "run_id"],
+ ["evaluation_runs.project_id", "evaluation_runs.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "scenario_id"],
+ ["evaluation_scenarios.project_id", "evaluation_scenarios.id"],
+ ondelete="CASCADE",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "run_id",
+ "scenario_id",
+ "key",
+ "retry_id",
+ "retry_id",
+ ),
+ sa.Index(
+ "ix_evaluation_steps_project_id",
+ "project_id",
+ ),
+ sa.Index(
+ "ix_evaluation_steps_scenario_id",
+ "scenario_id",
+ ),
+ sa.Index(
+ "ix_evaluation_steps_run_id",
+ "run_id",
+ ),
+ )
+
+ op.create_table(
+ "evaluation_metrics",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "status",
+ sa.VARCHAR,
+ nullable=False,
+ ),
+ sa.Column(
+ "scenario_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "run_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "run_id"],
+ ["evaluation_runs.project_id", "evaluation_runs.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "scenario_id"],
+ ["evaluation_scenarios.project_id", "evaluation_scenarios.id"],
+ ondelete="CASCADE",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "run_id",
+ "scenario_id",
+ ),
+ sa.Index(
+ "ix_evaluation_metrics_project_id",
+ "project_id",
+ ),
+ sa.Index(
+ "ix_evaluation_metrics_run_id",
+ "run_id",
+ ),
+ sa.Index(
+ "ix_evaluation_metrics_scenario_id",
+ "scenario_id",
+ ),
+ )
+
+
+def downgrade() -> None:
+ op.drop_table("evaluation_metrics")
+ op.drop_table("evaluation_steps")
+ op.drop_table("evaluation_scenarios")
+ op.drop_table("evaluation_runs")
+
+ op.rename_table(
+ "auto_evaluator_configs",
+ "evaluators_configs",
+ )
+
+ op.rename_table(
+ "auto_evaluations",
+ "evaluations",
+ )
+ op.rename_table(
+ "auto_evaluation_scenarios",
+ "evaluation_scenarios",
+ )
+ op.rename_table(
+ "auto_evaluation_scenario_results",
+ "evaluation_scenario_results",
+ )
+ op.rename_table(
+ "auto_evaluation_evaluator_configs",
+ "evaluation_evaluator_configs",
+ )
+ op.rename_table(
+ "auto_evaluation_aggregated_results",
+ "evaluation_aggregated_results",
+ )
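+
+# Illustrative note: every table above keys on the composite (project_id, id)
+# and points back to evaluation_runs / evaluation_scenarios with composite
+# foreign keys, so children can never cross projects and deleting a run
+# cascades through scenarios, steps, and metrics. Sketch of the lookup the
+# ix_evaluation_steps_run_id index serves:
+#
+#     SELECT * FROM evaluation_steps WHERE run_id = :run_id;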
diff --git a/api/ee/databases/postgres/migrations/core/versions/5a71b3f140ab_fix_all_preview_schemas.py b/api/ee/databases/postgres/migrations/core/versions/5a71b3f140ab_fix_all_preview_schemas.py
new file mode 100644
index 0000000000..62d244d1e1
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/5a71b3f140ab_fix_all_preview_schemas.py
@@ -0,0 +1,426 @@
+"""fix all preview schemas
+
+Revision ID: 5a71b3f140ab
+Revises: 8089ee7692d1
+Create Date: 2025-09-03 14:28:06.362553
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+revision: str = "5a71b3f140ab"
+down_revision: Union[str, None] = "8089ee7692d1"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # EVALUATION RUNS ----------------------------------------------------------
+
+ op.add_column(
+ "evaluation_runs",
+ sa.Column(
+ "references",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+
+ op.create_index(
+ "ix_evaluation_runs_references",
+ "evaluation_runs",
+ ["references"],
+ unique=False,
+ postgresql_using="gin",
+ postgresql_ops={"references": "jsonb_path_ops"},
+ )
+ op.create_index(
+ "ix_evaluation_runs_flags",
+ "evaluation_runs",
+ ["flags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+ op.create_index(
+ "ix_evaluation_runs_tags",
+ "evaluation_runs",
+ ["tags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+
+ # EVALUATION SCENARIOS -----------------------------------------------------
+
+ op.add_column(
+ "evaluation_scenarios",
+ sa.Column(
+ "interval",
+ postgresql.INTEGER(),
+ nullable=True,
+ ),
+ )
+
+ op.create_index(
+ "ix_evaluation_scenarios_timestamp_interval",
+ "evaluation_scenarios",
+ ["timestamp", "interval"],
+ unique=False,
+ )
+ op.create_index(
+ "ix_evaluation_scenarios_flags",
+ "evaluation_scenarios",
+ ["flags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+ op.create_index(
+ "ix_evaluation_scenarios_tags",
+ "evaluation_scenarios",
+ ["tags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+
+ # EVALUATION RESULTS -------------------------------------------------------
+
+ op.alter_column(
+ "evaluation_steps",
+ "timestamp",
+ existing_type=postgresql.TIMESTAMP(timezone=True),
+ nullable=True,
+ )
+ op.add_column(
+ "evaluation_steps",
+ sa.Column(
+ "interval",
+ postgresql.INTEGER(),
+ nullable=True,
+ ),
+ )
+
+ op.create_unique_constraint(
+ "uq_evaluation_steps_project_run_scenario_step_repeat",
+ "evaluation_steps",
+ ["project_id", "run_id", "scenario_id", "step_key", "repeat_idx"],
+ )
+
+ op.create_index(
+ "ix_evaluation_steps_tags",
+ "evaluation_steps",
+ ["tags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+ op.create_index(
+ "ix_evaluation_steps_flags",
+ "evaluation_steps",
+ ["flags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+ op.create_index(
+ "ix_evaluation_steps_timestamp_interval",
+ "evaluation_steps",
+ ["timestamp", "interval"],
+ unique=False,
+ )
+ op.create_index(
+ "ix_evaluation_steps_repeat_idx",
+ "evaluation_steps",
+ ["repeat_idx"],
+ unique=False,
+ )
+ op.create_index(
+ "ix_evaluation_steps_step_key",
+ "evaluation_steps",
+ ["step_key"],
+ unique=False,
+ )
+
+ op.rename_table("evaluation_steps", "evaluation_results")
+
+ op.execute(
+ "ALTER TABLE evaluation_results RENAME CONSTRAINT "
+ "uq_evaluation_steps_project_run_scenario_step_repeat TO "
+ "uq_evaluation_results_project_run_scenario_step_repeat"
+ )
+
+ op.execute(
+ "ALTER INDEX ix_evaluation_steps_project_id RENAME TO ix_evaluation_results_project_id"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_steps_run_id RENAME TO ix_evaluation_results_run_id"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_steps_scenario_id RENAME TO ix_evaluation_results_scenario_id"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_steps_step_key RENAME TO ix_evaluation_results_step_key"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_steps_repeat_idx RENAME TO ix_evaluation_results_repeat_idx"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_steps_timestamp_interval RENAME TO ix_evaluation_results_timestamp_interval"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_steps_flags RENAME TO ix_evaluation_results_flags"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_steps_tags RENAME TO ix_evaluation_results_tags"
+ )
+
+ # EVALUATION METRICS -------------------------------------------------------
+
+ op.add_column(
+ "evaluation_metrics",
+ sa.Column(
+ "interval",
+ postgresql.INTEGER(),
+ nullable=True,
+ ),
+ )
+
+ op.drop_constraint(
+ op.f("evaluation_metrics_project_id_run_id_scenario_id_key"),
+ "evaluation_metrics",
+ type_="unique",
+ )
+
+ op.create_unique_constraint(
+ "uq_evaluation_metrics_project_run_scenario_timestamp_interval",
+ "evaluation_metrics",
+ ["project_id", "run_id", "scenario_id", "timestamp", "interval"],
+ )
+
+ op.create_index(
+ "ix_evaluation_metrics_timestamp_interval",
+ "evaluation_metrics",
+ ["timestamp", "interval"],
+ unique=False,
+ )
+ op.create_index(
+ "ix_evaluation_metrics_flags",
+ "evaluation_metrics",
+ ["flags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+ op.create_index(
+ "ix_evaluation_metrics_tags",
+ "evaluation_metrics",
+ ["tags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+
+ # EVALUATION QUEUES --------------------------------------------------------
+
+ op.add_column(
+ "evaluation_queues",
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "evaluation_queues",
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "evaluation_queues",
+ sa.Column(
+ "status",
+ sa.VARCHAR(),
+ nullable=False,
+ server_default=sa.text("'pending'::varchar"),
+ ),
+ )
+
+ op.create_index(
+ "ix_evaluation_queues_flags",
+ "evaluation_queues",
+ ["flags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+ op.create_index(
+ "ix_evaluation_queues_tags",
+ "evaluation_queues",
+ ["tags"],
+ unique=False,
+ postgresql_using="gin",
+ )
+
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # EVALUATION QUEUES --------------------------------------------------------
+
+ op.drop_index(
+ "ix_evaluation_queues_tags",
+ table_name="evaluation_queues",
+ )
+ op.drop_index(
+ "ix_evaluation_queues_flags",
+ table_name="evaluation_queues",
+ )
+
+ op.drop_column(
+ "evaluation_queues",
+ "status",
+ )
+ op.drop_column(
+ "evaluation_queues",
+ "description",
+ )
+ op.drop_column(
+ "evaluation_queues",
+ "name",
+ )
+
+ # EVALUATION METRICS -------------------------------------------------------
+
+ op.drop_index(
+ "ix_evaluation_metrics_tags",
+ table_name="evaluation_metrics",
+ )
+ op.drop_index(
+ "ix_evaluation_metrics_flags",
+ table_name="evaluation_metrics",
+ )
+ op.drop_index(
+ "ix_evaluation_metrics_timestamp_interval",
+ table_name="evaluation_metrics",
+ )
+
+ op.drop_constraint(
+ "uq_evaluation_metrics_project_run_scenario_timestamp_interval",
+ "evaluation_metrics",
+ type_="unique",
+ )
+
+ op.create_unique_constraint(
+ op.f("evaluation_metrics_project_id_run_id_scenario_id_key"),
+ "evaluation_metrics",
+ ["project_id", "run_id", "scenario_id"],
+ postgresql_nulls_not_distinct=False,
+ )
+
+ op.drop_column("evaluation_metrics", "interval")
+
+ # EVALUATION RESULTS -------------------------------------------------------
+
+ op.execute(
+ "ALTER INDEX ix_evaluation_results_tags RENAME TO ix_evaluation_steps_tags"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_results_flags RENAME TO ix_evaluation_steps_flags"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_results_timestamp_interval RENAME TO ix_evaluation_steps_timestamp_interval"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_results_repeat_idx RENAME TO ix_evaluation_steps_repeat_idx"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_results_step_key RENAME TO ix_evaluation_steps_step_key"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_results_scenario_id RENAME TO ix_evaluation_steps_scenario_id"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_results_run_id RENAME TO ix_evaluation_steps_run_id"
+ )
+ op.execute(
+ "ALTER INDEX ix_evaluation_results_project_id RENAME TO ix_evaluation_steps_project_id"
+ )
+
+ op.execute(
+ "ALTER TABLE evaluation_results RENAME CONSTRAINT uq_evaluation_results_project_run_scenario_step_repeat "
+ "TO uq_evaluation_steps_project_run_scenario_step_repeat"
+ )
+
+ op.rename_table("evaluation_results", "evaluation_steps")
+
+ op.drop_index(
+ "ix_evaluation_steps_tags",
+ table_name="evaluation_steps",
+ )
+ op.drop_index(
+ "ix_evaluation_steps_flags",
+ table_name="evaluation_steps",
+ )
+ op.drop_index(
+ "ix_evaluation_steps_timestamp_interval",
+ table_name="evaluation_steps",
+ )
+ op.drop_index(
+ "ix_evaluation_steps_repeat_idx",
+ table_name="evaluation_steps",
+ )
+ op.drop_index(
+ "ix_evaluation_steps_step_key",
+ table_name="evaluation_steps",
+ )
+
+ op.drop_constraint(
+ "uq_evaluation_steps_project_run_scenario_step_repeat",
+ "evaluation_steps",
+ type_="unique",
+ )
+
+ op.alter_column(
+ "evaluation_steps",
+ "timestamp",
+ existing_type=postgresql.TIMESTAMP(timezone=True),
+ nullable=False,
+ )
+
+ op.drop_column("evaluation_steps", "interval")
+
+ # EVALUATION SCENARIOS -----------------------------------------------------
+
+ op.drop_index(
+ "ix_evaluation_scenarios_tags",
+ table_name="evaluation_scenarios",
+ )
+ op.drop_index(
+ "ix_evaluation_scenarios_flags",
+ table_name="evaluation_scenarios",
+ )
+ op.drop_index(
+ "ix_evaluation_scenarios_timestamp_interval",
+ table_name="evaluation_scenarios",
+ )
+
+ op.drop_column("evaluation_scenarios", "interval")
+
+ # EVALUATION RUNS ----------------------------------------------------------
+
+ op.drop_index(
+ "ix_evaluation_runs_tags",
+ table_name="evaluation_runs",
+ )
+ op.drop_index(
+ "ix_evaluation_runs_flags",
+ table_name="evaluation_runs",
+ )
+ op.drop_index(
+ "ix_evaluation_runs_references",
+ table_name="evaluation_runs",
+ )
+
+ op.drop_column("evaluation_runs", "references")
+
+ # --------------------------------------------------------------------------
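+
+# Illustrative note: a jsonb_path_ops GIN index (as created on "references"
+# above) only supports containment, so it accelerates queries such as
+#
+#     SELECT id FROM evaluation_runs
+#     WHERE "references" @> '[{"slug": "my-testset"}]';
+#
+# but not key-existence operators (?, ?|, ?&), which need the default
+# jsonb_ops operator class.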
diff --git a/api/ee/databases/postgres/migrations/core/versions/6161b674688d_add_commit_message_column_to_app_.py b/api/ee/databases/postgres/migrations/core/versions/6161b674688d_add_commit_message_column_to_app_.py
new file mode 100644
index 0000000000..81d1ee6046
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/6161b674688d_add_commit_message_column_to_app_.py
@@ -0,0 +1,39 @@
+"""add commit_message column to app_variants, app_variant_revisions and environments_revisions table
+
+Revision ID: 6161b674688d
+Revises: 2a91436752f9
+Create Date: 2025-03-27 08:23:07.894643
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "6161b674688d"
+down_revision: Union[str, None] = "2a91436752f9"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column(
+ "app_variant_revisions",
+ sa.Column("commit_message", sa.String(length=255), nullable=True),
+ )
+ op.add_column(
+ "environments_revisions",
+ sa.Column("commit_message", sa.String(length=255), nullable=True),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column("environments_revisions", "commit_message")
+ op.drop_column("app_variant_revisions", "commit_message")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/6965776e6940_add_subscriptions.py b/api/ee/databases/postgres/migrations/core/versions/6965776e6940_add_subscriptions.py
new file mode 100644
index 0000000000..b6b76bc89f
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/6965776e6940_add_subscriptions.py
@@ -0,0 +1,40 @@
+"""add subscriptions
+
+Revision ID: 6965776e6940
+Revises: 7cc66fc40298
+Create Date: 2025-01-23 13:42:47.716771
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "6965776e6940"
+down_revision: Union[str, None] = "7cc66fc40298"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "subscriptions",
+ sa.Column("plan", sa.String(), nullable=False),
+ sa.Column("active", sa.Boolean(), nullable=False),
+ sa.Column("organization_id", sa.UUID(), nullable=False),
+ sa.Column("customer_id", sa.String(), nullable=True),
+ sa.Column("subscription_id", sa.String(), nullable=True),
+ sa.Column("anchor", sa.SmallInteger(), nullable=True),
+ sa.PrimaryKeyConstraint("organization_id"),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table("subscriptions")
+ # ### end Alembic commands ###
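+
+# Illustrative note: organization_id is the primary key, so an organization
+# can hold at most one subscription; a sketch of an idempotent upsert against
+# this shape (values are placeholders):
+#
+#     INSERT INTO subscriptions (organization_id, plan, active)
+#     VALUES (:org_id, 'free', true)
+#     ON CONFLICT (organization_id) DO UPDATE SET plan = EXCLUDED.plan;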
diff --git a/api/ee/databases/postgres/migrations/core/versions/6aafdfc2befb_rename_user_organizations_to_organization_members.py b/api/ee/databases/postgres/migrations/core/versions/6aafdfc2befb_rename_user_organizations_to_organization_members.py
new file mode 100644
index 0000000000..02fb6c9eef
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/6aafdfc2befb_rename_user_organizations_to_organization_members.py
@@ -0,0 +1,63 @@
+"""created project_members table and added organization&workspace id to projects table
+
+Revision ID: 6aafdfc2befb
+Revises: 8accbbea1d21
+Create Date: 2024-09-02 15:50:58.870573
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "6aafdfc2befb"
+down_revision: Union[str, None] = "e14e8689cd03"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "organization_members",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["organization_id"],
+ ["organizations.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ connection = op.get_bind()
+ inspector = sa.inspect(connection)
+ if "user_organizations" not in inspector.get_table_names():
+ op.create_table(
+ "user_organizations",
+ sa.Column("id", sa.UUID(), autoincrement=False, nullable=False),
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ sa.Column("organization_id", sa.UUID(), autoincrement=False, nullable=True),
+ sa.ForeignKeyConstraint(
+ ["organization_id"],
+ ["organizations.id"],
+ name="user_organizations_organization_id_fkey",
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"], ["users.id"], name="user_organizations_user_id_fkey"
+ ),
+ sa.PrimaryKeyConstraint("id", name="user_organizations_pkey"),
+ )
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/73a2d8cfaa3c_add_is_demo_flag.py b/api/ee/databases/postgres/migrations/core/versions/73a2d8cfaa3c_add_is_demo_flag.py
new file mode 100644
index 0000000000..94eed007df
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/73a2d8cfaa3c_add_is_demo_flag.py
@@ -0,0 +1,30 @@
+"""add initial demo
+
+Revision ID: 73a2d8cfaa3c
+Revises: 24f8bdb390ee
+Create Date: 2024-12-02 9:00:00
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "73a2d8cfaa3c"
+down_revision: Union[str, None] = "24f8bdb390ee"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # ### custom schema migration ###
+    op.add_column("project_members", sa.Column("is_demo", sa.BOOLEAN(), nullable=True))
+    # ### end of custom schema migration ###
+
+
+def downgrade() -> None:
+    # ### custom schema migration ###
+    op.drop_column("project_members", "is_demo")
+    # ### end of custom schema migration ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/73a2d8cfaa3d_add_initial_demo.py b/api/ee/databases/postgres/migrations/core/versions/73a2d8cfaa3d_add_initial_demo.py
new file mode 100644
index 0000000000..f20dfb0e2d
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/73a2d8cfaa3d_add_initial_demo.py
@@ -0,0 +1,36 @@
+"""add initial demo
+
+Revision ID: 73a2d8cfaa3d
+Revises: 73a2d8cfaa3c
+Create Date: 2024-12-02 9:00:00
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import context
+
+from ee.databases.postgres.migrations.core.data_migrations.demos import (
+ add_users_to_demos,
+ remove_users_from_demos,
+)
+
+# revision identifiers, used by Alembic.
+revision: str = "73a2d8cfaa3d"
+down_revision: Union[str, None] = "73a2d8cfaa3c"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### custom data migrations ###
+    connection = context.get_bind()  # get database connection from alembic context
+ add_users_to_demos(session=connection)
+ # ### end of custom data commands ###
+
+
+def downgrade() -> None:
+ # ### custom data migrations ###
+    connection = context.get_bind()  # get database connection from alembic context
+ remove_users_from_demos(session=connection)
+ # ### end of custom data commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/770d68410ab0_transfer_user_organization_to_.py b/api/ee/databases/postgres/migrations/core/versions/770d68410ab0_transfer_user_organization_to_.py
new file mode 100644
index 0000000000..a69fbc2b6b
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/770d68410ab0_transfer_user_organization_to_.py
@@ -0,0 +1,35 @@
+"""transfer user organization to organization members
+
+Revision ID: 770d68410ab0
+Revises: 79b9acb137a1
+Create Date: 2024-09-08 18:21:27.192472
+
+"""
+
+from typing import Sequence, Union
+from alembic import context
+from alembic import op
+
+
+from ee.databases.postgres.migrations.core.data_migrations.export_records import (
+ transfer_records_from_user_organization_to_organization_members,
+ transfer_records_from_organization_members_to_user_organization,
+)
+
+
+# revision identifiers, used by Alembic.
+revision: str = "770d68410ab0"
+down_revision: Union[str, None] = "79b9acb137a1"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    connection = context.get_bind()  # get database connection from alembic context
+ transfer_records_from_user_organization_to_organization_members(session=connection)
+
+
+def downgrade() -> None:
+    connection = context.get_bind()  # get database connection from alembic context
+ transfer_records_from_organization_members_to_user_organization(session=connection)
+ op.drop_table("organization_members")
diff --git a/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py b/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py
new file mode 100644
index 0000000000..3061a4d230
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/7990f1e12f47_create_free_plans.py
@@ -0,0 +1,360 @@
+"""create free plans
+
+Revision ID: 7990f1e12f47
+Revises: 12f477990f1e
+Create Date: 2025-01-25 16:51:06.233811
+
+"""
+
+from typing import Sequence, Union
+from os import environ
+from datetime import datetime, timezone
+from time import time
+
+from alembic import context
+
+from sqlalchemy import Connection, func, insert, select, update
+
+import stripe
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.models.db_models import UserDB
+from oss.src.models.db_models import AppDB
+from ee.src.models.db_models import OrganizationDB
+from ee.src.models.db_models import OrganizationMemberDB
+from ee.src.models.db_models import ProjectDB
+from ee.src.dbs.postgres.subscriptions.dbes import SubscriptionDBE
+from ee.src.dbs.postgres.meters.dbes import MeterDBE
+from ee.src.core.subscriptions.types import FREE_PLAN
+from ee.src.core.entitlements.types import Gauge
+
+stripe.api_key = environ.get("STRIPE_API_KEY")
+
+log = get_module_logger(__name__)
+
+# revision identifiers, used by Alembic.
+revision: str = "7990f1e12f47"
+down_revision: Union[str, None] = "12f477990f1e"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ try:
+ session: Connection = context.get_bind()
+
+ now = datetime.now(timezone.utc)
+
+ # --> GET ORGANIZATION COUNT
+ query = select(func.count()).select_from(OrganizationDB)
+
+ nof_organizations = session.execute(query).scalar()
+ # <-- GET ORGANIZATION COUNT
+
+ # --> ITERATE OVER ORGANIZATION BATCHES
+ organization_batch_size = 100
+ organization_batch_index = 0
+
+ while True:
+ # --> GET ORGANIZATION BATCH
+ query = (
+ select(OrganizationDB)
+ .limit(organization_batch_size)
+ .offset(organization_batch_index * organization_batch_size)
+ )
+
+ organizations = session.execute(query).all()
+
+ organization_batch_index += 1
+
+ if not organizations:
+ break
+ # <-- GET ORGANIZATION BATCH
+
+ # --> ITERATE OVER ORGANIZATIONS
+ for i, organization in enumerate(organizations):
+ log.info(
+ " %s / %s - %s",
+ (organization_batch_index - 1) * organization_batch_size + i + 1,
+ nof_organizations,
+ organization.id,
+ )
+
+ ti = time()
+
+ # xti = time()
+ # --> GET ORGANIZATION INFO
+ owner = organization.owner
+
+ if not owner:
+ continue
+
+ query = select(UserDB).where(
+ UserDB.id == owner,
+ )
+
+ user = session.execute(query).first()
+
+ if not user:
+ continue
+
+ email = user.email
+
+ if not email:
+ continue
+ # <-- GET ORGANIZATION INFO
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(" - GET ORGANIZATION INFO: %s ms", int(xdt * 1000))
+
+ # xti = time()
+ # --> CHECK IF SUBSCRIPTION EXISTS
+ organization_id = organization.id
+ customer_id = None
+ subscription_id = None
+ plan = FREE_PLAN
+ active = True
+ anchor = now.day
+
+ subscription_exists = (
+ session.execute(
+ select(SubscriptionDBE).where(
+ SubscriptionDBE.organization_id == organization_id,
+ )
+ )
+ .scalars()
+ .first()
+ )
+ # <-- CHECK IF SUBSCRIPTION EXISTS
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(" - CHECK IF SUBSCRIPTION EXISTS: %s ms", int(xdt * 1000))
+
+ # xti = time()
+ # --> CREATE OR UPDATE SUBSCRIPTION
+ if not subscription_exists:
+ query = insert(SubscriptionDBE).values(
+ organization_id=organization_id,
+ subscription_id=subscription_id,
+ customer_id=customer_id,
+ plan=plan.value,
+ active=active,
+ anchor=anchor,
+ )
+
+ session.execute(query)
+ else:
+ query = (
+ update(SubscriptionDBE)
+ .where(
+ SubscriptionDBE.organization_id == organization_id,
+ )
+ .values(
+ subscription_id=subscription_id,
+ customer_id=customer_id,
+ plan=plan.value,
+ active=active,
+ anchor=anchor,
+ )
+ )
+
+ session.execute(query)
+ # <-- CREATE OR UPDATE SUBSCRIPTION
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(" - CREATE OR UPDATE SUBSCRIPTION: %s ms", int(xdt * 1000))
+
+ # xti = time()
+ # --> GET ORGANIZATION MEMBERS
+ query = (
+ select(func.count())
+ .select_from(OrganizationMemberDB)
+ .where(
+ OrganizationMemberDB.organization_id == organization.id,
+ )
+ )
+
+ nof_members = session.execute(query).scalar()
+ # <-- GET ORGANIZATION MEMBERS
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(" - GET ORGANIZATION MEMBERS: %s ms", int(xdt * 1000))
+
+ # xti = time()
+ # --> CHECK IF USERS METER EXISTS
+ key = Gauge.USERS
+ value = nof_members
+ synced = 0
+ # organization_id = organization_id
+ year = 0
+ month = 0
+
+ users_meter_exists = (
+ session.execute(
+ select(MeterDBE).where(
+ MeterDBE.organization_id == organization_id,
+ MeterDBE.key == key,
+ MeterDBE.year == year,
+ MeterDBE.month == month,
+ )
+ )
+ .scalars()
+ .first()
+ )
+ # <-- CHECK IF USERS METER EXISTS
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(" - CHECK IF USERS METER EXISTS: %s ms", int(xdt * 1000))
+
+ # xti = time()
+ # --> CREATE OR UPDATE USERS METER
+ if not users_meter_exists:
+ query = insert(MeterDBE).values(
+ organization_id=organization_id,
+ key=key,
+ year=year,
+ month=month,
+ value=value,
+ synced=synced,
+ )
+
+ session.execute(query)
+ else:
+ query = (
+ update(MeterDBE)
+ .where(
+ MeterDBE.organization_id == organization_id,
+ MeterDBE.key == key,
+ MeterDBE.year == year,
+ MeterDBE.month == month,
+ )
+ .values(
+ value=value,
+ synced=synced,
+ )
+ )
+
+ session.execute(query)
+ # <-- CREATE OR UPDATE USERS METER
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(" - CREATE OR UPDATE USERS METER: %s ms", int(xdt * 1000))
+
+ # xti = time()
+ # --> GET ORGANIZATION PROJECTS
+ query = select(ProjectDB).where(
+ ProjectDB.organization_id == organization_id,
+ )
+
+ projects = session.execute(query).all()
+ # <-- GET ORGANIZATION PROJECTS
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(" - GET ORGANIZATION PROJECTS: %s ms", int(xdt * 1000))
+
+ # xti = time()
+ # --> ITERATE OVER PROJECTS
+ value = 0
+
+ for project in projects:
+ # --> GET PROJECT APPLICATIONS
+ query = select(AppDB).where(
+ AppDB.project_id == project.id,
+ )
+
+ apps = session.execute(query).scalars().all()
+ # <-- GET PROJECT APPLICATIONS
+
+ value += len(apps)
+ # <-- ITERATE OVER PROJECTS
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(" - ITERATE OVER PROJECTS: %s ms", int(xdt * 1000))
+
+ # xti = time()
+ # --> CHECK IF APPLICATIONS METER EXISTS
+ key = Gauge.APPLICATIONS
+ # value = value
+ synced = 0
+ # organization_id = organization_id
+ year = 0
+ month = 0
+
+ applications_meter_exists = (
+ session.execute(
+ select(MeterDBE).where(
+ MeterDBE.organization_id == organization_id,
+ MeterDBE.key == key,
+ MeterDBE.year == year,
+ MeterDBE.month == month,
+ )
+ )
+ .scalars()
+ .first()
+ )
+ # <-- CHECK IF APPLICATIONS METER EXISTS
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(
+ # " - CHECK IF APPLICATIONS METER EXISTS: %s ms", int(xdt * 1000)
+ # )
+
+ # xti = time()
+ # --> CREATE OR UPDATE APPLICATIONS METER
+ if not applications_meter_exists:
+ query = insert(MeterDBE).values(
+ organization_id=organization_id,
+ key=key,
+ year=year,
+ month=month,
+ value=value,
+ synced=synced,
+ )
+
+ session.execute(query)
+ else:
+ query = (
+ update(MeterDBE)
+ .where(
+ MeterDBE.organization_id == organization_id,
+ MeterDBE.key == key,
+ MeterDBE.year == year,
+ MeterDBE.month == month,
+ )
+ .values(
+ value=value,
+ synced=synced,
+ )
+ )
+
+ session.execute(query)
+ # <-- CREATE OR UPDATE APPLICATIONS METER
+ # xtf = time()
+ # xdt = xtf - xti
+ # log.info(
+ # " - CREATE OR UPDATE APPLICATIONS METER: %s ms", int(xdt * 1000)
+ # )
+
+ tf = time()
+ dt = tf - ti
+ log.info(
+ " %s / %s - %s - %s ms",
+ (organization_batch_index - 1) * organization_batch_size + i + 1,
+ nof_organizations,
+ organization.id,
+ int(dt * 1000),
+ )
+ # <-- ITERATE OVER ORGANIZATIONS
+
+ # <-- ITERATE OVER ORGANIZATION BATCHES
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.error("Error during free plans migration: %s", e)
+ session.rollback()
+        raise
+
+ log.info("Free plans migration completed successfully.")
+
+
+def downgrade() -> None:
+ pass
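+
+# Illustrative caveat: the LIMIT/OFFSET batching in upgrade() assumes a stable
+# row order between batches; adding an explicit ORDER BY makes the pagination
+# deterministic, e.g.:
+#
+#     select(OrganizationDB)
+#         .order_by(OrganizationDB.id)
+#         .limit(organization_batch_size)
+#         .offset(organization_batch_index * organization_batch_size)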
diff --git a/api/ee/databases/postgres/migrations/core/versions/79b9acb137a1_transfer_workspace_invitations_to_.py b/api/ee/databases/postgres/migrations/core/versions/79b9acb137a1_transfer_workspace_invitations_to_.py
new file mode 100644
index 0000000000..bade4cb395
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/79b9acb137a1_transfer_workspace_invitations_to_.py
@@ -0,0 +1,37 @@
+"""transfer workspace invitations to project invitations
+
+Revision ID: 79b9acb137a1
+Revises: 9b0e1a740b88
+Create Date: 2024-09-05 17:16:29.480645
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import context
+
+from ee.databases.postgres.migrations.core.data_migrations.invitations import (
+ transfer_invitations_from_old_table_to_new_table,
+ revert_invitations_transfer_from_new_table_to_old_table,
+)
+
+
+# revision identifiers, used by Alembic.
+revision: str = "79b9acb137a1"
+down_revision: Union[str, None] = "9b0e1a740b88"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### custom migration ###
+    connection = context.get_bind()  # get database connection from alembic context
+ transfer_invitations_from_old_table_to_new_table(session=connection)
+ # ### end of custom migration ###
+
+
+def downgrade() -> None:
+ # ### custom migration ###
+    connection = context.get_bind()  # get database connection from alembic context
+ revert_invitations_transfer_from_new_table_to_old_table(session=connection)
+ # ### end of custom migration ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/7cc66fc40298_add_hidden_column_to_app_variants_table.py b/api/ee/databases/postgres/migrations/core/versions/7cc66fc40298_add_hidden_column_to_app_variants_table.py
new file mode 100644
index 0000000000..d45ba53b3c
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/7cc66fc40298_add_hidden_column_to_app_variants_table.py
@@ -0,0 +1,35 @@
+"""add 'hidden' column to app_variants table
+
+Revision ID: 7cc66fc40298
+Revises: 6161b674688d
+Create Date: 2025-03-27 14:40:47.770949
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "7cc66fc40298"
+down_revision: Union[str, None] = "6161b674688d"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column("app_variants", sa.Column("hidden", sa.Boolean(), nullable=True))
+ op.add_column(
+ "app_variant_revisions", sa.Column("hidden", sa.Boolean(), nullable=True)
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column("app_variants", "hidden")
+ op.drop_column("app_variant_revisions", "hidden")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/8089ee7692d1_cleanup_preview_entities.py b/api/ee/databases/postgres/migrations/core/versions/8089ee7692d1_cleanup_preview_entities.py
new file mode 100644
index 0000000000..36e9e4edd4
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/8089ee7692d1_cleanup_preview_entities.py
@@ -0,0 +1,168 @@
+"""clean up preview entities
+
+Revision ID: 8089ee7692d1
+Revises: fa07e07350bf
+Create Date: 2025-08-20 16:00:00.00000000
+
+"""
+
+from typing import Sequence, Union
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "8089ee7692d1"
+down_revision: Union[str, None] = "fa07e07350bf"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+TABLES_WITH_DATA_MIGRATION = [
+ "evaluation_runs",
+ "evaluation_metrics",
+ "evaluation_queues",
+ "testcase_blobs",
+ "testset_revisions",
+ "query_revisions",
+ "workflow_revisions",
+]
+
+TABLES_WITH_META_MIGRATION = [
+ "evaluation_runs",
+ "evaluation_scenarios",
+ "evaluation_steps",
+ "evaluation_metrics",
+ "evaluation_queues",
+ "testcase_blobs",
+ "testset_artifacts",
+ "testset_variants",
+ "testset_revisions",
+ "query_artifacts",
+ "query_variants",
+ "query_revisions",
+ "workflow_artifacts",
+ "workflow_variants",
+ "workflow_revisions",
+]
+
+
+def upgrade() -> None:
+ # Convert jsonb -> json for data columns
+ for table in TABLES_WITH_DATA_MIGRATION:
+ op.alter_column(
+ table_name=table,
+ column_name="data",
+ type_=sa.JSON(),
+ postgresql_using="data::json",
+ )
+
+ # Convert jsonb -> json for meta columns
+ for table in TABLES_WITH_META_MIGRATION:
+ op.alter_column(
+ table_name=table,
+ column_name="meta",
+ type_=sa.JSON(),
+ postgresql_using="meta::json",
+ )
+
+ # Add new timestamp column
+ op.add_column(
+ "evaluation_scenarios",
+ sa.Column(
+ "timestamp",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ )
+
+ # Add repeat_idx and drop old repeat_id + retry_id
+ op.add_column(
+ "evaluation_steps",
+ sa.Column(
+ "repeat_idx",
+ sa.Integer(),
+ nullable=True,
+ ),
+ )
+ op.drop_column(
+ "evaluation_steps",
+ "repeat_id",
+ )
+ op.drop_column(
+ "evaluation_steps",
+ "retry_id",
+ )
+
+ # Rename key -> step_key
+ op.alter_column(
+ "evaluation_steps",
+ "key",
+ new_column_name="step_key",
+ existing_type=sa.String(), # adjust if needed
+ existing_nullable=False,
+ )
+
+ op.drop_column(
+ "evaluation_metrics",
+ "interval",
+ )
+
+
+def downgrade() -> None:
+ op.add_column(
+ "evaluation_metrics",
+ sa.Column(
+ "interval",
+ sa.Integer(),
+ nullable=True,
+ ),
+ )
+
+ # Rename step_key back to key
+ op.alter_column(
+ "evaluation_steps",
+ "step_key",
+ new_column_name="key",
+ existing_type=sa.String(), # adjust if needed
+ existing_nullable=False,
+ )
+
+ # Recreate repeat_id and retry_id columns
+ op.add_column(
+ "evaluation_steps",
+ sa.Column("repeat_id", sa.UUID(), nullable=False),
+ )
+ op.add_column(
+ "evaluation_steps",
+ sa.Column("retry_id", sa.UUID(), nullable=False),
+ )
+
+ # Drop repeat_idx column
+ op.drop_column(
+ "evaluation_steps",
+ "repeat_idx",
+ )
+
+ # Drop timestamp column
+ op.drop_column(
+ "evaluation_scenarios",
+ "timestamp",
+ )
+
+ # Convert meta columns back to jsonb
+ for table in TABLES_WITH_META_MIGRATION:
+ op.alter_column(
+ table_name=table,
+ column_name="meta",
+            type_=postgresql.JSONB(),
+ postgresql_using="meta::jsonb",
+ )
+
+ # Convert data columns back to jsonb
+ for table in TABLES_WITH_DATA_MIGRATION:
+ op.alter_column(
+ table_name=table,
+ column_name="data",
+            type_=postgresql.JSONB(),
+ postgresql_using="data::jsonb",
+ )
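+
+# Illustrative caveat: the downgrade recreates repeat_id / retry_id as
+# NOT NULL without a server_default, which fails on a non-empty
+# evaluation_steps table. A sketch of a safe variant (backfill first):
+#
+#     op.add_column("evaluation_steps", sa.Column("repeat_id", sa.UUID(), nullable=True))
+#     op.execute("UPDATE evaluation_steps SET repeat_id = gen_random_uuid()")
+#     op.alter_column("evaluation_steps", "repeat_id", nullable=False)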
diff --git a/api/ee/databases/postgres/migrations/core/versions/847972cfa14a_add_nodes_dbe.py b/api/ee/databases/postgres/migrations/core/versions/847972cfa14a_add_nodes_dbe.py
new file mode 100644
index 0000000000..239b9fb280
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/847972cfa14a_add_nodes_dbe.py
@@ -0,0 +1,121 @@
+"""add_nodes_dbe
+
+Revision ID: 847972cfa14a
+Revises: 320a4a7ee0c7
+Create Date: 2024-11-07 12:21:19.080345
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "847972cfa14a"
+down_revision: Union[str, None] = "320a4a7ee0c7"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "nodes",
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column("updated_at", sa.TIMESTAMP(timezone=True), nullable=True),
+ sa.Column("updated_by_id", sa.UUID(), nullable=True),
+ sa.Column("root_id", sa.UUID(), nullable=False),
+ sa.Column("tree_id", sa.UUID(), nullable=False),
+ sa.Column("tree_type", sa.Enum("INVOCATION", name="treetype"), nullable=True),
+ sa.Column("node_id", sa.UUID(), nullable=False),
+ sa.Column("node_name", sa.String(), nullable=False),
+ sa.Column(
+ "node_type",
+ sa.Enum(
+ "AGENT",
+ "WORKFLOW",
+ "CHAIN",
+ "TASK",
+ "TOOL",
+ "EMBEDDING",
+ "QUERY",
+ "COMPLETION",
+ "CHAT",
+ "RERANK",
+ name="nodetype",
+ ),
+ nullable=True,
+ ),
+ sa.Column("parent_id", sa.UUID(), nullable=True),
+ sa.Column("time_start", sa.TIMESTAMP(), nullable=False),
+ sa.Column("time_end", sa.TIMESTAMP(), nullable=False),
+ sa.Column(
+ "status",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "metrics",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "refs",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "exception",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "links",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column("content", sa.String(), nullable=True),
+ sa.Column(
+ "otel",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint("project_id", "node_id"),
+ )
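+ # The composite primary key (project_id, node_id) keeps node lookups
+ # scoped to a project. NOTE: the first index below is named
+ # index_project_id_node_id but actually covers (project_id, created_at);
+ # the name is left as-is so that drop_index in downgrade() still matches.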
+ op.create_index(
+ "index_project_id_node_id", "nodes", ["project_id", "created_at"], unique=False
+ )
+ op.create_index(
+ "index_project_id_root_id", "nodes", ["project_id", "root_id"], unique=False
+ )
+ op.create_index(
+ "index_project_id_tree_id", "nodes", ["project_id", "tree_id"], unique=False
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index("index_project_id_tree_id", table_name="nodes")
+ op.drop_index("index_project_id_root_id", table_name="nodes")
+ op.drop_index("index_project_id_node_id", table_name="nodes")
+ op.drop_table("nodes")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/8accbbea1d21_initial_migration.py b/api/ee/databases/postgres/migrations/core/versions/8accbbea1d21_initial_migration.py
new file mode 100644
index 0000000000..d5f43f9f08
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/8accbbea1d21_initial_migration.py
@@ -0,0 +1,1000 @@
+"""initial migration
+
+Revision ID: 8accbbea1d21
+Revises:
+Create Date: 2024-07-27 16:20:33.077302
+
+"""
+
+import os
+from typing import Sequence, Union
+
+from alembic import op
+from alembic import context
+
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+from oss.src.utils.env import env
+from ee.databases.postgres.migrations.core.utils import is_initial_setup
+
+
+# revision identifiers, used by Alembic.
+revision: str = "8accbbea1d21"
+down_revision: Union[str, None] = None
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def unique_constraint_exists(engine: sa.Engine, table_name: str, constraint_name: str) -> bool:
+ """Return True if the given table already carries the named constraint."""
+ with engine.connect() as conn:
+ result = conn.execute(
+ sa.text(
+ """
+ SELECT conname FROM pg_constraint
+ WHERE conname = :constraint_name
+ AND conrelid = CAST(:table_name AS regclass);
+ """
+ ),
+ {"constraint_name": constraint_name, "table_name": table_name},
+ )
+ return result.fetchone() is not None
+
+
+def first_time_user_from_agenta_v019_upwards_upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "api_keys",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("prefix", sa.String(), nullable=True),
+ sa.Column("hashed_key", sa.String(), nullable=True),
+ sa.Column("user_id", sa.String(), nullable=True),
+ sa.Column("workspace_id", sa.String(), nullable=True),
+ sa.Column("rate_limit", sa.Integer(), nullable=True),
+ sa.Column("hidden", sa.Boolean(), nullable=True),
+ sa.Column("expiration_date", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "ids_mapping",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("table_name", sa.String(), nullable=False),
+ sa.Column("objectid", sa.String(), nullable=False),
+ sa.Column("uuid", sa.UUID(), nullable=False),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "invitations",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("token", sa.String(), nullable=False),
+ sa.Column("email", sa.String(), nullable=False),
+ sa.Column("organization_id", sa.String(), nullable=False),
+ sa.Column("used", sa.Boolean(), nullable=True),
+ sa.Column("workspace_id", sa.String(), nullable=False),
+ sa.Column(
+ "workspace_roles", postgresql.JSONB(astext_type=sa.Text()), nullable=True
+ ),
+ sa.Column("expiration_date", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ sa.UniqueConstraint("token"),
+ )
+ op.create_table(
+ "organizations",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("name", sa.String(), nullable=True),
+ sa.Column("description", sa.String(), nullable=True),
+ sa.Column("type", sa.String(), nullable=True),
+ sa.Column("owner", sa.String(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("is_paying", sa.Boolean(), nullable=True),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "templates",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("type", sa.Enum("IMAGE", "ZIP", name="templatetype"), nullable=False),
+ sa.Column("template_uri", sa.String(), nullable=True),
+ sa.Column("tag_id", sa.Integer(), nullable=True),
+ sa.Column("name", sa.String(), nullable=True),
+ sa.Column("repo_name", sa.String(), nullable=True),
+ sa.Column("title", sa.String(), nullable=True),
+ sa.Column("description", sa.String(), nullable=True),
+ sa.Column("size", sa.Integer(), nullable=True),
+ sa.Column("digest", sa.String(), nullable=True),
+ sa.Column("last_pushed", sa.DateTime(timezone=True), nullable=True),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ sa.UniqueConstraint("name"),
+ )
+ op.create_table(
+ "users",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("uid", sa.String(), nullable=True),
+ sa.Column("username", sa.String(), nullable=True),
+ sa.Column("email", sa.String(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("email"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_index(op.f("ix_users_uid"), "users", ["uid"], unique=True)
+ op.create_table(
+ "user_organizations",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["organization_id"],
+ ["organizations.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "workspaces",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("name", sa.String(), nullable=True),
+ sa.Column("type", sa.String(), nullable=True),
+ sa.Column("description", sa.String(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "app_db",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("app_name", sa.String(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "docker_images",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("type", sa.String(), nullable=True),
+ sa.Column("template_uri", sa.String(), nullable=True),
+ sa.Column("docker_id", sa.String(), nullable=True),
+ sa.Column("tags", sa.String(), nullable=True),
+ sa.Column("deletable", sa.Boolean(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_index(
+ op.f("ix_docker_images_docker_id"), "docker_images", ["docker_id"], unique=False
+ )
+ op.create_table(
+ "workspace_members",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("role", sa.String(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"],
+ ["workspaces.id"],
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "deployments",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("app_id", sa.UUID(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("container_name", sa.String(), nullable=True),
+ sa.Column("container_id", sa.String(), nullable=True),
+ sa.Column("uri", sa.String(), nullable=True),
+ sa.Column("status", sa.String(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("cloud_map_service_id", sa.String(), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(["app_id"], ["app_db.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "evaluators_configs",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("app_id", sa.UUID(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("name", sa.String(), nullable=True),
+ sa.Column("evaluator_key", sa.String(), nullable=True),
+ sa.Column(
+ "settings_values", postgresql.JSONB(astext_type=sa.Text()), nullable=True
+ ),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(["app_id"], ["app_db.id"], ondelete="SET NULL"),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "testsets",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("name", sa.String(), nullable=True),
+ sa.Column("app_id", sa.UUID(), nullable=True),
+ sa.Column("csvdata", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(["app_id"], ["app_db.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "bases",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("app_id", sa.UUID(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("base_name", sa.String(), nullable=True),
+ sa.Column("image_id", sa.UUID(), nullable=True),
+ sa.Column("deployment_id", sa.UUID(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(["app_id"], ["app_db.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(
+ ["deployment_id"], ["deployments.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["image_id"], ["docker_images.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "human_evaluations",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("app_id", sa.UUID(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("status", sa.String(), nullable=True),
+ sa.Column("evaluation_type", sa.String(), nullable=True),
+ sa.Column("testset_id", sa.UUID(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(["app_id"], ["app_db.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["testset_id"],
+ ["testsets.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "app_variants",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("app_id", sa.UUID(), nullable=True),
+ sa.Column("variant_name", sa.String(), nullable=True),
+ sa.Column("revision", sa.Integer(), nullable=True),
+ sa.Column("image_id", sa.UUID(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("modified_by_id", sa.UUID(), nullable=True),
+ sa.Column("base_name", sa.String(), nullable=True),
+ sa.Column("base_id", sa.UUID(), nullable=True),
+ sa.Column("config_name", sa.String(), nullable=False),
+ sa.Column(
+ "config_parameters", postgresql.JSONB(astext_type=sa.Text()), nullable=False
+ ),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(["app_id"], ["app_db.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(
+ ["base_id"],
+ ["bases.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["image_id"], ["docker_images.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["modified_by_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "human_evaluations_scenarios",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("evaluation_id", sa.UUID(), nullable=True),
+ sa.Column("inputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.Column("outputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.Column("vote", sa.String(), nullable=True),
+ sa.Column("score", sa.String(), nullable=True),
+ sa.Column("correct_answer", sa.String(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("is_pinned", sa.Boolean(), nullable=True),
+ sa.Column("note", sa.String(), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["evaluation_id"], ["human_evaluations.id"], ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "app_variant_revisions",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("variant_id", sa.UUID(), nullable=True),
+ sa.Column("revision", sa.Integer(), nullable=True),
+ sa.Column("modified_by_id", sa.UUID(), nullable=True),
+ sa.Column("base_id", sa.UUID(), nullable=True),
+ sa.Column("config_name", sa.String(), nullable=False),
+ sa.Column(
+ "config_parameters", postgresql.JSONB(astext_type=sa.Text()), nullable=False
+ ),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["base_id"],
+ ["bases.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["modified_by_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["variant_id"], ["app_variants.id"], ondelete="CASCADE"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "environments",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("app_id", sa.UUID(), nullable=True),
+ sa.Column("name", sa.String(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("revision", sa.Integer(), nullable=True),
+ sa.Column("deployed_app_variant_id", sa.UUID(), nullable=True),
+ sa.Column("deployed_app_variant_revision_id", sa.UUID(), nullable=True),
+ sa.Column("deployment_id", sa.UUID(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(["app_id"], ["app_db.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(
+ ["deployed_app_variant_id"], ["app_variants.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["deployed_app_variant_revision_id"],
+ ["app_variant_revisions.id"],
+ ondelete="SET NULL",
+ ),
+ sa.ForeignKeyConstraint(
+ ["deployment_id"], ["deployments.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "evaluations",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("app_id", sa.UUID(), nullable=True),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("status", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.Column("testset_id", sa.UUID(), nullable=True),
+ sa.Column("variant_id", sa.UUID(), nullable=True),
+ sa.Column("variant_revision_id", sa.UUID(), nullable=True),
+ sa.Column(
+ "average_cost", postgresql.JSONB(astext_type=sa.Text()), nullable=True
+ ),
+ sa.Column("total_cost", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.Column(
+ "average_latency", postgresql.JSONB(astext_type=sa.Text()), nullable=True
+ ),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(["app_id"], ["app_db.id"], ondelete="CASCADE"),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(["testset_id"], ["testsets.id"], ondelete="SET NULL"),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["variant_id"], ["app_variants.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["variant_revision_id"], ["app_variant_revisions.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "human_evaluation_variants",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("human_evaluation_id", sa.UUID(), nullable=True),
+ sa.Column("variant_id", sa.UUID(), nullable=True),
+ sa.Column("variant_revision_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["human_evaluation_id"], ["human_evaluations.id"], ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["variant_id"], ["app_variants.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["variant_revision_id"], ["app_variant_revisions.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "environments_revisions",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("environment_id", sa.UUID(), nullable=True),
+ sa.Column("revision", sa.Integer(), nullable=True),
+ sa.Column("modified_by_id", sa.UUID(), nullable=True),
+ sa.Column("deployed_app_variant_revision_id", sa.UUID(), nullable=True),
+ sa.Column("deployment_id", sa.UUID(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["deployed_app_variant_revision_id"],
+ ["app_variant_revisions.id"],
+ ondelete="SET NULL",
+ ),
+ sa.ForeignKeyConstraint(
+ ["deployment_id"], ["deployments.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["environment_id"], ["environments.id"], ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["modified_by_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "evaluation_aggregated_results",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("evaluation_id", sa.UUID(), nullable=True),
+ sa.Column("evaluator_config_id", sa.UUID(), nullable=True),
+ sa.Column("result", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["evaluation_id"], ["evaluations.id"], ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["evaluator_config_id"], ["evaluators_configs.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "evaluation_evaluator_configs",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("evaluation_id", sa.UUID(), nullable=False),
+ sa.Column("evaluator_config_id", sa.UUID(), nullable=False),
+ sa.ForeignKeyConstraint(
+ ["evaluation_id"], ["evaluations.id"], ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["evaluator_config_id"], ["evaluators_configs.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id", "evaluation_id", "evaluator_config_id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "evaluation_scenarios",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("evaluation_id", sa.UUID(), nullable=True),
+ sa.Column("variant_id", sa.UUID(), nullable=True),
+ sa.Column("inputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.Column("outputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.Column(
+ "correct_answers", postgresql.JSONB(astext_type=sa.Text()), nullable=True
+ ),
+ sa.Column("is_pinned", sa.Boolean(), nullable=True),
+ sa.Column("note", sa.String(), nullable=True),
+ sa.Column("latency", sa.Integer(), nullable=True),
+ sa.Column("cost", sa.Integer(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["evaluation_id"], ["evaluations.id"], ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["variant_id"], ["app_variants.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "evaluation_scenario_results",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("evaluation_scenario_id", sa.UUID(), nullable=True),
+ sa.Column("evaluator_config_id", sa.UUID(), nullable=True),
+ sa.Column("result", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["evaluation_scenario_id"], ["evaluation_scenarios.id"], ondelete="CASCADE"
+ ),
+ sa.ForeignKeyConstraint(
+ ["evaluator_config_id"], ["evaluators_configs.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ # ### end Alembic commands ###
+
+
+def first_time_user_from_agenta_v019_upwards_downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table("evaluation_scenario_results")
+ op.drop_table("evaluation_scenarios")
+ op.drop_table("evaluation_evaluator_configs")
+ op.drop_table("evaluation_aggregated_results")
+ op.drop_table("environments_revisions")
+ op.drop_table("human_evaluation_variants")
+ op.drop_table("evaluations")
+ op.drop_table("environments")
+ op.drop_table("app_variant_revisions")
+ op.drop_table("human_evaluations_scenarios")
+ op.drop_table("app_variants")
+ op.drop_table("human_evaluations")
+ op.drop_table("bases")
+ op.drop_table("testsets")
+ op.drop_table("evaluators_configs")
+ op.drop_table("deployments")
+ op.drop_table("workspace_members")
+ op.drop_index(op.f("ix_docker_images_docker_id"), table_name="docker_images")
+ op.drop_table("docker_images")
+ op.drop_table("app_db")
+ op.drop_table("workspaces")
+ op.drop_table("user_organizations")
+ op.drop_index(op.f("ix_users_uid"), table_name="users")
+ op.drop_table("users")
+ op.drop_table("templates")
+ op.drop_table("organizations")
+ op.drop_table("invitations")
+ op.drop_table("ids_mapping")
+ op.drop_table("api_keys")
+ # ### end Alembic commands ###
+
+
+def returning_user_from_agenta_v018_downwards_upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ engine = sa.create_engine(env.POSTGRES_URI_CORE)
+ if not unique_constraint_exists(engine, "app_db", "app_db_pkey"):
+ op.create_unique_constraint("app_db_pkey", "app_db", ["id"])
+
+ if not unique_constraint_exists(
+ engine, "app_variant_revisions", "app_variant_revisions_pkey"
+ ):
+ op.create_unique_constraint(
+ "app_variant_revisions_pkey", "app_variant_revisions", ["id"]
+ )
+
+ if not unique_constraint_exists(engine, "app_variants", "app_variants_pkey"):
+ op.create_unique_constraint("app_variants_pkey", "app_variants", ["id"])
+
+ if not unique_constraint_exists(engine, "bases", "bases_pkey"):
+ op.create_unique_constraint("bases_pkey", "bases", ["id"])
+
+ if not unique_constraint_exists(engine, "deployments", "deployments_pkey"):
+ op.create_unique_constraint("deployments_pkey", "deployments", ["id"])
+
+ if not unique_constraint_exists(engine, "docker_images", "docker_images_pkey"):
+ op.create_unique_constraint("docker_images_pkey", "docker_images", ["id"])
+
+ if not unique_constraint_exists(engine, "environments", "environments_pkey"):
+ op.create_unique_constraint("environments_pkey", "environments", ["id"])
+
+ if not unique_constraint_exists(
+ engine, "environments_revisions", "environments_revisions_pkey"
+ ):
+ op.create_unique_constraint(
+ "environments_revisions_pkey", "environments_revisions", ["id"]
+ )
+
+ if not unique_constraint_exists(
+ engine, "evaluation_aggregated_results", "evaluation_aggregated_results_pkey"
+ ):
+ op.create_unique_constraint(
+ "evaluation_aggregated_results_pkey",
+ "evaluation_aggregated_results",
+ ["id"],
+ )
+
+ if not unique_constraint_exists(
+ engine, "evaluation_scenario_results", "evaluation_scenario_results_pkey"
+ ):
+ op.create_unique_constraint(
+ "evaluation_scenario_results_pkey", "evaluation_scenario_results", ["id"]
+ )
+
+ if not unique_constraint_exists(
+ engine, "evaluation_scenarios", "evaluation_scenarios_pkey"
+ ):
+ op.create_unique_constraint(
+ "evaluation_scenarios_pkey", "evaluation_scenarios", ["id"]
+ )
+
+ if not unique_constraint_exists(engine, "evaluations", "evaluations_pkey"):
+ op.create_unique_constraint("evaluations_pkey", "evaluations", ["id"])
+
+ if not unique_constraint_exists(
+ engine, "evaluators_configs", "evaluators_configs_pkey"
+ ):
+ op.create_unique_constraint(
+ "evaluators_configs_pkey", "evaluators_configs", ["id"]
+ )
+
+ if not unique_constraint_exists(
+ engine, "human_evaluation_variants", "human_evaluation_variants_pkey"
+ ):
+ op.create_unique_constraint(
+ "human_evaluation_variants_pkey", "human_evaluation_variants", ["id"]
+ )
+
+ if not unique_constraint_exists(
+ engine, "human_evaluations", "human_evaluations_pkey"
+ ):
+ op.create_unique_constraint(
+ "human_evaluations_pkey", "human_evaluations", ["id"]
+ )
+
+ if not unique_constraint_exists(
+ engine, "human_evaluations_scenarios", "human_evaluations_scenarios_pkey"
+ ):
+ op.create_unique_constraint(
+ "human_evaluations_scenarios_pkey", "human_evaluations_scenarios", ["id"]
+ )
+
+ if not unique_constraint_exists(engine, "ids_mapping", "ids_mapping_pkey"):
+ op.create_unique_constraint("ids_mapping_pkey", "ids_mapping", ["id"])
+
+ if not unique_constraint_exists(engine, "templates", "templates_pkey"):
+ op.create_unique_constraint("templates_pkey", "templates", ["id"])
+
+ if not unique_constraint_exists(engine, "testsets", "testsets_pkey"):
+ op.create_unique_constraint("testsets_pkey", "testsets", ["id"])
+
+ if not unique_constraint_exists(engine, "users", "users_pkey"):
+ op.create_unique_constraint("users_pkey", "users", ["id"])
+
+ if not unique_constraint_exists(engine, "api_keys", "api_keys_pkey"):
+ op.create_unique_constraint("api_keys_pkey", "api_keys", ["id"])
+
+ if not unique_constraint_exists(engine, "invitations", "invitations_pkey"):
+ op.create_unique_constraint("invitations_pkey", "invitations", ["id"])
+
+ if not unique_constraint_exists(engine, "organizations", "organizations_pkey"):
+ op.create_unique_constraint("organizations_pkey", "organizations", ["id"])
+
+ if not unique_constraint_exists(
+ engine, "user_organizations", "user_organizations_pkey"
+ ):
+ op.create_unique_constraint(
+ "user_organizations_pkey", "user_organizations", ["id"]
+ )
+
+ if not unique_constraint_exists(
+ engine, "workspace_members", "workspace_members_pkey"
+ ):
+ op.create_unique_constraint(
+ "workspace_members_pkey", "workspace_members", ["id"]
+ )
+
+ if not unique_constraint_exists(engine, "workspaces", "workspaces_pkey"):
+ op.create_unique_constraint("workspaces_pkey", "workspaces", ["id"])
+
+ # ### end Alembic commands ###
+
+
+def returning_user_from_agenta_v018_downwards_downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ engine = sa.create_engine(env.POSTGRES_URI_CORE)
+ if unique_constraint_exists(engine, "users", "users_pkey"):
+ op.drop_constraint("users_pkey", "users", type_="unique")
+
+ if unique_constraint_exists(engine, "testsets", "testsets_pkey"):
+ op.drop_constraint("testsets_pkey", "testsets", type_="unique")
+
+ if unique_constraint_exists(engine, "templates", "templates_pkey"):
+ op.drop_constraint("templates_pkey", "templates", type_="unique")
+
+ if unique_constraint_exists(engine, "ids_mapping", "ids_mapping_pkey"):
+ op.drop_constraint("ids_mapping_pkey", "ids_mapping", type_="unique")
+
+ if unique_constraint_exists(
+ engine, "human_evaluations_scenarios", "human_evaluations_scenarios_pkey"
+ ):
+ op.drop_constraint(
+ "human_evaluations_scenarios_pkey",
+ "human_evaluations_scenarios",
+ type_="unique",
+ )
+
+ if unique_constraint_exists(engine, "human_evaluations", "human_evaluations_pkey"):
+ op.drop_constraint(
+ "human_evaluations_pkey", "human_evaluations", type_="unique"
+ )
+
+ if unique_constraint_exists(
+ engine, "human_evaluation_variants", "human_evaluation_variants_pkey"
+ ):
+ op.drop_constraint(
+ "human_evaluation_variants_pkey",
+ "human_evaluation_variants",
+ type_="unique",
+ )
+
+ if unique_constraint_exists(
+ engine, "evaluators_configs", "evaluators_configs_pkey"
+ ):
+ op.drop_constraint(
+ "evaluators_configs_pkey", "evaluators_configs", type_="unique"
+ )
+
+ if unique_constraint_exists(engine, "evaluations", "evaluations_pkey"):
+ op.drop_constraint("evaluations_pkey", "evaluations", type_="unique")
+
+ if unique_constraint_exists(
+ engine, "evaluation_scenarios", "evaluation_scenarios_pkey"
+ ):
+ op.drop_constraint(
+ "evaluation_scenarios_pkey", "evaluation_scenarios", type_="unique"
+ )
+
+ if unique_constraint_exists(
+ engine, "evaluation_scenario_results", "evaluation_scenario_results_pkey"
+ ):
+ op.drop_constraint(
+ "evaluation_scenario_results_pkey",
+ "evaluation_scenario_results",
+ type_="unique",
+ )
+
+ if unique_constraint_exists(
+ engine, "evaluation_aggregated_results", "evaluation_aggregated_results_pkey"
+ ):
+ op.drop_constraint(
+ "evaluation_aggregated_results_pkey",
+ "evaluation_aggregated_results",
+ type_="unique",
+ )
+
+ if unique_constraint_exists(
+ engine, "environments_revisions", "environments_revisions_pkey"
+ ):
+ op.drop_constraint(
+ "environments_revisions_pkey", "environments_revisions", type_="unique"
+ )
+
+ if unique_constraint_exists(engine, "environments", "environments_pkey"):
+ op.drop_constraint("environments_pkey", "environments", type_="unique")
+
+ if unique_constraint_exists(engine, "docker_images", "docker_images_pkey"):
+ op.drop_constraint("docker_images_pkey", "docker_images", type_="unique")
+
+ if unique_constraint_exists(engine, "deployments", "deployments_pkey"):
+ op.drop_constraint("deployments_pkey", "deployments", type_="unique")
+
+ if unique_constraint_exists(engine, "bases", "bases_pkey"):
+ op.drop_constraint("bases_pkey", "bases", type_="unique")
+
+ if unique_constraint_exists(engine, "app_variants", "app_variants_pkey"):
+ op.drop_constraint("app_variants_pkey", "app_variants", type_="unique")
+
+ if unique_constraint_exists(
+ engine, "app_variant_revisions", "app_variant_revisions_pkey"
+ ):
+ op.drop_constraint(
+ "app_variant_revisions_pkey", "app_variant_revisions", type_="unique"
+ )
+
+ if unique_constraint_exists(engine, "app_db", "app_db_pkey"):
+ op.drop_constraint("app_db_pkey", "app_db", type_="unique")
+
+ if unique_constraint_exists(engine, "workspaces", "workspaces_pkey"):
+ op.drop_constraint("workspaces_pkey", "workspaces", type_="unique")
+
+ if unique_constraint_exists(engine, "workspace_members", "workspace_members_pkey"):
+ op.drop_constraint(
+ "workspace_members_pkey", "workspace_members", type_="unique"
+ )
+
+ if unique_constraint_exists(
+ engine, "user_organizations", "user_organizations_pkey"
+ ):
+ op.drop_constraint(
+ "user_organizations_pkey", "user_organizations", type_="unique"
+ )
+
+ if unique_constraint_exists(engine, "organizations", "organizations_pkey"):
+ op.drop_constraint("organizations_pkey", "organizations", type_="unique")
+
+ if unique_constraint_exists(engine, "invitations", "invitations_pkey"):
+ op.drop_constraint("invitations_pkey", "invitations", type_="unique")
+
+ if unique_constraint_exists(engine, "api_keys", "api_keys_pkey"):
+ op.drop_constraint("api_keys_pkey", "api_keys", type_="unique")
+ # ### end Alembic commands ###
+
+
+def upgrade() -> None:
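+ # Two paths: a fresh database (first-time install, Agenta v0.19+) gets the
+ # full schema, while a database carried over from v0.18 or earlier only
+ # needs its unique constraints backfilled. is_initial_setup() inspects the
+ # connected database to pick the right one.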
+ engine = sa.create_engine(context.config.get_main_option("sqlalchemy.url"))
+ if is_initial_setup(engine=engine):
+ first_time_user_from_agenta_v019_upwards_upgrade()
+ else:
+ returning_user_from_agenta_v018_downwards_upgrade()
+
+
+def downgrade() -> None:
+ engine = sa.create_engine(context.config.get_main_option("sqlalchemy.url"))
+ if is_initial_setup(engine=engine):
+ first_time_user_from_agenta_v019_upwards_downgrade()
+ else:
+ returning_user_from_agenta_v018_downwards_downgrade()
diff --git a/api/ee/databases/postgres/migrations/core/versions/91d3b4a8c27f_fix_ag_config.py b/api/ee/databases/postgres/migrations/core/versions/91d3b4a8c27f_fix_ag_config.py
new file mode 100644
index 0000000000..1baa0b36fe
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/91d3b4a8c27f_fix_ag_config.py
@@ -0,0 +1,61 @@
+"""Fix ag_config
+
+Revision ID: 91d3b4a8c27f
+Revises: 7990f1e12f47
+Create Date: 2025-04-24 11:00:00
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+from sqlalchemy import text
+
+
+revision: str = "91d3b4a8c27f"
+down_revision: Union[str, None] = "7990f1e12f47"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade():
+ batch_size = 100
+
+ conn = op.get_bind()
+
+ while True:
+ # Update config_parameters in app_variant_revisions table
+ result = conn.execute(
+ text(
+ f"""
+ WITH updated AS (
+ UPDATE app_variant_revisions
+ SET config_parameters = config_parameters->'ag_config'
+ WHERE id IN (
+ SELECT id
+ FROM app_variant_revisions
+ WHERE config_parameters ? 'ag_config'
+ LIMIT {batch_size}
+ )
+ RETURNING id
+ )
+ SELECT COUNT(*) FROM updated;
+ """
+ )
+ )
+ count = result.scalar()
+ if count == 0:
+ break
+
+ # Clear the config_parameters column in app_variants table (runs once,
+ # after the batched loop above has finished)
+ conn.execute(
+ text(
+ """
+ UPDATE app_variants
+ SET config_parameters = '{}'::jsonb
+ """
+ )
+ )
+
+
+def downgrade():
+ pass
diff --git a/api/ee/databases/postgres/migrations/core/versions/9698355c7649_add_tables_for_workflows.py b/api/ee/databases/postgres/migrations/core/versions/9698355c7649_add_tables_for_workflows.py
new file mode 100644
index 0000000000..506fe0a1cb
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/9698355c7649_add_tables_for_workflows.py
@@ -0,0 +1,388 @@
+"""add tables for workflows (artifacts, variants, & revisions)
+
+Revision ID: 9698355c7649
+Revises: 91d3b4a8c27f
+Create Date: 2025-04-24 07:27:45.801481
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "9698355c7649"
+down_revision: Union[str, None] = "91d3b4a8c27f"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # - ARTIFACTS --------------------------------------------------------------
+
+ op.create_table(
+ "workflow_artifacts",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "metadata",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_workflow_artifacts_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+ # - VARIANTS ---------------------------------------------------------------
+
+ op.create_table(
+ "workflow_variants",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "artifact_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "metadata",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "artifact_id"],
+ ["workflow_artifacts.project_id", "workflow_artifacts.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_workflow_variants_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ sa.Index(
+ "ix_workflow_variants_project_id_artifact_id",
+ "project_id",
+ "artifact_id",
+ ),
+ )
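+ # The composite foreign key (project_id, artifact_id) ->
+ # workflow_artifacts(project_id, id) ensures a variant can only point at
+ # an artifact inside its own project; revisions below repeat the pattern
+ # for both their artifact and their variant.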
+
+ # --------------------------------------------------------------------------
+
+ # - REVISIONS --------------------------------------------------------------
+
+ op.create_table(
+ "workflow_revisions",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "artifact_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "variant_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=False,
+ ),
+ sa.Column(
+ "version",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "metadata",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "name",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "description",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "message",
+ sa.String(),
+ nullable=True,
+ ),
+ sa.Column(
+ "author",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "date",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.UniqueConstraint(
+ "project_id",
+ "slug",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "artifact_id"],
+ ["workflow_artifacts.project_id", "workflow_artifacts.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "variant_id"],
+ ["workflow_variants.project_id", "workflow_variants.id"],
+ ondelete="CASCADE",
+ ),
+ sa.Index(
+ "ix_workflow_revisions_project_id_slug",
+ "project_id",
+ "slug",
+ ),
+ sa.Index(
+ "ix_workflow_revisions_project_id_artifact_id",
+ "project_id",
+ "artifact_id",
+ ),
+ sa.Index(
+ "ix_workflow_revisions_project_id_variant_id",
+ "project_id",
+ "variant_id",
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # - REVISIONS --------------------------------------------------------------
+
+ op.drop_table("workflow_revisions")
+
+ # --------------------------------------------------------------------------
+
+ # - VARIANTS ---------------------------------------------------------------
+
+ op.drop_table("workflow_variants")
+
+ # --------------------------------------------------------------------------
+
+ # - ARTIFACTS --------------------------------------------------------------
+
+ op.drop_table("workflow_artifacts")
+
+ # --------------------------------------------------------------------------
diff --git a/api/ee/databases/postgres/migrations/core/versions/9698355c7650_rename_metadata_to_meta.py b/api/ee/databases/postgres/migrations/core/versions/9698355c7650_rename_metadata_to_meta.py
new file mode 100644
index 0000000000..d0870f8288
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/9698355c7650_rename_metadata_to_meta.py
@@ -0,0 +1,51 @@
+"""rename metadata to meta
+
+Revision ID: 9698355c7650
+Revises: 0698355c7642
+Create Date: 2025-05-21 07:27:45.801481
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "9698355c7650"
+down_revision: Union[str, None] = "0698355c7642"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
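+ # Plain ALTER TABLE ... RENAME COLUMN is used rather than op.alter_column();
+ # in PostgreSQL a column rename is a catalog-only change, so these
+ # statements are cheap even on large tables.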
+ # - WORKFLOWS --------------------------------------------------------------
+
+ op.execute("ALTER TABLE workflow_artifacts RENAME COLUMN metadata TO meta")
+ op.execute("ALTER TABLE workflow_variants RENAME COLUMN metadata TO meta")
+ op.execute("ALTER TABLE workflow_revisions RENAME COLUMN metadata TO meta")
+
+ # - TESTSETS ---------------------------------------------------------------
+
+ op.execute("ALTER TABLE testset_artifacts RENAME COLUMN metadata TO meta")
+ op.execute("ALTER TABLE testset_variants RENAME COLUMN metadata TO meta")
+ op.execute("ALTER TABLE testset_revisions RENAME COLUMN metadata TO meta")
+
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # - WORKFLOWS --------------------------------------------------------------
+
+ op.execute("ALTER TABLE workflow_artifacts RENAME COLUMN meta TO metadata")
+ op.execute("ALTER TABLE workflow_variants RENAME COLUMN meta TO metadata")
+ op.execute("ALTER TABLE workflow_revisions RENAME COLUMN meta TO metadata")
+
+ # - TESTSETS ---------------------------------------------------------------
+
+ op.execute("ALTER TABLE testset_artifacts RENAME COLUMN meta TO metadata")
+ op.execute("ALTER TABLE testset_variants RENAME COLUMN meta TO metadata")
+ op.execute("ALTER TABLE testset_revisions RENAME COLUMN meta TO metadata")
+
+ # --------------------------------------------------------------------------
diff --git a/api/ee/databases/postgres/migrations/core/versions/9b0e1a740b88_create_project_invitations_table.py b/api/ee/databases/postgres/migrations/core/versions/9b0e1a740b88_create_project_invitations_table.py
new file mode 100644
index 0000000000..d265d52a12
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/9b0e1a740b88_create_project_invitations_table.py
@@ -0,0 +1,60 @@
+"""create project_invitations table
+
+Revision ID: 9b0e1a740b88
+Revises: 1c2d3e4f5a6b
+Create Date: 2024-09-05 16:08:04.440845
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "9b0e1a740b88"
+down_revision: Union[str, None] = "1c2d3e4f5a6b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # Get the current connection
+ connection = op.get_bind()
+ inspector = sa.inspect(connection)
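+ # Existence check makes the migration idempotent: creation is skipped if
+ # the table is already present (e.g. created by an earlier deployment).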
+ if "project_invitations" not in inspector.get_table_names():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "project_invitations",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("token", sa.String(), nullable=False),
+ sa.Column("email", sa.String(), nullable=False),
+ sa.Column("used", sa.Boolean(), nullable=True),
+ sa.Column("role", sa.String(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("project_id", sa.UUID(), nullable=True),
+ sa.Column("expiration_date", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ sa.UniqueConstraint("token"),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ connection = op.get_bind()
+ inspector = sa.inspect(connection)
+ if "project_invitations" in inspector.get_table_names():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table("project_invitations")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/aa1b2c3d4e5f_migrate_config_parameters_jsonb_to_json.py b/api/ee/databases/postgres/migrations/core/versions/aa1b2c3d4e5f_migrate_config_parameters_jsonb_to_json.py
new file mode 100644
index 0000000000..e0da80ce6b
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/aa1b2c3d4e5f_migrate_config_parameters_jsonb_to_json.py
@@ -0,0 +1,132 @@
+"""Migrate config_parameters from JSONB to JSON
+
+Revision ID: aa1b2c3d4e5f
+Revises: d5d4d6bf738f
+Create Date: 2025-07-11 12:00:00
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+
+# revision identifiers, used by Alembic.
+revision: str = "aa1b2c3d4e5f"
+down_revision: Union[str, None] = "d5d4d6bf738f"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade():
+ """
+ Migrate config_parameters from JSONB to JSON type to preserve key ordering.
+ This involves:
+ 1. Creating new JSON columns
+ 2. Copying data from JSONB to JSON
+ 3. Dropping old JSONB columns
+ 4. Renaming new columns to original names
+ """
+
+ # Step 1: Add new JSON columns with temporary names
+ op.add_column(
+ "app_variants",
+ sa.Column("config_parameters_json_temp", sa.JSON(), nullable=True),
+ )
+
+ op.add_column(
+ "app_variant_revisions",
+ sa.Column("config_parameters_json_temp", sa.JSON(), nullable=True),
+ )
+
+ # Step 2: Copy data from JSONB to JSON columns
+ # For app_variants table
+ op.execute(
+ """
+ UPDATE app_variants
+ SET config_parameters_json_temp = config_parameters::json
+ """
+ )
+
+ # For app_variant_revisions table
+ op.execute(
+ """
+ UPDATE app_variant_revisions
+ SET config_parameters_json_temp = config_parameters::json
+ """
+ )
+
+ # Step 3: Drop the old JSONB columns
+ op.drop_column("app_variants", "config_parameters")
+ op.drop_column("app_variant_revisions", "config_parameters")
+
+ # Step 4: Rename the new JSON columns to the original names
+ op.alter_column(
+ "app_variants",
+ "config_parameters_json_temp",
+ new_column_name="config_parameters",
+ nullable=False,
+ server_default="{}",
+ )
+
+ op.alter_column(
+ "app_variant_revisions",
+ "config_parameters_json_temp",
+ new_column_name="config_parameters",
+ nullable=False,
+ )
+
+
+def downgrade():
+ """
+ Migrate config_parameters from JSON back to JSONB type.
+ """
+
+ # Step 1: Add new JSONB columns with temporary names
+ op.add_column(
+ "app_variants",
+ sa.Column("config_parameters_jsonb_temp", postgresql.JSONB(), nullable=True),
+ )
+
+ op.add_column(
+ "app_variant_revisions",
+ sa.Column("config_parameters_jsonb_temp", postgresql.JSONB(), nullable=True),
+ )
+
+ # Step 2: Copy data from JSON to JSONB columns
+ # For app_variants table
+ op.execute(
+ """
+ UPDATE app_variants
+ SET config_parameters_jsonb_temp = config_parameters::jsonb
+ """
+ )
+
+ # For app_variant_revisions table
+ op.execute(
+ """
+ UPDATE app_variant_revisions
+ SET config_parameters_jsonb_temp = config_parameters::jsonb
+ """
+ )
+
+ # Step 3: Drop the old JSON columns
+ op.drop_column("app_variants", "config_parameters")
+ op.drop_column("app_variant_revisions", "config_parameters")
+
+ # Step 4: Rename the new JSONB columns to the original names
+ op.alter_column(
+ "app_variants",
+ "config_parameters_jsonb_temp",
+ new_column_name="config_parameters",
+ nullable=False,
+ )
+
+ op.alter_column(
+ "app_variant_revisions",
+ "config_parameters_jsonb_temp",
+ new_column_name="config_parameters",
+ nullable=False,
+ )
diff --git a/api/ee/databases/postgres/migrations/core/versions/ad0987a77380_update_evaluators_names_with_app_name_.py b/api/ee/databases/postgres/migrations/core/versions/ad0987a77380_update_evaluators_names_with_app_name_.py
new file mode 100644
index 0000000000..42f949782b
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/ad0987a77380_update_evaluators_names_with_app_name_.py
@@ -0,0 +1,35 @@
+"""Update evaluators names with app name as prefix
+
+Revision ID: ad0987a77380
+Revises: 770d68410ab0
+Create Date: 2024-09-17 06:32:38.238473
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import context
+
+from ee.databases.postgres.migrations.core.data_migrations.applications import (
+ update_evaluators_with_app_name,
+)
+
+
+# revision identifiers, used by Alembic.
+revision: str = "ad0987a77380"
+down_revision: Union[str, None] = "770d68410ab0"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### custom command ###
+ connection = context.get_bind() # get database connection from alembic context
+ update_evaluators_with_app_name(session=connection)
+ # ### end custom command ###
+
+
+def downgrade() -> None:
+ # ### custom command ###
+ pass
+ # ### end custom command ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/b3f15a7140ab_add_version_to_eval_entities.py b/api/ee/databases/postgres/migrations/core/versions/b3f15a7140ab_add_version_to_eval_entities.py
new file mode 100644
index 0000000000..f6a9d6a9af
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/b3f15a7140ab_add_version_to_eval_entities.py
@@ -0,0 +1,107 @@
+"""Add version to evaluation entities
+
+Revision ID: b3f15a7140ab
+Revises: 5a71b3f140ab
+Create Date: 2025-10-03 14:30:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+revision: str = "b3f15a7140ab"
+down_revision: Union[str, None] = "5a71b3f140ab"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # BASED ON
+ # version = Column(
+ # String,
+ # nullable=True,
+ # )
+
+ # EVALUATION RUNS ----------------------------------------------------------
+
+ op.add_column(
+ "evaluation_runs",
+ sa.Column(
+ "version",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+
+ # EVALUATION SCENARIOS -----------------------------------------------------
+
+ op.add_column(
+ "evaluation_scenarios",
+ sa.Column(
+ "version",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+
+ # EVALUATION RESULTS -------------------------------------------------------
+
+ op.add_column(
+ "evaluation_results",
+ sa.Column(
+ "version",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+
+ # EVALUATION METRICS -------------------------------------------------------
+
+ op.add_column(
+ "evaluation_metrics",
+ sa.Column(
+ "version",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+
+ # EVALUATION QUEUES --------------------------------------------------------
+
+ op.add_column(
+ "evaluation_queues",
+ sa.Column(
+ "version",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # EVALUATION QUEUES --------------------------------------------------------
+
+ op.drop_column("evaluation_queues", "version")
+
+ # EVALUATION METRICS -------------------------------------------------------
+
+ op.drop_column("evaluation_metrics", "version")
+
+ # EVALUATION RESULTS -------------------------------------------------------
+
+ op.drop_column("evaluation_results", "version")
+
+ # EVALUATION SCENARIOS -----------------------------------------------------
+
+ op.drop_column("evaluation_scenarios", "version")
+
+ # EVALUATION RUNS ----------------------------------------------------------
+
+ op.drop_column("evaluation_runs", "version")
+
+ # --------------------------------------------------------------------------
diff --git a/api/ee/databases/postgres/migrations/core/versions/b3f6bff547d4_remove_app_id_from_evaluators_configs.py b/api/ee/databases/postgres/migrations/core/versions/b3f6bff547d4_remove_app_id_from_evaluators_configs.py
new file mode 100644
index 0000000000..647857d32d
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/b3f6bff547d4_remove_app_id_from_evaluators_configs.py
@@ -0,0 +1,38 @@
+"""repair remaining malformed evaluation/evaluator data
+
+Revision ID: b3f6bff547d4
+Revises: 4d9a58ff8f98
+Create Date: 2024-10-10 21:56:26.901827
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "b3f6bff547d4"
+down_revision: Union[str, None] = "4d9a58ff8f98"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ connection = op.get_bind()
+ inspector = sa.inspect(connection)
+ if "evaluators_configs" not in inspector.get_table_names():
+ # Check if app_id exists in the evaluators_configs table
+ columns = [
+ column["name"] for column in inspector.get_columns("evaluators_configs")
+ ]
+ if "app_id" in columns:
+ op.drop_column("evaluators_configs", "app_id")
+
+
+def downgrade() -> None:
+ op.add_column(
+ "evaluators_configs",
+ sa.Column("app_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
diff --git a/api/ee/databases/postgres/migrations/core/versions/d0b8e05ca190_scope_project_id_to_db_models_entities.py b/api/ee/databases/postgres/migrations/core/versions/d0b8e05ca190_scope_project_id_to_db_models_entities.py
new file mode 100644
index 0000000000..c204c1dd65
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/d0b8e05ca190_scope_project_id_to_db_models_entities.py
@@ -0,0 +1,348 @@
+"""scope project_id to db models/entities
+
+Revision ID: d0b8e05ca190
+Revises: 154098b1e56c
+Create Date: 2024-09-17 07:11:16.704972
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+from oss.src.utils.env import env
+from ee.databases.postgres.migrations.core import utils
+
+
+# revision identifiers, used by Alembic.
+revision: str = "d0b8e05ca190"
+down_revision: Union[str, None] = "154098b1e56c"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ engine = sa.create_engine(env.POSTGRES_URI_CORE)
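+
+    # Pattern applied per table below: add a nullable project_id, repoint the
+    # foreign keys from users (and app_db where present) to projects with
+    # ON DELETE CASCADE, then drop the old user_id/app_id columns.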
+ op.add_column("app_db", sa.Column("project_id", sa.UUID(), nullable=True))
+ op.drop_constraint("app_db_user_id_fkey", "app_db", type_="foreignkey")
+ op.create_foreign_key(
+ "app_db_projects_fkey",
+ "app_db",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("app_db", "user_id")
+ op.add_column(
+ "app_variant_revisions", sa.Column("project_id", sa.UUID(), nullable=True)
+ )
+ op.create_foreign_key(
+ "app_variant_revisions_projects_fkey",
+ "app_variant_revisions",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.add_column("app_variants", sa.Column("project_id", sa.UUID(), nullable=True))
+ op.drop_constraint("app_variants_user_id_fkey", "app_variants", type_="foreignkey")
+ op.create_foreign_key(
+ "app_variants_projects_fkey",
+ "app_variants",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("app_variants", "user_id")
+ op.add_column("bases", sa.Column("project_id", sa.UUID(), nullable=True))
+ op.drop_constraint("bases_app_id_fkey", "bases", type_="foreignkey")
+ op.drop_constraint("bases_user_id_fkey", "bases", type_="foreignkey")
+ op.create_foreign_key(
+ "bases_projects_fkey",
+ "bases",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("bases", "user_id")
+ op.add_column("deployments", sa.Column("project_id", sa.UUID(), nullable=True))
+ op.drop_constraint("deployments_user_id_fkey", "deployments", type_="foreignkey")
+ op.create_foreign_key(
+ "deployments_projects_fkey",
+ "deployments",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("deployments", "user_id")
+ op.add_column("docker_images", sa.Column("project_id", sa.UUID(), nullable=True))
+ op.drop_constraint(
+ "docker_images_user_id_fkey", "docker_images", type_="foreignkey"
+ )
+ op.create_foreign_key(
+ "docker_images_projects_fkey",
+ "docker_images",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("docker_images", "user_id")
+ op.add_column("environments", sa.Column("project_id", sa.UUID(), nullable=True))
+ op.drop_constraint("environments_user_id_fkey", "environments", type_="foreignkey")
+ op.create_foreign_key(
+ "environments_projects_fkey",
+ "environments",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("environments", "user_id")
+ op.add_column(
+ "environments_revisions", sa.Column("project_id", sa.UUID(), nullable=True)
+ )
+ op.create_foreign_key(
+ "environments_revisions_projects_fkey",
+ "environments_revisions",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.add_column(
+ "evaluation_scenarios", sa.Column("project_id", sa.UUID(), nullable=True)
+ )
+ op.drop_constraint(
+ "evaluation_scenarios_user_id_fkey", "evaluation_scenarios", type_="foreignkey"
+ )
+ op.create_foreign_key(
+ "evaluation_scenarios_projects_fkey",
+ "evaluation_scenarios",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("evaluation_scenarios", "user_id")
+ op.add_column("evaluations", sa.Column("project_id", sa.UUID(), nullable=True))
+ op.drop_constraint("evaluations_user_id_fkey", "evaluations", type_="foreignkey")
+ op.create_foreign_key(
+ "evaluations_projects_fkey",
+ "evaluations",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("evaluations", "user_id")
+ op.add_column(
+ "evaluators_configs", sa.Column("project_id", sa.UUID(), nullable=True)
+ )
+ op.drop_constraint(
+ "evaluators_configs_user_id_fkey", "evaluators_configs", type_="foreignkey"
+ )
+ op.drop_constraint(
+ "evaluators_configs_app_id_fkey", "evaluators_configs", type_="foreignkey"
+ )
+ op.create_foreign_key(
+ "evaluators_configs_projects_fkey",
+ "evaluators_configs",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("evaluators_configs", "user_id")
+ op.add_column(
+ "human_evaluations", sa.Column("project_id", sa.UUID(), nullable=True)
+ )
+ op.drop_constraint(
+ "human_evaluations_user_id_fkey", "human_evaluations", type_="foreignkey"
+ )
+ op.create_foreign_key(
+ "human_evaluations_projects_fkey",
+ "human_evaluations",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("human_evaluations", "user_id")
+ op.add_column(
+ "human_evaluations_scenarios", sa.Column("project_id", sa.UUID(), nullable=True)
+ )
+ op.drop_constraint(
+ "human_evaluations_scenarios_user_id_fkey",
+ "human_evaluations_scenarios",
+ type_="foreignkey",
+ )
+ op.create_foreign_key(
+ "human_evaluations_scenarios_projects_fkey",
+ "human_evaluations_scenarios",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("human_evaluations_scenarios", "user_id")
+ op.alter_column("projects", "is_default", existing_type=sa.BOOLEAN(), nullable=True)
+ op.add_column("testsets", sa.Column("project_id", sa.UUID(), nullable=True))
+    # Only drop the old constraints when they are still present
+    if utils.unique_constraint_exists(engine, "testsets", "testsets_user_id_fkey"):
+ op.drop_constraint("testsets_user_id_fkey", "testsets", type_="foreignkey")
+ op.drop_constraint("testsets_app_id_fkey", "testsets", type_="foreignkey")
+
+ op.create_foreign_key(
+ "testsets_projects_fkey",
+ "testsets",
+ "projects",
+ ["project_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.drop_column("testsets", "app_id")
+ op.drop_column("testsets", "user_id")
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column(
+ "testsets", sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True)
+ )
+ op.add_column(
+ "testsets", sa.Column("app_id", sa.UUID(), autoincrement=False, nullable=True)
+ )
+ op.create_foreign_key(
+ "testsets_app_id_fkey",
+ "testsets",
+ "app_db",
+ ["app_id"],
+ ["id"],
+ ondelete="CASCADE",
+ )
+ op.create_foreign_key(
+ "testsets_user_id_fkey", "testsets", "users", ["user_id"], ["id"]
+ )
+ op.drop_column("testsets", "project_id")
+ op.alter_column(
+ "projects", "is_default", existing_type=sa.BOOLEAN(), nullable=False
+ )
+ op.add_column(
+ "human_evaluations_scenarios",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "human_evaluations_scenarios_user_id_fkey",
+ "human_evaluations_scenarios",
+ "users",
+ ["user_id"],
+ ["id"],
+ )
+ op.drop_column("human_evaluations_scenarios", "project_id")
+ op.add_column(
+ "human_evaluations",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "human_evaluations_user_id_fkey",
+ "human_evaluations",
+ "users",
+ ["user_id"],
+ ["id"],
+ )
+ op.drop_column("human_evaluations", "project_id")
+ op.add_column(
+ "evaluators_configs",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "evaluators_configs_app_id_fkey",
+ "evaluators_configs",
+ "app_db",
+ ["app_id"],
+ ["id"],
+ ondelete="SET NULL",
+ )
+ op.create_foreign_key(
+ "evaluators_configs_user_id_fkey",
+ "evaluators_configs",
+ "users",
+ ["user_id"],
+ ["id"],
+ )
+ op.drop_column("evaluators_configs", "project_id")
+ op.add_column(
+ "evaluations",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "evaluations_user_id_fkey", "evaluations", "users", ["user_id"], ["id"]
+ )
+ op.drop_column("evaluations", "project_id")
+ op.add_column(
+ "evaluation_scenarios",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "evaluation_scenarios_user_id_fkey",
+ "evaluation_scenarios",
+ "users",
+ ["user_id"],
+ ["id"],
+ )
+ op.drop_column("evaluation_scenarios", "project_id")
+ op.drop_column("environments_revisions", "project_id")
+ op.add_column(
+ "environments",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "environments_user_id_fkey", "environments", "users", ["user_id"], ["id"]
+ )
+ op.drop_column("environments", "project_id")
+ op.add_column(
+ "docker_images",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "docker_images_user_id_fkey", "docker_images", "users", ["user_id"], ["id"]
+ )
+ op.drop_column("docker_images", "project_id")
+ op.add_column(
+ "deployments",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "deployments_user_id_fkey", "deployments", "users", ["user_id"], ["id"]
+ )
+ op.drop_column("deployments", "project_id")
+ op.add_column(
+ "bases", sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True)
+ )
+ op.create_foreign_key("bases_user_id_fkey", "bases", "users", ["user_id"], ["id"])
+ op.create_foreign_key(
+ "bases_app_id_fkey", "bases", "app_db", ["app_id"], ["id"], ondelete="CASCADE"
+ )
+ op.drop_column("bases", "project_id")
+ op.add_column(
+ "app_variants",
+ sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True),
+ )
+ op.create_foreign_key(
+ "app_variants_user_id_fkey", "app_variants", "users", ["user_id"], ["id"]
+ )
+ op.drop_column("app_variants", "project_id")
+ op.drop_column("app_variant_revisions", "project_id")
+ op.add_column(
+ "app_db", sa.Column("user_id", sa.UUID(), autoincrement=False, nullable=True)
+ )
+ op.create_foreign_key("app_db_user_id_fkey", "app_db", "users", ["user_id"], ["id"])
+ op.drop_column("app_db", "project_id")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/d5d4d6bf738f_add_evaluation_queues.py b/api/ee/databases/postgres/migrations/core/versions/d5d4d6bf738f_add_evaluation_queues.py
new file mode 100644
index 0000000000..6d39d973aa
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/d5d4d6bf738f_add_evaluation_queues.py
@@ -0,0 +1,116 @@
+"""add evaluation queues
+
+Revision ID: d5d4d6bf738f
+Revises: fd77265d65dc
+Create Date: 2025-07-10 17:04:00.000000
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "d5d4d6bf738f"
+down_revision: Union[str, None] = "fd77265d65dc"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "evaluation_queues",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "run_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "id",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["project_id", "run_id"],
+ ["evaluation_runs.project_id", "evaluation_runs.id"],
+ ondelete="CASCADE",
+ ),
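+        # The composite FK above ties (project_id, run_id) to evaluation_runs,
+        # so a queue can never reference a run from another project.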
+ sa.Index(
+ "ix_evaluation_queues_project_id",
+ "project_id",
+ ),
+ sa.Index(
+ "ix_evaluation_queues_run_id",
+ "run_id",
+ ),
+ )
+
+
+def downgrade() -> None:
+ op.drop_table("evaluation_queues")
diff --git a/api/ee/databases/postgres/migrations/core/versions/e14e8689cd03_created_project_members_table_and_added_.py b/api/ee/databases/postgres/migrations/core/versions/e14e8689cd03_created_project_members_table_and_added_.py
new file mode 100644
index 0000000000..a1eebc1154
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/e14e8689cd03_created_project_members_table_and_added_.py
@@ -0,0 +1,68 @@
+"""created project_members table and added organization&workspace id to projects table
+
+Revision ID: e14e8689cd03
+Revises: e9fa2135f3fb
+Create Date: 2024-09-02 15:50:58.870573
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "e14e8689cd03"
+down_revision: Union[str, None] = "e9fa2135f3fb"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "projects",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("project_name", sa.String(), nullable=False),
+ sa.Column("is_default", sa.Boolean(), nullable=False),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("workspace_id", sa.UUID(), nullable=True),
+ sa.Column("organization_id", sa.UUID(), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["organization_id"], ["organizations.id"], ondelete="SET NULL"
+ ),
+ sa.ForeignKeyConstraint(
+ ["workspace_id"], ["workspaces.id"], ondelete="SET NULL"
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ op.create_table(
+ "project_members",
+ sa.Column("id", sa.UUID(), nullable=False),
+ sa.Column("user_id", sa.UUID(), nullable=True),
+ sa.Column("project_id", sa.UUID(), nullable=True),
+ sa.Column("role", sa.String(), nullable=True),
+ sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
+ sa.ForeignKeyConstraint(
+ ["project_id"],
+ ["projects.id"],
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ sa.UniqueConstraint("id"),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table("project_members")
+ op.drop_table("projects")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/e9fa2135f3fb_add_modified_by_id_column_to_apps_db_.py b/api/ee/databases/postgres/migrations/core/versions/e9fa2135f3fb_add_modified_by_id_column_to_apps_db_.py
new file mode 100644
index 0000000000..cf9c02f606
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/e9fa2135f3fb_add_modified_by_id_column_to_apps_db_.py
@@ -0,0 +1,31 @@
+"""add modified_by_id column to apps_db table
+
+Revision ID: e9fa2135f3fb
+Revises: 8accbbea1d21
+Create Date: 2024-09-03 20:51:51.856509
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "e9fa2135f3fb"
+down_revision: Union[str, None] = "8accbbea1d21"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column("app_db", sa.Column("modified_by_id", sa.UUID(), nullable=True))
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column("app_db", "modified_by_id")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/core/versions/fa07e07350bf_add_timestamp_to_metrics.py b/api/ee/databases/postgres/migrations/core/versions/fa07e07350bf_add_timestamp_to_metrics.py
new file mode 100644
index 0000000000..c6d85c7467
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/fa07e07350bf_add_timestamp_to_metrics.py
@@ -0,0 +1,34 @@
+"""add timestamp to metrics
+
+Revision ID: fa07e07350bf
+Revises: 30dcf07de96a
+Create Date: 2025-07-30 14:55:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "fa07e07350bf"
+down_revision: Union[str, None] = "30dcf07de96a"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.add_column(
+ "evaluation_metrics",
+ sa.Column("timestamp", sa.TIMESTAMP(timezone=True), nullable=True),
+ )
+ op.add_column(
+ "evaluation_metrics",
+ sa.Column("interval", sa.INTEGER(), nullable=True),
+ )
+
+
+def downgrade() -> None:
+ op.drop_column("evaluation_metrics", "interval")
+ op.drop_column("evaluation_metrics", "timestamp")
diff --git a/api/ee/databases/postgres/migrations/core/versions/fd77265d65dc_fix_preview_entities.py b/api/ee/databases/postgres/migrations/core/versions/fd77265d65dc_fix_preview_entities.py
new file mode 100644
index 0000000000..0e4666cc84
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/core/versions/fd77265d65dc_fix_preview_entities.py
@@ -0,0 +1,232 @@
+"""fix previw entities
+
+Revision ID: fd77265d65dc
+Revises: 54e81e9eed88
+Create Date: 2025-05-29 16:30:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "fd77265d65dc"
+down_revision: Union[str, None] = "54e81e9eed88"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # - WORKFLOWS --------------------------------------------------------------
+
+ op.add_column(
+ "workflow_artifacts",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "workflow_variants",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "workflow_revisions",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+
+ # - TESTSETS ---------------------------------------------------------------
+
+ op.add_column(
+ "testset_artifacts",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "testset_variants",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "testset_revisions",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+
+ # - TESTCASES --------------------------------------------------------------
+
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "flags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.drop_column("testcase_blobs", "slug")
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ )
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ )
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ )
+
+ # - EVALUATIONS ------------------------------------------------------------
+
+ op.add_column(
+ "evaluation_runs",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "evaluation_scenarios",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "evaluation_steps",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "evaluation_metrics",
+ sa.Column(
+ "tags",
+ postgresql.JSONB(none_as_null=True),
+ nullable=True,
+ ),
+ )
+
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # - WORKFLOWS --------------------------------------------------------------
+
+ op.drop_column("workflow_artifacts", "tags")
+ op.drop_column("workflow_variants", "tags")
+ op.drop_column("workflow_revisions", "tags")
+
+ # - TESTSETS ---------------------------------------------------------------
+
+ op.drop_column("testset_artifacts", "tags")
+ op.drop_column("testset_variants", "tags")
+ op.drop_column("testset_revisions", "tags")
+
+ # - TESTCASES --------------------------------------------------------------
+
+ op.drop_column("testcase_blobs", "flags")
+ op.drop_column("testcase_blobs", "tags")
+ op.drop_column("testcase_blobs", "meta")
+ op.add_column(
+ "testcase_blobs",
+ sa.Column(
+ "slug",
+ sa.String(),
+ nullable=True,
+ ),
+ )
+ op.drop_column("testcase_blobs", "created_at")
+ op.drop_column("testcase_blobs", "updated_at")
+ op.drop_column("testcase_blobs", "deleted_at")
+ op.drop_column("testcase_blobs", "created_by_id")
+ op.drop_column("testcase_blobs", "updated_by_id")
+ op.drop_column("testcase_blobs", "deleted_by_id")
+
+ # - EVALUATIONS ------------------------------------------------------------
+
+ op.drop_column("evaluation_runs", "tags")
+ op.drop_column("evaluation_scenarios", "tags")
+ op.drop_column("evaluation_steps", "tags")
+ op.drop_column("evaluation_metrics", "tags")
+
+ # --------------------------------------------------------------------------
diff --git a/api/ee/databases/postgres/migrations/find_head.py b/api/ee/databases/postgres/migrations/find_head.py
new file mode 100644
index 0000000000..c435c485a9
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/find_head.py
@@ -0,0 +1,48 @@
+import os
+import re
+import sys
+from typing import Dict, Set, Union
+
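+# Usage: python find_head.py <database>, e.g. "core" or "tracing", run from
+# the migrations/ directory so ./<database>/versions/ resolves correctly.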
+database = sys.argv[1]
+
+MIGRATIONS_DIR = f"./{database}/versions/"
+
+revision_pattern = re.compile(r'revision\s*:\s*str\s*=\s*"([a-f0-9]+)"')
+down_revision_pattern = re.compile(
+ r'down_revision\s*:\s*Union\[str,\s*None\]\s*=\s*(?:"([^"]+)"|None)'
+)
+
+revisions: Dict[str, Union[str, None]] = {}
+all_down_revisions: Set[str] = set()
+
+for filename in os.listdir(MIGRATIONS_DIR):
+ if not filename.endswith(".py"):
+ continue
+
+ print("---------")
+ print("file:", filename)
+
+ with open(os.path.join(MIGRATIONS_DIR, filename), encoding="utf-8") as f:
+ content = f.read()
+ revision_match = revision_pattern.search(content)
+ down_revision_match = down_revision_pattern.search(content)
+
+ print("revision:", revision_match)
+ print("down_revision:", down_revision_match)
+ if revision_match:
+ revision = revision_match.group(1)
+ down_revision = (
+ down_revision_match.group(1) if down_revision_match else None
+ )
+ if down_revision in ("None", ""):
+ down_revision = None
+ revisions[revision] = down_revision
+ if down_revision:
+ all_down_revisions.add(down_revision)
+
+# head(s) = revisions that are not anyone's down_revision
+heads = [rev for rev in revisions if rev not in all_down_revisions]
+
+print("---------")
+print()
+print("Heads:", heads)
diff --git a/api/ee/databases/postgres/migrations/runner.py b/api/ee/databases/postgres/migrations/runner.py
new file mode 100644
index 0000000000..14baed1924
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/runner.py
@@ -0,0 +1,21 @@
+import asyncio
+
+from ee.databases.postgres.migrations.utils import (
+ split_core_and_tracing,
+ copy_nodes_from_core_to_tracing,
+)
+from ee.databases.postgres.migrations.core.utils import (
+ run_alembic_migration as migrate_core,
+)
+from ee.databases.postgres.migrations.tracing.utils import (
+ run_alembic_migration as migrate_tracing,
+)
+
+
+if __name__ == "__main__":
+ loop = asyncio.get_event_loop()
+
+ loop.run_until_complete(split_core_and_tracing())
+ migrate_core()
+ migrate_tracing()
+ loop.run_until_complete(copy_nodes_from_core_to_tracing())
diff --git a/api/ee/databases/postgres/migrations/tracing/README copy.md b/api/ee/databases/postgres/migrations/tracing/README copy.md
new file mode 100644
index 0000000000..8d8552e3c3
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/tracing/README copy.md
@@ -0,0 +1,35 @@
+# Migrations with Alembic
+
+Generic single-database configuration with an async dbapi.
+
+## Autogenerate Migrations
+
+One of Alembic's key features is its ability to auto-generate migration scripts. By analyzing the current database state and comparing it with the application's table metadata, Alembic can automatically generate the necessary migration scripts using the `--autogenerate` flag in the alembic revision command.
+
+Note that autogenerate sometimes does not detect all database changes and it is always necessary to manually review (and correct if needed) the candidate migrations that autogenerate produces.
+
+### Making migrations
+
+To make migrations after creating a new table schema or modifying a current column in a table, run the following commands:
+
+```bash
+docker exec -e PYTHONPATH=/app -w /app/ee/databases/postgres/migrations/core agenta-ee-dev-api-1 alembic -c alembic.ini revision --autogenerate -m "migration message"
+```
+
+The above command creates a script containing the changes made to the database schema (see the example script below). Replace "migration message" with a message that clearly indicates what changed. Some examples:
+
+- added username column in users table
+- renamed template_uri to template_repository_uri
+- etc
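+
+For reference, an autogenerated revision script follows the `script.py.mako` template. A minimal sketch of what one looks like (the revision IDs below are illustrative, not real revisions):
+
+```python
+"""added username column in users table
+
+Revision ID: 1a2b3c4d5e6f
+Revises: 0f9e8d7c6b5a
+Create Date: 2024-09-17 06:32:38.000000
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "1a2b3c4d5e6f"
+down_revision: Union[str, None] = "0f9e8d7c6b5a"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.add_column("users", sa.Column("username", sa.String(), nullable=True))
+
+
+def downgrade() -> None:
+    op.drop_column("users", "username")
+```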
+
+### Applying Migrations
+
+```bash
+docker exec -e PYTHONPATH=/app -w /app/ee/databases/postgres/migrations/core agenta-ee-dev-api-1 alembic -c alembic.ini upgrade head
+```
+
+The above command applies the changes from the generated scripts to the database table(s). If you'd like to revert the most recent migration, run the following command:
+
+```bash
+docker exec -e PYTHONPATH=/app -w /app/ee/databases/postgres/migrations/core agenta-ee-dev-api-1 alembic -c alembic.ini downgrade -1
+```
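+
+Note that `downgrade -1` steps back a single revision; to roll back to a specific point, pass a revision ID from `alembic history` instead.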
diff --git a/api/ee/databases/postgres/migrations/tracing/__init__.py b/api/ee/databases/postgres/migrations/tracing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/databases/postgres/migrations/tracing/alembic.ini b/api/ee/databases/postgres/migrations/tracing/alembic.ini
new file mode 100644
index 0000000000..046889088d
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/tracing/alembic.ini
@@ -0,0 +1,114 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+script_location = /app/ee/databases/postgres/migrations/tracing
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires the python>=3.9 or backports.zoneinfo library.
+# Any required deps can installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the
+# "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to migrations/versions. When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts. See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
\ No newline at end of file
diff --git a/api/ee/databases/postgres/migrations/tracing/env.py b/api/ee/databases/postgres/migrations/tracing/env.py
new file mode 100644
index 0000000000..9376d4486d
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/tracing/env.py
@@ -0,0 +1,100 @@
+import asyncio
+from logging.config import fileConfig
+
+from sqlalchemy import pool
+from sqlalchemy.engine import Connection
+from sqlalchemy.ext.asyncio import async_engine_from_config
+
+from alembic import context
+
+from oss.src.dbs.postgres.shared.engine import engine
+
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+config.set_main_option("sqlalchemy.url", engine.postgres_uri_tracing) # type: ignore
+
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+if config.config_file_name is not None:
+ fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+from oss.src.dbs.postgres.shared.base import Base
+
+import oss.src.dbs.postgres.tracing.dbes  # noqa: F401  (registers tracing DBEs on Base.metadata)
+
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = Base.metadata
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+ """Run migrations in 'offline' mode.
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
+ Calls to context.execute() here emit the given string to the
+ script output.
+ """
+ url = config.get_main_option("sqlalchemy.url")
+ context.configure(
+ url=url,
+ transaction_per_migration=True,
+ target_metadata=target_metadata,
+ literal_binds=True,
+ dialect_opts={"paramstyle": "named"},
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def do_run_migrations(connection: Connection) -> None:
+ context.configure(
+ transaction_per_migration=True,
+ connection=connection,
+ target_metadata=target_metadata,
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+async def run_async_migrations() -> None:
+ """In this scenario we need to create an Engine
+ and associate a connection with the context.
+ """
+
+ connectable = async_engine_from_config(
+ config.get_section(config.config_ini_section, {}),
+ prefix="sqlalchemy.",
+ poolclass=pool.NullPool,
+ )
+
+ async with connectable.connect() as connection:
+ await connection.run_sync(do_run_migrations)
+
+ await connectable.dispose()
+
+
+def run_migrations_online() -> None:
+ """Run migrations in 'online' mode."""
+
+ asyncio.run(run_async_migrations())
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/api/ee/databases/postgres/migrations/tracing/script.py.mako b/api/ee/databases/postgres/migrations/tracing/script.py.mako
new file mode 100644
index 0000000000..fbc4b07dce
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/tracing/script.py.mako
@@ -0,0 +1,26 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+ ${downgrades if downgrades else "pass"}
diff --git a/api/ee/databases/postgres/migrations/tracing/utils.py b/api/ee/databases/postgres/migrations/tracing/utils.py
new file mode 100644
index 0000000000..15f3e66b5f
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/tracing/utils.py
@@ -0,0 +1,188 @@
+import asyncio
+import logging
+import traceback
+
+import click
+import asyncpg
+from alembic import command
+from alembic.config import Config
+from alembic.script import ScriptDirectory
+from sqlalchemy import Engine, inspect, text
+from sqlalchemy.exc import ProgrammingError
+from sqlalchemy.ext.asyncio import create_async_engine, AsyncEngine
+
+from oss.src.utils.env import env
+
+
+# Initialize logger
+logger = logging.getLogger("alembic.env")
+
+# Initialize alembic config
+alembic_cfg = Config(env.ALEMBIC_CFG_PATH_TRACING)
+script = ScriptDirectory.from_config(alembic_cfg)
+
+logger.info("license: ee")
+logger.info("migrations: tracing")
+logger.info("ALEMBIC_CFG_PATH_TRACING: %s", env.ALEMBIC_CFG_PATH_TRACING)
+logger.info("alembic_cfg: %s", alembic_cfg)
+logger.info("script: %s", script)
+
+
+def is_initial_setup(engine) -> bool:
+ """
+ Check if the database is in its initial state by verifying the existence of required tables.
+
+ This function inspects the current state of the database and determines if it needs initial setup by checking for the presence of a predefined set of required tables.
+
+ Args:
+ engine (sqlalchemy.engine.base.Engine): The SQLAlchemy engine used to connect to the database.
+
+ Returns:
+ bool: True if the database is in its initial state (i.e., not all required tables exist), False otherwise.
+ """
+
+ inspector = inspect(engine)
+ required_tables = ["spans"]
+ existing_tables = inspector.get_table_names()
+
+ # Check if all required tables exist in the database
+ all_tables_exist = all(table in existing_tables for table in required_tables)
+
+ return not all_tables_exist
+
+
+async def get_current_migration_head_from_db(engine: AsyncEngine):
+ """
+ Checks the alembic_version table to get the current migration head that has been applied.
+
+ Args:
+        engine (AsyncEngine): Async engine connected to the target database.
+
+    Returns:
+        the revision currently stored in the alembic_version table
+ """
+
+ async with engine.connect() as connection:
+ try:
+ result = await connection.execute(text("SELECT version_num FROM alembic_version")) # type: ignore
+ except (asyncpg.exceptions.UndefinedTableError, ProgrammingError):
+            # If the alembic_version table does not exist, the query raises an
+            # UndefinedTableError. Suppress it and return the sentinel value
+            # "alembic_version" so the caller knows a pending migration must run
+            # before Alembic starts tracking changes. This happens for both
+            # first-time and returning users.
+            return "alembic_version"
+
+ migration_heads = [row[0] for row in result.fetchall()]
+ assert (
+ len(migration_heads) == 1
+ ), "There can only be one migration head stored in the database."
+ return migration_heads[0]
+
+
+async def get_pending_migration_head():
+ """
+    Gets the migration heads that have not yet been applied.
+
+    Returns:
+        a list of pending migration heads (empty when everything is up to date)
+ """
+
+ engine = create_async_engine(url=env.POSTGRES_URI_TRACING)
+ try:
+ current_migration_script_head = script.get_current_head()
+ migration_head_from_db = await get_current_migration_head_from_db(engine=engine)
+
+ pending_migration_head = []
+ if current_migration_script_head != migration_head_from_db:
+ pending_migration_head.append(current_migration_script_head)
+ if "alembic_version" == migration_head_from_db:
+ pending_migration_head.append("alembic_version")
+ finally:
+ await engine.dispose()
+
+ return pending_migration_head
+
+
+def run_alembic_migration():
+ """
+ Applies migration for first-time users and also checks the environment variable "AGENTA_AUTO_MIGRATIONS" to determine whether to apply migrations for returning users.
+ """
+
+ try:
+ pending_migration_head = asyncio.run(get_pending_migration_head())
+        FIRST_TIME_USER = "alembic_version" in pending_migration_head
+
+ if FIRST_TIME_USER or env.AGENTA_AUTO_MIGRATIONS:
+ command.upgrade(alembic_cfg, "head")
+ click.echo(
+ click.style(
+ "\nMigration applied successfully. The container will now exit.",
+ fg="green",
+ ),
+ color=True,
+ )
+ else:
+ click.echo(
+ click.style(
+ "\nAll migrations are up-to-date. The container will now exit.",
+ fg="yellow",
+ ),
+ color=True,
+ )
+ except Exception as e:
+ click.echo(
+ click.style(
+ f"\nAn ERROR occurred while applying migration: {traceback.format_exc()}\nThe container will now exit.",
+ fg="red",
+ ),
+ color=True,
+ )
+ raise e
+
+
+async def check_for_new_migrations():
+ """
+    Checks for new migrations and notifies the user.
+ """
+
+ pending_migration_head = await get_pending_migration_head()
+ if len(pending_migration_head) >= 1 and isinstance(pending_migration_head[0], str):
+ click.echo(
+ click.style(
+ f"\nWe have detected that there are pending database migrations {pending_migration_head} that need to be applied to keep the application up to date. To ensure the application functions correctly with the latest updates, please follow the guide here => https://docs.agenta.ai/self-host/migration/applying-schema-migration\n",
+ fg="yellow",
+ ),
+ color=True,
+ )
+ return
+
+
+def unique_constraint_exists(
+ engine: Engine, table_name: str, constraint_name: str
+) -> bool:
+ """
+ The function checks if a unique constraint with a specific name exists on a table in a PostgreSQL
+ database.
+
+ Args:
+ - engine (Engine): instance of a database engine that represents a connection to a database.
+ - table_name (str): name of the table to check the existence of the unique constraint.
+ - constraint_name (str): name of the unique constraint to check for existence.
+
+ Returns:
+ - returns a boolean value indicating whether a unique constraint with the specified `constraint_name` exists in the table.
+ """
+
+    with engine.connect() as conn:
+        # Use bound parameters instead of f-string interpolation to avoid
+        # SQL injection via table/constraint names.
+        result = conn.execute(
+            text(
+                """
+                SELECT conname FROM pg_constraint
+                WHERE conname = :constraint_name
+                  AND conrelid = CAST(:table_name AS regclass);
+                """
+            ),
+            {"constraint_name": constraint_name, "table_name": table_name},
+        )
+        return result.fetchone() is not None
diff --git a/api/ee/databases/postgres/migrations/tracing/versions/58b1b61e5d6c_add_spans.py b/api/ee/databases/postgres/migrations/tracing/versions/58b1b61e5d6c_add_spans.py
new file mode 100644
index 0000000000..d0b32e0008
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/tracing/versions/58b1b61e5d6c_add_spans.py
@@ -0,0 +1,202 @@
+"""Add Spans v2
+
+Revision ID: 58b1b61e5d6c
+Revises:
+Create Date: 2025-03-28 12:22:05.104488
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "58b1b61e5d6c"
+down_revision: Union[str, None] = None
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "spans",
+ sa.Column(
+ "project_id",
+ sa.UUID(),
+ # sa.ForeignKey("projects.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.TIMESTAMP(timezone=True),
+ server_onupdate=sa.text("CURRENT_TIMESTAMP"),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_at",
+ sa.TIMESTAMP(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ "created_by_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "deleted_by_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "trace_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "span_id",
+ sa.UUID(),
+ nullable=False,
+ ),
+ sa.Column(
+ "parent_id",
+ sa.UUID(),
+ nullable=True,
+ ),
+ sa.Column(
+ "span_kind",
+ sa.Enum(
+ "SPAN_KIND_UNSPECIFIED",
+ "SPAN_KIND_INTERNAL",
+ "SPAN_KIND_SERVER",
+ "SPAN_KIND_CLIENT",
+ "SPAN_KIND_PRODUCER",
+ "SPAN_KIND_CONSUMER",
+ name="otelspankind",
+ ),
+ nullable=False,
+ ),
+ sa.Column(
+ "span_name",
+ sa.VARCHAR(),
+ nullable=False,
+ ),
+ sa.Column(
+ "start_time",
+ sa.TIMESTAMP(timezone=True),
+ nullable=False,
+ ),
+ sa.Column(
+ "end_time",
+ sa.TIMESTAMP(timezone=True),
+ nullable=False,
+ ),
+ sa.Column(
+ "status_code",
+ sa.Enum(
+ "STATUS_CODE_UNSET",
+ "STATUS_CODE_OK",
+ "STATUS_CODE_ERROR",
+ name="otelstatuscode",
+ ),
+ nullable=False,
+ ),
+ sa.Column(
+ "status_message",
+ sa.VARCHAR(),
+ nullable=True,
+ ),
+ sa.Column(
+ "attributes",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "events",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "links",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "references",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ # sa.Column(
+ # "content",
+ # sa.VARCHAR(),
+ # nullable=True,
+ # ),
+ sa.PrimaryKeyConstraint(
+ "project_id",
+ "trace_id",
+ "span_id",
+ ),
+ sa.Index(
+ "ix_project_id_trace_id",
+ "project_id",
+ "trace_id",
+ ),
+ sa.Index(
+ "ix_project_id_span_id",
+ "project_id",
+ "span_id",
+ ),
+ sa.Index(
+ "ix_project_id_start_time",
+ "project_id",
+ "start_time",
+ ),
+ sa.Index(
+ "ix_project_id",
+ "project_id",
+ ),
+ sa.Index(
+ "ix_attributes_gin",
+ "attributes",
+ postgresql_using="gin",
+ ),
+ sa.Index(
+ "ix_events_gin",
+ "events",
+ postgresql_using="gin",
+ ),
+ sa.Index(
+ "ix_links_gin",
+ "links",
+ postgresql_using="gin",
+ ),
+ sa.Index(
+ "ix_references_gin",
+ "references",
+ postgresql_using="gin",
+ ),
+ )
+
+
+def downgrade() -> None:
+ op.drop_index("ix_references_gin", table_name="spans")
+ op.drop_index("ix_links_gin", table_name="spans")
+ op.drop_index("ix_events_gin", table_name="spans")
+ op.drop_index("ix_attributes_gin", table_name="spans")
+ op.drop_index("ix_project_id", table_name="spans")
+ op.drop_index("ix_project_id_start_time", table_name="spans")
+ op.drop_index("ix_project_id_span_id", table_name="spans")
+ op.drop_index("ix_project_id_trace_id", table_name="spans")
+ op.drop_table("spans")
diff --git a/api/ee/databases/postgres/migrations/tracing/versions/847972cfa14a_add_nodes.py b/api/ee/databases/postgres/migrations/tracing/versions/847972cfa14a_add_nodes.py
new file mode 100644
index 0000000000..4b6903973b
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/tracing/versions/847972cfa14a_add_nodes.py
@@ -0,0 +1,121 @@
+"""add_nodes_dbe
+
+Revision ID: 847972cfa14a
+Revises: 58b1b61e5d6c
+Create Date: 2024-11-07 12:21:19.080345
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "847972cfa14a"
+down_revision: Union[str, None] = "58b1b61e5d6c"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "nodes",
+ sa.Column("project_id", sa.UUID(), nullable=False),
+ sa.Column(
+ "created_at",
+ sa.TIMESTAMP(timezone=True),
+ server_default=sa.text("CURRENT_TIMESTAMP"),
+ nullable=False,
+ ),
+ sa.Column("updated_at", sa.TIMESTAMP(timezone=True), nullable=True),
+ sa.Column("updated_by_id", sa.UUID(), nullable=True),
+ sa.Column("root_id", sa.UUID(), nullable=False),
+ sa.Column("tree_id", sa.UUID(), nullable=False),
+ sa.Column("tree_type", sa.Enum("INVOCATION", name="treetype"), nullable=True),
+ sa.Column("node_id", sa.UUID(), nullable=False),
+ sa.Column("node_name", sa.String(), nullable=False),
+ sa.Column(
+ "node_type",
+ sa.Enum(
+ "AGENT",
+ "WORKFLOW",
+ "CHAIN",
+ "TASK",
+ "TOOL",
+ "EMBEDDING",
+ "QUERY",
+ "COMPLETION",
+ "CHAT",
+ "RERANK",
+ name="nodetype",
+ ),
+ nullable=True,
+ ),
+ sa.Column("parent_id", sa.UUID(), nullable=True),
+ sa.Column("time_start", sa.TIMESTAMP(), nullable=False),
+ sa.Column("time_end", sa.TIMESTAMP(), nullable=False),
+ sa.Column(
+ "status",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "data",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "metrics",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "meta",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "refs",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "exception",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column(
+ "links",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.Column("content", sa.String(), nullable=True),
+ sa.Column(
+ "otel",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ sa.PrimaryKeyConstraint("project_id", "node_id"),
+ )
+ op.create_index(
+ "index_project_id_node_id", "nodes", ["project_id", "created_at"], unique=False
+ )
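+    # NB: despite its name, index_project_id_node_id covers (project_id, created_at).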
+ op.create_index(
+ "index_project_id_root_id", "nodes", ["project_id", "root_id"], unique=False
+ )
+ op.create_index(
+ "index_project_id_tree_id", "nodes", ["project_id", "tree_id"], unique=False
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index("index_project_id_tree_id", table_name="nodes")
+ op.drop_index("index_project_id_root_id", table_name="nodes")
+ op.drop_index("index_project_id_node_id", table_name="nodes")
+ op.drop_table("nodes")
+ # ### end Alembic commands ###
diff --git a/api/ee/databases/postgres/migrations/tracing/versions/fd77265d65dc_fix_spans.py b/api/ee/databases/postgres/migrations/tracing/versions/fd77265d65dc_fix_spans.py
new file mode 100644
index 0000000000..6cb4e3f963
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/tracing/versions/fd77265d65dc_fix_spans.py
@@ -0,0 +1,202 @@
+"""fix spans
+
+Revision ID: fd77265d65dc
+Revises: 847972cfa14a
+Create Date: 2025-05-29 16:30:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+from oss.src.core.tracing.dtos import SpanType, TraceType
+
+# revision identifiers, used by Alembic.
+revision: str = "fd77265d65dc"
+down_revision: Union[str, None] = "847972cfa14a"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # - SPANS ------------------------------------------------------------------
+ trace_type_enum = sa.Enum(TraceType, name="tracetype")
+ span_type_enum = sa.Enum(SpanType, name="spantype")
+
+ trace_type_enum.create(op.get_bind(), checkfirst=True)
+ span_type_enum.create(op.get_bind(), checkfirst=True)
+
+ op.add_column(
+ "spans",
+ sa.Column(
+ "trace_type",
+ trace_type_enum,
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "spans",
+ sa.Column(
+ "span_type",
+ span_type_enum,
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "spans",
+ sa.Column(
+ "hashes",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+ op.add_column(
+ "spans",
+ sa.Column(
+ "exception",
+ postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
+ nullable=True,
+ ),
+ )
+ op.create_index(
+ "ix_spans_project_id_trace_type",
+ "spans",
+ ["project_id", "trace_type"],
+ if_not_exists=True,
+ )
+ op.create_index(
+ "ix_spans_project_id_span_type",
+ "spans",
+ ["project_id", "span_type"],
+ if_not_exists=True,
+ )
+ op.create_index(
+ "ix_spans_project_id_trace_id_created_at",
+ "spans",
+ ["project_id", "trace_id", sa.text("created_at DESC")],
+ if_not_exists=True,
+ )
+ op.create_index(
+ "ix_spans_project_id_trace_id_start_time",
+ "spans",
+ ["project_id", "trace_id", sa.text("start_time DESC")],
+ if_not_exists=True,
+ )
+ op.create_index(
+ "ix_hashes_gin",
+ "spans",
+ ["hashes"],
+ postgresql_using="gin",
+ postgresql_ops={"hashes": "jsonb_path_ops"},
+ if_not_exists=True,
+ )
+ op.drop_index(
+ "ix_events_gin",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.create_index(
+ "ix_events_gin",
+ "spans", # replace with your table name
+ ["events"],
+ postgresql_using="gin",
+ postgresql_ops={"events": "jsonb_path_ops"},
+ if_not_exists=True,
+ )
+ op.create_index(
+ "ix_spans_fts_attributes_gin",
+ "spans",
+ [sa.text("to_tsvector('simple', attributes)")],
+ postgresql_using="gin",
+ if_not_exists=True,
+ )
+ op.create_index(
+ "ix_spans_fts_events_gin",
+ "spans",
+ [sa.text("to_tsvector('simple', events)")],
+ postgresql_using="gin",
+ if_not_exists=True,
+ )
+ # --------------------------------------------------------------------------
+
+
+def downgrade() -> None:
+ # - SPANS ------------------------------------------------------------------
+ op.drop_index(
+ "ix_spans_fts_events_gin",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.drop_index(
+ "ix_spans_fts_attributes_gin",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.drop_index(
+ "ix_events_gin",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.create_index(
+ "ix_events_gin",
+ "spans",
+ ["events"],
+ postgresql_using="gin",
+ if_not_exists=True,
+ )
+ op.drop_index(
+ "ix_hashes_gin",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.drop_index(
+ "ix_spans_project_id_trace_id_start_time",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.drop_index(
+ "ix_spans_project_id_trace_id_created_at",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.drop_index(
+ "ix_spans_project_id_span_type",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.drop_index(
+ "ix_spans_project_id_trace_type",
+ table_name="spans",
+ if_exists=True,
+ )
+ op.drop_column(
+ "spans",
+ "exception",
+ if_exists=True,
+ )
+ op.drop_column(
+ "spans",
+ "hashes",
+ if_exists=True,
+ )
+ op.drop_column(
+ "spans",
+ "span_type",
+ if_exists=True,
+ )
+ op.drop_column(
+ "spans",
+ "trace_type",
+ if_exists=True,
+ )
+
+ span_type_enum = sa.Enum(SpanType, name="spantype")
+ trace_type_enum = sa.Enum(TraceType, name="tracetype")
+
+ span_type_enum.drop(op.get_bind(), checkfirst=True)
+ trace_type_enum.drop(op.get_bind(), checkfirst=True)
+ # --------------------------------------------------------------------------
diff --git a/api/ee/databases/postgres/migrations/utils.py b/api/ee/databases/postgres/migrations/utils.py
new file mode 100644
index 0000000000..f3874da1c8
--- /dev/null
+++ b/api/ee/databases/postgres/migrations/utils.py
@@ -0,0 +1,313 @@
+import os
+import subprocess
+import tempfile
+
+from sqlalchemy import create_engine, text
+from sqlalchemy.ext.asyncio import create_async_engine
+
+from sqlalchemy.exc import ProgrammingError
+
+from oss.src.utils.env import env
+
+
+# Config (can override via env)
+POSTGRES_URI = (
+ os.getenv("POSTGRES_URI")
+ or env.POSTGRES_URI_CORE
+ or env.POSTGRES_URI_TRACING
+ or "postgresql+asyncpg://username:password@localhost:5432/agenta_ee"
+)
+DB_PROTOCOL = POSTGRES_URI.split("://")[0]
+DB_USER = POSTGRES_URI.split("://")[1].split(":")[0]
+DB_PASS = POSTGRES_URI.split("://")[1].split(":")[1].split("@")[0]
+DB_HOST = POSTGRES_URI.split("@")[1].split(":")[0]
+DB_PORT = POSTGRES_URI.split(":")[-1].split("/")[0]
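+# NOTE: this naive split assumes a URI shaped like
+#   protocol://user:password@host:port/dbname
+# and will mis-parse passwords containing ':' or '@'.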
+ADMIN_DB = "postgres"
+
+POSTGRES_URI_POSTGRES = (
+ f"{DB_PROTOCOL}://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{ADMIN_DB}"
+)
+
+# Rename/create map: {'old_name': 'new_name'}
+RENAME_MAP = {
+ "agenta_ee": "agenta_ee_core",
+ "supertokens_ee": "agenta_ee_supertokens",
+ "agenta_ee_tracing": "agenta_ee_tracing",
+}
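+# The identity mapping for "agenta_ee_tracing" is intentional: the database is
+# never renamed, but split_core_and_tracing() creates it (with role and
+# grants) when it does not exist yet.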
+
+
+NODES_TF = {
+ "agenta_ee_core": "agenta_ee_tracing",
+}
+
+
+async def copy_nodes_from_core_to_tracing():
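+    """
+    One-off helper: copy the legacy 'nodes' table from the core database into
+    the tracing database via pg_dump/pg_restore, then truncate the source once
+    the table is verified to exist on both sides.
+    """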
+ engine = create_async_engine(
+ POSTGRES_URI_POSTGRES,
+ isolation_level="AUTOCOMMIT",
+ )
+
+ async with engine.begin() as conn:
+ for old_name, new_name in NODES_TF.items():
+ old_exists = (
+ await conn.execute(
+ text("SELECT 1 FROM pg_database WHERE datname = :name"),
+ {"name": old_name},
+ )
+ ).scalar()
+
+ new_exists = (
+ await conn.execute(
+ text("SELECT 1 FROM pg_database WHERE datname = :name"),
+ {"name": new_name},
+ )
+ ).scalar()
+
+ if old_exists and new_exists:
+ # Check if the nodes table exists in old_name database
+ check_url = f"{DB_PROTOCOL}://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{old_name}"
+ check_engine = create_async_engine(check_url)
+                # Use a dedicated connection to the source database; the outer
+                # `conn` is bound to the admin database and must stay usable.
+                async with check_engine.begin() as src_conn:
+                    result = (
+                        await src_conn.execute(
+                            text("SELECT to_regclass('public.nodes')"),
+                        )
+                    ).scalar()
+                    if result is None:
+                        print(
+                            f"⚠️ Table 'nodes' does not exist in '{old_name}'. Skipping copy."
+                        )
+                        return
+
+                    count = (
+                        await src_conn.execute(
+                            text("SELECT COUNT(*) FROM public.nodes"),
+                        )
+                    ).scalar()
+
+                    if count == 0:
+                        print(
+                            f"⚠️ Table 'nodes' is empty in '{old_name}'. Skipping copy."
+                        )
+                        return
+
+ check_url = f"{DB_PROTOCOL}://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{new_name}"
+ check_engine = create_async_engine(check_url)
+
+                async with check_engine.begin() as dst_conn:
+                    count = (
+                        await dst_conn.execute(
+                            text(
+                                "SELECT COUNT(*) FROM public.nodes",
+                            )
+                        )
+                    ).scalar()
+
+ if (count or 0) > 0:
+ print(
+ f"⚠️ Table 'nodes' already exists in '{new_name}' with {count} rows. Skipping copy."
+ )
+ return
+
+ with tempfile.NamedTemporaryFile(suffix=".sql", delete=False) as tmp:
+ dump_file = tmp.name
+
+ try:
+ # Step 1: Dump the 'nodes' table to file
+ subprocess.run(
+ [
+ "pg_dump",
+ "-h",
+ DB_HOST,
+ "-p",
+ str(DB_PORT),
+ "-U",
+ DB_USER,
+ "-d",
+ old_name,
+ "-t",
+ "nodes",
+ "--format=custom", # requires -f, not stdout redirection
+ "--no-owner",
+ "--no-privileges",
+ "-f",
+ dump_file,
+ ],
+ check=True,
+ env={**os.environ, "PGPASSWORD": DB_PASS},
+ )
+
+ print(f"✔ Dumped 'nodes' table to '{dump_file}'")
+
+ # Step 2: Restore the dump into the new database
+ subprocess.run(
+ [
+ "pg_restore",
+ "--data-only",
+ "--no-owner",
+ "--no-privileges",
+ "-h",
+ DB_HOST,
+ "-p",
+ str(DB_PORT),
+ "-U",
+ DB_USER,
+ "-d",
+ new_name,
+ dump_file,
+ ],
+ check=True,
+ env={**os.environ, "PGPASSWORD": DB_PASS},
+ )
+
+ print(f"✔ Restored 'nodes' table into '{new_name}'")
+
+ # Step 3: Verify 'nodes' exists in both DBs, then drop from old
+ source_engine = create_async_engine(
+ f"{DB_PROTOCOL}://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{old_name}"
+ )
+ dest_engine = create_async_engine(
+ f"{DB_PROTOCOL}://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{new_name}"
+ )
+
+ async with source_engine.begin() as src, dest_engine.begin() as dst:
+ src = await src.execution_options(isolation_level="AUTOCOMMIT")
+ dst = await dst.execution_options(isolation_level="AUTOCOMMIT")
+
+ src_exists = (
+ await src.execute(
+ text("SELECT to_regclass('public.nodes')")
+ )
+ ).scalar()
+ dst_exists = (
+ await dst.execute(
+ text("SELECT to_regclass('public.nodes')"),
+ )
+ ).scalar()
+
+ if src_exists and dst_exists:
+ subprocess.run(
+ [
+ "psql",
+ "-h",
+ DB_HOST,
+ "-p",
+ str(DB_PORT),
+ "-U",
+ DB_USER,
+ "-d",
+ old_name,
+ "-c",
+ "TRUNCATE TABLE public.nodes CASCADE",
+ ],
+ check=True,
+ env={**os.environ, "PGPASSWORD": DB_PASS},
+ )
+
+ count = (
+ await src.execute(
+ text("SELECT COUNT(*) FROM public.nodes"),
+ )
+ ).scalar()
+
+ print(f"✅ Remaining rows: {count}")
+
+            except subprocess.CalledProcessError as e:
+                print(f"❌ pg_dump/pg_restore/psql failed: {e}")
+ finally:
+ if os.path.exists(dump_file):
+ os.remove(dump_file)
+
+
+async def split_core_and_tracing():
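+    """
+    Rename legacy databases to the new naming scheme, or create the new
+    database (with role and grants) when neither the old nor the new name
+    exists yet.
+    """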
+ engine = create_async_engine(
+ POSTGRES_URI_POSTGRES,
+ isolation_level="AUTOCOMMIT",
+ )
+
+ async with engine.begin() as conn:
+ for old_name, new_name in RENAME_MAP.items():
+ old_exists = (
+ await conn.execute(
+ text("SELECT 1 FROM pg_database WHERE datname = :name"),
+ {"name": old_name},
+ )
+ ).scalar()
+
+ new_exists = (
+ await conn.execute(
+ text("SELECT 1 FROM pg_database WHERE datname = :name"),
+ {"name": new_name},
+ )
+ ).scalar()
+
+ if old_exists and not new_exists:
+ print(f"Renaming database '{old_name}' → '{new_name}'...")
+ try:
+ await conn.execute(
+ text(f"ALTER DATABASE {old_name} RENAME TO {new_name}")
+ )
+ print(f"✔ Renamed '{old_name}' to '{new_name}'")
+ except ProgrammingError as e:
+ print(f"❌ Failed to rename '{old_name}': {e}")
+
+ elif not old_exists and new_exists:
+ print(
+ f"'{old_name}' does not exist, but '{new_name}' already exists. No action taken."
+ )
+
+ elif not old_exists and not new_exists:
+ print(
+ f"Neither '{old_name}' nor '{new_name}' exists. Creating '{new_name}'..."
+ )
+ try:
+ # Ensure the role exists
+ await conn.execute(
+ text(
+ f"""
+ DO $$
+ BEGIN
+ IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '{DB_USER}') THEN
+ EXECUTE format('CREATE ROLE %I WITH LOGIN PASSWORD %L', '{DB_USER}', '{DB_PASS}');
+ END IF;
+ END
+ $$;
+ """
+ )
+ )
+ print(f"✔ Ensured role '{DB_USER}' exists")
+
+ # Create the new database
+ await conn.execute(text(f"CREATE DATABASE {new_name}"))
+ print(f"✔ Created database '{new_name}'")
+
+ # Grant privileges on the database to the role
+ await conn.execute(
+ text(
+ f"GRANT ALL PRIVILEGES ON DATABASE {new_name} TO {DB_USER}"
+ )
+ )
+ print(
+ f"✔ Granted privileges on database '{new_name}' to '{DB_USER}'"
+ )
+
+ # Connect to the new database to grant schema permissions
+ new_db_url = f"{DB_PROTOCOL}://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{new_name}"
+
+ async with create_async_engine(
+ new_db_url, isolation_level="AUTOCOMMIT"
+ ).begin() as new_db_conn:
+ await new_db_conn.execute(
+ text(f"GRANT ALL ON SCHEMA public TO {DB_USER}")
+ )
+ print(
+ f"✔ Granted privileges on schema 'public' in '{new_name}' to '{DB_USER}'"
+ )
+
+ except ProgrammingError as e:
+ print(
+ f"❌ Failed during creation or configuration of '{new_name}': {e}"
+ )
+
+ else:
+ print(f"Both '{old_name}' and '{new_name}' exist. No action taken.")
diff --git a/api/ee/docker/Dockerfile.dev b/api/ee/docker/Dockerfile.dev
new file mode 100644
index 0000000000..a650319e31
--- /dev/null
+++ b/api/ee/docker/Dockerfile.dev
@@ -0,0 +1,44 @@
+FROM python:3.11-slim-bullseye
+
+WORKDIR /app
+
+RUN apt-get update && \
+ apt-get install -y curl cron gnupg2 lsb-release && \
+ echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \
+ curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | \
+ gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \
+ apt-get update && \
+ apt-get install -y postgresql-client-16 && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN pip install --upgrade pip \
+ && pip install poetry
+
+COPY ./ee /app/ee/
+COPY ./oss /app/oss/
+COPY ./entrypoint.py ./pyproject.toml /app/
+
+RUN poetry config virtualenvs.create false \
+ && poetry install --no-interaction --no-ansi
+ # && pip install -e /sdk/
+
+# ENV PYTHONPATH=/sdk:$PYTHONPATH
+
+COPY ./ee/src/crons/meters.sh /meters.sh
+COPY ./ee/src/crons/meters.txt /etc/cron.d/meters-cron
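+# Ensure the crontab ends with a newline (cron silently ignores files that do
+# not) and print it with visible control characters as a build-time check.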
+RUN sed -i -e '$a\' /etc/cron.d/meters-cron
+RUN cat -A /etc/cron.d/meters-cron
+
+RUN chmod +x /meters.sh \
+ && chmod 0644 /etc/cron.d/meters-cron
+
+COPY ./ee/src/crons/queries.sh /queries.sh
+COPY ./ee/src/crons/queries.txt /etc/cron.d/queries-cron
+RUN sed -i -e '$a\' /etc/cron.d/queries-cron
+RUN cat -A /etc/cron.d/queries-cron
+
+RUN chmod +x /queries.sh \
+ && chmod 0644 /etc/cron.d/queries-cron
+
+EXPOSE 8000
diff --git a/api/ee/docker/Dockerfile.gh b/api/ee/docker/Dockerfile.gh
new file mode 100644
index 0000000000..8e8e6ec936
--- /dev/null
+++ b/api/ee/docker/Dockerfile.gh
@@ -0,0 +1,44 @@
+FROM python:3.11-slim-bullseye
+
+WORKDIR /app
+
+RUN apt-get update && \
+ apt-get install -y curl cron gnupg2 lsb-release && \
+ echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \
+ curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | \
+ gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \
+ apt-get update && \
+ apt-get install -y postgresql-client-16 && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN pip install --upgrade pip \
+ && pip install poetry
+
+COPY ./ee /app/ee/
+COPY ./oss /app/oss/
+COPY ./entrypoint.py ./pyproject.toml /app/
+
+RUN poetry config virtualenvs.create false \
+ && poetry install --no-interaction --no-ansi
+
+COPY ./ee/src/crons/meters.sh /meters.sh
+COPY ./ee/src/crons/meters.txt /etc/cron.d/meters-cron
+RUN sed -i -e '$a\' /etc/cron.d/meters-cron
+RUN cat -A /etc/cron.d/meters-cron
+
+RUN chmod +x /meters.sh \
+ && chmod 0644 /etc/cron.d/meters-cron
+
+COPY ./ee/src/crons/queries.sh /queries.sh
+COPY ./ee/src/crons/queries.txt /etc/cron.d/queries-cron
+RUN sed -i -e '$a\' /etc/cron.d/queries-cron
+RUN cat -A /etc/cron.d/queries-cron
+
+RUN chmod +x /queries.sh \
+ && chmod 0644 /etc/cron.d/queries-cron
+
+EXPOSE 8000
diff --git a/api/ee/src/__init__.py b/api/ee/src/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/apis/__init__.py b/api/ee/src/apis/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/apis/fastapi/__init__.py b/api/ee/src/apis/fastapi/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/apis/fastapi/billing/__init__.py b/api/ee/src/apis/fastapi/billing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/apis/fastapi/billing/models.py b/api/ee/src/apis/fastapi/billing/models.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/apis/fastapi/billing/router.py b/api/ee/src/apis/fastapi/billing/router.py
new file mode 100644
index 0000000000..7ac23142c5
--- /dev/null
+++ b/api/ee/src/apis/fastapi/billing/router.py
@@ -0,0 +1,980 @@
+from typing import Any, Dict
+from os import environ
+from json import loads, decoder
+from uuid import getnode
+from datetime import datetime, timezone
+from dateutil.relativedelta import relativedelta
+
+from fastapi import APIRouter, Request, status, HTTPException, Query
+from fastapi.responses import JSONResponse
+
+import stripe
+
+from oss.src.utils.common import is_ee
+from oss.src.utils.logging import get_module_logger
+from oss.src.utils.exceptions import intercept_exceptions
+from oss.src.utils.caching import get_cache, set_cache, invalidate_cache
+
+from oss.src.services.db_manager import (
+ get_user_with_id,
+ get_organization_by_id,
+)
+
+from ee.src.utils.permissions import check_action_access
+from ee.src.models.shared_models import Permission
+from ee.src.core.entitlements.types import ENTITLEMENTS, CATALOG, Tracker, Quota
+from ee.src.core.subscriptions.types import Event, Plan
+from ee.src.core.subscriptions.service import (
+ SubscriptionsService,
+ SwitchException,
+ EventException,
+)
+
+
+log = get_module_logger(__name__)
+
+stripe.api_key = environ.get("STRIPE_API_KEY")
+
+MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xff:02x}" for ele in range(40, -1, -8))
+STRIPE_WEBHOOK_SECRET = environ.get("STRIPE_WEBHOOK_SECRET")
+STRIPE_TARGET = environ.get("STRIPE_TARGET") or MAC_ADDRESS
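+# STRIPE_TARGET defaults to this host's MAC address, so webhook events whose
+# metadata carries a different "target" (e.g. another environment sharing the
+# same Stripe account) are rejected by handle_events.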
+AGENTA_PRICING = loads(environ.get("AGENTA_PRICING") or "{}")
+
+FORBIDDEN_RESPONSE = JSONResponse(
+ status_code=403,
+ content={
+ "detail": "You do not have access to perform this action. Please contact your organization admin.",
+ },
+)
+
+
+class SubscriptionsRouter:
+ def __init__(
+ self,
+ subscription_service: SubscriptionsService,
+ ):
+ self.subscription_service = subscription_service
+
+ # ROUTER
+ self.router = APIRouter()
+
+ # USES 'STRIPE_WEBHOOK_SECRET', SHOULD BE IN A DIFFERENT ROUTER
+ self.router.add_api_route(
+ "/stripe/events/",
+ self.handle_events,
+ methods=["POST"],
+ operation_id="handle_events",
+ )
+
+ self.router.add_api_route(
+ "/stripe/portals/",
+ self.create_portal_user_route,
+ methods=["POST"],
+ operation_id="create_portal",
+ )
+
+ self.router.add_api_route(
+ "/stripe/checkouts/",
+ self.create_checkout_user_route,
+ methods=["POST"],
+ operation_id="create_checkout",
+ )
+
+ self.router.add_api_route(
+ "/plans",
+ self.fetch_plan_user_route,
+ methods=["GET"],
+ operation_id="fetch_plans",
+ )
+
+ self.router.add_api_route(
+ "/plans/switch",
+ self.switch_plans_user_route,
+ methods=["POST"],
+ operation_id="switch_plans",
+ )
+
+ self.router.add_api_route(
+ "/subscription",
+ self.fetch_subscription_user_route,
+ methods=["GET"],
+ operation_id="fetch_subscription",
+ )
+
+ self.router.add_api_route(
+ "/subscription/cancel",
+ self.cancel_subscription_user_route,
+ methods=["POST"],
+ operation_id="cancel_plan",
+ )
+
+ self.router.add_api_route(
+ "/usage",
+ self.fetch_usage_user_route,
+ methods=["GET"],
+ operation_id="fetch_usage",
+ )
+
+ # ADMIN ROUTER
+ self.admin_router = APIRouter()
+
+ self.admin_router.add_api_route(
+ "/stripe/portals/",
+ self.create_portal_admin_route,
+ methods=["POST"],
+ operation_id="admin_create_portal",
+ )
+
+ self.admin_router.add_api_route(
+ "/stripe/checkouts/",
+ self.create_checkout_admin_route,
+ methods=["POST"],
+ operation_id="admin_create_checkout",
+ )
+
+ self.admin_router.add_api_route(
+ "/plans/switch",
+ self.switch_plans_admin_route,
+ methods=["POST"],
+ operation_id="admin_switch_plans",
+ )
+
+ self.admin_router.add_api_route(
+ "/subscription/cancel",
+ self.cancel_subscription_admin_route,
+ methods=["POST"],
+ operation_id="admin_cancel_subscription",
+ )
+
+ # DOESN'T REQUIRE 'organization_id'
+ self.admin_router.add_api_route(
+ "/usage/report",
+ self.report_usage,
+ methods=["POST"],
+ operation_id="admin_report_usage",
+ )
+
+ # HANDLERS
+
+ @intercept_exceptions()
+ async def handle_events(
+ self,
+ request: Request,
+ ):
+ if not stripe.api_key:
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={"status": "error", "message": "Missing Stripe API Key"},
+ )
+
+ payload = await request.body()
+ stripe_event = None
+
+ try:
+ stripe_event = loads(payload)
+ except decoder.JSONDecodeError:
+ return JSONResponse(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ content={"status": "error", "message": "Payload extraction failed"},
+ )
+
+ try:
+ stripe_event = stripe.Event.construct_from(
+ stripe_event,
+ stripe.api_key,
+ )
+ except ValueError as e:
+ log.error("Could not construct stripe event: %s", e)
+ raise HTTPException(status_code=400, detail="Invalid payload") from e
+
+ try:
+ sig_header = request.headers.get("stripe-signature")
+
+ if STRIPE_WEBHOOK_SECRET:
+ stripe_event = stripe.Webhook.construct_event(
+ payload,
+ sig_header,
+ STRIPE_WEBHOOK_SECRET,
+ )
+ except stripe.error.SignatureVerificationError as e:
+ log.error("Webhook signature verification failed: %s", e)
+ return JSONResponse(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ content={"status": "error", "message": "Signature verification failed"},
+ )
+
+ metadata = None
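+
+        # Non-invoice events carry their metadata directly on the event object;
+        # invoice.* events nest it under subscription_details.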
+
+ if not stripe_event.type.startswith("invoice"):
+ if not hasattr(stripe_event.data.object, "metadata"):
+ log.warn("Skipping stripe event: %s (no metadata)", stripe_event.type)
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={"status": "error", "message": "Metadata not found"},
+ )
+ else:
+ metadata = stripe_event.data.object.metadata
+
+ if stripe_event.type.startswith("invoice"):
+            if not hasattr(
+                stripe_event.data.object, "subscription_details"
+            ) or not hasattr(
+                stripe_event.data.object.subscription_details, "metadata"
+            ):
+ log.warn("Skipping stripe event: %s (no metadata)", stripe_event.type)
+
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={"status": "error", "message": "Metadata not found"},
+ )
+ else:
+ metadata = stripe_event.data.object.subscription_details.metadata
+
+ if "target" not in metadata:
+ log.warn("Skipping stripe event: %s (no target)", stripe_event.type)
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={"status": "error", "message": "Target not found"},
+ )
+
+ target = metadata.get("target")
+
+ if target != STRIPE_TARGET:
+ log.warn(
+ "Skipping stripe event: %s (wrong target: %s)",
+ stripe_event.type,
+ target,
+ )
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={"status": "error", "message": "Target mismatch"},
+ )
+
+ if "organization_id" not in metadata:
+ log.warn("Skipping stripe event: %s (no organization)", stripe_event.type)
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={"status": "error", "message": "Organization ID not found"},
+ )
+
+ organization_id = metadata.get("organization_id")
+
+ log.info(
+ "Stripe event: %s | %s | %s",
+ organization_id,
+ stripe_event.type,
+ target,
+ )
+
+ try:
+ event = None
+ subscription_id = None
+ plan = None
+ anchor = None
+
+ if stripe_event.type == "customer.subscription.created":
+ event = Event.SUBSCRIPTION_CREATED
+
+ if "id" not in stripe_event.data.object:
+ log.warn(
+ "Skipping stripe event: %s (no subscription)",
+ stripe_event.type,
+ )
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={
+ "status": "error",
+ "message": "Subscription ID not found",
+ },
+ )
+
+ subscription_id = stripe_event.data.object.id
+
+ if "plan" not in metadata:
+ log.warn("Skipping stripe event: %s (no plan)", stripe_event.type)
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={
+ "status": "error",
+ "message": "Plan not found",
+ },
+ )
+
+ plan = Plan(metadata.get("plan"))
+
+ if "billing_cycle_anchor" not in stripe_event.data.object:
+ log.warn("Skipping stripe event: %s (no anchor)", stripe_event.type)
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={
+ "status": "error",
+ "message": "Anchor not found",
+ },
+ )
+
+ anchor = datetime.fromtimestamp(
+ stripe_event.data.object.billing_cycle_anchor
+ ).day
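+            # Only the day-of-month of the anchor is stored; fetch_subscription
+            # reconstructs the period boundaries from it later.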
+
+ elif stripe_event.type == "invoice.payment_failed":
+ event = Event.SUBSCRIPTION_PAUSED
+
+ elif stripe_event.type == "invoice.payment_succeeded":
+ event = Event.SUBSCRIPTION_RESUMED
+
+ elif stripe_event.type == "customer.subscription.deleted":
+ event = Event.SUBSCRIPTION_CANCELLED
+
+ else:
+ log.warn("Skipping stripe event: %s (unsupported)", stripe_event.type)
+ return JSONResponse(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ content={"status": "error", "message": "Unsupported event"},
+ )
+
+ subscription = await self.subscription_service.process_event(
+ organization_id=organization_id,
+ event=event,
+ subscription_id=subscription_id,
+ plan=plan,
+ anchor=anchor,
+ )
+
+ except Exception as e:
+ raise HTTPException(status_code=500, detail="unexpected error") from e
+
+ if not subscription:
+ raise HTTPException(status_code=500, detail="unexpected error")
+
+ return JSONResponse(
+ status_code=status.HTTP_200_OK,
+ content={"status": "success"},
+ )
+
+ async def create_portal(
+ self,
+ organization_id: str,
+ ):
+ if not stripe.api_key:
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={"status": "error", "message": "Missing Stripe API Key"},
+ )
+
+ subscription = await self.subscription_service.read(
+ organization_id=organization_id,
+ )
+
+ if not subscription:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={"status": "error", "message": "Subscription not found"},
+ )
+
+ if not subscription.customer_id:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={
+ "status": "error",
+ "message": "Access denied: please subscribe to a plan to access the portal",
+ },
+ )
+
+ portal = stripe.billing_portal.Session.create(
+ customer=subscription.customer_id,
+ )
+
+ return {"portal_url": portal.url}
+
+ async def create_checkout(
+ self,
+ organization_id: str,
+ plan: Plan,
+ success_url: str,
+ ):
+ if not stripe.api_key:
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={"status": "error", "message": "Missing Stripe API Key"},
+ )
+
+ if plan.name not in Plan.__members__.keys():
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail="Invalid plan",
+ )
+
+ subscription = await self.subscription_service.read(
+ organization_id=organization_id,
+ )
+
+ if not subscription:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={
+ "status": "error",
+ "message": "Subscription (Agenta) not found",
+ },
+ )
+
+ if subscription.subscription_id:
+ return JSONResponse(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ content={
+ "status": "error",
+ "message": "Subscription (Stripe) already exists",
+ },
+ )
+
+ if not subscription.customer_id:
+ organization = await get_organization_by_id(
+ organization_id=organization_id,
+ )
+
+ if not organization:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={
+ "status": "error",
+ "message": "Organization not found",
+ },
+ )
+
+ user = await get_user_with_id(
+ user_id=organization.owner,
+ )
+
+ if not user:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={"status": "error", "message": "Owner not found"},
+ )
+
+ customer = stripe.Customer.create(
+ name=organization.name,
+ email=user.email,
+ metadata={
+ "organization_id": organization_id,
+ "target": STRIPE_TARGET,
+ },
+ )
+
+ subscription.customer_id = customer.id
+
+ await self.subscription_service.update(
+ subscription=subscription,
+ )
+
+ checkout = stripe.checkout.Session.create(
+ mode="subscription",
+ payment_method_types=["card"],
+ allow_promotion_codes=True,
+ customer_update={"address": "auto", "name": "auto"},
+ billing_address_collection="required",
+ automatic_tax={"enabled": True},
+ tax_id_collection={"enabled": True},
+ #
+ customer=subscription.customer_id,
+ line_items=list(AGENTA_PRICING[plan].values()),
+ #
+ subscription_data={
+ # "billing_cycle_anchor": anchor,
+ "metadata": {
+ "organization_id": organization_id,
+ "plan": plan.value,
+ "target": STRIPE_TARGET,
+ },
+ },
+ #
+ ui_mode="hosted",
+ success_url=success_url,
+ )
+
+ return {"checkout_url": checkout.url}
+
+ async def fetch_plans(
+ self,
+ organization_id: str,
+ ):
+ plans = []
+
+ subscription = await self.subscription_service.read(
+ organization_id=organization_id,
+ )
+
+ if not subscription:
+ key = None
+ else:
+ key = subscription.plan.value
+
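+        # Standard plans are always listed; a custom plan is only exposed to
+        # the organization currently subscribed to it.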
+ for plan in CATALOG:
+ if plan["type"] == "standard":
+ plans.append(plan)
+ elif plan["type"] == "custom" and plan["plan"] == key:
+ plans.append(plan)
+
+ return plans
+
+ async def switch_plans(
+ self,
+ organization_id: str,
+ plan: Plan,
+ # force: bool,
+ ):
+ if plan.name not in Plan.__members__.keys():
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail="Invalid plan",
+ )
+
+ try:
+ subscription = await self.subscription_service.process_event(
+ organization_id=organization_id,
+ event=Event.SUBSCRIPTION_SWITCHED,
+ plan=plan.value,
+ # force=force,
+ )
+
+ if not subscription:
+ raise HTTPException(status_code=500, detail="unexpected error")
+
+ except EventException as e:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail=str(e),
+ ) from e
+
+ except SwitchException as e:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail=str(e),
+ ) from e
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+ detail="unexpected error",
+ ) from e
+
+ return JSONResponse(
+ status_code=status.HTTP_200_OK,
+ content={"status": "success"},
+ )
+
+ async def fetch_subscription(
+ self,
+ organization_id: str,
+ ):
+ now = datetime.now(timezone.utc)
+
+ subscription = await self.subscription_service.read(
+ organization_id=organization_id,
+ )
+
+ if not subscription or not subscription.plan:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={
+ "status": "error",
+ "message": "Subscription (Agenta) not found",
+ },
+ )
+
+ plan = subscription.plan
+ anchor = subscription.anchor
+
+ _status: Dict[str, Any] = dict(
+ plan=plan.value,
+ type="standard",
+ )
+
+ if plan == Plan.CLOUD_V0_HOBBY:
+ return _status
+
+ if not subscription.subscription_id:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={
+ "status": "error",
+ "message": "Subscription (Agenta) not found",
+ },
+ )
+
+ if not stripe.api_key:
+ return JSONResponse(
+ status_code=status.HTTP_403_FORBIDDEN,
+ content={
+ "status": "error",
+ "message": "Missing Stripe API Key",
+ },
+ )
+
+ try:
+ _subscription = stripe.Subscription.retrieve(
+ id=subscription.subscription_id,
+ )
+ except Exception:
+ _subscription = None
+
+ if _subscription:
+ _status["period_start"] = int(_subscription.current_period_start)
+ _status["period_end"] = int(_subscription.current_period_end)
+ _status["free_trial"] = _subscription.status == "trialing"
+
+ return _status
+
+ if not anchor or anchor < 1 or anchor > 31:
+ anchor = now.day
+
+ last_day_this_month = (
+ datetime(
+ now.year,
+ now.month,
+ 1,
+ tzinfo=timezone.utc,
+ )
+ + relativedelta(
+ months=+1,
+ days=-1,
+ )
+ ).day
+
+ day_this_month = min(anchor, last_day_this_month)
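+
+        # Worked example (sketch): anchor=31 and now=2025-02-10 gives
+        # last_day_this_month=28, so the current period runs from 2025-01-31
+        # (31 fits in January) to 2025-02-28 (31 capped to February).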
+
+ if now.day < anchor:
+ prev_month = now + relativedelta(
+ months=-1,
+ )
+
+ last_day_prev_month = (
+ datetime(
+ prev_month.year,
+ prev_month.month,
+ 1,
+ tzinfo=timezone.utc,
+ )
+ + relativedelta(
+ months=+1,
+ days=-1,
+ )
+ ).day
+
+ day_prev_month = min(anchor, last_day_prev_month)
+
+ period_start = datetime(
+ year=prev_month.year,
+ month=prev_month.month,
+ day=day_prev_month,
+ tzinfo=timezone.utc,
+ )
+ period_end = datetime(
+ year=now.year,
+ month=now.month,
+ day=day_this_month,
+ tzinfo=timezone.utc,
+ )
+ else:
+ period_start = datetime(
+ year=now.year,
+ month=now.month,
+ day=day_this_month,
+ tzinfo=timezone.utc,
+ )
+
+ next_month = now + relativedelta(
+ months=+1,
+ )
+
+ last_day_next_month = (
+ datetime(
+ next_month.year,
+ next_month.month,
+ 1,
+ tzinfo=timezone.utc,
+ )
+ + relativedelta(
+ months=+1,
+ days=-1,
+ )
+ ).day
+
+ day_next_month = min(anchor, last_day_next_month)
+
+ period_end = datetime(
+ year=next_month.year,
+ month=next_month.month,
+ day=day_next_month,
+ tzinfo=timezone.utc,
+ )
+
+ _status["period_start"] = int(period_start.timestamp())
+ _status["period_end"] = int(period_end.timestamp())
+ _status["free_trial"] = False
+ _status["type"] = "custom"
+
+ return _status
+
+ async def cancel_subscription(
+ self,
+ organization_id: str,
+ ):
+ subscription = await self.subscription_service.read(
+ organization_id=organization_id,
+ )
+
+ if not subscription:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail="Subscription (Agenta) not found",
+ )
+
+ if not subscription.subscription_id:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail="Subscription (Stripe) not found",
+ )
+
+ try:
+ stripe.Subscription.cancel(subscription.subscription_id)
+ except Exception as e:
+ raise HTTPException(
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+ detail="Could not cancel subscription. Please try again or contact support.",
+ ) from e
+
+ return JSONResponse(
+ status_code=status.HTTP_200_OK,
+ content={"status": "success"},
+ )
+
+ async def fetch_usage(
+ self,
+ organization_id: str,
+ ):
+ now = datetime.now(timezone.utc)
+
+ subscription = await self.subscription_service.read(
+ organization_id=organization_id,
+ )
+
+ if not subscription:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={"status": "error", "message": "Subscription not found"},
+ )
+
+ plan = subscription.plan
+        anchor_day = subscription.anchor or now.day
+        # Billing month in 1..12, rolling to the next month once the anchor day
+        # has passed (month == 0 is reserved for non-monthly meters).
+        anchor_month = ((now.month - 1 + (1 if now.day >= anchor_day else 0)) % 12) + 1
+
+ entitlements = ENTITLEMENTS.get(plan)
+
+ if not entitlements:
+ return JSONResponse(
+ status_code=status.HTTP_404_NOT_FOUND,
+ content={"status": "error", "message": "Plan not found"},
+ )
+
+ meters = await self.subscription_service.meters_service.fetch(
+ organization_id=organization_id,
+ )
+
+ usage = {}
+
+ for tracker in [Tracker.COUNTERS, Tracker.GAUGES]:
+ for key in list(entitlements[tracker].keys()):
+ quota: Quota = entitlements[tracker][key]
+ value = 0
+
+ for meter in meters:
+ if meter.key == key:
+ if meter.month != 0 and meter.month != anchor_month:
+ continue
+
+ value = meter.value
+
+ usage[key] = {
+ "value": value,
+ "limit": quota.limit,
+ "free": quota.free,
+ "monthly": quota.monthly is True,
+ "strict": quota.strict is True,
+ }
+
+ return usage
+
+ @intercept_exceptions()
+ async def report_usage(
+ self,
+ ):
+ try:
+ await self.subscription_service.meters_service.report()
+ except Exception as e:
+ raise HTTPException(status_code=500, detail="unexpected error") from e
+
+ return JSONResponse(
+ status_code=status.HTTP_200_OK,
+ content={"status": "success"},
+ )
+
+ # ROUTES
+
+ @intercept_exceptions()
+ async def create_portal_user_route(
+ self,
+ request: Request,
+ ):
+ if not await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.EDIT_BILLING,
+ ):
+ return FORBIDDEN_RESPONSE
+
+ return await self.create_portal(
+ organization_id=request.state.organization_id,
+ )
+
+ @intercept_exceptions()
+ async def create_portal_admin_route(
+ self,
+ organization_id: str = Query(...),
+ ):
+ return await self.create_portal(
+ organization_id=organization_id,
+ )
+
+ @intercept_exceptions()
+ async def create_checkout_user_route(
+ self,
+ request: Request,
+ plan: Plan = Query(...),
+ success_url: str = Query(...), # find a way to make this optional or moot
+ ):
+ if not await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.EDIT_BILLING,
+ ):
+ return FORBIDDEN_RESPONSE
+
+ return await self.create_checkout(
+ organization_id=request.state.organization_id,
+ plan=plan,
+ success_url=success_url,
+ )
+
+ @intercept_exceptions()
+ async def create_checkout_admin_route(
+ self,
+ organization_id: str = Query(...),
+ plan: Plan = Query(...),
+ success_url: str = Query(...), # find a way to make this optional or moot
+ ):
+ return await self.create_checkout(
+ organization_id=organization_id,
+ plan=plan,
+ success_url=success_url,
+ )
+
+ @intercept_exceptions()
+ async def fetch_plan_user_route(
+ self,
+ request: Request,
+ ):
+ if not await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.VIEW_BILLING,
+ ):
+ return FORBIDDEN_RESPONSE
+
+ return await self.fetch_plans(
+ organization_id=request.state.organization_id,
+ )
+
+ @intercept_exceptions()
+ async def switch_plans_user_route(
+ self,
+ request: Request,
+ plan: Plan = Query(...),
+ ):
+ if not await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.EDIT_BILLING,
+ ):
+ return FORBIDDEN_RESPONSE
+
+ return await self.switch_plans(
+ organization_id=request.state.organization_id,
+ plan=plan,
+ )
+
+ @intercept_exceptions()
+ async def switch_plans_admin_route(
+ self,
+ organization_id: str = Query(...),
+ plan: Plan = Query(...),
+ ):
+ return await self.switch_plans(
+ organization_id=organization_id,
+ plan=plan,
+ )
+
+ @intercept_exceptions()
+ async def fetch_subscription_user_route(
+ self,
+ request: Request,
+ ):
+ if not await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.VIEW_BILLING,
+ ):
+ return FORBIDDEN_RESPONSE
+
+ return await self.fetch_subscription(
+ organization_id=request.state.organization_id,
+ )
+
+ @intercept_exceptions()
+ async def cancel_subscription_user_route(
+ self,
+ request: Request,
+ ):
+ if not await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.EDIT_BILLING,
+ ):
+ return FORBIDDEN_RESPONSE
+
+ return await self.cancel_subscription(
+ organization_id=request.state.organization_id,
+ )
+
+ @intercept_exceptions()
+ async def cancel_subscription_admin_route(
+ self,
+ organization_id: str = Query(...),
+ ):
+ return await self.cancel_subscription(
+ organization_id=organization_id,
+ )
+
+ @intercept_exceptions()
+ async def fetch_usage_user_route(
+ self,
+ request: Request,
+ ):
+ if not await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.VIEW_BILLING,
+ ):
+ return FORBIDDEN_RESPONSE
+
+ return await self.fetch_usage(
+ organization_id=request.state.organization_id,
+ )
diff --git a/api/ee/src/core/__init__.py b/api/ee/src/core/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/core/entitlements/__init__.py b/api/ee/src/core/entitlements/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/core/entitlements/service.py b/api/ee/src/core/entitlements/service.py
new file mode 100644
index 0000000000..f62b11fc74
--- /dev/null
+++ b/api/ee/src/core/entitlements/service.py
@@ -0,0 +1,97 @@
+from typing import Optional, Dict
+
+from ee.src.core.entitlements.types import (
+ Tracker,
+ Constraint,
+ ENTITLEMENTS,
+ CONSTRAINTS,
+)
+from ee.src.core.entitlements.types import Quota, Gauge
+from ee.src.core.subscriptions.types import Plan
+from ee.src.core.meters.service import MetersService
+from ee.src.core.meters.types import MeterDTO
+
+
+class ConstraintsException(Exception):
+    def __init__(self, issues: Dict[Gauge, int]):
+        super().__init__(f"Gauges over quota: {issues}")
+        self.issues = issues
+
+
+class EntitlementsService:
+ def __init__(
+ self,
+ meters_service: MetersService,
+ ):
+ self.meters_service = meters_service
+
+ async def enforce(
+ self,
+ *,
+ organization_id: str,
+ plan: str,
+ force: Optional[bool] = False,
+ ) -> None:
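+        """
+        Check the organization's blocked gauges against the plan's quotas and
+        raise ConstraintsException on violations, unless force is set, in
+        which case fix() is invoked instead of raising.
+        """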
+ issues = await self.check(
+ organization_id=organization_id,
+ plan=plan,
+ )
+
+ if issues:
+ if not force:
+                raise ConstraintsException(
+ issues=issues,
+ )
+
+ await self.fix(
+ organization_id=organization_id,
+ issues=issues,
+ )
+
+ async def check(
+ self,
+ *,
+ organization_id: str,
+ plan: Plan,
+ ) -> Dict[Gauge, int]:
+ issues = {}
+
+ for key in CONSTRAINTS[Constraint.BLOCKED][Tracker.GAUGES]:
+            quotas: Dict[Gauge, Quota] = ENTITLEMENTS[plan][Tracker.GAUGES]
+
+ if key in quotas:
+ meter = MeterDTO(
+ organization_id=organization_id,
+ key=key,
+ )
+ quota: Quota = quotas[key]
+
+ check, meter = await self.meters_service.check(
+ meter=meter,
+ quota=quota,
+ )
+
+ if not check:
+ issues[key] = quota.limit
+
+ return issues
+
+ async def fix(
+ self,
+ *,
+ organization_id: str,
+ issues: Dict[Gauge, int],
+ ) -> None:
+ # TODO: Implement fix
+ pass
+
+
+# TODO:
+# -- P0 / MUST
+# - Add active : Optional[bool] = None to all scopes and users
+# -- P1 / SHOULD
+# - Add parent scopes to all child scope
+# - Add parent scopes membership on child scope membership creation
+# - Remove children scopes membership on parent scope membership removal
+# -- P2 / COULD
+# - Add created_at / updated_at to all scopes
+# - Set updated_at on all updates + on creation
+# - Move organization roles to memberships
diff --git a/api/ee/src/core/entitlements/types.py b/api/ee/src/core/entitlements/types.py
new file mode 100644
index 0000000000..791ddfd024
--- /dev/null
+++ b/api/ee/src/core/entitlements/types.py
@@ -0,0 +1,277 @@
+from typing import Optional
+from enum import Enum
+from pydantic import BaseModel
+
+from ee.src.core.subscriptions.types import Plan
+
+
+class Tracker(str, Enum):
+ FLAGS = "flags"
+ COUNTERS = "counters"
+ GAUGES = "gauges"
+
+
+class Flag(str, Enum):
+ # HISTORY = "history"
+ HOOKS = "hooks"
+ RBAC = "rbac"
+
+
+class Counter(str, Enum):
+ TRACES = "traces"
+ EVALUATIONS = "evaluations"
+ EVALUATORS = "evaluators"
+ ANNOTATIONS = "annotations"
+
+
+class Gauge(str, Enum):
+ USERS = "users"
+ APPLICATIONS = "applications"
+
+
+class Constraint(str, Enum):
+ BLOCKED = "blocked"
+ READ_ONLY = "read_only"
+
+
+class Quota(BaseModel):
+ free: Optional[int] = None
+ limit: Optional[int] = None
+ monthly: Optional[bool] = None
+ strict: Optional[bool] = False
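+    # As used in this patch: 'free' is the included allowance, 'limit' the
+    # hard cap (None = unlimited), 'monthly' buckets the meter by billing
+    # month, and 'strict' marks quotas enforced at write time rather than
+    # merely reported.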
+
+
+class Probe(BaseModel):
+ monthly: Optional[bool] = False
+ delta: Optional[bool] = False
+
+
+CATALOG = [
+ {
+ "title": "Hobby",
+ "description": "Great for hobby projects and POCs.",
+ "type": "standard",
+ "plan": Plan.CLOUD_V0_HOBBY.value,
+ "price": {
+ "base": {
+ "type": "flat",
+ "currency": "USD",
+ "amount": 0.00,
+ },
+ },
+ "features": [
+ "2 prompts",
+ "5k traces/month",
+ "20 evaluations/month",
+ "2 seats",
+ ],
+ },
+ {
+ "title": "Pro",
+ "description": "For production projects.",
+ "type": "standard",
+ "plan": Plan.CLOUD_V0_PRO.value,
+ "price": {
+ "base": {
+ "type": "flat",
+ "currency": "USD",
+ "amount": 49.00,
+ },
+ "users": {
+ "type": "tiered",
+ "currency": "USD",
+ "tiers": [
+ {
+ "limit": 3,
+ "amount": 0.00,
+ },
+ {
+ "limit": 10,
+ "amount": 20.00,
+ "rate": 1,
+ },
+ ],
+ },
+ "traces": {
+ "type": "tiered",
+ "currency": "USD",
+ "tiers": [
+ {
+ "limit": 10_000,
+ "amount": 0.00,
+ },
+ {
+ "amount": 5.00,
+ "rate": 10_000,
+ },
+ ],
+ },
+ },
+ "features": [
+ "Unlimited prompts",
+ "10k traces/month",
+ "Unlimited evaluations",
+ "3 seats included",
+ "Up to 10 seats",
+ ],
+ },
+ # {
+ # "title": "Business",
+ # "description": "For scale, security, and support.",
+ # "type": "standard",
+ # "price": {
+ # "base": {
+ # "type": "flat",
+ # "currency": "USD",
+ # "amount": 399.00,
+ # "starting_at": True,
+ # },
+ # },
+ # "features": [
+ # "Unlimited prompts",
+ # "Unlimited traces",
+ # "Unlimited evaluations",
+ # "Unlimited seats",
+ # ],
+ # },
+ {
+ "title": "Enterprise",
+ "description": "For large organizations or custom needs.",
+ "type": "standard",
+ "features": [
+ "Everything in Pro",
+ "Unlimited seats",
+ "SOC 2 reports",
+ "Security reviews",
+ "Dedicated support",
+ "Custom SLAs",
+ "Custom terms",
+ "Self-hosted deployment options",
+ ],
+ },
+ {
+ "title": "Humanity Labs",
+ "description": "For Humanity Labs.",
+ "plan": Plan.CLOUD_V0_HUMANITY_LABS.value,
+ "type": "custom",
+ "features": [
+ "Everything in Enterprise",
+ ],
+ },
+ {
+ "title": "X Labs",
+ "description": "For X Labs.",
+ "plan": Plan.CLOUD_V0_X_LABS.value,
+ "type": "custom",
+ "features": [
+ "Everything in Enterprise",
+ ],
+ },
+ {
+ "title": "Agenta",
+ "description": "For Agenta.",
+ "plan": Plan.CLOUD_V0_AGENTA_AI.value,
+ "type": "custom",
+ "features": [
+ "Everything in Enterprise",
+ ],
+ },
+]
+
+ENTITLEMENTS = {
+ Plan.CLOUD_V0_HOBBY: {
+ Tracker.FLAGS: {
+ Flag.HOOKS: False,
+ Flag.RBAC: False,
+ },
+ Tracker.COUNTERS: {
+ Counter.TRACES: Quota(limit=5_000, monthly=True, free=5_000),
+ Counter.EVALUATIONS: Quota(limit=20, monthly=True, free=20, strict=True),
+ },
+ Tracker.GAUGES: {
+ Gauge.USERS: Quota(limit=2, strict=True, free=2),
+ Gauge.APPLICATIONS: Quota(limit=2, strict=True, free=2),
+ },
+ },
+ Plan.CLOUD_V0_PRO: {
+ Tracker.FLAGS: {
+ Flag.HOOKS: True,
+ Flag.RBAC: False,
+ },
+ Tracker.COUNTERS: {
+ Counter.TRACES: Quota(monthly=True, free=10_000),
+ Counter.EVALUATIONS: Quota(monthly=True, strict=True),
+ },
+ Tracker.GAUGES: {
+ Gauge.USERS: Quota(limit=10, strict=True, free=3),
+ Gauge.APPLICATIONS: Quota(strict=True),
+ },
+ },
+ Plan.CLOUD_V0_HUMANITY_LABS: {
+ Tracker.FLAGS: {
+ Flag.HOOKS: True,
+ Flag.RBAC: True,
+ },
+ Tracker.COUNTERS: {
+ Counter.TRACES: Quota(monthly=True),
+ Counter.EVALUATIONS: Quota(monthly=True, strict=True),
+ },
+ Tracker.GAUGES: {
+ Gauge.USERS: Quota(strict=True),
+ Gauge.APPLICATIONS: Quota(strict=True),
+ },
+ },
+ Plan.CLOUD_V0_X_LABS: {
+ Tracker.FLAGS: {
+ Flag.HOOKS: False,
+ Flag.RBAC: False,
+ },
+ Tracker.COUNTERS: {
+ Counter.TRACES: Quota(monthly=True),
+ Counter.EVALUATIONS: Quota(monthly=True, strict=True),
+ },
+ Tracker.GAUGES: {
+ Gauge.USERS: Quota(strict=True),
+ Gauge.APPLICATIONS: Quota(strict=True),
+ },
+ },
+ Plan.CLOUD_V0_AGENTA_AI: {
+ Tracker.FLAGS: {
+ Flag.HOOKS: True,
+ Flag.RBAC: True,
+ },
+ Tracker.COUNTERS: {
+ Counter.TRACES: Quota(monthly=True),
+ Counter.EVALUATIONS: Quota(monthly=True, strict=True),
+ },
+ Tracker.GAUGES: {
+ Gauge.USERS: Quota(strict=True),
+ Gauge.APPLICATIONS: Quota(strict=True),
+ },
+ },
+}
+
+
+REPORTS = [
+ Counter.TRACES.value,
+ Gauge.USERS.value,
+]
+
+CONSTRAINTS = {
+ Constraint.BLOCKED: {
+ Tracker.FLAGS: [
+ Flag.HOOKS,
+ Flag.RBAC,
+ ],
+ Tracker.GAUGES: [
+ Gauge.USERS,
+ Gauge.APPLICATIONS,
+ ],
+ },
+ Constraint.READ_ONLY: {
+ Tracker.COUNTERS: [
+ Counter.TRACES,
+ Counter.EVALUATIONS,
+ ],
+ },
+}
diff --git a/api/ee/src/core/meters/__init__.py b/api/ee/src/core/meters/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/core/meters/interfaces.py b/api/ee/src/core/meters/interfaces.py
new file mode 100644
index 0000000000..9f4e66605d
--- /dev/null
+++ b/api/ee/src/core/meters/interfaces.py
@@ -0,0 +1,88 @@
+from typing import Tuple, Callable, Optional
+
+from ee.src.core.entitlements.types import Quota
+from ee.src.core.meters.types import MeterDTO
+
+
+class MetersDAOInterface:
+ def __init__(self):
+ raise NotImplementedError
+
+ async def dump(
+ self,
+ ) -> list[MeterDTO]:
+ """
+ Dump all meters where 'synced' != 'value'.
+
+ :return: A list of MeterDTO objects for meters where 'synced' != 'value'.
+ """
+ raise NotImplementedError
+
+ async def bump(
+ self,
+ meters: list[MeterDTO],
+ ) -> None:
+ """
+ Update the 'synced' field for the given list of meters.
+
+ :param meters: A list of MeterDTO objects containing the details of meters to update.
+ """
+ raise NotImplementedError
+
+ async def fetch(
+ self,
+ *,
+ organization_id: str,
+ ) -> list[MeterDTO]:
+ """
+ Fetch all meters for a given organization.
+
+ Parameters:
+ - organization_id: The ID of the organization to fetch meters for.
+
+ Returns:
+ - List[MeterDTO]: A list of MeterDTO objects containing the meter details.
+ """
+ raise NotImplementedError
+
+ async def check(
+ self,
+ *,
+ meter: MeterDTO,
+ quota: Quota,
+ anchor: Optional[int] = None,
+ ) -> Tuple[bool, MeterDTO]:
+ """
+ Check if the meter adjustment or absolute value is allowed.
+
+ Parameters:
+ - meter: MeterDTO containing the current meter information and either `value` or `delta`.
+ - quota: QuotaDTO defining the allowed quota limits.
+
+ Returns:
+ - allowed (bool): Whether the operation is within the allowed limits.
+ - meter (MeterDTO): The current meter value if found or 0 if not.
+ """
+ raise NotImplementedError
+
+ async def adjust(
+ self,
+ *,
+ meter: MeterDTO,
+ quota: Quota,
+ anchor: Optional[int] = None,
+ ) -> Tuple[bool, MeterDTO, Callable]:
+ """
+ Adjust the meter value based on the quota.
+
+ Parameters:
+ - meter: MeterDTO containing either `value` or `delta` for the adjustment.
+        - quota: Quota defining the allowed quota limits.
+
+ Returns:
+ - allowed (bool): Whether the adjustment was within quota limits.
+ - meter (MeterDTO): The updated meter value after the adjustment.
+ - rollback (callable): A function to rollback the adjustment (optional, if applicable).
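+
+        Example (sketch; whether rollback is awaitable depends on the DAO):
+            allowed, current, rollback = await dao.adjust(meter=m, quota=q)
+            if not allowed:
+                ...  # reject the request
+            # on a later failure of the metered operation, call rollback()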
+ """
+ raise NotImplementedError
diff --git a/api/ee/src/core/meters/service.py b/api/ee/src/core/meters/service.py
new file mode 100644
index 0000000000..ed2ef0fa33
--- /dev/null
+++ b/api/ee/src/core/meters/service.py
@@ -0,0 +1,173 @@
+from typing import Tuple, Callable, List, Optional
+from os import environ
+from json import loads
+
+import stripe
+
+from oss.src.utils.logging import get_module_logger
+
+from ee.src.core.entitlements.types import Quota
+from ee.src.core.entitlements.types import Counter, Gauge, REPORTS
+from ee.src.core.meters.types import MeterDTO
+from ee.src.core.meters.interfaces import MetersDAOInterface
+
+AGENTA_PRICING = loads(environ.get("AGENTA_PRICING") or "{}")
+
+log = get_module_logger(__name__)
+
+stripe.api_key = environ.get("STRIPE_API_KEY")
+
+
+class MetersService:
+ def __init__(
+ self,
+ meters_dao: MetersDAOInterface,
+ ):
+ self.meters_dao = meters_dao
+
+ async def dump(
+ self,
+ ) -> List[MeterDTO]:
+ return await self.meters_dao.dump()
+
+ async def bump(
+ self,
+ *,
+ meters: List[MeterDTO],
+ ) -> None:
+ await self.meters_dao.bump(meters=meters)
+
+ async def fetch(
+ self,
+ *,
+ organization_id: str,
+ ) -> List[MeterDTO]:
+ return await self.meters_dao.fetch(organization_id=organization_id)
+
+ async def check(
+ self,
+ *,
+ meter: MeterDTO,
+ quota: Quota,
+ anchor: Optional[int] = None,
+ ) -> Tuple[bool, MeterDTO]:
+ return await self.meters_dao.check(meter=meter, quota=quota, anchor=anchor)
+
+ async def adjust(
+ self,
+ *,
+ meter: MeterDTO,
+ quota: Quota,
+ anchor: Optional[int] = None,
+ ) -> Tuple[bool, MeterDTO, Callable]:
+ return await self.meters_dao.adjust(meter=meter, quota=quota, anchor=anchor)
+
+ async def report(self):
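+        # Flow: dump() meters whose 'value' differs from 'synced', push gauges
+        # (e.g. seats) to Stripe as subscription item quantities and counters
+        # (e.g. traces) as meter event deltas, then mark all meters as synced
+        # and persist them via bump().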
+ if not stripe.api_key:
+ log.warn("Missing Stripe API Key.")
+ return
+
+ try:
+ meters = await self.dump()
+
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.error("Error dumping meters: %s", e)
+ return
+
+ try:
+ for meter in meters:
+ if meter.subscription is None:
+ continue
+
+ try:
+ if meter.key.value in REPORTS:
+ subscription_id = meter.subscription.subscription_id
+ customer_id = meter.subscription.customer_id
+
+ if not subscription_id:
+ continue
+
+ if not customer_id:
+ continue
+
+ if meter.key.name in Gauge.__members__.keys():
+ try:
+ price_id = (
+ AGENTA_PRICING.get(meter.subscription.plan, {})
+ .get("users", {})
+ .get("price")
+ )
+
+ if not price_id:
+ continue
+
+ _id = None
+ for item in stripe.SubscriptionItem.list(
+ subscription=subscription_id,
+ ).auto_paging_iter():
+ if item.price.id == price_id:
+ _id = item.id
+ break
+
+ if not _id:
+ continue
+
+ quantity = meter.value
+
+ items = [{"id": _id, "quantity": quantity}]
+
+ stripe.Subscription.modify(
+ subscription_id,
+ items=items,
+ )
+
+ except (
+ Exception # pylint: disable=broad-exception-caught
+ ) as e:
+ log.error("Error modifying subscription: %s", e)
+ continue
+
+ log.info(
+ f"[stripe] updating: {meter.organization_id} | | {'sync ' if meter.key.value in REPORTS else ' '} | {meter.key}: {meter.value}"
+ )
+
+ if meter.key.name in Counter.__members__.keys():
+ try:
+ event_name = meter.key.value
+ delta = meter.value - meter.synced
+ payload = {"delta": delta, "customer_id": customer_id}
+
+ stripe.billing.MeterEvent.create(
+ event_name=event_name,
+ payload=payload,
+ )
+ except (
+ Exception # pylint: disable=broad-exception-caught
+ ) as e:
+ log.error("Error creating meter event: %s", e)
+ continue
+
+                            month_part = f"{meter.month:02d}" if meter.month else " "
+                            year_part = meter.year or " "
+                            log.info(
+                                f"[stripe] reporting: {meter.organization_id} | {month_part}.{year_part} | {'sync ' if meter.key.value in REPORTS else '     '} | {meter.key}: {meter.value - meter.synced}"
+                            )
+
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.error("Error reporting meter: %s", e)
+
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.error("Error reporting meters: %s", e)
+
+ try:
+ for meter in meters:
+ meter.synced = meter.value
+
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.error("Error syncing meters: %s", e)
+
+ try:
+ await self.bump(meters=meters)
+
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.error("Error bumping meters: %s", e)
+ return
diff --git a/api/ee/src/core/meters/types.py b/api/ee/src/core/meters/types.py
new file mode 100644
index 0000000000..a0ada9da16
--- /dev/null
+++ b/api/ee/src/core/meters/types.py
@@ -0,0 +1,32 @@
+from typing import Optional
+
+from uuid import UUID
+from enum import Enum
+
+from pydantic import BaseModel
+
+from ee.src.core.entitlements.types import Counter, Gauge
+from ee.src.core.subscriptions.types import SubscriptionDTO
+
+
+class Meters(str, Enum):
+ # COUNTERS
+ TRACES = Counter.TRACES.value
+ EVALUATIONS = Counter.EVALUATIONS.value
+ # GAUGES
+ USERS = Gauge.USERS.value
+ APPLICATIONS = Gauge.APPLICATIONS.value
+
+
+class MeterDTO(BaseModel):
+ organization_id: UUID
+
+ year: Optional[int] = 0
+ month: Optional[int] = 0
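+    # year == 0 / month == 0 denotes a point-in-time (gauge) meter; monthly
+    # counters are bucketed by billing year and month.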
+
+ key: Meters
+ value: Optional[int] = None
+ synced: Optional[int] = None
+ delta: Optional[int] = None
+
+ subscription: Optional[SubscriptionDTO] = None
diff --git a/api/ee/src/core/subscriptions/__init__.py b/api/ee/src/core/subscriptions/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/core/subscriptions/interfaces.py b/api/ee/src/core/subscriptions/interfaces.py
new file mode 100644
index 0000000000..2c47a9a302
--- /dev/null
+++ b/api/ee/src/core/subscriptions/interfaces.py
@@ -0,0 +1,56 @@
+from typing import Optional
+
+from ee.src.core.subscriptions.types import SubscriptionDTO
+
+
+class SubscriptionsDAOInterface:
+ def __init__(self):
+ raise NotImplementedError
+
+ async def create(
+ self,
+ *,
+ subscription: SubscriptionDTO,
+ ) -> SubscriptionDTO:
+ """
+ Create a new subscription.
+
+ Parameters:
+ - subscription: SubscriptionDTO containing subscription details.
+
+ Returns:
+ - SubscriptionDTO: The created subscription.
+ """
+ raise NotImplementedError
+
+ async def read(
+ self,
+ *,
+ organization_id: str,
+ ) -> Optional[SubscriptionDTO]:
+ """
+ Read a subscription by organization ID.
+
+ Parameters:
+ - organization_id: The ID of the organization to fetch.
+
+ Returns:
+ - Optional[SubscriptionDTO]: The subscription if found, else None.
+ """
+ raise NotImplementedError
+
+ async def update(
+ self,
+ *,
+ subscription: SubscriptionDTO,
+ ) -> Optional[SubscriptionDTO]:
+ """
+ Update an existing subscription.
+
+ Parameters:
+ - subscription: SubscriptionDTO containing updated details.
+
+ Returns:
+ - Optional[SubscriptionDTO]: The updated subscription if found, else None.
+ """
+ raise NotImplementedError
diff --git a/api/ee/src/core/subscriptions/service.py b/api/ee/src/core/subscriptions/service.py
new file mode 100644
index 0000000000..f69adcbd74
--- /dev/null
+++ b/api/ee/src/core/subscriptions/service.py
@@ -0,0 +1,271 @@
+from typing import Optional
+from json import loads
+from uuid import getnode
+from datetime import datetime, timezone, timedelta
+
+from os import environ
+
+import stripe
+
+from oss.src.utils.logging import get_module_logger
+
+from ee.src.core.subscriptions.types import (
+ SubscriptionDTO,
+ Event,
+ Plan,
+ FREE_PLAN,
+ REVERSE_TRIAL_PLAN,
+ REVERSE_TRIAL_DAYS,
+)
+from ee.src.core.subscriptions.interfaces import SubscriptionsDAOInterface
+from ee.src.core.entitlements.service import EntitlementsService
+from ee.src.core.meters.service import MetersService
+
+log = get_module_logger(__name__)
+
+stripe.api_key = environ.get("STRIPE_API_KEY")  # same env var as the billing router and meters service
+
+MAC_ADDRESS = ":".join(f"{(getnode() >> ele) & 0xff:02x}" for ele in range(40, -1, -8))
+STRIPE_TARGET = environ.get("STRIPE_TARGET") or MAC_ADDRESS
+AGENTA_PRICING = loads(environ.get("AGENTA_PRICING") or "{}")
+
+
+class SwitchException(Exception):
+ pass
+
+
+class EventException(Exception):
+ pass
+
+
+class SubscriptionsService:
+ def __init__(
+ self,
+ subscriptions_dao: SubscriptionsDAOInterface,
+ meters_service: MetersService,
+ ):
+ self.subscriptions_dao = subscriptions_dao
+ self.meters_service = meters_service
+ self.entitlements_service = EntitlementsService(meters_service=meters_service)
+
+ async def create(
+ self,
+ *,
+ subscription: SubscriptionDTO,
+ ) -> Optional[SubscriptionDTO]:
+ return await self.subscriptions_dao.create(subscription=subscription)
+
+ async def read(
+ self,
+ *,
+ organization_id: str,
+ ) -> Optional[SubscriptionDTO]:
+ return await self.subscriptions_dao.read(organization_id=organization_id)
+
+ async def update(
+ self,
+ *,
+ subscription: SubscriptionDTO,
+ ) -> Optional[SubscriptionDTO]:
+ return await self.subscriptions_dao.update(subscription=subscription)
+
+ async def start_reverse_trial(
+ self,
+ *,
+ organization_id: str,
+ organization_name: str,
+ organization_email: str,
+ ) -> Optional[SubscriptionDTO]:
+ now = datetime.now(tz=timezone.utc)
+ anchor = now + timedelta(days=REVERSE_TRIAL_DAYS)
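+
+        # "Reverse trial": the organization starts on the paid trial plan via a
+        # Stripe trial subscription; if no payment method is added within
+        # REVERSE_TRIAL_DAYS, Stripe cancels it and the cancellation webhook
+        # drops the organization back to the free plan.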
+
+ subscription = await self.read(organization_id=organization_id)
+
+ if subscription:
+ return None
+
+ subscription = await self.create(
+ subscription=SubscriptionDTO(
+ organization_id=organization_id,
+ plan=FREE_PLAN,
+ active=True,
+ anchor=anchor.day,
+ )
+ )
+
+ if not subscription:
+ return None
+
+ if not stripe.api_key:
+ log.warn("Missing Stripe API Key.")
+ return None
+
+ customer = stripe.Customer.create(
+ name=organization_name,
+ email=organization_email,
+ metadata={
+ "organization_id": organization_id,
+ "target": STRIPE_TARGET,
+ },
+ )
+
+ customer_id = customer.id
+
+ if not customer_id:
+ log.error(
+ "Failed to create Stripe customer for organization ID: %s",
+ organization_id,
+ )
+
+ return None
+
+ stripe_subscription = stripe.Subscription.create(
+ customer=customer_id,
+ items=list(AGENTA_PRICING[REVERSE_TRIAL_PLAN].values()),
+ #
+ # automatic_tax={"enabled": True},
+ metadata={
+ "organization_id": organization_id,
+ "plan": REVERSE_TRIAL_PLAN.value,
+ "target": STRIPE_TARGET,
+ },
+ #
+ trial_period_days=REVERSE_TRIAL_DAYS,
+ trial_settings={"end_behavior": {"missing_payment_method": "cancel"}},
+ )
+
+ subscription = await self.update(
+ subscription=SubscriptionDTO(
+ organization_id=organization_id,
+ customer_id=customer_id,
+ subscription_id=stripe_subscription.id,
+ plan=REVERSE_TRIAL_PLAN,
+ active=True,
+ anchor=anchor.day,
+ )
+ )
+
+ return subscription
+
+ async def process_event(
+ self,
+ *,
+ organization_id: str,
+ event: Event,
+ subscription_id: Optional[str] = None,
+ plan: Optional[Plan] = None,
+        anchor: Optional[int] = None,
+ # force: Optional[bool] = True,
+ **kwargs,
+    ) -> Optional[SubscriptionDTO]:
+ log.info(
+ "Billing event: %s | %s | %s",
+ organization_id,
+ event,
+ plan,
+ )
+
+ now = datetime.now(tz=timezone.utc)
+
+ if not anchor:
+ anchor = now.day
+
+ subscription = await self.read(organization_id=organization_id)
+
+ if not subscription:
+ raise EventException(
+ "Subscription not found for organization ID: {organization_id}"
+ )
+
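+        # Event dispatch: CREATED activates the incoming plan, PAUSED/RESUMED
+        # toggle `active`, SWITCHED swaps the Stripe line items in place, and
+        # CANCELLED falls back to the free plan. Everything but CREATED is
+        # rejected while on the free plan, which has no Stripe subscription.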
+ if event == Event.SUBSCRIPTION_CREATED:
+ subscription.active = True
+ subscription.plan = plan
+ subscription.subscription_id = subscription_id
+ subscription.anchor = anchor
+
+ subscription = await self.update(subscription=subscription)
+
+ elif subscription.plan != FREE_PLAN and event == Event.SUBSCRIPTION_PAUSED:
+ subscription.active = False
+
+ subscription = await self.update(subscription=subscription)
+
+ elif subscription.plan != FREE_PLAN and event == Event.SUBSCRIPTION_RESUMED:
+ subscription.active = True
+
+ subscription = await self.update(subscription=subscription)
+
+ elif subscription.plan != FREE_PLAN and event == Event.SUBSCRIPTION_SWITCHED:
+ if not stripe.api_key:
+ log.warn("Missing Stripe API Key.")
+ return None
+
+ if subscription.plan == plan:
+ log.warn("Subscription already on the plan: %s", plan)
+
+ raise EventException(
+ f"Same plan [{plan}] already exists for organization ID: {organization_id}"
+ )
+
+ if not subscription.subscription_id:
+ raise SwitchException(
+ f"Cannot switch plans without an existing subscription for organization ID: {organization_id}"
+ )
+
+ try:
+ _subscription = stripe.Subscription.retrieve(
+ id=subscription.subscription_id,
+ )
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                log.warning(
+                    "Failed to retrieve subscription from Stripe: %s", subscription
+                )
+
+ raise EventException(
+ "Could not switch plans. Please try again or contact support.",
+ ) from e
+
+ subscription.active = True
+ subscription.plan = plan
+
+ # await self.entitlements_service.enforce(
+ # organization_id=organization_id,
+ # plan=plan,
+ # force=force,
+ # )
+
+ stripe.Subscription.modify(
+ subscription.subscription_id,
+ items=[
+ {"id": item.id, "deleted": True}
+ for item in stripe.SubscriptionItem.list(
+ subscription=subscription.subscription_id,
+ ).data
+ ]
+ + list(AGENTA_PRICING[plan].values()),
+ )
+
+ subscription = await self.update(subscription=subscription)
+
+ elif subscription.plan != FREE_PLAN and event == Event.SUBSCRIPTION_CANCELLED:
+ subscription.active = True
+ subscription.plan = FREE_PLAN
+ subscription.subscription_id = None
+ subscription.anchor = anchor
+
+ # await self.entitlements_service.enforce(
+ # organization_id=organization_id,
+ # plan=FREE_PLAN,
+ # force=True,
+ # )
+
+ subscription = await self.update(subscription=subscription)
+
+ else:
+ log.warn("Invalid subscription event: %s ", subscription)
+
+ raise EventException(
+ f"Invalid subscription event {event} for organization ID: {organization_id}"
+ )
+
+ return subscription
diff --git a/api/ee/src/core/subscriptions/types.py b/api/ee/src/core/subscriptions/types.py
new file mode 100644
index 0000000000..1f55dbe386
--- /dev/null
+++ b/api/ee/src/core/subscriptions/types.py
@@ -0,0 +1,40 @@
+from typing import Optional
+
+from uuid import UUID
+from enum import Enum
+
+from pydantic import BaseModel
+
+
+class Plan(str, Enum):
+ CLOUD_V0_HOBBY = "cloud_v0_hobby"
+ CLOUD_V0_PRO = "cloud_v0_pro"
+ #
+ CLOUD_V0_HUMANITY_LABS = "cloud_v0_humanity_labs"
+ CLOUD_V0_X_LABS = "cloud_v0_x_labs"
+ #
+ CLOUD_V0_AGENTA_AI = "cloud_v0_agenta_ai"
+
+
+class Event(str, Enum):
+ SUBSCRIPTION_CREATED = "subscription_created"
+ SUBSCRIPTION_PAUSED = "subscription_paused"
+ SUBSCRIPTION_RESUMED = "subscription_resumed"
+ SUBSCRIPTION_SWITCHED = "subscription_switched"
+ SUBSCRIPTION_CANCELLED = "subscription_cancelled"
+
+
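+# `anchor` is the billing anchor day of the month (1-31), e.g. the day the
+# reverse trial ends; meters use it to decide which monthly period a usage
+# delta belongs to.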
+class SubscriptionDTO(BaseModel):
+ organization_id: UUID
+ customer_id: Optional[str] = None
+ subscription_id: Optional[str] = None
+ plan: Optional[Plan] = None
+ active: Optional[bool] = None
+ anchor: Optional[int] = None
+
+
+FREE_PLAN = Plan.CLOUD_V0_HOBBY  # TODO: move to environment configuration
+REVERSE_TRIAL_PLAN = Plan.CLOUD_V0_PRO  # TODO: move to environment configuration
+REVERSE_TRIAL_DAYS = 14  # TODO: move to environment configuration
diff --git a/api/ee/src/crons/meters.sh b/api/ee/src/crons/meters.sh
new file mode 100644
index 0000000000..c0f7d8c5ae
--- /dev/null
+++ b/api/ee/src/crons/meters.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -eu
+
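+# Cron runs with a minimal environment, so read AGENTA_AUTH_KEY from PID 1's
+# environment (the container entrypoint) instead of relying on inheritance.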
+AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2-)
+
+echo "--------------------------------------------------------"
+echo "[$(date)] meters.sh running from cron" >> /proc/1/fd/1
+
+# Make POST request, show status and response
+curl \
+ -s \
+ -w "\nHTTP_STATUS:%{http_code}\n" \
+ -X POST \
+ -H "Authorization: Access ${AGENTA_AUTH_KEY}" \
+ "http://api:8000/admin/billing/usage/report" || echo "❌ CURL failed"
+
+echo "[$(date)] meters.sh done" >> /proc/1/fd/1
\ No newline at end of file
diff --git a/api/ee/src/crons/meters.txt b/api/ee/src/crons/meters.txt
new file mode 100644
index 0000000000..f3acd78570
--- /dev/null
+++ b/api/ee/src/crons/meters.txt
@@ -0,0 +1,2 @@
+* * * * * root echo "cron test $(date)" >> /proc/1/fd/1 2>&1
+0 * * * * root sh /meters.sh >> /proc/1/fd/1 2>&1
diff --git a/api/ee/src/crons/queries.sh b/api/ee/src/crons/queries.sh
new file mode 100644
index 0000000000..b9e8c7a6e1
--- /dev/null
+++ b/api/ee/src/crons/queries.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -eu
+
+AGENTA_AUTH_KEY=$(tr '\0' '\n' < /proc/1/environ | grep ^AGENTA_AUTH_KEY= | cut -d= -f2-)
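+
+# TRIGGER_INTERVAL is read from the `*/N` minute field on line 2 of the cron
+# file; the current minute is rounded down to that interval so the API gets a
+# stable bucket boundary, e.g. with N=5 a run at 12:13 reports 12:10:00Z.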
+TRIGGER_INTERVAL=$(awk 'NR==2 {split($1, a, "/"); print (a[2] ? a[2] : 1)}' /etc/cron.d/queries-cron)
+MINUTE=$(date -u "+%M")
+MINUTE=${MINUTE#0} # strip at most one leading zero: "00" -> "0", "09" -> "9"
+ROUNDED_MINUTE=$(( (MINUTE / TRIGGER_INTERVAL) * TRIGGER_INTERVAL ))
+TRIGGER_DATETIME=$(date -u "+%Y-%m-%dT%H")
+TRIGGER_DATETIME="${TRIGGER_DATETIME}:$(printf "%02d" $ROUNDED_MINUTE):00Z"
+
+
+echo "--------------------------------------------------------"
+echo "[$(date)] queries.sh running from cron" >> /proc/1/fd/1
+
+# Make POST request, show status and response
+curl \
+ -s \
+ -w "\nHTTP_STATUS:%{http_code}\n" \
+ -X POST \
+ -H "Authorization: Access ${AGENTA_AUTH_KEY}" \
+ "http://api:8000/admin/evaluations/runs/refresh?trigger_interval=${TRIGGER_INTERVAL}&trigger_datetime=${TRIGGER_DATETIME}" || echo "❌ CURL failed"
+
+echo "[$(date)] queries.sh done" >> /proc/1/fd/1
\ No newline at end of file
diff --git a/api/ee/src/crons/queries.txt b/api/ee/src/crons/queries.txt
new file mode 100644
index 0000000000..586a61af8e
--- /dev/null
+++ b/api/ee/src/crons/queries.txt
@@ -0,0 +1,2 @@
+* * * * * root echo "cron test $(date)" >> /proc/1/fd/1 2>&1
+*/1 * * * * root sh /queries.sh >> /proc/1/fd/1 2>&1
diff --git a/api/ee/src/dbs/__init__.py b/api/ee/src/dbs/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/dbs/postgres/__init__.py b/api/ee/src/dbs/postgres/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/dbs/postgres/meters/__init__.py b/api/ee/src/dbs/postgres/meters/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/dbs/postgres/meters/dao.py b/api/ee/src/dbs/postgres/meters/dao.py
new file mode 100644
index 0000000000..5302329dc3
--- /dev/null
+++ b/api/ee/src/dbs/postgres/meters/dao.py
@@ -0,0 +1,290 @@
+from typing import Callable, Tuple, Optional
+from datetime import datetime, timezone
+
+from sqlalchemy import update, func, literal
+from sqlalchemy.future import select
+from sqlalchemy.orm import joinedload
+from sqlalchemy.dialects.postgresql import insert
+
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.dbs.postgres.shared.engine import engine
+
+from ee.src.core.entitlements.types import Quota
+from ee.src.core.meters.types import MeterDTO
+from ee.src.core.subscriptions.types import SubscriptionDTO
+from ee.src.core.meters.interfaces import MetersDAOInterface
+from ee.src.dbs.postgres.meters.dbes import MeterDBE
+
+
+log = get_module_logger(__name__)
+
+
+class MetersDAO(MetersDAOInterface):
+ def __init__(self):
+ pass
+
+ async def dump(self) -> list[MeterDTO]:
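+        # Select only meters whose value has advanced past the last synced
+        # watermark, i.e. usage not yet reported to billing; bump() moves
+        # `synced` forward once a report succeeds.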
+ async with engine.core_session() as session:
+ stmt = (
+ select(MeterDBE)
+ .filter(MeterDBE.synced != MeterDBE.value)
+ .options(joinedload(MeterDBE.subscription))
+ ) # NO RISK OF DEADLOCK
+
+ result = await session.execute(stmt)
+ meters = result.scalars().all()
+
+ return [
+ MeterDTO(
+ organization_id=meter.organization_id,
+ year=meter.year,
+ month=meter.month,
+ value=meter.value,
+ key=meter.key,
+ synced=meter.synced,
+ subscription=(
+ SubscriptionDTO(
+ organization_id=meter.subscription.organization_id,
+ customer_id=meter.subscription.customer_id,
+ subscription_id=meter.subscription.subscription_id,
+ plan=meter.subscription.plan,
+ active=meter.subscription.active,
+ anchor=meter.subscription.anchor,
+ )
+ if meter.subscription
+ else None
+ ),
+ )
+ for meter in meters
+ ]
+
+ async def bump(
+ self,
+ meters: list[MeterDTO],
+ ) -> None:
+ if not meters:
+ return
+
+ # Sort for consistent lock acquisition
+ sorted_meters = sorted(
+ meters,
+ key=lambda m: (
+ m.organization_id,
+ m.key,
+ m.year,
+ m.month,
+ ),
+ )
+
+ async with engine.core_session() as session:
+ for meter in sorted_meters:
+ stmt = (
+ update(MeterDBE)
+ .where(
+ MeterDBE.organization_id == meter.organization_id,
+ MeterDBE.key == meter.key,
+ MeterDBE.year == meter.year,
+ MeterDBE.month == meter.month,
+ )
+ .values(synced=meter.synced)
+ )
+
+ await session.execute(stmt)
+
+ await session.commit()
+
+ async def fetch(
+ self,
+ *,
+ organization_id: str,
+ ) -> list[MeterDTO]:
+ async with engine.core_session() as session:
+ stmt = select(MeterDBE).filter_by(
+ organization_id=organization_id,
+ ) # NO RISK OF DEADLOCK
+
+ result = await session.execute(stmt)
+ meters = result.scalars().all()
+
+ return [
+ MeterDTO(
+ organization_id=meter.organization_id,
+ key=meter.key,
+ year=meter.year,
+ month=meter.month,
+ value=meter.value,
+ synced=meter.synced,
+ )
+ for meter in meters
+ ]
+
+ async def check(
+ self,
+ *,
+ meter: MeterDTO,
+ quota: Quota,
+ anchor: Optional[int] = None,
+ ) -> Tuple[bool, MeterDTO]:
+ if quota.monthly:
+ now = datetime.now(timezone.utc)
+
+            if not anchor or now.day < anchor:
+                meter.year = now.year
+                meter.month = now.month
+            else:
+                # Past the anchor day: label the meter with the period that
+                # closes next month, rolling the year over after December.
+                meter.year = now.year + now.month // 12
+                meter.month = now.month % 12 + 1
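+            # Example with anchor=15: a call on Nov 20 is labeled month=12
+            # (same year), Nov 10 stays in month=11, and Dec 20 rolls over
+            # to month=1 of the next year.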
+
+ async with engine.core_session() as session:
+ stmt = select(MeterDBE).filter_by(
+ organization_id=meter.organization_id,
+ key=meter.key,
+ year=meter.year,
+ month=meter.month,
+ ) # NO RISK OF DEADLOCK
+
+ result = await session.execute(stmt)
+ meter_record = result.scalar_one_or_none()
+
+ current_value = meter_record.value if meter_record else 0
+
+ adjusted_value = current_value + (meter.delta or 0)
+ adjusted_value = adjusted_value if adjusted_value >= 0 else 0
+
+ if quota.limit is None:
+ allowed = True
+ else:
+ allowed = adjusted_value <= quota.limit
+
+ return (
+ allowed,
+ MeterDTO(
+ **meter.model_dump(exclude={"value", "synced"}),
+ value=current_value,
+ synced=meter_record.synced if meter_record else 0,
+ ),
+ )
+
+ async def adjust(
+ self,
+ *,
+ meter: MeterDTO,
+ quota: Quota,
+ anchor: Optional[int] = None,
+ ) -> Tuple[bool, MeterDTO, Callable]:
+ # 1. Normalize meter.year/month if monthly quota
+ if quota.monthly:
+ now = datetime.now(timezone.utc)
+
+ if not anchor:
+ meter.year = now.year
+ meter.month = now.month
+ elif now.day < anchor:
+ meter.year = now.year
+ meter.month = now.month
+            else:
+                # Same rollover as in check(): November maps to December of
+                # the same year, December to January of the next year.
+                meter.year = now.year + now.month // 12
+                meter.month = now.month % 12 + 1
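+        # The (year, month) label computed above is part of the upsert key
+        # below, so each anchored billing period gets its own meter row.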
+
+ # 2. Calculate proposed value (starting from 0)
+ desired_value = meter.value if meter.value is not None else (meter.delta or 0)
+ desired_value = max(desired_value, 0)
+
+ # 3. Block insert if quota exceeded
+ if quota.limit is not None and desired_value > quota.limit:
+ return (
+ False,
+ MeterDTO(
+ **meter.model_dump(exclude={"value", "synced"}),
+ value=0,
+ synced=0,
+ ),
+ lambda: None,
+ )
+
+ where_clauses = []
+
+ # Handle unlimited quota case
+ if quota.limit is None:
+ where_clauses.append(literal(True))
+
+ # Strict mode: use the adjusted value check
+ elif quota.strict:
+ if meter.delta is not None:
+ adjusted_expr = func.greatest(MeterDBE.value + meter.delta, 0)
+ elif meter.value is not None:
+ adjusted_expr = func.greatest(meter.value, 0)
+ else:
+ raise ValueError("Either delta or value must be set")
+
+ where_clauses.append(adjusted_expr <= quota.limit)
+
+ # Soft mode: just compare current value
+ else:
+ where_clauses.append(MeterDBE.value <= quota.limit)
+
+ # Now safely combine the conditions
+ where = None
+ for where_clause in where_clauses:
+ if where is None:
+ where = where_clause
+ else:
+ where = where | where_clause
+
+ # 4. Build SQL statement (atomic upsert)
+ async with engine.core_session() as session:
+ stmt = (
+ insert(MeterDBE)
+ .values(
+ organization_id=meter.organization_id,
+ key=meter.key,
+ year=meter.year,
+ month=meter.month,
+ value=desired_value,
+ synced=0,
+ )
+ .on_conflict_do_update(
+ index_elements=[
+ MeterDBE.organization_id,
+ MeterDBE.key,
+ MeterDBE.year,
+ MeterDBE.month,
+ ],
+ set_={
+ "value": func.greatest(
+ (
+ (MeterDBE.value + meter.delta)
+ if meter.delta is not None
+ else meter.value
+ ),
+ 0,
+ )
+ },
+ where=where,
+ )
+ )
+
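+            # Postgres skips ON CONFLICT DO UPDATE when the WHERE predicate
+            # is false, so rowcount == 0 below means the strict quota check
+            # rejected the adjustment.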
+ result = await session.execute(stmt)
+ await session.commit()
+
+ # 5. Check if update was applied (strict mode)
+ allowed = result.rowcount > 0
+
+ return (
+ allowed,
+ MeterDTO(
+ **meter.model_dump(exclude={"value", "synced"}),
+ value=desired_value, # not technically accurate in soft mode, but good enough
+ synced=0,
+ ),
+ lambda: None, # rollback not needed; no state was touched otherwise
+ )
diff --git a/api/ee/src/dbs/postgres/meters/dbas.py b/api/ee/src/dbs/postgres/meters/dbas.py
new file mode 100644
index 0000000000..450e517d28
--- /dev/null
+++ b/api/ee/src/dbs/postgres/meters/dbas.py
@@ -0,0 +1,29 @@
+from sqlalchemy import Column, Enum as SQLEnum, SmallInteger, BigInteger
+
+from ee.src.core.meters.types import Meters
+
+from oss.src.dbs.postgres.shared.dbas import OrganizationScopeDBA
+
+
+class PeriodDBA:
+ __abstract__ = True
+
+ year = Column(SmallInteger, nullable=False)
+ month = Column(SmallInteger, nullable=False)
+
+
+class MeterDBA(
+ OrganizationScopeDBA,
+ PeriodDBA,
+):
+ __abstract__ = True
+
+ key = Column(
+ SQLEnum(
+ Meters,
+ name="meters_type",
+ ),
+ nullable=False,
+ )
+ value = Column(BigInteger, nullable=False)
+ synced = Column(BigInteger, nullable=False)
diff --git a/api/ee/src/dbs/postgres/meters/dbes.py b/api/ee/src/dbs/postgres/meters/dbes.py
new file mode 100644
index 0000000000..f1353ba022
--- /dev/null
+++ b/api/ee/src/dbs/postgres/meters/dbes.py
@@ -0,0 +1,29 @@
+from sqlalchemy import PrimaryKeyConstraint, ForeignKeyConstraint, Index
+from sqlalchemy.orm import relationship
+
+from oss.src.dbs.postgres.shared.base import Base
+from ee.src.dbs.postgres.meters.dbas import MeterDBA
+
+
+class MeterDBE(Base, MeterDBA):
+ __tablename__ = "meters"
+
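+    # One row per (organization, key, year, month). `synced` records the last
+    # value reported to billing; the (synced, value) index backs the dump()
+    # scan for unreported usage.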
+ __table_args__ = (
+ PrimaryKeyConstraint(
+ "organization_id",
+ "key",
+ "year",
+ "month",
+ ),
+ ForeignKeyConstraint(
+ ["organization_id"],
+ ["subscriptions.organization_id"],
+ ),
+ Index(
+ "idx_synced_value",
+ "synced",
+ "value",
+ ),
+ )
+
+ subscription = relationship("SubscriptionDBE", back_populates="meters")
diff --git a/api/ee/src/dbs/postgres/shared/__init__.py b/api/ee/src/dbs/postgres/shared/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/dbs/postgres/subscriptions/__init__.py b/api/ee/src/dbs/postgres/subscriptions/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/dbs/postgres/subscriptions/dao.py b/api/ee/src/dbs/postgres/subscriptions/dao.py
new file mode 100644
index 0000000000..485af2dde0
--- /dev/null
+++ b/api/ee/src/dbs/postgres/subscriptions/dao.py
@@ -0,0 +1,84 @@
+from typing import Optional
+
+from sqlalchemy.future import select
+
+from ee.src.core.subscriptions.types import SubscriptionDTO
+from ee.src.core.subscriptions.interfaces import SubscriptionsDAOInterface
+
+from oss.src.dbs.postgres.shared.engine import engine
+from ee.src.dbs.postgres.subscriptions.dbes import SubscriptionDBE
+from ee.src.dbs.postgres.subscriptions.mappings import (
+ map_dbe_to_dto,
+ map_dto_to_dbe,
+)
+
+
+class SubscriptionsDAO(SubscriptionsDAOInterface):
+ def __init__(self):
+ pass
+
+ async def create(
+ self,
+ *,
+ subscription: SubscriptionDTO,
+ ) -> SubscriptionDTO:
+ async with engine.core_session() as session:
+ subscription_dbe = map_dto_to_dbe(subscription)
+
+ session.add(subscription_dbe)
+
+ await session.commit()
+
+ subscription_dto = map_dbe_to_dto(subscription_dbe)
+
+ return subscription_dto
+
+ async def read(
+ self,
+ *,
+ organization_id: str,
+ ) -> Optional[SubscriptionDTO]:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(SubscriptionDBE).where(
+ SubscriptionDBE.organization_id == organization_id,
+ )
+ )
+
+ subscription_dbe = result.scalars().one_or_none()
+
+ if not subscription_dbe:
+ return None
+
+ subscription_dto = map_dbe_to_dto(subscription_dbe)
+
+ return subscription_dto
+
+ async def update(
+ self,
+ *,
+ subscription: SubscriptionDTO,
+ ) -> Optional[SubscriptionDTO]:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(SubscriptionDBE).where(
+ SubscriptionDBE.organization_id == subscription.organization_id,
+ )
+ )
+
+ subscription_dbe = result.scalars().one_or_none()
+
+ if not subscription_dbe:
+ return None
+
+ subscription_dbe.customer_id = subscription.customer_id
+ subscription_dbe.subscription_id = subscription.subscription_id
+ subscription_dbe.plan = subscription.plan
+ subscription_dbe.active = subscription.active
+ subscription_dbe.anchor = subscription.anchor
+
+ await session.commit()
+
+ subscription_dto = map_dbe_to_dto(subscription_dbe)
+
+ return subscription_dto
diff --git a/api/ee/src/dbs/postgres/subscriptions/dbas.py b/api/ee/src/dbs/postgres/subscriptions/dbas.py
new file mode 100644
index 0000000000..7810907030
--- /dev/null
+++ b/api/ee/src/dbs/postgres/subscriptions/dbas.py
@@ -0,0 +1,19 @@
+from sqlalchemy import Column, String, Boolean, SmallInteger
+
+from oss.src.dbs.postgres.shared.dbas import OrganizationScopeDBA
+
+
+class StripeDBA:
+ customer_id = Column(String, nullable=True)
+ subscription_id = Column(String, nullable=True)
+
+
+class SubscriptionDBA(
+ OrganizationScopeDBA,
+ StripeDBA,
+):
+ __abstract__ = True
+
+ plan = Column(String, nullable=False)
+ active = Column(Boolean, nullable=False)
+ anchor = Column(SmallInteger, nullable=True)
diff --git a/api/ee/src/dbs/postgres/subscriptions/dbes.py b/api/ee/src/dbs/postgres/subscriptions/dbes.py
new file mode 100644
index 0000000000..b548dd1a56
--- /dev/null
+++ b/api/ee/src/dbs/postgres/subscriptions/dbes.py
@@ -0,0 +1,24 @@
+from sqlalchemy import PrimaryKeyConstraint
+from sqlalchemy.orm import relationship
+
+from oss.src.dbs.postgres.shared.base import Base
+from ee.src.dbs.postgres.subscriptions.dbas import SubscriptionDBA
+
+
+class SubscriptionDBE(Base, SubscriptionDBA):
+ __tablename__ = "subscriptions"
+
+ __table_args__ = (
+ PrimaryKeyConstraint(
+ "organization_id",
+ ),
+ )
+
+ meters = relationship("MeterDBE", back_populates="subscription")
diff --git a/api/ee/src/dbs/postgres/subscriptions/mappings.py b/api/ee/src/dbs/postgres/subscriptions/mappings.py
new file mode 100644
index 0000000000..b8d0b4e8b5
--- /dev/null
+++ b/api/ee/src/dbs/postgres/subscriptions/mappings.py
@@ -0,0 +1,26 @@
+from ee.src.core.subscriptions.types import SubscriptionDTO
+from ee.src.dbs.postgres.subscriptions.dbes import SubscriptionDBE
+
+from ee.src.core.subscriptions.types import Plan
+
+
+def map_dbe_to_dto(subscription_dbe: SubscriptionDBE) -> SubscriptionDTO:
+ return SubscriptionDTO(
+ organization_id=subscription_dbe.organization_id,
+ customer_id=subscription_dbe.customer_id,
+ subscription_id=subscription_dbe.subscription_id,
+ plan=Plan(subscription_dbe.plan),
+ active=subscription_dbe.active,
+ anchor=subscription_dbe.anchor,
+ )
+
+
+def map_dto_to_dbe(subscription_dto: SubscriptionDTO) -> SubscriptionDBE:
+ return SubscriptionDBE(
+ organization_id=subscription_dto.organization_id,
+ customer_id=subscription_dto.customer_id,
+ subscription_id=subscription_dto.subscription_id,
+ plan=subscription_dto.plan.value,
+ active=subscription_dto.active or False,
+ anchor=subscription_dto.anchor,
+ )
diff --git a/api/ee/src/main.py b/api/ee/src/main.py
new file mode 100644
index 0000000000..86d8ecf618
--- /dev/null
+++ b/api/ee/src/main.py
@@ -0,0 +1,123 @@
+from fastapi import FastAPI
+
+from oss.src.utils.logging import get_module_logger
+
+from ee.src.routers import (
+ workspace_router,
+ organization_router,
+ evaluation_router,
+ human_evaluation_router,
+)
+
+from ee.src.dbs.postgres.meters.dao import MetersDAO
+from ee.src.dbs.postgres.subscriptions.dao import SubscriptionsDAO
+
+from ee.src.core.meters.service import MetersService
+from ee.src.core.subscriptions.service import SubscriptionsService
+
+from ee.src.apis.fastapi.billing.router import SubscriptionsRouter
+
+# DBS --------------------------------------------------------------------------
+
+meters_dao = MetersDAO()
+
+subscriptions_dao = SubscriptionsDAO()
+
+# CORE -------------------------------------------------------------------------
+
+meters_service = MetersService(
+ meters_dao=meters_dao,
+)
+
+subscription_service = SubscriptionsService(
+ subscriptions_dao=subscriptions_dao,
+ meters_service=meters_service,
+)
+
+# APIS -------------------------------------------------------------------------
+
+subscriptions_router = SubscriptionsRouter(
+ subscription_service=subscription_service,
+)
+
+
+log = get_module_logger(__name__)
+
+
+def extend_main(app: FastAPI):
+ # ROUTES -------------------------------------------------------------------
+
+ app.include_router(
+ router=subscriptions_router.router,
+ prefix="/billing",
+ tags=["Billing"],
+ )
+
+ app.include_router(
+ router=subscriptions_router.admin_router,
+ prefix="/admin/billing",
+ tags=["Admin", "Billing"],
+ )
+
+ # ROUTES (more) ------------------------------------------------------------
+
+ app.include_router(
+ organization_router.router,
+ prefix="/organizations",
+ )
+
+ app.include_router(
+ workspace_router.router,
+ prefix="/workspaces",
+ )
+
+ app.include_router(
+ evaluation_router.router,
+ prefix="/evaluations",
+ tags=["Evaluations"],
+ )
+
+ app.include_router(
+ human_evaluation_router.router,
+ prefix="/human-evaluations",
+ tags=["Human-Evaluations"],
+ )
+
+ # --------------------------------------------------------------------------
+
+ return app
+
+
+def load_tasks():
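+    # Importing these modules registers their tasks as a side effect.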
+ import ee.src.tasks.evaluations.live
+ import ee.src.tasks.evaluations.legacy
+ import ee.src.tasks.evaluations.batch
+
+
+def extend_app_schema(app: FastAPI):
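+    # FastAPI caches the schema on the first app.openapi() call, so these
+    # in-place mutations persist for all later requests.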
+ app.openapi()["info"]["title"] = "Agenta API"
+ app.openapi()["info"]["description"] = "Agenta API"
+ app.openapi()["info"]["contact"] = {
+ "name": "Agenta",
+ "url": "https://agenta.ai",
+ "email": "team@agenta.ai",
+ }
+ app.openapi()["components"]["securitySchemes"] = {
+ "APIKeyHeader": {
+ "type": "apiKey",
+ "name": "Authorization",
+ "in": "header",
+ }
+ }
+ app.openapi()["security"] = [
+ {
+ "APIKeyHeader": [],
+ },
+ ]
+ app.openapi()["servers"] = [
+ {
+ "url": "https://cloud.agenta.ai/api",
+ },
+ ]
+
+ return app
diff --git a/api/ee/src/models/api/api_models.py b/api/ee/src/models/api/api_models.py
new file mode 100644
index 0000000000..f15c8ffacc
--- /dev/null
+++ b/api/ee/src/models/api/api_models.py
@@ -0,0 +1,72 @@
+from typing import Optional, List
+from pydantic import BaseModel, Field
+from datetime import datetime, timezone
+
+from oss.src.models.api.api_models import (
+ CreateApp,
+ AppVariant,
+ Environment,
+ AppVariantResponse,
+ AppVariantOutputExtended,
+ EnvironmentOutput,
+ EnvironmentRevision,
+ EnvironmentOutputExtended,
+)
+
+
+class TimestampModel(BaseModel):
+    # default_factory so timestamps are taken per instance, not frozen at import
+    created_at: str = Field(default_factory=lambda: str(datetime.now(timezone.utc)))
+    updated_at: str = Field(default_factory=lambda: str(datetime.now(timezone.utc)))
+
+
+class InviteRequest(BaseModel):
+ email: str
+ roles: List[str]
+
+
+class ResendInviteRequest(BaseModel):
+ email: str
+
+
+class InviteToken(BaseModel):
+ token: str
+
+
+class CreateApp_(CreateApp):
+ organization_id: Optional[str] = None
+ workspace_id: Optional[str] = None
+
+
+class AppVariant_(AppVariant):
+ organization_id: Optional[str] = None
+ workspace_id: Optional[str] = None
+
+
+class Environment_(Environment):
+ organization_id: Optional[str] = None
+ workspace_id: Optional[str] = None
+
+
+class AppVariantResponse_(AppVariantResponse):
+ organization_id: Optional[str] = None
+ workspace_id: Optional[str] = None
+
+
+class AppVariantOutputExtended_(AppVariantOutputExtended):
+ organization_id: Optional[str] = None
+ workspace_id: Optional[str] = None
+
+
+class EnvironmentOutput_(EnvironmentOutput):
+ organization_id: Optional[str] = None
+ workspace_id: Optional[str] = None
+
+
+class EnvironmentRevision_(EnvironmentRevision):
+ organization_id: Optional[str] = None
+ workspace_id: Optional[str] = None
+
+
+class EnvironmentOutputExtended_(EnvironmentOutputExtended):
+ organization_id: Optional[str] = None
+ workspace_id: Optional[str] = None
diff --git a/api/ee/src/models/api/organization_models.py b/api/ee/src/models/api/organization_models.py
new file mode 100644
index 0000000000..1ce05a65fc
--- /dev/null
+++ b/api/ee/src/models/api/organization_models.py
@@ -0,0 +1,33 @@
+from typing import Optional, List
+
+from pydantic import BaseModel, Field
+
+
+class Organization(BaseModel):
+ id: str
+ name: str
+ description: str
+ type: Optional[str] = None
+ owner: str
+ workspaces: List[str] = Field(default_factory=list)
+ members: List[str] = Field(default_factory=list)
+ invitations: List = Field(default_factory=list)
+ is_paying: Optional[bool] = None
+
+
+class CreateOrganization(BaseModel):
+ name: str
+ owner: str
+ description: Optional[str] = None
+ type: Optional[str] = None
+
+
+class OrganizationUpdate(BaseModel):
+ name: Optional[str] = None
+ description: Optional[str] = None
+ updated_at: Optional[str] = None
+
+
+class OrganizationOutput(BaseModel):
+ id: str
+ name: str
diff --git a/api/ee/src/models/api/user_models.py b/api/ee/src/models/api/user_models.py
new file mode 100644
index 0000000000..8a0d702ad8
--- /dev/null
+++ b/api/ee/src/models/api/user_models.py
@@ -0,0 +1,9 @@
+from typing import List
+
+from pydantic import Field
+
+from oss.src.models.api.user_models import User
+
+
+class User_(User):
+ organizations: List[str] = Field(default_factory=list)
diff --git a/api/ee/src/models/api/workspace_models.py b/api/ee/src/models/api/workspace_models.py
new file mode 100644
index 0000000000..56218eb38a
--- /dev/null
+++ b/api/ee/src/models/api/workspace_models.py
@@ -0,0 +1,58 @@
+from datetime import datetime
+from typing import Optional, List, Dict
+
+from pydantic import BaseModel
+
+from ee.src.models.api.api_models import TimestampModel
+from ee.src.models.shared_models import WorkspaceRole, Permission
+
+
+class WorkspacePermission(BaseModel):
+ role_name: WorkspaceRole
+ role_description: Optional[str] = None
+ permissions: Optional[List[Permission]] = None
+
+
+class WorkspaceMember(BaseModel):
+ user_id: str
+ roles: List[WorkspacePermission]
+
+
+class WorkspaceMemberResponse(BaseModel):
+ user: Dict
+ roles: List[WorkspacePermission]
+
+
+class Workspace(BaseModel):
+ id: Optional[str] = None
+ name: str
+ description: Optional[str] = None
+    type: Optional[str] = None
+ members: Optional[List[WorkspaceMember]] = None
+
+
+class WorkspaceResponse(TimestampModel):
+ id: str
+ name: str
+ description: Optional[str] = None
+    type: Optional[str] = None
+ organization: str
+ members: Optional[List[WorkspaceMemberResponse]] = None
+
+
+class CreateWorkspace(BaseModel):
+ name: str
+ description: Optional[str] = None
+ type: Optional[str] = None
+
+
+class UserRole(BaseModel):
+ email: str
+ organization_id: str
+ role: Optional[str] = None
+
+
+class UpdateWorkspace(BaseModel):
+ name: Optional[str] = None
+ description: Optional[str] = None
+ updated_at: Optional[datetime] = None
diff --git a/api/ee/src/models/db_models.py b/api/ee/src/models/db_models.py
new file mode 100644
index 0000000000..f09b9e0324
--- /dev/null
+++ b/api/ee/src/models/db_models.py
@@ -0,0 +1,518 @@
+from typing import Optional, List, Sequence
+from datetime import datetime, timezone
+
+import uuid_utils.compat as uuid
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy.dialects.postgresql import UUID, JSONB
+from sqlalchemy import Column, String, DateTime, Boolean, ForeignKey, Integer
+
+from ee.src.models.shared_models import (
+ WorkspaceRole,
+ Permission,
+)
+from oss.src.models.db_models import (
+ ProjectDB as OssProjectDB,
+ WorkspaceDB as OssWorkspaceDB,
+ OrganizationDB as OssOrganizationDB,
+ DeploymentDB as OssDeploymentDB,
+ # dependency
+ CASCADE_ALL_DELETE,
+ mutable_json_type,
+)
+from oss.src.dbs.postgres.shared.base import Base
+from oss.src.dbs.postgres.observability.dbes import NodesDBE
+
+
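+# EE models extend the OSS mapped classes in place, layering extra columns
+# (e.g. is_paying) and membership relationships onto the same tables.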
+class OrganizationDB(OssOrganizationDB):
+ is_paying = Column(Boolean, nullable=True, default=False)
+
+ organization_members = relationship(
+ "OrganizationMemberDB", back_populates="organization"
+ )
+ project = relationship(
+ "ee.src.models.db_models.ProjectDB",
+ back_populates="organization",
+ overlaps="organization",
+ )
+
+
+class WorkspaceDB(OssWorkspaceDB):
+ members = relationship("WorkspaceMemberDB", back_populates="workspace")
+ projects = relationship(
+ "ee.src.models.db_models.ProjectDB",
+ cascade="all, delete-orphan",
+ back_populates="workspace",
+ overlaps="workspace",
+ )
+ organization = relationship(
+ "ee.src.models.db_models.OrganizationDB", back_populates="workspaces_relation"
+ )
+
+ def get_member_role(self, user_id: str) -> Optional[str]:
+ member: Optional[WorkspaceMemberDB] = next(
+ (member for member in self.members if str(member.user_id) == user_id),
+ None,
+ )
+ return member.role if member else None # type: ignore
+
+ def get_member_role_name(self, user_id: str) -> Optional[str]:
+ role = self.get_member_role(user_id)
+ return role
+
+ def get_all_members(self) -> List[str]:
+ return [str(member.user_id) for member in self.members]
+
+ def get_member_with_roles(self, user_id: str) -> Optional["WorkspaceMemberDB"]:
+ return next(
+ (member for member in self.members if str(member.user_id) == user_id),
+ None,
+ )
+
+ def get_member_permissions(self, user_id: str) -> List[Permission]:
+ user_role = self.get_member_role(user_id)
+ if user_role:
+ return Permission.default_permissions(user_role)
+ return []
+
+ def has_permission(self, user_id: str, permission: Permission) -> bool:
+ user_role = self.get_member_role(user_id)
+ if user_role and permission in Permission.default_permissions(user_role):
+ return True
+ return False
+
+ def has_role(self, user_id: str, role_to_check: WorkspaceRole) -> bool:
+ user_role = self.get_member_role(user_id)
+ if user_role:
+ return user_role == role_to_check
+ return False
+
+ def is_owner(self, user_id: str) -> bool:
+ return any(
+ str(member.user_id) == user_id
+ and WorkspaceRole.OWNER == self.get_member_role_name(user_id)
+ for member in self.members
+ )
+
+
+class ProjectDB(OssProjectDB):
+ workspace = relationship(
+ "ee.src.models.db_models.WorkspaceDB",
+ back_populates="projects",
+ overlaps="projects",
+ )
+ organization = relationship(
+ "ee.src.models.db_models.OrganizationDB",
+ back_populates="project",
+ )
+ project_members = relationship(
+ "ProjectMemberDB", cascade="all, delete-orphan", back_populates="project"
+ )
+ invitations = relationship(
+ "InvitationDB", cascade="all, delete-orphan", back_populates="project"
+ )
+
+ def get_member_role(
+ self, user_id: str, members: Sequence["ProjectMemberDB"]
+ ) -> Optional[str]:
+ member: Optional["ProjectMemberDB"] = next(
+ (member for member in members if str(member.user_id) == user_id),
+ None,
+ )
+ return member.role if member else None # type: ignore
+
+ def get_member_role_name(
+ self, user_id: str, members: Sequence["ProjectMemberDB"]
+ ) -> Optional[str]:
+ role = self.get_member_role(user_id=user_id, members=members)
+ return role
+
+ def get_all_members(self) -> List[str]:
+ return [str(member.user_id) for member in self.project_members]
+
+ def get_member_with_roles(self, user_id: str) -> Optional["ProjectMemberDB"]:
+ return next(
+ (
+ member
+ for member in self.project_members
+ if str(member.user_id) == user_id
+ ),
+ None,
+ )
+
+ def get_member_permissions(
+ self, user_id: str, members: Sequence["ProjectMemberDB"]
+ ) -> List[Permission]:
+ user_role = self.get_member_role(user_id, members)
+ if user_role:
+ return Permission.default_permissions(user_role)
+ return []
+
+ def has_permission(
+ self, user_id: str, permission: Permission, members: Sequence["ProjectMemberDB"]
+ ) -> bool:
+ user_role = self.get_member_role(user_id, members)
+ if user_role and permission in Permission.default_permissions(user_role):
+ return True
+ return False
+
+ def has_role(
+ self,
+ user_id: str,
+ role_to_check: WorkspaceRole,
+ members: Sequence["ProjectMemberDB"],
+ ) -> bool:
+ user_role = self.get_member_role(user_id, members)
+ if user_role:
+ return user_role == role_to_check
+ return False
+
+ def is_owner(self, user_id: str, members: Sequence["ProjectMemberDB"]) -> bool:
+ return any(
+ str(member.user_id) == user_id
+ and WorkspaceRole.OWNER == self.get_member_role_name(user_id, members)
+ for member in members
+ )
+
+
+class WorkspaceMemberDB(Base):
+ __tablename__ = "workspace_members"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
+ workspace_id = Column(UUID(as_uuid=True), ForeignKey("workspaces.id"))
+ role = Column(String, default="viewer")
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+
+ user = relationship(
+ "UserDB", backref=backref("workspace_memberships", lazy="dynamic")
+ )
+ workspace = relationship(
+ "ee.src.models.db_models.WorkspaceDB", back_populates="members"
+ )
+
+
+class OrganizationMemberDB(Base):
+ __tablename__ = "organization_members"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
+ organization_id = Column(UUID(as_uuid=True), ForeignKey("organizations.id"))
+
+ user = relationship(
+ "UserDB", backref=backref("organization_members", lazy="dynamic")
+ )
+ organization = relationship(
+ "ee.src.models.db_models.OrganizationDB", back_populates="organization_members"
+ )
+
+
+class ProjectMemberDB(Base):
+ __tablename__ = "project_members"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
+ project_id = Column(UUID(as_uuid=True), ForeignKey("projects.id"))
+ role = Column(String, default="viewer")
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ is_demo = Column(Boolean, nullable=True)
+
+ user = relationship("UserDB")
+ project = relationship("ee.src.models.db_models.ProjectDB")
+
+
+class DeploymentDB(OssDeploymentDB):
+ pass
+
+
+class HumanEvaluationVariantDB(Base):
+ __tablename__ = "human_evaluation_variants"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ human_evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
+ variant_revision_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
+ )
+
+ variant = relationship("AppVariantDB", backref="evaluation_variant")
+ variant_revision = relationship(
+ "AppVariantRevisionsDB", backref="evaluation_variant_revision"
+ )
+
+
+class HumanEvaluationDB(Base):
+ __tablename__ = "human_evaluations"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ status = Column(String)
+ evaluation_type = Column(String)
+ testset_id = Column(UUID(as_uuid=True), ForeignKey("testsets.id"))
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+
+ testset = relationship("TestSetDB")
+ evaluation_variant = relationship(
+ "HumanEvaluationVariantDB",
+ cascade=CASCADE_ALL_DELETE,
+ backref="human_evaluation",
+ )
+ evaluation_scenario = relationship(
+ "HumanEvaluationScenarioDB",
+ cascade=CASCADE_ALL_DELETE,
+ backref="evaluation_scenario",
+ )
+
+
+class HumanEvaluationScenarioDB(Base):
+ __tablename__ = "human_evaluations_scenarios"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("human_evaluations.id", ondelete="CASCADE")
+ )
+ inputs = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of HumanEvaluationScenarioInput
+ outputs = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of HumanEvaluationScenarioOutput
+ vote = Column(String)
+ score = Column(String)
+ correct_answer = Column(String)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ is_pinned = Column(Boolean)
+ note = Column(String)
+
+
+class EvaluationAggregatedResultDB(Base):
+ __tablename__ = "auto_evaluation_aggregated_results"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("auto_evaluations.id", ondelete="CASCADE")
+ )
+ evaluator_config_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"),
+ )
+ result = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
+
+ evaluator_config = relationship("EvaluatorConfigDB", backref="evaluator_config")
+
+
+class EvaluationScenarioResultDB(Base):
+ __tablename__ = "auto_evaluation_scenario_results"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ evaluation_scenario_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("auto_evaluation_scenarios.id", ondelete="CASCADE"),
+ )
+ evaluator_config_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"),
+ )
+ result = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
+
+
+class EvaluationDB(Base):
+ __tablename__ = "auto_evaluations"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ status = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
+ testset_id = Column(
+ UUID(as_uuid=True), ForeignKey("testsets.id", ondelete="SET NULL")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
+ variant_revision_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variant_revisions.id", ondelete="SET NULL")
+ )
+ average_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
+ total_cost = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
+ average_latency = Column(mutable_json_type(dbtype=JSONB, nested=True)) # Result
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+
+ project = relationship("ee.src.models.db_models.ProjectDB")
+ testset = relationship("TestSetDB")
+ variant = relationship("AppVariantDB")
+ variant_revision = relationship("AppVariantRevisionsDB")
+ aggregated_results = relationship(
+ "EvaluationAggregatedResultDB",
+ cascade=CASCADE_ALL_DELETE,
+ backref="evaluation",
+ )
+ evaluation_scenarios = relationship(
+ "EvaluationScenarioDB", cascade=CASCADE_ALL_DELETE, backref="evaluation"
+ )
+ evaluator_configs = relationship(
+ "EvaluationEvaluatorConfigDB",
+ cascade=CASCADE_ALL_DELETE,
+ backref="evaluation",
+ )
+
+
+class EvaluationEvaluatorConfigDB(Base):
+ __tablename__ = "auto_evaluation_evaluator_configs"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ evaluation_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("auto_evaluations.id", ondelete="CASCADE"),
+ primary_key=True,
+ )
+ evaluator_config_id = Column(
+ UUID(as_uuid=True),
+ ForeignKey("auto_evaluator_configs.id", ondelete="SET NULL"),
+ primary_key=True,
+ )
+
+
+class EvaluationScenarioDB(Base):
+ __tablename__ = "auto_evaluation_scenarios"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("auto_evaluations.id", ondelete="CASCADE")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
+ inputs = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of EvaluationScenarioInput
+ outputs = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of EvaluationScenarioOutput
+ correct_answers = Column(
+ mutable_json_type(dbtype=JSONB, nested=True)
+ ) # List of CorrectAnswer
+ is_pinned = Column(Boolean)
+ note = Column(String)
+ latency = Column(Integer)
+ cost = Column(Integer)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+
+ project = relationship("ee.src.models.db_models.ProjectDB")
+ variant = relationship("AppVariantDB")
+ results = relationship(
+ "EvaluationScenarioResultDB",
+ cascade=CASCADE_ALL_DELETE,
+ backref="evaluation_scenario",
+ )
diff --git a/api/ee/src/models/extended/deprecated_models.py b/api/ee/src/models/extended/deprecated_models.py
new file mode 100644
index 0000000000..c68a07e851
--- /dev/null
+++ b/api/ee/src/models/extended/deprecated_models.py
@@ -0,0 +1,101 @@
+from datetime import datetime, timezone
+
+import uuid_utils.compat as uuid
+
+from sqlalchemy.dialects.postgresql import JSONB, UUID
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import Column, String, DateTime, ForeignKey, Boolean, Integer
+
+
+DeprecatedBase = declarative_base()
+
+
+class DeprecatedAppDB(DeprecatedBase):
+ __tablename__ = "app_db"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ app_name = Column(String)
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
+ modified_by_id = Column(UUID(as_uuid=True), ForeignKey("users.id"), nullable=True)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+
+
+class DeprecatedAPIKeyDB(DeprecatedBase):
+ __tablename__ = "api_keys"
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ prefix = Column(String)
+ hashed_key = Column(String)
+ user_id = Column(String, nullable=True)
+ workspace_id = Column(String, nullable=True)
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE"), nullable=True
+ )
+ created_by_id = Column(
+ UUID(as_uuid=True), ForeignKey("users.id", ondelete="SET NULL"), nullable=True
+ )
+ rate_limit = Column(Integer, default=0)
+ hidden = Column(Boolean, default=False)
+ expiration_date = Column(DateTime(timezone=True), nullable=True)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+ updated_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
+
+
+class UserOrganizationDB(DeprecatedBase):
+ __tablename__ = "user_organizations"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ user_id = Column(UUID(as_uuid=True), ForeignKey("users.id"))
+ organization_id = Column(UUID(as_uuid=True), ForeignKey("organizations.id"))
+
+
+class OldInvitationDB(DeprecatedBase):
+ __tablename__ = "invitations"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ token = Column(String, unique=True, nullable=False)
+ email = Column(String, nullable=False)
+ organization_id = Column(String, nullable=False)
+ used = Column(Boolean, default=False)
+ workspace_id = Column(String, nullable=False)
+ workspace_roles = Column(JSONB, nullable=True)
+ expiration_date = Column(DateTime(timezone=True), nullable=True)
+ created_at = Column(
+ DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
+ )
diff --git a/api/ee/src/models/extended/deprecated_transfer_models.py b/api/ee/src/models/extended/deprecated_transfer_models.py
new file mode 100644
index 0000000000..3657dddacd
--- /dev/null
+++ b/api/ee/src/models/extended/deprecated_transfer_models.py
@@ -0,0 +1,347 @@
+import uuid_utils.compat as uuid
+
+from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import Column, Boolean, ForeignKey
+
+
+DeprecatedBase = declarative_base()
+
+
+class WorkspaceDB(DeprecatedBase):
+ __tablename__ = "workspaces"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+
+
+class OrganizationDB(DeprecatedBase):
+ __tablename__ = "organizations"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+
+
+class ProjectDB(DeprecatedBase):
+ __tablename__ = "projects"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ is_default = Column(Boolean, default=False)
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class AppDB(DeprecatedBase):
+ __tablename__ = "app_db"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class AppVariantDB(DeprecatedBase):
+ __tablename__ = "app_variants"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class AppVariantRevisionsDB(DeprecatedBase):
+ __tablename__ = "app_variant_revisions"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="CASCADE")
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+
+
+class VariantBaseDB(DeprecatedBase):
+ __tablename__ = "bases"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class DeploymentDB(DeprecatedBase):
+ __tablename__ = "deployments"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class AppEnvironmentDB(DeprecatedBase):
+ __tablename__ = "environments"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class AppEnvironmentRevisionDB(DeprecatedBase):
+ __tablename__ = "environments_revisions"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class EvaluationScenarioDB(DeprecatedBase):
+ __tablename__ = "evaluation_scenarios"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ evaluation_id = Column(
+ UUID(as_uuid=True), ForeignKey("evaluations.id", ondelete="CASCADE")
+ )
+ variant_id = Column(
+ UUID(as_uuid=True), ForeignKey("app_variants.id", ondelete="SET NULL")
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class EvaluationDB(DeprecatedBase):
+ __tablename__ = "evaluations"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class EvaluatorConfigDB(DeprecatedBase):
+ __tablename__ = "evaluators_configs"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+
+ app_id = Column(UUID(as_uuid=True), ForeignKey("app_db.id", ondelete="CASCADE"))
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class HumanEvaluationDB(DeprecatedBase):
+ __tablename__ = "human_evaluations"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class HumanEvaluationScenarioDB(DeprecatedBase):
+ __tablename__ = "human_evaluations_scenarios"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
+
+
+class TestSetDB(DeprecatedBase):
+ __tablename__ = "testsets"
+ __table_args__ = {"extend_existing": True}
+
+ id = Column(
+ UUID(as_uuid=True),
+ primary_key=True,
+ default=uuid.uuid7,
+ unique=True,
+ nullable=False,
+ )
+ project_id = Column(
+ UUID(as_uuid=True), ForeignKey("projects.id", ondelete="CASCADE")
+ )
+ workspace_id = Column(
+ UUID(as_uuid=True), ForeignKey("workspaces.id", ondelete="SET NULL")
+ )
+ organization_id = Column(
+ UUID(as_uuid=True), ForeignKey("organizations.id", ondelete="SET NULL")
+ )
diff --git a/api/ee/src/models/shared_models.py b/api/ee/src/models/shared_models.py
new file mode 100644
index 0000000000..4f7ed234da
--- /dev/null
+++ b/api/ee/src/models/shared_models.py
@@ -0,0 +1,200 @@
+from enum import Enum
+from typing import List
+
+from pydantic import BaseModel, Field
+
+
+class WorkspaceRole(str, Enum):
+ OWNER = "owner"
+ VIEWER = "viewer"
+ EDITOR = "editor"
+ EVALUATOR = "evaluator"
+ WORKSPACE_ADMIN = "workspace_admin"
+ DEPLOYMENT_MANAGER = "deployment_manager"
+
+ @classmethod
+ def is_valid_role(cls, role: str) -> bool:
+        return role.upper() in WorkspaceRole.__members__
+
+ @classmethod
+ def get_description(cls, role):
+ descriptions = {
+ cls.OWNER: "Can fully manage the workspace, including adding and removing members.",
+ cls.VIEWER: "Can view the workspace content but cannot make changes.",
+ cls.EDITOR: "Can edit workspace content, but cannot manage members or roles.",
+ cls.EVALUATOR: "Can evaluate models and provide feedback within the workspace.",
+ cls.WORKSPACE_ADMIN: "Can manage workspace settings and members but cannot delete the workspace.",
+ cls.DEPLOYMENT_MANAGER: "Can manage model deployments within the workspace.",
+ }
+        return descriptions.get(role, "Description not available; role not found.")
+
+
+class Permission(str, Enum):
+ # general
+ READ_SYSTEM = "read_system"
+
+ # App and variants
+ VIEW_APPLICATIONS = "view_applications"
+ EDIT_APPLICATIONS = "edit_application"
+
+ CREATE_APP_VARIANT = "create_app_variant"
+ DELETE_APP_VARIANT = "delete_app_variant"
+
+ MODIFY_VARIANT_CONFIGURATIONS = "modify_variant_configurations"
+    EDIT_APPLICATIONS_VARIANT = "edit_application_variant"
+
+ # Service
+ RUN_SERVICE = "run_service"
+
+ # Vault Secret
+ CREATE_SECRET = "create_secret"
+ VIEW_SECRET = "view_secret"
+ UPDATE_SECRET = "update_secret"
+ DELETE_SECRET = "delete_secret"
+
+ # App environment deployment
+ VIEW_APP_ENVIRONMENT_DEPLOYMENT = "view_app_environment_deployment"
+ EDIT_APP_ENVIRONMENT_DEPLOYMENT = "edit_app_environment_deployment"
+ CREATE_APP_ENVIRONMENT_DEPLOYMENT = "create_app_environment_deployment"
+
+ # Testset
+ VIEW_TESTSET = "view_testset"
+ EDIT_TESTSET = "edit_testset"
+ CREATE_TESTSET = "create_testset"
+ DELETE_TESTSET = "delete_testset"
+
+ # Evaluation
+ VIEW_EVALUATION = "view_evaluation"
+ RUN_EVALUATIONS = "run_evaluations"
+ EDIT_EVALUATION = "edit_evaluation"
+ CREATE_EVALUATION = "create_evaluation"
+ DELETE_EVALUATION = "delete_evaluation"
+
+ # Deployment
+ DEPLOY_APPLICATION = "deploy_application"
+
+ # Workspace
+ VIEW_WORKSPACE = "view_workspace"
+ EDIT_WORKSPACE = "edit_workspace"
+ CREATE_WORKSPACE = "create_workspace"
+ DELETE_WORKSPACE = "delete_workspace"
+ MODIFY_USER_ROLES = "modify_user_roles"
+ ADD_USER_TO_WORKSPACE = "add_new_user_to_workspace"
+
+ # Organization
+ EDIT_ORGANIZATION = "edit_organization"
+ DELETE_ORGANIZATION = "delete_organization"
+ ADD_USER_TO_ORGANIZATION = "add_new_user_to_organization"
+
+ # User Profile
+ RESET_PASSWORD = "reset_password"
+
+ # Billing (Plans, Subscriptions, Usage, etc)
+ VIEW_BILLING = "view_billing"
+ EDIT_BILLING = "edit_billing"
+
+ # Workflows
+ VIEW_WORKFLOWS = "view_workflows"
+ EDIT_WORKFLOWS = "edit_workflows"
+ RUN_WORKFLOWS = "run_workflows"
+
+ # Evaluators
+ VIEW_EVALUATORS = "view_evaluators"
+ EDIT_EVALUATORS = "edit_evaluators"
+
+ # Queries
+ VIEW_QUERIES = "view_queries"
+ EDIT_QUERIES = "edit_queries"
+
+ # Testsets
+ VIEW_TESTSETS = "view_testsets"
+ EDIT_TESTSETS = "edit_testsets"
+
+ # Annotations
+ VIEW_ANNOTATIONS = "view_annotations"
+ EDIT_ANNOTATIONS = "edit_annotations"
+
+ # Invocations
+ VIEW_INVOCATIONS = "view_invocations"
+ EDIT_INVOCATIONS = "edit_invocations"
+
+ # Evaluations
+ VIEW_EVALUATION_RUNS = "view_evaluation_runs"
+ EDIT_EVALUATION_RUNS = "edit_evaluation_runs"
+
+ VIEW_EVALUATION_SCENARIOS = "view_evaluation_scenarios"
+ EDIT_EVALUATION_SCENARIOS = "edit_evaluation_scenarios"
+
+ VIEW_EVALUATION_RESULTS = "view_evaluation_results"
+ EDIT_EVALUATION_RESULTS = "edit_evaluation_results"
+
+ VIEW_EVALUATION_METRICS = "view_evaluation_metrics"
+ EDIT_EVALUATION_METRICS = "edit_evaluation_metrics"
+
+ VIEW_EVALUATION_QUEUES = "view_evaluation_queues"
+ EDIT_EVALUATION_QUEUES = "edit_evaluation_queues"
+
+ @classmethod
+ def default_permissions(cls, role):
+ VIEWER_PERMISSIONS = [
+ cls.READ_SYSTEM,
+ cls.VIEW_APPLICATIONS,
+ cls.VIEW_SECRET,
+ cls.VIEW_APP_ENVIRONMENT_DEPLOYMENT,
+ cls.VIEW_TESTSET,
+ cls.VIEW_EVALUATION,
+ cls.RUN_SERVICE,
+ cls.VIEW_BILLING,
+ #
+ cls.VIEW_WORKFLOWS,
+ cls.VIEW_EVALUATORS,
+ cls.VIEW_TESTSETS,
+ cls.VIEW_ANNOTATIONS,
+ ]
+ defaults = {
+ WorkspaceRole.OWNER: [p for p in cls],
+ WorkspaceRole.VIEWER: VIEWER_PERMISSIONS,
+ WorkspaceRole.EDITOR: [
+ p
+ for p in cls
+ if p
+ not in [
+ cls.DELETE_SECRET,
+ cls.RESET_PASSWORD,
+ cls.DELETE_TESTSET,
+ cls.DELETE_WORKSPACE,
+ cls.CREATE_WORKSPACE,
+ cls.EDIT_ORGANIZATION,
+ cls.DELETE_EVALUATION,
+ cls.MODIFY_USER_ROLES,
+ cls.EDIT_APPLICATIONS,
+ cls.DELETE_ORGANIZATION,
+ cls.ADD_USER_TO_WORKSPACE,
+ cls.ADD_USER_TO_ORGANIZATION,
+ cls.EDIT_BILLING,
+ ]
+ ],
+ WorkspaceRole.DEPLOYMENT_MANAGER: VIEWER_PERMISSIONS
+ + [cls.DEPLOY_APPLICATION],
+ WorkspaceRole.WORKSPACE_ADMIN: [
+ p
+ for p in cls
+ if p
+ not in [
+ cls.DELETE_WORKSPACE,
+ cls.DELETE_ORGANIZATION,
+ cls.EDIT_ORGANIZATION,
+ cls.ADD_USER_TO_ORGANIZATION,
+ cls.EDIT_BILLING,
+ ]
+ ],
+ WorkspaceRole.EVALUATOR: VIEWER_PERMISSIONS
+ + [cls.CREATE_EVALUATION, cls.RUN_EVALUATIONS],
+ }
+
+ return defaults.get(role, [])
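+
+    # A minimal sketch of how the defaults above could be inspected, assuming
+    # both enums are imported as defined in this module:
+    #
+    #     for role in WorkspaceRole:
+    #         print(role.value, len(Permission.default_permissions(role)))
+    #
+    # OWNER resolves to every permission; the other roles either subtract
+    # permissions from the full set or extend the viewer baseline.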
+
+
+class WorkspaceMember(BaseModel):
+ role_name: WorkspaceRole
+ permissions: List[Permission] = Field(default_factory=list)
diff --git a/api/ee/src/routers/evaluation_router.py b/api/ee/src/routers/evaluation_router.py
new file mode 100644
index 0000000000..2cf6dc1da0
--- /dev/null
+++ b/api/ee/src/routers/evaluation_router.py
@@ -0,0 +1,519 @@
+from typing import Any, List
+import random
+
+from fastapi.responses import JSONResponse
+from fastapi import HTTPException, Request, status, Response, Query
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.utils.caching import get_cache, set_cache
+
+from ee.src.services import converters
+from ee.src.services import evaluation_service
+
+from ee.src.tasks.evaluations.legacy import (
+ setup_evaluation,
+ annotate,
+)
+from oss.src.utils.common import APIRouter, is_ee
+from oss.src.models.api.evaluation_model import (
+ Evaluation,
+ EvaluationScenario,
+ NewEvaluation,
+ DeleteEvaluation,
+)
+from ee.src.services import db_manager_ee
+from oss.src.services import app_manager, db_manager
+
+if is_ee():
+ from ee.src.models.shared_models import Permission
+ from ee.src.utils.permissions import check_action_access
+ from ee.src.utils.entitlements import (
+ check_entitlements,
+ Tracker,
+ Counter,
+ NOT_ENTITLED_RESPONSE,
+ )
+
+from oss.src.routers.testset_router import _validate_testset_limits
+
+
+from oss.src.apis.fastapi.evaluations.models import EvaluationRunsResponse
+
+
+router = APIRouter()
+
+
+log = get_module_logger(__name__)
+
+
+@router.get(
+ "/by_resource/",
+ response_model=List[str],
+)
+async def fetch_evaluation_ids(
+ resource_type: str,
+ request: Request,
+ resource_ids: List[str] = Query(None),
+):
+    """Fetches evaluation IDs for a given resource type and resource IDs.
+
+    Arguments:
+        resource_type (str): The type of resource for which to fetch evaluations.
+        resource_ids (List[str]): The IDs of the resources for which to fetch evaluations.
+
+ Raises:
+ HTTPException: If the resource_type is invalid or access is denied.
+
+ Returns:
+ List[str]: A list of evaluation ids.
+ """
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your organization admin."
+ log.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+ evaluations = await db_manager_ee.fetch_evaluations_by_resource(
+ resource_type,
+ request.state.project_id,
+ resource_ids,
+ )
+    return [str(evaluation.id) for evaluation in evaluations]
+
+
+@router.get(
+ "/{evaluation_id}/status/",
+ operation_id="fetch_evaluation_status",
+)
+async def fetch_evaluation_status(
+ evaluation_id: str,
+ request: Request,
+):
+ """Fetches the status of the evaluation.
+
+ Args:
+ evaluation_id (str): the evaluation id
+ request (Request): the request object
+
+ Returns:
+ (str): the evaluation status
+ """
+
+ cache_key = {
+ "evaluation_id": evaluation_id,
+ }
+
+ evaluation_status = await get_cache(
+ project_id=request.state.project_id,
+ namespace="fetch_evaluation_status",
+ key=cache_key,
+ retry=False,
+ )
+
+ if evaluation_status is not None:
+ return {"status": evaluation_status}
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=request.state.project_id,
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your organization admin."
+ log.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ evaluation_status = await db_manager_ee.fetch_evaluation_status_by_id(
+ project_id=request.state.project_id,
+ evaluation_id=evaluation_id,
+ )
+
+ await set_cache(
+ project_id=request.state.project_id,
+ namespace="fetch_evaluation_status",
+ key=cache_key,
+ value=evaluation_status,
+ ttl=15, # 15 seconds
+ )
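+    # The short TTL above bounds staleness: while an evaluation is running,
+    # repeated polls within 15 seconds are answered from the cache instead of
+    # hitting the database.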
+
+ return {"status": evaluation_status}
+
+
+@router.get(
+ "/{evaluation_id}/results/",
+ operation_id="fetch_legacy_evaluation_results",
+)
+async def fetch_evaluation_results(
+ evaluation_id: str,
+ request: Request,
+):
+    """Fetches the aggregated results of an evaluation.
+
+    Args:
+        evaluation_id (str): the evaluation id
+        request (Request): the request object
+
+    Returns:
+        dict: the aggregated results and the evaluation id
+    """
+
+ evaluation = await db_manager_ee.fetch_evaluation_by_id(
+ project_id=request.state.project_id,
+ evaluation_id=evaluation_id,
+ )
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your organization admin."
+ log.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ results = converters.aggregated_result_of_evaluation_to_pydantic(
+ evaluation.aggregated_results # type: ignore
+ )
+ return {"results": results, "evaluation_id": evaluation_id}
+
+
+@router.get(
+ "/{evaluation_id}/evaluation_scenarios/",
+ response_model=List[EvaluationScenario],
+ operation_id="fetch_legacy_evaluation_scenarios",
+)
+async def fetch_evaluation_scenarios(
+ evaluation_id: str,
+ request: Request,
+):
+ """Fetches evaluation scenarios for a given evaluation ID.
+
+ Arguments:
+ evaluation_id (str): The ID of the evaluation for which to fetch scenarios.
+
+ Raises:
+ HTTPException: If the evaluation is not found or access is denied.
+
+ Returns:
+ List[EvaluationScenario]: A list of evaluation scenarios.
+ """
+
+ evaluation = await db_manager_ee.fetch_evaluation_by_id(
+ project_id=request.state.project_id,
+ evaluation_id=evaluation_id,
+ )
+ if not evaluation:
+ raise HTTPException(
+ status_code=404, detail=f"Evaluation with id {evaluation_id} not found"
+ )
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your organization admin."
+ log.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ eval_scenarios = await evaluation_service.fetch_evaluation_scenarios_for_evaluation(
+ evaluation_id=str(evaluation.id), project_id=str(evaluation.project_id)
+ )
+ return eval_scenarios
+
+
+@router.get(
+ "/",
+ response_model=List[Evaluation],
+ operation_id="fetch_legacy_evaluations",
+)
+async def fetch_list_evaluations(
+ app_id: str,
+ request: Request,
+):
+    """Fetches the list of evaluations for a given app.
+
+    Args:
+        app_id (str): The ID of the app whose evaluations to fetch.
+
+ Returns:
+ List[Evaluation]: A list of evaluations.
+ """
+
+ app = await db_manager.fetch_app_by_id(app_id)
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(app.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your organization admin."
+ log.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ return await evaluation_service.fetch_list_evaluations(app, str(app.project_id))
+
+
+@router.get(
+ "/{evaluation_id}/",
+ response_model=Evaluation,
+ operation_id="fetch_legacy_evaluation",
+)
+async def fetch_evaluation(
+ evaluation_id: str,
+ request: Request,
+):
+ """Fetches a single evaluation based on its ID.
+
+ Args:
+ evaluation_id (str): The ID of the evaluation to fetch.
+
+ Returns:
+ Evaluation: The fetched evaluation.
+ """
+
+ evaluation = await db_manager_ee.fetch_evaluation_by_id(
+ project_id=request.state.project_id,
+ evaluation_id=evaluation_id,
+ )
+ if not evaluation:
+ raise HTTPException(
+ status_code=404, detail=f"Evaluation with id {evaluation_id} not found"
+ )
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your organization admin."
+ log.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ return await converters.evaluation_db_to_pydantic(evaluation)
+
+
+@router.delete(
+ "/",
+ response_model=List[str],
+ operation_id="delete_legacy_evaluations",
+)
+async def delete_evaluations(
+ payload: DeleteEvaluation,
+ request: Request,
+):
+    """
+    Delete specific evaluations based on their unique IDs.
+
+    Args:
+        payload (DeleteEvaluation): The unique identifiers of the evaluations to delete.
+
+    Returns:
+        A 204 No Content response upon successful deletion.
+    """
+
+ evaluation = await db_manager_ee.fetch_evaluation_by_id(
+ project_id=request.state.project_id,
+ evaluation_id=payload.evaluations_ids[0],
+ )
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation.project_id),
+ permission=Permission.DELETE_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your organization admin."
+ log.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ # Update last_modified_by app information
+ await app_manager.update_last_modified_by(
+ user_uid=request.state.user_id,
+ object_id=random.choice(payload.evaluations_ids),
+ object_type="evaluation",
+ project_id=str(evaluation.project_id),
+ )
+
+ await evaluation_service.delete_evaluations(payload.evaluations_ids)
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.get(
+ "/evaluation_scenarios/comparison-results/",
+ response_model=Any,
+ operation_id="fetch_legacy_evaluation_scenarios_comparison_results",
+)
+async def fetch_evaluation_scenarios_comparison_results(
+ evaluations_ids: str,
+ request: Request,
+):
+    """Fetches comparison results for the scenarios of the given evaluations.
+
+    Arguments:
+        evaluations_ids (str): Comma-separated IDs of the evaluations to compare.
+
+    Raises:
+        HTTPException: If an evaluation is not found or access is denied.
+
+    Returns:
+        Any: The comparison results across the given evaluations' scenarios.
+    """
+
+ evaluations_ids_list = evaluations_ids.split(",")
+ evaluation = await db_manager_ee.fetch_evaluation_by_id(
+ project_id=request.state.project_id,
+ evaluation_id=evaluations_ids_list[0],
+ )
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your organization admin."
+ log.error(error_msg)
+ return JSONResponse(
+ {"detail": error_msg},
+ status_code=403,
+ )
+
+ eval_scenarios = await evaluation_service.compare_evaluations_scenarios(
+ evaluations_ids_list, str(evaluation.project_id)
+ )
+
+ return eval_scenarios
+
+
+@router.post(
+ "/preview/start",
+ response_model=EvaluationRunsResponse,
+ operation_id="start_evaluation",
+)
+async def start_evaluation(
+ request: Request,
+ payload: NewEvaluation,
+) -> EvaluationRunsResponse:
+ try:
+ if is_ee():
+ # Permissions Check ------------------------------------------------
+ check = await check_action_access(
+ project_id=request.state.project_id,
+ user_uid=request.state.user_id,
+ permission=Permission.CREATE_EVALUATION,
+ )
+ if not check:
+ raise HTTPException(
+ status_code=403,
+ detail="You do not have permission to perform this action. Please contact your organization admin.",
+ )
+ # ------------------------------------------------------------------
+
+ # Entitlements Check -----------------------------------------------
+ check, _, _ = await check_entitlements(
+ organization_id=request.state.organization_id,
+ key=Counter.EVALUATIONS,
+ delta=1,
+ )
+
+ if not check:
+ return NOT_ENTITLED_RESPONSE(Tracker.COUNTERS)
+ # ------------------------------------------------------------------
+
+ # Input Validation -----------------------------------------------------
+ nof_runs = len(payload.revisions_ids)
+
+ if nof_runs == 0:
+ raise HTTPException(
+ status_code=400,
+ detail="No revisions provided for evaluation. Please provide at least one revision.",
+ )
+ # ----------------------------------------------------------------------
+
+ # Evaluation Run Execution ---------------------------------------------
+ runs = []
+
+ for i in range(nof_runs):
+ run = await setup_evaluation(
+ project_id=request.state.project_id,
+ user_id=request.state.user_id,
+ #
+ name=payload.name,
+ #
+ testset_id=payload.testset_id,
+ #
+ revision_id=payload.revisions_ids[i],
+ #
+ autoeval_ids=payload.evaluators_configs,
+ )
+
+ if not run:
+ continue
+
+ runs.append(run)
+
+ annotate.delay(
+ project_id=request.state.project_id,
+ user_id=request.state.user_id,
+ #
+ run_id=run.id,
+ #
+ testset_id=payload.testset_id,
+ #
+ revision_id=payload.revisions_ids[i],
+ #
+ autoeval_ids=payload.evaluators_configs,
+ #
+ run_config=payload.rate_limit.model_dump(mode="json"),
+ )
+ # ----------------------------------------------------------------------
+
+ runs_response = EvaluationRunsResponse(
+ count=len(runs),
+ runs=runs,
+ )
+
+ return runs_response
+
+ except KeyError as e:
+ log.error(e, exc_info=True)
+
+ raise HTTPException(
+ status_code=400,
+ detail="Columns in the test set should match the names of the inputs in the variant",
+ ) from e
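+
+
+# Illustrative request body for POST /preview/start, based on the NewEvaluation
+# fields used above (all IDs are placeholders):
+#
+#     {
+#         "name": "my-evaluation",
+#         "testset_id": "<testset-uuid>",
+#         "revisions_ids": ["<revision-uuid>"],
+#         "evaluators_configs": ["<evaluator-config-uuid>"],
+#         "rate_limit": {...}
+#     }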
diff --git a/api/ee/src/routers/human_evaluation_router.py b/api/ee/src/routers/human_evaluation_router.py
new file mode 100644
index 0000000000..3b2904062c
--- /dev/null
+++ b/api/ee/src/routers/human_evaluation_router.py
@@ -0,0 +1,460 @@
+from typing import List, Dict
+from fastapi import HTTPException, Body, Request, status, Response
+
+from oss.src.utils.logging import get_module_logger
+from ee.src.services import converters
+from oss.src.services import db_manager
+from ee.src.services import db_manager_ee
+from ee.src.services import results_service
+from ee.src.services import evaluation_service
+from oss.src.utils.common import APIRouter, is_ee
+from oss.src.models.api.evaluation_model import (
+ DeleteEvaluation,
+ EvaluationScenarioScoreUpdate,
+ HumanEvaluation,
+ HumanEvaluationScenario,
+ HumanEvaluationScenarioUpdate,
+ EvaluationType,
+ HumanEvaluationUpdate,
+ NewHumanEvaluation,
+ SimpleEvaluationOutput,
+)
+from ee.src.services.evaluation_service import (
+ update_human_evaluation_scenario,
+ update_human_evaluation_service,
+)
+
+if is_ee():
+ from ee.src.models.shared_models import (
+ Permission,
+ ) # noqa pylint: disable-all
+ from ee.src.utils.permissions import (
+ check_action_access,
+ ) # noqa pylint: disable-all
+
+
+router = APIRouter()
+
+log = get_module_logger(__name__)
+
+
+@router.post(
+ "/", response_model=SimpleEvaluationOutput, operation_id="create_human_evaluation"
+)
+async def create_human_evaluation(
+ payload: NewHumanEvaluation,
+ request: Request,
+):
+    """Creates a new human evaluation.
+
+    Raises:
+        HTTPException: If the app is not found, access is denied, or the test set
+            columns do not match the inputs of the variant.
+
+    Returns:
+        SimpleEvaluationOutput: The newly created evaluation.
+    """
+
+ try:
+ app = await db_manager.fetch_app_by_id(app_id=payload.app_id)
+ if app is None:
+ raise HTTPException(status_code=404, detail="App not found")
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(app.project_id),
+ permission=Permission.CREATE_EVALUATION,
+ )
+ if not has_permission:
+                error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ new_human_evaluation_db = await evaluation_service.create_new_human_evaluation(
+ payload
+ )
+ return await converters.human_evaluation_db_to_simple_evaluation_output(
+ new_human_evaluation_db
+ )
+ except KeyError:
+ raise HTTPException(
+ status_code=400,
+            detail="Columns in the test set should match the names of the inputs in the variant",
+ )
+
+
+@router.get("/", response_model=List[HumanEvaluation])
+async def fetch_list_human_evaluations(
+ app_id: str,
+ request: Request,
+):
+    """Fetches the list of human evaluations for a given app.
+
+    Args:
+        app_id (str): The ID of the app whose evaluations to fetch.
+
+ Returns:
+ List[HumanEvaluation]: A list of evaluations.
+ """
+
+ app = await db_manager.fetch_app_by_id(app_id=app_id)
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(app.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ return await evaluation_service.fetch_list_human_evaluations(
+ app_id, str(app.project_id)
+ )
+
+
+@router.get("/{evaluation_id}/", response_model=HumanEvaluation)
+async def fetch_human_evaluation(
+ evaluation_id: str,
+ request: Request,
+):
+ """Fetches a single evaluation based on its ID.
+
+ Args:
+ evaluation_id (str): The ID of the evaluation to fetch.
+
+ Returns:
+ HumanEvaluation: The fetched evaluation.
+ """
+
+ human_evaluation = await db_manager_ee.fetch_human_evaluation_by_id(evaluation_id)
+ if not human_evaluation:
+ raise HTTPException(status_code=404, detail="Evaluation not found")
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(human_evaluation.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ return await evaluation_service.fetch_human_evaluation(human_evaluation)
+
+
+@router.get(
+ "/{evaluation_id}/evaluation_scenarios/",
+ response_model=List[HumanEvaluationScenario],
+ operation_id="fetch_human_evaluation_scenarios",
+)
+async def fetch_human_evaluation_scenarios(
+ evaluation_id: str,
+ request: Request,
+):
+ """Fetches evaluation scenarios for a given evaluation ID.
+
+ Arguments:
+ evaluation_id (str): The ID of the evaluation for which to fetch scenarios.
+
+ Raises:
+ HTTPException: If the evaluation is not found or access is denied.
+
+ Returns:
+        List[HumanEvaluationScenario]: A list of human evaluation scenarios.
+ """
+
+ human_evaluation = await db_manager_ee.fetch_human_evaluation_by_id(evaluation_id)
+ if human_evaluation is None:
+ raise HTTPException(
+ status_code=404,
+ detail=f"Evaluation with id {evaluation_id} not found",
+ )
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(human_evaluation.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ eval_scenarios = (
+ await evaluation_service.fetch_human_evaluation_scenarios_for_evaluation(
+ human_evaluation
+ )
+ )
+
+ return eval_scenarios
+
+
+@router.put("/{evaluation_id}/", operation_id="update_human_evaluation")
+async def update_human_evaluation(
+ request: Request,
+ evaluation_id: str,
+ update_data: HumanEvaluationUpdate = Body(...),
+):
+ """Updates an evaluation's status.
+
+ Raises:
+ HTTPException: If the columns in the test set do not match with the inputs in the variant.
+
+ Returns:
+ None: A 204 No Content status code, indicating that the update was successful.
+ """
+
+ try:
+ human_evaluation = await db_manager_ee.fetch_human_evaluation_by_id(
+ evaluation_id
+ )
+ if not human_evaluation:
+ raise HTTPException(status_code=404, detail="Evaluation not found")
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(human_evaluation.project_id),
+ permission=Permission.EDIT_EVALUATION,
+ )
+ if not has_permission:
+                error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ await update_human_evaluation_service(human_evaluation, update_data)
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+ except KeyError:
+ raise HTTPException(
+ status_code=400,
+            detail="Columns in the test set should match the names of the inputs in the variant",
+ )
+
+
+@router.put(
+ "/{evaluation_id}/evaluation_scenario/{evaluation_scenario_id}/{evaluation_type}/"
+)
+async def update_evaluation_scenario_router(
+ evaluation_id: str,
+ evaluation_scenario_id: str,
+ evaluation_type: EvaluationType,
+ payload: HumanEvaluationScenarioUpdate,
+ request: Request,
+):
+ """Updates an evaluation scenario's vote or score based on its type.
+
+ Raises:
+ HTTPException: If update fails or unauthorized.
+
+ Returns:
+ None: 204 No Content status code upon successful update.
+ """
+
+ evaluation_scenario_db = await db_manager_ee.fetch_human_evaluation_scenario_by_id(
+ evaluation_scenario_id
+ )
+ if evaluation_scenario_db is None:
+ raise HTTPException(
+ status_code=404,
+ detail=f"Evaluation scenario with id {evaluation_scenario_id} not found",
+ )
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation_scenario_db.project_id),
+ permission=Permission.EDIT_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ await update_human_evaluation_scenario(
+ evaluation_scenario_db,
+ payload,
+ evaluation_type,
+ )
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.get("/evaluation_scenario/{evaluation_scenario_id}/score/")
+async def get_evaluation_scenario_score_router(
+ evaluation_scenario_id: str,
+ request: Request,
+) -> Dict[str, str]:
+ """
+ Fetch the score of a specific evaluation scenario.
+
+ Args:
+ evaluation_scenario_id: The ID of the evaluation scenario to fetch.
+
+ Returns:
+ Dictionary containing the scenario ID and its score.
+ """
+
+    evaluation_scenario = await db_manager_ee.fetch_evaluation_scenario_by_id(
+ evaluation_scenario_id
+ )
+ if evaluation_scenario is None:
+ raise HTTPException(
+ status_code=404,
+ detail=f"Evaluation scenario with id {evaluation_scenario_id} not found",
+ )
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation_scenario.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ return {
+ "scenario_id": str(evaluation_scenario.id),
+ "score": evaluation_scenario.score,
+ }
+
+
+@router.put("/evaluation_scenario/{evaluation_scenario_id}/score/")
+async def update_evaluation_scenario_score_router(
+ evaluation_scenario_id: str,
+ payload: EvaluationScenarioScoreUpdate,
+ request: Request,
+):
+ """Updates the score of an evaluation scenario.
+
+ Raises:
+ HTTPException: Server error if the evaluation update fails.
+
+ Returns:
+ None: 204 No Content status code upon successful update.
+ """
+
+ evaluation_scenario = await db_manager_ee.fetch_evaluation_scenario_by_id(
+ evaluation_scenario_id
+ )
+ if evaluation_scenario is None:
+ raise HTTPException(
+ status_code=404,
+ detail=f"Evaluation scenario with id {evaluation_scenario_id} not found",
+ )
+
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation_scenario.project_id),
+            permission=Permission.EDIT_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ await db_manager.update_human_evaluation_scenario(
+ evaluation_scenario_id=str(evaluation_scenario.id), # type: ignore
+ values_to_update=payload.model_dump(),
+ )
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.get("/{evaluation_id}/results/", operation_id="fetch_results")
+async def fetch_results(
+ evaluation_id: str,
+ request: Request,
+):
+    """Fetches all the results for a given human evaluation.
+
+    Arguments:
+        evaluation_id (str): The ID of the evaluation whose results to fetch.
+
+    Returns:
+        dict: The votes data or results data, depending on the evaluation type.
+    """
+
+ evaluation = await db_manager_ee.fetch_human_evaluation_by_id(evaluation_id)
+ if evaluation is None:
+ raise HTTPException(
+ status_code=404,
+ detail=f"Evaluation with id {evaluation_id} not found",
+ )
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation.project_id),
+ permission=Permission.VIEW_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ if evaluation.evaluation_type == EvaluationType.human_a_b_testing:
+ results = await results_service.fetch_results_for_evaluation(evaluation)
+ return {"votes_data": results}
+
+ elif evaluation.evaluation_type == EvaluationType.single_model_test:
+ results = await results_service.fetch_results_for_single_model_test(
+ evaluation_id
+ )
+ return {"results_data": results}
+
+
+@router.delete("/", response_model=List[str])
+async def delete_evaluations(
+ payload: DeleteEvaluation,
+ request: Request,
+):
+    """
+    Delete specific human evaluations based on their unique IDs.
+
+    Args:
+        payload (DeleteEvaluation): The unique identifiers of the evaluations to delete.
+
+    Returns:
+        A 204 No Content response upon successful deletion.
+    """
+
+ evaluation = await db_manager_ee.fetch_human_evaluation_by_id(
+ payload.evaluations_ids[0]
+ )
+ if is_ee():
+ has_permission = await check_action_access(
+ user_uid=request.state.user_id,
+ project_id=str(evaluation.project_id),
+ permission=Permission.DELETE_EVALUATION,
+ )
+ if not has_permission:
+            error_msg = "You do not have permission to perform this action. Please contact your Organization Admin."
+ raise HTTPException(
+ detail=error_msg,
+ status_code=403,
+ )
+
+ await evaluation_service.delete_human_evaluations(payload.evaluations_ids)
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
diff --git a/api/ee/src/routers/organization_router.py b/api/ee/src/routers/organization_router.py
new file mode 100644
index 0000000000..7b265a692a
--- /dev/null
+++ b/api/ee/src/routers/organization_router.py
@@ -0,0 +1,239 @@
+from fastapi.responses import JSONResponse
+from fastapi import HTTPException, Request
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.services import db_manager
+from ee.src.services import db_manager_ee
+from oss.src.utils.common import APIRouter
+from ee.src.services import workspace_manager
+from ee.src.models.db_models import Permission
+from ee.src.services.selectors import (
+ get_user_own_org,
+ get_user_org_and_workspace_id,
+)
+from ee.src.models.api.workspace_models import (
+ CreateWorkspace,
+ UpdateWorkspace,
+ WorkspaceResponse,
+)
+from ee.src.utils.permissions import (
+ check_user_org_access,
+ check_rbac_permission,
+)
+from ee.src.models.api.organization_models import (
+ CreateOrganization,
+ OrganizationUpdate,
+ OrganizationOutput,
+)
+from ee.src.services.organization_service import (
+ update_an_organization,
+ get_organization_details,
+)
+
+
+router = APIRouter()
+
+log = get_module_logger(__name__)
+
+
+@router.get("/own/", response_model=OrganizationOutput, operation_id="get_own_org")
+async def get_user_organization(
+ request: Request,
+):
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ org_db = await get_user_own_org(user_uid=user_org_workspace_data["uid"])
+ if org_db is None:
+ raise HTTPException(404, detail="User does not have an organization")
+
+ return OrganizationOutput(id=str(org_db.id), name=org_db.name)
+    except HTTPException:
+        raise
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.get("/{org_id}/", operation_id="fetch_ee_organization_details")
+async def fetch_organization_details(
+ org_id: str,
+ request: Request,
+):
+ """Get an organization's details.
+
+    Raises:
+        HTTPException: If the user does not have access to the organization.
+
+    Returns:
+        The organization's details.
+ """
+
+ try:
+ workspace_id = await db_manager_ee.get_default_workspace_id_from_organization(
+ organization_id=org_id
+ )
+
+ project_id = await db_manager.get_default_project_id_from_workspace(
+ workspace_id=workspace_id
+ )
+
+ project_memberships = await db_manager_ee.fetch_project_memberships_by_user_id(
+ user_id=str(request.state.user_id)
+ )
+
+ membership = None
+ for project_membership in project_memberships:
+ if str(project_membership.project_id) == project_id:
+ membership = project_membership
+ break
+
+ if not membership:
+ return JSONResponse(
+ status_code=403,
+ content={"detail": "You do not have access to this organization"},
+ )
+
+ user_org_workspace_data = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(user_org_workspace_data, org_id)
+ if not has_permission:
+ return JSONResponse(
+ status_code=403,
+ content={"detail": "You do not have access to this organization"},
+ )
+
+ organization = await get_organization_details(org_id)
+
+ if membership.role == "viewer" or membership.is_demo:
+ if "default_workspace" in organization:
+ organization["default_workspace"].members = []
+
+ return organization
+
+    except Exception as e:
+        log.error(e, exc_info=True)
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
+
+
+@router.put("/{org_id}/", operation_id="update_organization")
+async def update_organization(
+ org_id: str,
+ payload: OrganizationUpdate,
+ request: Request,
+):
+ if not payload.name and not payload.description:
+ return JSONResponse(
+ {"detail": "Please provide a name or description to update"},
+ status_code=400,
+ )
+
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, org_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "You do not have permission to perform this action"},
+ status_code=403,
+ )
+
+ organization = await update_an_organization(org_id, payload)
+
+ return organization
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
+
+
+@router.post(
+ "/{org_id}/workspaces/",
+ operation_id="create_workspace",
+ response_model=WorkspaceResponse,
+)
+async def create_workspace(
+ org_id: str,
+ payload: CreateWorkspace,
+ request: Request,
+) -> WorkspaceResponse:
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ has_permission = await check_user_org_access(
+ user_org_workspace_data, org_id, check_owner=True
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "You do not have permission to perform this action"},
+ status_code=403,
+ )
+
+ if not payload.name:
+ return JSONResponse(
+ {"detail": "Please provide a name to create a workspace"},
+ status_code=400,
+ )
+ workspace = await workspace_manager.create_new_workspace(
+ payload, org_id, user_org_workspace_data["uid"]
+ )
+ return workspace
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
+
+
+@router.put(
+ "/{org_id}/workspaces/{workspace_id}/",
+ operation_id="update_workspace",
+ response_model=WorkspaceResponse,
+)
+async def update_workspace(
+ org_id: str,
+ workspace_id: str,
+ payload: UpdateWorkspace,
+ request: Request,
+) -> WorkspaceResponse:
+ try:
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ project = await db_manager_ee.get_project_by_workspace(workspace_id)
+ has_permission = await check_rbac_permission(
+ user_org_workspace_data=user_org_workspace_data,
+ project_id=str(project.id),
+ permission=Permission.EDIT_WORKSPACE,
+ )
+ if not has_permission:
+ return JSONResponse(
+ {"detail": "You do not have permission to update this workspace"},
+ status_code=403,
+ )
+
+ if not payload.name and not payload.description:
+ return JSONResponse(
+ {"detail": "Please provide a name or description to update"},
+ status_code=400,
+ )
+ workspace = await workspace_manager.update_workspace(payload, workspace_id)
+ return workspace
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
diff --git a/api/ee/src/routers/workspace_router.py b/api/ee/src/routers/workspace_router.py
new file mode 100644
index 0000000000..40e0e17885
--- /dev/null
+++ b/api/ee/src/routers/workspace_router.py
@@ -0,0 +1,173 @@
+from typing import List
+
+from fastapi import HTTPException, Request
+from fastapi.responses import JSONResponse
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.utils.common import APIRouter
+from ee.src.utils.permissions import check_rbac_permission
+from ee.src.services import workspace_manager, db_manager_ee
+from ee.src.services.selectors import get_user_org_and_workspace_id
+
+from ee.src.models.api.workspace_models import (
+ UserRole,
+ Permission,
+ WorkspaceRole,
+)
+
+router = APIRouter()
+
+log = get_module_logger(__name__)
+
+
+@router.get(
+ "/permissions/",
+ operation_id="get_all_workspace_permissions",
+ response_model=List[Permission],
+)
+async def get_all_workspace_permissions() -> List[Permission]:
+ """
+ Get all workspace permissions.
+
+ Returns a list of all available workspace permissions.
+
+ Returns:
+ List[Permission]: A list of Permission objects representing the available workspace permissions.
+
+ Raises:
+ HTTPException: If there is an error retrieving the workspace permissions.
+
+ """
+ try:
+ workspace_permissions = await workspace_manager.get_all_workspace_permissions()
+ return sorted(workspace_permissions)
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
+
+
+@router.post("/{workspace_id}/roles/", operation_id="assign_role_to_user")
+async def assign_role_to_user(
+ payload: UserRole,
+ workspace_id: str,
+ request: Request,
+):
+ """
+ Assigns a role to a user in a workspace.
+
+ Args:
+ payload (UserRole): The payload containing the organization id, user email, and role to assign.
+ workspace_id (str): The ID of the workspace.
+ request (Request): The FastAPI request object.
+
+ Returns:
+ bool: True if the role was successfully assigned, False otherwise.
+
+ Raises:
+ HTTPException: If the user does not have permission to perform this action.
+ HTTPException: If there is an error assigning the role to the user.
+ """
+
+ try:
+ user_org_workspace_data = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ project = await db_manager_ee.get_project_by_workspace(workspace_id)
+ has_permission = await check_rbac_permission(
+ user_org_workspace_data=user_org_workspace_data,
+ project_id=str(project.id),
+ role=WorkspaceRole.WORKSPACE_ADMIN,
+ )
+ if not has_permission:
+ return JSONResponse(
+ status_code=403,
+ content={
+ "detail": "You do not have permission to perform this action. Please contact your Organization Owner"
+ },
+ )
+
+ if not WorkspaceRole.is_valid_role(payload.role): # type: ignore
+ return JSONResponse(
+ status_code=400, content={"detail": "Workspace role is invalid."}
+ )
+
+ create_user_role = await db_manager_ee.update_user_roles(
+ workspace_id,
+ payload,
+ )
+ return create_user_role
+ except HTTPException as ex:
+ raise ex
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
+
+
+@router.delete("/{workspace_id}/roles/", operation_id="unassign_role_from_user")
+async def unassign_role_from_user(
+ email: str,
+ org_id: str,
+ role: str,
+ workspace_id: str,
+ request: Request,
+):
+ """
+ Delete a role assignment from a user in a workspace.
+
+ Args:
+ workspace_id (str): The ID of the workspace.
+ email (str): The email of the user to remove the role from.
+ org_id (str): The ID of the organization.
+ role (str): The role to remove from the user.
+ request (Request): The FastAPI request object.
+
+ Returns:
+ bool: True if the role assignment was successfully deleted.
+
+ Raises:
+ HTTPException: If there is an error in the request or the user does not have permission to perform the action.
+ HTTPException: If there is an error in updating the user's roles.
+
+ """
+ try:
+ user_org_workspace_data = await get_user_org_and_workspace_id(
+ request.state.user_id
+ )
+ project = await db_manager_ee.get_project_by_workspace(workspace_id)
+ has_permission = await check_rbac_permission(
+ user_org_workspace_data=user_org_workspace_data,
+ project_id=str(project.id),
+ role=WorkspaceRole.WORKSPACE_ADMIN,
+ )
+ if not has_permission:
+ return JSONResponse(
+ status_code=403,
+ content={
+ "detail": "You do not have permission to perform this action. Please contact your Organization Owner"
+ },
+ )
+
+ payload = UserRole(
+ email=email,
+ organization_id=org_id,
+ role=role,
+ )
+
+ delete_user_role = await db_manager_ee.update_user_roles(
+ workspace_id,
+ payload,
+ delete=True,
+ )
+
+ return delete_user_role
+ except HTTPException as ex:
+ raise ex
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=str(e),
+ )
diff --git a/api/ee/src/services/admin_manager.py b/api/ee/src/services/admin_manager.py
new file mode 100644
index 0000000000..57af9d8ef6
--- /dev/null
+++ b/api/ee/src/services/admin_manager.py
@@ -0,0 +1,404 @@
+from typing import Optional, Literal, Any
+from uuid import UUID
+
+from pydantic import BaseModel
+import uuid_utils.compat as uuid
+from sqlalchemy.future import select
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.utils.common import is_ee
+
+from oss.src.dbs.postgres.shared.engine import engine
+
+from oss.src.models.db_models import UserDB
+from oss.src.services.api_key_service import create_api_key
+
+from ee.src.models.db_models import (
+ WorkspaceDB,
+ ProjectDB,
+ OrganizationDB,
+ ProjectMemberDB as ProjectMembershipDB,
+ WorkspaceMemberDB as WorkspaceMembershipDB,
+ OrganizationMemberDB as OrganizationMembershipDB,
+)
+
+log = get_module_logger(__name__)
+
+
+class Reference(BaseModel):
+ id: Optional[UUID] = None
+ slug: Optional[str] = None
+
+ class Config:
+ json_encoders = {UUID: str}
+
+ def encode(self, data: Any) -> Any:
+ if isinstance(data, dict):
+ return {k: self.encode(v) for k, v in data.items()}
+ elif isinstance(data, list):
+ return [self.encode(item) for item in data]
+ for type_, encoder in self.Config.json_encoders.items():
+ if isinstance(data, type_):
+ return encoder(data)
+ return data
+
+ def model_dump(self, *args, **kwargs) -> dict:
+ kwargs.setdefault("exclude_none", True)
+
+ return self.encode(super().model_dump(*args, **kwargs))
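+
+    # Illustrative behavior, given the encoders and exclude_none default above:
+    #
+    #     Reference(id=UUID("00000000-0000-0000-0000-000000000000")).model_dump()
+    #     # -> {"id": "00000000-0000-0000-0000-000000000000"}  ('slug' is dropped)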
+
+
+class UserRequest(BaseModel):
+ name: str
+ email: str
+
+
+Tier = str
+
+
+class OrganizationRequest(BaseModel):
+ name: str
+ description: str
+ is_paying: bool
+
+
+class WorkspaceRequest(BaseModel):
+ name: str
+ description: str
+ is_default: bool
+ #
+ organization_ref: Reference
+
+
+class ProjectRequest(BaseModel):
+ name: str
+ description: str
+ is_default: bool
+ #
+ workspace_ref: Reference
+ organization_ref: Reference
+
+
+OrganizationRole = Literal[
+ "owner",
+ "viewer",
+ "editor",
+ "evaluator",
+ "workspace_admin",
+ "deployment_manager",
+] # update list
+
+
+class OrganizationMembershipRequest(BaseModel):
+ role: OrganizationRole
+ is_demo: bool
+ #
+ user_ref: Reference
+ organization_ref: Reference
+
+
+WorkspaceRole = Literal[ # update list
+ "owner",
+ "viewer",
+ "editor",
+ "evaluator",
+ "workspace_admin",
+ "deployment_manager",
+]
+
+
+class WorkspaceMembershipRequest(BaseModel):
+ role: WorkspaceRole
+ is_demo: bool
+ #
+ user_ref: Reference
+ workspace_ref: Reference
+
+
+ProjectRole = Literal[ # update list
+ "owner",
+ "viewer",
+ "editor",
+ "evaluator",
+ "workspace_admin",
+ "deployment_manager",
+]
+
+
+class ProjectMembershipRequest(BaseModel):
+ role: ProjectRole
+ is_demo: bool
+ #
+ user_ref: Reference
+ project_ref: Reference
+
+
+Credentials = str
+
+
+async def check_user(
+ request: UserRequest,
+) -> Optional[Reference]:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(UserDB).filter_by(
+ email=request.email,
+ )
+ )
+
+ user_db = result.scalars().first()
+
+ reference = Reference(id=user_db.id) if user_db else None
+
+ return reference
+
+
+async def create_user(
+ request: UserRequest,
+) -> Reference:
+ async with engine.core_session() as session:
+ user_db = UserDB(
+ # id=uuid7() # use default
+ #
+ uid=str(uuid.uuid7()),
+ username=request.name, # rename to 'name'
+ email=request.email,
+ )
+
+ session.add(user_db)
+
+ log.info(
+ "[scopes] user created",
+ user_id=user_db.id,
+ )
+
+ await session.commit()
+
+ response = Reference(id=user_db.id)
+
+ return response
+
+
+async def create_organization(
+ request: OrganizationRequest,
+) -> Reference:
+ async with engine.core_session() as session:
+ organization_db = OrganizationDB(
+ # id=uuid7() # use default
+ #
+ name=request.name,
+ description=request.description,
+ #
+ owner="", # move 'owner' from here to membership 'role'
+ # type=... # remove 'type'
+ )
+
+ if is_ee():
+ organization_db.is_paying = request.is_paying
+
+ session.add(organization_db)
+
+ log.info(
+ "[scopes] organization created",
+ organization_id=organization_db.id,
+ )
+
+ await session.commit()
+
+ response = Reference(id=organization_db.id)
+
+ return response
+
+
+async def create_workspace(
+ request: WorkspaceRequest,
+) -> Reference:
+ async with engine.core_session() as session:
+ workspace_db = WorkspaceDB(
+ # id=uuid7() # use default
+ #
+ name=request.name,
+ description=request.description,
+ type=("default" if request.is_default else None), # rename to 'is_default'
+ #
+ organization_id=request.organization_ref.id,
+ )
+
+ session.add(workspace_db)
+
+ log.info(
+ "[scopes] workspace created",
+ organization_id=workspace_db.organization_id,
+ workspace_id=workspace_db.id,
+ )
+
+ await session.commit()
+
+ response = Reference(id=workspace_db.id)
+
+ return response
+
+
+async def create_project(
+ request: ProjectRequest,
+) -> Reference:
+ async with engine.core_session() as session:
+ project_db = ProjectDB(
+ # id=uuid7() # use default
+ #
+ project_name=request.name, # rename to 'name'
+ # description=... # missing 'description'
+ is_default=request.is_default,
+ #
+ workspace_id=request.workspace_ref.id,
+ organization_id=request.organization_ref.id,
+ )
+
+ session.add(project_db)
+
+ log.info(
+ "[scopes] project created",
+ organization_id=project_db.organization_id,
+ workspace_id=project_db.workspace_id,
+ project_id=project_db.id,
+ )
+
+ await session.commit()
+
+ response = Reference(id=project_db.id)
+
+ return response
+
+
+async def create_organization_membership(
+ request: OrganizationMembershipRequest,
+) -> Reference:
+ async with engine.core_session() as session:
+ membership_db = OrganizationMembershipDB(
+ # id=uuid7() # use default
+ #
+ # role=request.role, # move 'owner' from organization to here as 'role'
+ # is_demo=request.is_demo, # add 'is_demo'
+ #
+ user_id=request.user_ref.id,
+ organization_id=request.organization_ref.id,
+ )
+
+ session.add(membership_db)
+
+ log.info(
+ "[scopes] organization membership created",
+ organization_id=request.organization_ref.id,
+ user_id=request.user_ref.id,
+ membership_id=membership_db.id,
+ )
+
+ await session.commit()
+
+ if request.role == "owner":
+ result = await session.execute(
+ select(OrganizationDB).filter_by(
+ id=request.organization_ref.id,
+ )
+ )
+
+ organization_db = result.scalars().first()
+
+ organization_db.owner = str(request.user_ref.id)
+
+ await session.commit()
+
+ response = Reference(id=membership_db.id)
+
+ return response
+
+
+async def create_workspace_membership(
+ request: WorkspaceMembershipRequest,
+) -> Reference:
+ async with engine.core_session() as session:
+ workspace = await session.execute(
+ select(WorkspaceDB).filter_by(
+ id=request.workspace_ref.id,
+ )
+ )
+ workspace_db = workspace.scalars().first()
+
+ membership_db = WorkspaceMembershipDB(
+ # id=uuid7() # use default
+ #
+ role=request.role,
+ # is_demo=request.is_demo, # add 'is_demo'
+ #
+ user_id=request.user_ref.id,
+ workspace_id=request.workspace_ref.id,
+ )
+
+ session.add(membership_db)
+
+ log.info(
+ "[scopes] workspace membership created",
+ organization_id=workspace_db.organization_id,
+ workspace_id=request.workspace_ref.id,
+ user_id=request.user_ref.id,
+ membership_id=membership_db.id,
+ )
+
+ await session.commit()
+
+ response = Reference(id=membership_db.id)
+
+ return response
+
+
+async def create_project_membership(
+ request: ProjectMembershipRequest,
+) -> Reference:
+ async with engine.core_session() as session:
+ project = await session.execute(
+ select(ProjectDB).filter_by(
+ id=request.project_ref.id,
+ )
+ )
+ project_db = project.scalars().first()
+
+ membership_db = ProjectMembershipDB(
+ # id=uuid7() # use default
+ #
+ role=request.role,
+ is_demo=request.is_demo,
+ #
+ user_id=request.user_ref.id,
+ project_id=request.project_ref.id,
+ )
+
+ session.add(membership_db)
+
+ log.info(
+ "[scopes] project membership created",
+ organization_id=project_db.organization_id,
+ workspace_id=project_db.workspace_id,
+ project_id=request.project_ref.id,
+ user_id=request.user_ref.id,
+ membership_id=membership_db.id,
+ )
+
+ await session.commit()
+
+ response = Reference(id=membership_db.id)
+
+ return response
+
+
+async def create_credentials(
+ user_id: UUID,
+ project_id: UUID,
+) -> Credentials:
+ apikey_token = await create_api_key(
+ user_id=str(user_id),
+ project_id=str(project_id),
+ )
+
+ credentials = f"ApiKey {apikey_token}"
+
+ return credentials
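+
+
+# Illustrative use of the returned credentials; the header name is an
+# assumption based on the "ApiKey" scheme above:
+#
+#     credentials = await create_credentials(user_id, project_id)
+#     headers = {"Authorization": credentials}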
diff --git a/api/ee/src/services/aggregation_service.py b/api/ee/src/services/aggregation_service.py
new file mode 100644
index 0000000000..55a14e5f8f
--- /dev/null
+++ b/api/ee/src/services/aggregation_service.py
@@ -0,0 +1,135 @@
+import re
+import traceback
+from typing import List, Optional
+
+from oss.src.models.shared_models import InvokationResult, Result, Error
+
+
+def aggregate_ai_critique(results: List[Result]) -> Result:
+ """Aggregates the results for the ai critique evaluation.
+
+ Args:
+ results (List[Result]): list of result objects
+
+ Returns:
+ Result: aggregated result
+ """
+
+ try:
+ numeric_scores = []
+ for result in results:
+ # Extract the first number found in the result value
+ match = re.search(r"\d+", result.value)
+ if match:
+ try:
+ score = int(match.group())
+ numeric_scores.append(score)
+ except ValueError:
+ # Ignore if the extracted value is not an integer
+ continue
+
+ # Calculate the average of numeric scores if any are present
+ average_value = (
+ sum(numeric_scores) / len(numeric_scores) if numeric_scores else None
+ )
+ return Result(
+ type="number",
+ value=average_value,
+ )
+ except Exception as exc:
+ return Result(
+ type="error",
+ value=None,
+ error=Error(message=str(exc), stacktrace=str(traceback.format_exc())),
+ )
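+# For example (illustrative): result values "Score: 8", "7/10", and "9" yield
+# the numeric scores [8, 7, 9], so the aggregated value is 8.0.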
+
+
+def aggregate_binary(results: List[Result]) -> Result:
+ """Aggregates the results for the binary (auto regex) evaluation.
+
+ Args:
+ results (List[Result]): list of result objects
+
+ Returns:
+ Result: aggregated result
+ """
+
+    if results and all(isinstance(result.value, bool) for result in results):
+        average_value = sum(int(result.value) for result in results) / len(results)
+    else:
+        average_value = None
+ return Result(type="number", value=average_value)
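+# For example (illustrative): boolean results [True, False, True] aggregate to
+# 2/3 ≈ 0.667; an empty list or any non-boolean value yields None instead.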
+
+
+def aggregate_float(results: List[Result]) -> Result:
+ """Aggregates the results for evaluations aside from auto regex and ai critique.
+
+ Args:
+ results (List[Result]): list of result objects
+
+ Returns:
+ Result: aggregated result
+ """
+
+ try:
+ average_value = sum(result.value for result in results) / len(results)
+ return Result(type="number", value=average_value)
+ except Exception as exc:
+ return Result(
+ type="error",
+ value=None,
+ error=Error(message=str(exc), stacktrace=str(traceback.format_exc())),
+ )
+
+
+def aggregate_float_from_llm_app_response(
+ invocation_results: List[InvokationResult], key: Optional[str]
+) -> Result:
+ try:
+ if not key:
+ raise ValueError("Key is required to aggregate InvokationResult objects.")
+
+ values = [
+ getattr(inv_result, key)
+ for inv_result in invocation_results
+ if hasattr(inv_result, key) and getattr(inv_result, key) is not None
+ ]
+
+ if not values:
+ return Result(type=key, value=None)
+
+ average_value = sum(values) / len(values)
+ return Result(type=key, value=average_value)
+ except Exception as exc:
+ return Result(
+ type="error",
+ value=None,
+ error=Error(message=str(exc), stacktrace=str(traceback.format_exc())),
+ )
+
+
+def sum_float_from_llm_app_response(
+ invocation_results: List[InvokationResult], key: Optional[str]
+) -> Result:
+ try:
+ if not key:
+ raise ValueError("Key is required to aggregate InvokationResult objects.")
+
+ values = [
+ getattr(inv_result, key)
+ for inv_result in invocation_results
+ if hasattr(inv_result, key) and getattr(inv_result, key) is not None
+ ]
+
+ if not values:
+ return Result(type=key, value=None)
+
+ total_value = sum(values)
+
+ return Result(type=key, value=total_value)
+ except Exception as exc:
+ return Result(
+ type="error",
+ value=None,
+ error=Error(message=str(exc), stacktrace=str(traceback.format_exc())),
+ )
diff --git a/api/ee/src/services/commoners.py b/api/ee/src/services/commoners.py
new file mode 100644
index 0000000000..45e5643d78
--- /dev/null
+++ b/api/ee/src/services/commoners.py
@@ -0,0 +1,179 @@
+from os import getenv
+from json import loads
+from typing import List
+from traceback import format_exc
+
+from pydantic import BaseModel
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.services import db_manager
+from oss.src.utils.common import is_ee
+from ee.src.services import workspace_manager
+from ee.src.services.db_manager_ee import (
+ create_organization,
+ add_user_to_organization,
+ add_user_to_workspace,
+ add_user_to_project,
+)
+from ee.src.services.selectors import (
+ user_exists,
+)
+from ee.src.models.api.organization_models import CreateOrganization
+from oss.src.services.user_service import create_new_user
+from ee.src.services.email_helper import (
+ add_contact_to_loops,
+)
+
+log = get_module_logger(__name__)
+
+from ee.src.dbs.postgres.subscriptions.dao import SubscriptionsDAO
+from ee.src.core.subscriptions.service import SubscriptionsService
+from ee.src.dbs.postgres.meters.dao import MetersDAO
+from ee.src.core.meters.service import MetersService
+
+subscription_service = SubscriptionsService(
+ subscriptions_dao=SubscriptionsDAO(),
+ meters_service=MetersService(
+ meters_dao=MetersDAO(),
+ ),
+)
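+# The service is composed once at import time: SubscriptionsService delegates
+# usage metering to MetersService, and each service talks to Postgres through
+# its own DAO.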
+
+from ee.src.utils.entitlements import check_entitlements, Gauge
+
+DEMOS = "AGENTA_DEMOS"
+DEMO_ROLE = "viewer"
+
+
+class Demo(BaseModel):
+ organization_id: str
+ workspace_id: str
+ project_id: str
+
+
+async def list_all_demos() -> List[Demo]:
+ demos = []
+
+ try:
+ demo_project_ids = loads(getenv(DEMOS) or "[]")
+
+ for project_id in demo_project_ids:
+ project = await db_manager.get_project_by_id(project_id)
+
+ try:
+ demos.append(
+ Demo(
+ organization_id=str(project.organization_id),
+ workspace_id=str(project.workspace_id),
+ project_id=str(project.id),
+ )
+ )
+
+ except: # pylint: disable=bare-except
+ log.error(format_exc())
+
+ except: # pylint: disable=bare-except
+ log.error(format_exc())
+
+ return demos
+
+
+async def add_user_to_demos(user_id: str) -> None:
+ try:
+ demos = await list_all_demos()
+
+ for organization_id in {demo.organization_id for demo in demos}:
+ await add_user_to_organization(
+ organization_id,
+ user_id,
+ # is_demo=True,
+ )
+
+ for workspace_id in {demo.workspace_id for demo in demos}:
+ await add_user_to_workspace(
+ workspace_id,
+ user_id,
+ DEMO_ROLE,
+ # is_demo=True,
+ )
+
+ for project_id in {demo.project_id for demo in demos}:
+ await add_user_to_project(
+ project_id,
+ user_id,
+ DEMO_ROLE,
+ is_demo=True,
+ )
+
+ except Exception as exc:
+        raise # TODO: handle exceptions
+
+
+async def create_accounts(payload: dict):
+ """Creates a user account and an associated organization based on the
+ provided payload.
+
+ Arguments:
+        payload (dict): The required payload. It must contain the user's email; the username is derived from the part before the "@".
+ """
+
+ user_dict = {
+ **payload,
+ "username": payload["email"].split("@")[0],
+ }
+
+ user = await db_manager.get_user_with_email(email=user_dict["email"])
+ if user is None:
+ log.info("[scopes] Yey! A new user is signing up!")
+
+ # Create user first
+ user = await create_new_user(user_dict)
+
+ log.info("[scopes] User [%s] created", user.id)
+
+ # Prepare payload to create organization
+ create_org_payload = CreateOrganization(
+ name=user_dict["username"],
+ description="My Default Organization",
+ owner=str(user.id),
+ type="default",
+ )
+
+ # Create the user's default organization and workspace
+ organization = await create_organization(
+ payload=create_org_payload,
+ user=user,
+ )
+
+ log.info("[scopes] Organization [%s] created", organization.id)
+
+ # Add the user to demos
+ await add_user_to_demos(str(user.id))
+
+ # Start reverse trial
+ try:
+ await subscription_service.start_reverse_trial(
+ organization_id=str(organization.id),
+ organization_name=organization.name,
+ organization_email=user_dict["email"],
+ )
+
+ except Exception as exc:
+        raise # TODO: handle exceptions
+ # await subscription_service.start_free_plan(
+ # organization_id=str(organization.id),
+ # )
+
+ await check_entitlements(
+ organization_id=str(organization.id),
+ key=Gauge.USERS,
+ delta=1,
+ )
+
+ log.info("[scopes] User [%s] authenticated", user.id)
+
+ if is_ee():
+ try:
+ # Adds contact to loops for marketing emails. TODO: Add opt-in checkbox to supertokens
+ add_contact_to_loops(user_dict["email"]) # type: ignore
+ except ConnectionError as ex:
+            log.warning("Error adding contact to loops: %s", ex)
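+
+
+# Illustrative sketch of the payload create_accounts() expects: at minimum an
+# "email" key (the username is derived from it). Any other keys are passed
+# through to the user record; the field names below are assumptions.
+#
+#     await create_accounts({"uid": "st-123", "email": "jane@example.com"})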
diff --git a/api/ee/src/services/converters.py b/api/ee/src/services/converters.py
new file mode 100644
index 0000000000..5b120899fc
--- /dev/null
+++ b/api/ee/src/services/converters.py
@@ -0,0 +1,321 @@
+import uuid
+from typing import List, Dict, Any
+from datetime import datetime, timezone
+
+from oss.src.services import db_manager
+from oss.src.models.api.evaluation_model import (
+ CorrectAnswer,
+ Evaluation,
+ HumanEvaluation,
+ EvaluationScenario,
+ SimpleEvaluationOutput,
+ EvaluationScenarioInput,
+ HumanEvaluationScenario,
+ EvaluationScenarioOutput,
+)
+from ee.src.services import db_manager_ee
+from ee.src.models.api.workspace_models import (
+ WorkspaceRole,
+ WorkspaceResponse,
+)
+from ee.src.models.shared_models import Permission
+from ee.src.models.db_models import (
+ EvaluationDB,
+ HumanEvaluationDB,
+ EvaluationScenarioDB,
+ HumanEvaluationScenarioDB,
+)
+from oss.src.models.db_models import WorkspaceDB
+
+
+async def get_workspace_in_format(
+ workspace: WorkspaceDB,
+) -> WorkspaceResponse:
+ """Converts the workspace object to the WorkspaceResponse model.
+
+ Arguments:
+ workspace (WorkspaceDB): The workspace object
+
+ Returns:
+ WorkspaceResponse: The workspace object in the WorkspaceResponse model
+ """
+
+ members = []
+
+ project = await db_manager_ee.get_project_by_workspace(
+ workspace_id=str(workspace.id)
+ )
+ project_members = await db_manager_ee.get_project_members(
+ project_id=str(project.id)
+ )
+ invitations = await db_manager_ee.get_project_invitations(
+ project_id=str(project.id), invitation_used=False
+ )
+
+    if invitations:
+ for invitation in invitations:
+ if not invitation.used and str(invitation.project_id) == str(project.id):
+ user = await db_manager.get_user_with_email(invitation.email)
+ member_dict = {
+ "user": {
+ "id": str(user.id) if user else invitation.email,
+ "email": user.email if user else invitation.email,
+ "username": (
+ user.username if user else invitation.email.split("@")[0]
+ ),
+ "status": (
+ "pending"
+ if invitation.expiration_date > datetime.now(timezone.utc)
+ else "expired"
+ ),
+ "created_at": (
+ str(user.created_at)
+ if user
+ else (
+ str(invitation.created_at)
+ if str(invitation.created_at)
+ else None
+ )
+ ),
+ },
+ "roles": [
+ {
+ "role_name": invitation.role,
+ "role_description": WorkspaceRole.get_description(
+ invitation.role
+ ),
+ }
+ ],
+ }
+ members.append(member_dict)
+
+ for project_member in project_members:
+ member_role = project_member.role
+ member_dict = {
+ "user": {
+ "id": str(project_member.user.id),
+ "email": project_member.user.email,
+ "username": project_member.user.username,
+ "status": "member",
+ "created_at": str(project_member.user.created_at),
+ },
+ "roles": (
+ [
+ {
+ "role_name": member_role,
+ "role_description": WorkspaceRole.get_description(member_role),
+ "permissions": Permission.default_permissions(member_role),
+ }
+ ]
+ if member_role
+ else []
+ ),
+ }
+ members.append(member_dict)
+
+ workspace_response = WorkspaceResponse(
+ id=str(workspace.id),
+ name=workspace.name,
+ description=workspace.description,
+ type=workspace.type,
+ members=members,
+ organization=str(workspace.organization_id),
+ created_at=str(workspace.created_at),
+ updated_at=str(workspace.updated_at),
+ )
+ return workspace_response
+
+
+async def get_all_workspace_permissions() -> List[Permission]:
+ """
+ Retrieve all workspace permissions.
+
+ Returns:
+ List[Permission]: A list of all workspace permissions in the DB.
+ """
+ workspace_permissions = list(Permission)
+ return workspace_permissions
+
+
+def get_all_workspace_permissions_by_role(role_name: str) -> List[Permission]:
+    """
+    Retrieve the default workspace permissions for a given role.
+
+    Args:
+        role_name (str): The name of the workspace role.
+
+    Returns:
+        List[Permission]: The default permissions for the role.
+    """
+ workspace_permissions = Permission.default_permissions(
+ getattr(WorkspaceRole, role_name.upper())
+ )
+ return workspace_permissions
+
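+# Example (sketch): resolving the default permissions for a role by name; this
+# assumes the name matches a WorkspaceRole member once upper-cased.
+#
+#     viewer_permissions = get_all_workspace_permissions_by_role("viewer")
+#     # equivalent to Permission.default_permissions(WorkspaceRole.VIEWER)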
+
+async def human_evaluation_db_to_simple_evaluation_output(
+ human_evaluation_db: HumanEvaluationDB,
+) -> SimpleEvaluationOutput:
+ evaluation_variants = await db_manager_ee.fetch_human_evaluation_variants(
+ human_evaluation_id=str(human_evaluation_db.id)
+ )
+ return SimpleEvaluationOutput(
+ id=str(human_evaluation_db.id),
+ app_id=str(human_evaluation_db.app_id),
+ project_id=str(human_evaluation_db.project_id),
+ status=human_evaluation_db.status, # type: ignore
+ evaluation_type=human_evaluation_db.evaluation_type, # type: ignore
+ variant_ids=[
+ str(evaluation_variant.variant_id)
+ for evaluation_variant in evaluation_variants
+ ],
+ )
+
+
+async def evaluation_db_to_pydantic(
+ evaluation_db: EvaluationDB,
+) -> Evaluation:
+ variant_name = (
+ evaluation_db.variant.variant_name
+ if evaluation_db.variant.variant_name
+ else str(evaluation_db.variant_id)
+ )
+ aggregated_results = aggregated_result_of_evaluation_to_pydantic(
+ evaluation_db.aggregated_results
+ )
+
+ return Evaluation(
+ id=str(evaluation_db.id),
+ app_id=str(evaluation_db.app_id),
+ project_id=str(evaluation_db.project_id),
+ status=evaluation_db.status,
+ variant_ids=[str(evaluation_db.variant_id)],
+ variant_revision_ids=[str(evaluation_db.variant_revision_id)],
+ revisions=[str(evaluation_db.variant_revision.revision)],
+ variant_names=[variant_name],
+ testset_id=str(evaluation_db.testset_id),
+ testset_name=evaluation_db.testset.name,
+ aggregated_results=aggregated_results,
+ created_at=str(evaluation_db.created_at),
+ updated_at=str(evaluation_db.updated_at),
+ average_cost=evaluation_db.average_cost,
+ total_cost=evaluation_db.total_cost,
+ average_latency=evaluation_db.average_latency,
+ )
+
+
+async def human_evaluation_db_to_pydantic(
+ evaluation_db: HumanEvaluationDB,
+) -> HumanEvaluation:
+ evaluation_variants = await db_manager_ee.fetch_human_evaluation_variants(
+ human_evaluation_id=str(evaluation_db.id) # type: ignore
+ )
+
+ revisions = []
+ variants_ids = []
+ variants_names = []
+ variants_revision_ids = []
+ for evaluation_variant in evaluation_variants:
+ variant_name = (
+ evaluation_variant.variant.variant_name
+ if isinstance(evaluation_variant.variant_id, uuid.UUID)
+ else str(evaluation_variant.variant_id)
+ )
+ variants_names.append(str(variant_name))
+ variants_ids.append(str(evaluation_variant.variant_id))
+ variant_revision = (
+ str(evaluation_variant.variant_revision.revision)
+ if isinstance(evaluation_variant.variant_revision_id, uuid.UUID)
+ else " None"
+ )
+ revisions.append(variant_revision)
+ variants_revision_ids.append(str(evaluation_variant.variant_revision_id))
+
+ return HumanEvaluation(
+ id=str(evaluation_db.id),
+ app_id=str(evaluation_db.app_id),
+ project_id=str(evaluation_db.project_id),
+ status=evaluation_db.status, # type: ignore
+ evaluation_type=evaluation_db.evaluation_type, # type: ignore
+ variant_ids=variants_ids,
+ variant_names=variants_names,
+ testset_id=str(evaluation_db.testset_id),
+ testset_name=evaluation_db.testset.name,
+ variants_revision_ids=variants_revision_ids,
+ revisions=revisions,
+ created_at=str(evaluation_db.created_at), # type: ignore
+ updated_at=str(evaluation_db.updated_at), # type: ignore
+ )
+
+
+def human_evaluation_scenario_db_to_pydantic(
+ evaluation_scenario_db: HumanEvaluationScenarioDB, evaluation_id: str
+) -> HumanEvaluationScenario:
+ return HumanEvaluationScenario(
+ id=str(evaluation_scenario_db.id),
+ evaluation_id=evaluation_id,
+ inputs=evaluation_scenario_db.inputs, # type: ignore
+ outputs=evaluation_scenario_db.outputs, # type: ignore
+ vote=evaluation_scenario_db.vote, # type: ignore
+ score=evaluation_scenario_db.score, # type: ignore
+ correct_answer=evaluation_scenario_db.correct_answer, # type: ignore
+ is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore
+ note=evaluation_scenario_db.note or "", # type: ignore
+ )
+
+
+def aggregated_result_of_evaluation_to_pydantic(
+ evaluation_aggregated_results: List,
+) -> List[dict]:
+ transformed_results = []
+ for aggregated_result in evaluation_aggregated_results:
+ evaluator_config_dict = (
+ {
+ "id": str(aggregated_result.evaluator_config.id),
+ "name": aggregated_result.evaluator_config.name,
+ "evaluator_key": aggregated_result.evaluator_config.evaluator_key,
+ "settings_values": aggregated_result.evaluator_config.settings_values,
+ "created_at": str(aggregated_result.evaluator_config.created_at),
+ "updated_at": str(aggregated_result.evaluator_config.updated_at),
+ }
+ if isinstance(aggregated_result.evaluator_config_id, uuid.UUID)
+ else None
+ )
+ transformed_results.append(
+ {
+ "evaluator_config": (
+ {} if evaluator_config_dict is None else evaluator_config_dict
+ ),
+ "result": aggregated_result.result,
+ }
+ )
+ return transformed_results
+
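+# Shape sketch of one transformed entry (field values hypothetical); when the
+# evaluator config relation is absent, "evaluator_config" degrades to {}:
+#
+#     {
+#         "evaluator_config": {"id": "...", "name": "...", ...},
+#         "result": <Result>,
+#     }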
+
+async def evaluation_scenario_db_to_pydantic(
+ evaluation_scenario_db: EvaluationScenarioDB, evaluation_id: str
+) -> EvaluationScenario:
+ scenario_results = [
+ {
+ "evaluator_config": str(scenario_result.evaluator_config_id),
+ "result": scenario_result.result,
+ }
+ for scenario_result in evaluation_scenario_db.results
+ ]
+ return EvaluationScenario(
+ id=str(evaluation_scenario_db.id),
+ evaluation_id=evaluation_id,
+ inputs=[
+ EvaluationScenarioInput(**scenario_input) # type: ignore
+ for scenario_input in evaluation_scenario_db.inputs
+ ],
+ outputs=[
+ EvaluationScenarioOutput(**scenario_output) # type: ignore
+ for scenario_output in evaluation_scenario_db.outputs
+ ],
+ correct_answers=[
+ CorrectAnswer(**correct_answer) # type: ignore
+ for correct_answer in evaluation_scenario_db.correct_answers
+ ],
+ is_pinned=evaluation_scenario_db.is_pinned or False, # type: ignore
+ note=evaluation_scenario_db.note or "", # type: ignore
+ results=scenario_results, # type: ignore
+ )
diff --git a/api/ee/src/services/db_manager.py b/api/ee/src/services/db_manager.py
new file mode 100644
index 0000000000..1091c4f736
--- /dev/null
+++ b/api/ee/src/services/db_manager.py
@@ -0,0 +1,35 @@
+import uuid
+
+from oss.src.dbs.postgres.shared.engine import engine
+from ee.src.models.db_models import DeploymentDB_ as DeploymentDB
+
+
+async def create_deployment(
+ app_id: str,
+ project_id: str,
+ uri: str,
+) -> DeploymentDB:
+ """Create a new deployment.
+ Args:
+ app_id (str): The app variant to create the deployment for.
+ project_id (str): The project variant to create the deployment for.
+ uri (str): The URI of the service.
+ Returns:
+ DeploymentDB: The created deployment.
+ """
+
+ async with engine.core_session() as session:
+ try:
+ deployment = DeploymentDB(
+ app_id=uuid.UUID(app_id),
+ project_id=uuid.UUID(project_id),
+ uri=uri,
+ )
+
+ session.add(deployment)
+ await session.commit()
+ await session.refresh(deployment)
+
+ return deployment
+ except Exception as e:
+ raise Exception(f"Error while creating deployment: {e}")
diff --git a/api/ee/src/services/db_manager_ee.py b/api/ee/src/services/db_manager_ee.py
new file mode 100644
index 0000000000..c0076afac3
--- /dev/null
+++ b/api/ee/src/services/db_manager_ee.py
@@ -0,0 +1,2129 @@
+import uuid
+from typing import List, Dict, Union, Any, NoReturn, Optional, Tuple
+
+import sendgrid
+from fastapi import HTTPException
+from sendgrid.helpers.mail import Mail
+
+from sqlalchemy import func, asc
+from sqlalchemy.future import select
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm import joinedload, load_only, aliased
+from sqlalchemy.exc import NoResultFound, MultipleResultsFound
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.utils.common import is_ee
+
+from oss.src.dbs.postgres.shared.engine import engine
+from oss.src.services import db_manager, evaluator_manager
+from ee.src.models.api.workspace_models import (
+ UserRole,
+ UpdateWorkspace,
+ CreateWorkspace,
+ WorkspaceResponse,
+)
+from ee.src.models.api.organization_models import (
+ Organization,
+ CreateOrganization,
+ OrganizationUpdate,
+)
+from ee.src.models.shared_models import WorkspaceRole
+from ee.src.models.db_models import (
+ ProjectDB,
+ WorkspaceDB,
+ EvaluationDB,
+ OrganizationDB,
+ ProjectMemberDB,
+ WorkspaceMemberDB,
+ HumanEvaluationDB,
+ OrganizationMemberDB,
+ EvaluationScenarioDB,
+ HumanEvaluationScenarioDB,
+ HumanEvaluationVariantDB,
+ EvaluationScenarioResultDB,
+ EvaluationEvaluatorConfigDB,
+ EvaluationAggregatedResultDB,
+)
+from oss.src.models.db_models import (
+ AppVariantDB,
+ UserDB,
+ AppDB,
+ TestSetDB,
+ InvitationDB,
+ EvaluatorConfigDB,
+ AppVariantRevisionsDB,
+)
+from oss.src.models.shared_models import (
+ Result,
+ CorrectAnswer,
+ AggregatedResult,
+ EvaluationScenarioResult,
+ EvaluationScenarioInput,
+ EvaluationScenarioOutput,
+ HumanEvaluationScenarioInput,
+)
+from ee.src.services.converters import get_workspace_in_format
+from ee.src.services.selectors import get_org_default_workspace
+
+from oss.src.utils.env import env
+
+
+# Initialize sendgrid api client
+sg = sendgrid.SendGridAPIClient(api_key=env.SENDGRID_API_KEY)
+
+log = get_module_logger(__name__)
+
+
+async def get_organization(organization_id: str) -> OrganizationDB:
+ """
+ Fetches an organization by its ID.
+
+ Args:
+ organization_id (str): The ID of the organization to fetch.
+
+ Returns:
+ OrganizationDB: The fetched organization.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDB).filter_by(id=uuid.UUID(organization_id))
+ )
+ organization = result.scalars().first()
+ return organization
+
+
+async def get_organizations_by_list_ids(organization_ids: List) -> List[OrganizationDB]:
+ """
+ Retrieve organizations from the database by their IDs.
+
+ Args:
+ organization_ids (List): A list of organization IDs to retrieve.
+
+ Returns:
+ List: A list of dictionaries representing the retrieved organizations.
+ """
+
+ async with engine.core_session() as session:
+ organization_uuids = [uuid.UUID(org_id) for org_id in organization_ids]
+ query = select(OrganizationDB).where(OrganizationDB.id.in_(organization_uuids))
+ result = await session.execute(query)
+ organizations = result.scalars().all()
+ return organizations
+
+
+async def get_default_workspace_id(user_id: str) -> str:
+ """
+ Retrieve the default workspace ID for a user.
+
+ Args:
+ user_id (str): The user id.
+
+ Returns:
+ str: The default workspace ID.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(WorkspaceMemberDB)
+ .filter_by(user_id=uuid.UUID(user_id), role=WorkspaceRole.OWNER)
+ .options(load_only(WorkspaceMemberDB.workspace_id)) # type: ignore
+ )
+ member_in_workspace = result.scalars().first()
+ return str(member_in_workspace.workspace_id)
+
+
+async def get_organization_workspaces(organization_id: str):
+ """
+    Retrieves workspaces belonging to an organization.
+
+ Args:
+ organization_id (str): The ID of the organization
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(WorkspaceDB)
+ .filter_by(organization_id=uuid.UUID(organization_id))
+ .options(load_only(WorkspaceDB.organization_id)) # type: ignore
+ )
+ workspaces = result.scalars().all()
+ return workspaces
+
+
+async def get_workspace_administrators(workspace: WorkspaceDB) -> List[UserDB]:
+ """
+ Retrieve the administrators of a workspace.
+
+ Args:
+ workspace (WorkspaceDB): The workspace to retrieve the administrators for.
+
+ Returns:
+ List[UserDB]: A list of UserDB objects representing the administrators of the workspace.
+ """
+
+ administrators = []
+ for member in workspace.members:
+ if workspace.has_role(
+ member.user_id, WorkspaceRole.WORKSPACE_ADMIN
+ ) or workspace.has_role(member.user_id, WorkspaceRole.OWNER):
+ user = await db_manager.get_user_with_id(member.user_id)
+ administrators.append(user)
+ return administrators
+
+
+async def create_project(
+ project_name: str, workspace_id: str, organization_id: str, session: AsyncSession
+) -> ProjectDB:
+ """
+ Create a new project.
+
+ Args:
+ project_name (str): The name of the project.
+ workspace_id (str): The ID of the workspace.
+ organization_id (str): The ID of the organization.
+ session (AsyncSession): The database session.
+
+ Returns:
+        ProjectDB: The created project.
+ """
+
+ project_db = ProjectDB(
+ project_name=project_name,
+ is_default=True,
+ organization_id=uuid.UUID(organization_id),
+ workspace_id=uuid.UUID(workspace_id),
+ )
+
+ session.add(project_db)
+
+ log.info(
+ "[scopes] project created",
+ organization_id=organization_id,
+ workspace_id=workspace_id,
+ project_id=project_db.id,
+ )
+
+ await session.commit()
+
+ return project_db
+
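+# Call sketch: create_project commits on the caller's session, so the caller
+# is expected to own it, e.g. (names illustrative):
+#
+#     async with engine.core_session() as session:
+#         project = await create_project(
+#             "Research", workspace_id, organization_id, session
+#         )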
+
+async def create_default_project(
+ organization_id: str, workspace_id: str, session: AsyncSession
+) -> ProjectDB:
+ """
+ Create a default project for an organization.
+
+ Args:
+ organization_id (str): The ID of the organization.
+ workspace_id (str): The ID of the workspace.
+ session (AsyncSession): The database session.
+
+ Returns:
+        ProjectDB: The created default project.
+ """
+
+ project_db = await create_project(
+ "Default",
+ workspace_id=workspace_id,
+ organization_id=organization_id,
+ session=session,
+ )
+ return project_db
+
+
+async def get_default_workspace_id_from_organization(
+ organization_id: str,
+) -> str:
+ """
+    Get the default (first) workspace ID of an organization.
+
+ Args:
+ organization_id (str): The ID of the organization.
+
+ Returns:
+ str: The default (first) workspace ID.
+ """
+
+ async with engine.core_session() as session:
+ workspace_query = await session.execute(
+ select(WorkspaceDB)
+ .where(
+ WorkspaceDB.organization_id == uuid.UUID(organization_id),
+ )
+ .options(load_only(WorkspaceDB.id))
+ )
+ workspace = workspace_query.scalars().first()
+ if workspace is None:
+ raise NoResultFound(
+ f"No default workspace for the provided organization_id {organization_id} found"
+ )
+ return str(workspace.id)
+
+
+async def get_project_by_workspace(workspace_id: str) -> ProjectDB:
+ """Get the project from database using the organization id and workspace id.
+
+ Args:
+ workspace_id (str): The ID of the workspace
+
+ Returns:
+ ProjectDB: The retrieved project
+ """
+
+ assert workspace_id is not None, "Workspace ID is required to retrieve project"
+ async with engine.core_session() as session:
+ project_query = await session.execute(
+ select(ProjectDB).where(
+ ProjectDB.workspace_id == uuid.UUID(workspace_id),
+ )
+ )
+ project = project_query.scalars().first()
+ if project is None:
+ raise NoResultFound(f"No project with workspace IDs ({workspace_id}) found")
+ return project
+
+
+async def create_project_member(
+ user_id: str, project_id: str, role: str, session: AsyncSession
+) -> None:
+ """
+ Create a new project member.
+
+ Args:
+ user_id (str): The ID of the user.
+ project_id (str): The ID of the project.
+ role (str): The role of the user in the workspace.
+ session (AsyncSession): The database session.
+ """
+
+ project = await db_manager.fetch_project_by_id(
+ project_id=project_id,
+ )
+
+ if not project:
+ raise Exception(f"No project found with ID {project_id}")
+
+ project_member = ProjectMemberDB(
+ user_id=uuid.UUID(user_id),
+ project_id=uuid.UUID(project_id),
+ role=role,
+ )
+
+ session.add(project_member)
+
+ log.info(
+ "[scopes] project membership created",
+ organization_id=project.organization_id,
+ workspace_id=project.workspace_id,
+ project_id=project_id,
+ user_id=user_id,
+ membership_id=project_member.id,
+ )
+
+ await session.commit()
+
+
+async def fetch_project_memberships_by_user_id(
+ user_id: str,
+) -> List[ProjectMemberDB]:
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(ProjectMemberDB)
+ .filter_by(user_id=uuid.UUID(user_id))
+ .options(
+ joinedload(ProjectMemberDB.project).joinedload(ProjectDB.workspace),
+ joinedload(ProjectMemberDB.project).joinedload(ProjectDB.organization),
+ )
+ )
+ project_memberships = result.scalars().all()
+
+ return project_memberships
+
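+# Because the query eager-loads project -> workspace/organization via
+# joinedload, callers can walk the relations without extra round-trips, e.g.:
+#
+#     for membership in await fetch_project_memberships_by_user_id(user_id):
+#         scope = (
+#             membership.project.organization.id,
+#             membership.project.workspace.id,
+#             membership.project.id,
+#         )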
+
+async def create_workspace_db_object(
+ session: AsyncSession,
+ payload: CreateWorkspace,
+ organization: OrganizationDB,
+ user: UserDB,
+ return_wrk_prj: bool = False,
+) -> Union[WorkspaceDB, Tuple[WorkspaceDB, ProjectDB]]:
+ """Create a new workspace.
+
+    Args:
+        session (AsyncSession): The database session.
+        payload (CreateWorkspace): The workspace payload.
+        organization (OrganizationDB): The organization that the workspace belongs to.
+        user (UserDB): The user that the workspace belongs to.
+        return_wrk_prj (bool): Whether to also return the created default project.
+
+    Returns:
+        WorkspaceDB: The created workspace, or a (workspace, project) tuple when
+        return_wrk_prj is True.
+ """
+
+ workspace = WorkspaceDB(
+ name=payload.name,
+ type=payload.type if payload.type else "",
+ description=payload.description if payload.description else "",
+ organization_id=organization.id,
+ )
+
+ session.add(workspace)
+
+ log.info(
+ "[scopes] workspace created",
+ organization_id=organization.id,
+ workspace_id=workspace.id,
+ )
+
+ await session.commit()
+
+ # add user as a member to the workspace with the owner role
+ workspace_member = WorkspaceMemberDB(
+ user_id=user.id,
+ workspace_id=workspace.id,
+ role="owner",
+ )
+ session.add(workspace_member)
+
+ log.info(
+ "[scopes] workspace membership created",
+ organization_id=workspace.organization_id,
+ workspace_id=workspace.id,
+ user_id=user.id,
+ membership_id=workspace_member.id,
+ )
+
+ await session.commit()
+
+ await session.refresh(workspace, attribute_names=["organization"])
+
+ project_db = await create_default_project(
+ organization_id=str(organization.id),
+ workspace_id=str(workspace.id),
+ session=session,
+ )
+
+ # add user as a member to the project member with the owner role
+ await create_project_member(
+ user_id=str(user.id),
+ project_id=str(project_db.id),
+ role=workspace_member.role,
+ session=session,
+ )
+
+ # add default testset and evaluators
+ await db_manager.add_testset_to_app_variant(
+ template_name="completion", # type: ignore
+ app_name="completion", # type: ignore
+ project_id=str(project_db.id),
+ )
+ await evaluator_manager.create_ready_to_use_evaluators(
+ project_id=str(project_db.id)
+ )
+
+ if return_wrk_prj:
+ return workspace, project_db
+
+ return workspace
+
+
+async def create_workspace(
+ payload: CreateWorkspace, organization_id: str, user_uid: str
+) -> WorkspaceResponse:
+ """
+ Create a new workspace.
+
+ Args:
+ payload (CreateWorkspace): The workspace payload.
+ organization_id (str): The organization id.
+ user_uid (str): The user uid.
+
+ Returns:
+ Workspace: The created workspace.
+
+ """
+ try:
+ async with engine.core_session() as session:
+ user_result = await session.execute(select(UserDB).filter_by(uid=user_uid))
+ user = user_result.scalars().first()
+
+ organization_result = await session.execute(
+ select(OrganizationDB).filter_by(id=uuid.UUID(organization_id))
+ )
+ organization = organization_result.scalars().first()
+
+ # create workspace
+ workspace_db = await create_workspace_db_object(
+ session, payload, organization, user
+ )
+
+ return await get_workspace_in_format(workspace_db)
+ except Exception as e:
+ raise e
+
+
+async def update_workspace(
+ payload: UpdateWorkspace, workspace: WorkspaceDB
+) -> WorkspaceResponse:
+ """
+ Update a workspace's details.
+
+ Args:
+ workspace (WorkspaceDB): The workspace to update.
+ payload (UpdateWorkspace): The data to update the workspace with.
+ """
+
+ async with engine.core_session() as session:
+        workspace_id = workspace.id
+        result = await session.execute(select(WorkspaceDB).filter_by(id=workspace_id))
+        workspace = result.scalars().first()
+
+        if not workspace:
+            raise NoResultFound(f"Workspace with id {workspace_id} not found")
+
+        for key, value in payload.model_dump(exclude_unset=True).items():
+            if hasattr(workspace, key):
+                setattr(workspace, key, value)
+
+        await session.commit()
+        await session.refresh(workspace)
+
+ return await get_workspace_in_format(workspace)
+
+
+async def check_user_in_workspace_with_email(email: str, workspace_id: str) -> bool:
+ """
+ Check if a user belongs to a workspace.
+
+ Args:
+ email (str): The email of the user to check.
+ workspace_id (str): The workspace to check.
+
+    Returns:
+        bool: True if the user is a member of the workspace, False otherwise.
+    """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(WorkspaceMemberDB)
+ .join(UserDB, UserDB.id == WorkspaceMemberDB.user_id)
+ .where(
+ UserDB.email == email,
+ WorkspaceMemberDB.workspace_id == uuid.UUID(workspace_id),
+ )
+ )
+ workspace_member = result.scalars().first()
+        return workspace_member is not None
+
+
+async def update_user_roles(
+ workspace_id: str,
+ payload: UserRole,
+ delete: bool = False,
+) -> bool:
+ """
+ Update a user's roles in a workspace.
+
+ Args:
+ workspace_id (str): The ID of the workspace.
+ payload (UserRole): The payload containing the user email and role to update.
+ delete (bool): Whether to delete the user's role or not.
+
+ Returns:
+ bool: True if the user's roles were successfully updated, False otherwise.
+
+ Raises:
+ Exception: If there is an error updating the user's roles.
+ """
+
+ user = await db_manager.get_user_with_email(payload.email)
+ project_id = await db_manager.get_default_project_id_from_workspace(
+ workspace_id=workspace_id
+ )
+
+ async with engine.core_session() as session:
+ # Ensure that an admin can not remove the owner of the workspace/project
+ project_owner_result = await session.execute(
+ select(ProjectMemberDB)
+ .filter_by(project_id=uuid.UUID(project_id), role="owner")
+ .options(
+ load_only(
+ ProjectMemberDB.user_id, # type: ignore
+ ProjectMemberDB.role, # type: ignore
+ )
+ )
+ )
+ project_owner = project_owner_result.scalars().first()
+        if project_owner is not None and user.id == project_owner.user_id:
+ raise HTTPException(
+ 403,
+ {
+ "message": "You do not have permission to perform this action. Please contact your Organization Owner"
+ },
+ )
+
+ project_member_result = await session.execute(
+ select(ProjectMemberDB).filter_by(
+ project_id=uuid.UUID(project_id), user_id=user.id
+ )
+ )
+ project_member = project_member_result.scalars().first()
+ if not project_member:
+ raise NoResultFound(
+ f"User with id {str(user.id)} is not part of the workspace member."
+ )
+
+ workspace_member_result = await session.execute(
+ select(WorkspaceMemberDB).filter_by(
+ workspace_id=uuid.UUID(workspace_id), user_id=user.id
+ )
+ )
+ workspace_member = workspace_member_result.scalars().first()
+ if not workspace_member:
+ raise NoResultFound(
+ f"User with id {str(user.id)} is not part of the workspace member."
+ )
+
+ if not delete:
+ # Update the member's role
+ project_member.role = payload.role
+ workspace_member.role = payload.role
+
+ await session.commit()
+ await session.refresh(project_member)
+ return True
+
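+# Usage sketch (assuming UserRole carries "email" and "role" fields, as read
+# above):
+#
+#     await update_user_roles(
+#         workspace_id=workspace_id,
+#         payload=UserRole(email="jane@example.com", role="editor"),
+#     )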
+
+async def add_user_to_workspace_and_org(
+ organization: OrganizationDB,
+ workspace: WorkspaceDB,
+ user: UserDB,
+ project_id: str,
+ role: str,
+) -> bool:
+ async with engine.core_session() as session:
+ # create joined organization for user
+ user_organization = OrganizationMemberDB(
+ user_id=user.id, organization_id=organization.id
+ )
+ session.add(user_organization)
+
+ log.info(
+ "[scopes] organization membership created",
+ organization_id=organization.id,
+ user_id=user.id,
+ membership_id=user_organization.id,
+ )
+
+ # add user to workspace
+ workspace_member = WorkspaceMemberDB(
+ user_id=user.id,
+ workspace_id=workspace.id,
+ role=role,
+ )
+
+ session.add(workspace_member)
+
+ log.info(
+ "[scopes] workspace membership created",
+ organization_id=organization.id,
+ workspace_id=workspace.id,
+ user_id=user.id,
+ membership_id=workspace_member.id,
+ )
+
+ # add user to project
+ await create_project_member(
+ user_id=str(user.id), project_id=project_id, role=role, session=session
+ )
+
+ return True
+
+
+async def remove_user_from_workspace(
+ workspace_id: str,
+ email: str,
+) -> WorkspaceResponse:
+ """
+ Remove a user from a workspace.
+
+ Args:
+ workspace_id (str): The ID of the workspace.
+        email (str): The email address of the user to remove.
+
+ Returns:
+ workspace (WorkspaceResponse): The updated workspace.
+
+ Raises:
+        HTTPException: 403 if the target user is the workspace owner.
+ """
+
+ user = await db_manager.get_user_with_email(email)
+ project_id = await db_manager.get_default_project_id_from_workspace(
+ workspace_id=workspace_id
+ )
+ project = await db_manager.get_project_by_id(project_id=project_id)
+
+ async with engine.core_session() as session:
+        # If user is None, they were invited but never created an account, so
+        # there are no memberships to remove.
+        if user:
+ # Ensure that a user can not remove the owner of the workspace
+ workspace_owner_result = await session.execute(
+ select(WorkspaceMemberDB)
+ .filter_by(
+ workspace_id=project.workspace_id, user_id=user.id, role="owner"
+ )
+ .options(
+ load_only(
+ WorkspaceMemberDB.user_id, # type: ignore
+ WorkspaceMemberDB.role, # type: ignore
+ )
+ )
+ )
+ workspace_owner = workspace_owner_result.scalars().first()
+ if (workspace_owner is not None and user is not None) and (
+ user.id == workspace_owner.user_id and workspace_owner.role == "owner"
+ ):
+ raise HTTPException(
+ status_code=403,
+ detail={
+ "message": "You do not have permission to perform this action. Please contact your Organization Owner"
+ },
+ )
+
+ # remove user from workspace
+ workspace_member_result = await session.execute(
+ select(WorkspaceMemberDB).filter(
+ WorkspaceMemberDB.workspace_id == project.workspace_id,
+ WorkspaceMemberDB.user_id == user.id,
+ WorkspaceMemberDB.role != "owner",
+ )
+ )
+ workspace_member = workspace_member_result.scalars().first()
+ if workspace_member:
+ await session.delete(workspace_member)
+
+ log.info(
+ "[scopes] workspace membership deleted",
+ organization_id=project.organization_id,
+ workspace_id=workspace_id,
+ user_id=user.id,
+ membership_id=workspace_member.id,
+ )
+
+ # remove user from project
+ project_member_result = await session.execute(
+ select(ProjectMemberDB).filter(
+ ProjectMemberDB.project_id == project.id,
+ ProjectMemberDB.user_id == user.id,
+ ProjectMemberDB.role != "owner",
+ )
+ )
+ project_member = project_member_result.scalars().first()
+ if project_member:
+ await session.delete(project_member)
+
+ log.info(
+ "[scopes] project membership deleted",
+ organization_id=project.organization_id,
+ workspace_id=project.workspace_id,
+ project_id=project.id,
+ user_id=user.id,
+ membership_id=project_member.id,
+ )
+
+ # remove user from organization
+ joined_org_result = await session.execute(
+ select(OrganizationMemberDB).filter_by(
+ user_id=user.id, organization_id=project.organization_id
+ )
+ )
+ member_joined_org = joined_org_result.scalars().first()
+ if member_joined_org:
+ await session.delete(member_joined_org)
+
+ log.info(
+ "[scopes] organization membership deleted",
+ organization_id=project.organization_id,
+ user_id=user.id,
+ membership_id=member_joined_org.id,
+ )
+
+ await session.commit()
+
+ # If there's an invitation for the provided email address, delete it
+ user_workspace_invitations_query = await session.execute(
+ select(InvitationDB)
+ .filter_by(project_id=project.id, email=email)
+ .options(
+ load_only(
+ InvitationDB.id, # type: ignore
+ InvitationDB.project_id, # type: ignore
+ )
+ )
+ )
+ user_invitations = user_workspace_invitations_query.scalars().all()
+ for invitation in user_invitations:
+ await delete_invitation(str(invitation.id))
+
+ workspace_db = await db_manager.get_workspace(workspace_id=workspace_id)
+ return await get_workspace_in_format(workspace_db)
+
+
+async def create_organization(
+ payload: CreateOrganization,
+ user: UserDB,
+ return_org_wrk: Optional[bool] = False,
+ return_org_wrk_prj: Optional[bool] = False,
+) -> Union[
+ OrganizationDB,
+ Tuple[OrganizationDB, WorkspaceDB],
+ Tuple[OrganizationDB, WorkspaceDB, ProjectDB],
+]:
+ """Create a new organization.
+
+ Args:
+ payload (Organization): The organization payload.
+
+ Returns:
+        OrganizationDB: The created organization; an (organization, workspace)
+        tuple if return_org_wrk is True, or an (organization, workspace,
+        project) tuple if return_org_wrk_prj is True.
+
+ Raises:
+ Exception: If there is an error creating the organization.
+ """
+
+ async with engine.core_session() as session:
+ create_org_data = payload.model_dump(exclude_unset=True)
+ if "owner" not in create_org_data:
+ create_org_data["owner"] = str(user.id)
+
+ # create organization
+ organization_db = OrganizationDB(**create_org_data)
+ session.add(organization_db)
+
+ log.info(
+ "[scopes] organization created",
+ organization_id=organization_db.id,
+ )
+
+ await session.commit()
+
+ # create joined organization for user
+ user_organization = OrganizationMemberDB(
+ user_id=user.id, organization_id=organization_db.id
+ )
+ session.add(user_organization)
+
+ log.info(
+ "[scopes] organization membership created",
+ organization_id=organization_db.id,
+ user_id=user.id,
+ membership_id=user_organization.id,
+ )
+
+ await session.commit()
+
+ # construct workspace payload
+ workspace_payload = CreateWorkspace(
+ name=payload.name,
+ type=payload.type if payload.type else "",
+ description=(
+ "My Default Workspace"
+ if payload.type == "default"
+ else payload.description
+ if payload.description
+ else ""
+ ),
+ )
+
+ # create workspace
+ workspace, project = await create_workspace_db_object(
+ session,
+ workspace_payload,
+ organization_db,
+ user,
+ return_wrk_prj=True,
+ )
+
+ if return_org_wrk_prj:
+ return organization_db, workspace, project
+
+ if return_org_wrk:
+ return organization_db, workspace
+
+ return organization_db
+
+
+async def update_organization(
+ organization_id: str, payload: OrganizationUpdate
+) -> OrganizationDB:
+ """
+ Update an organization's details.
+
+ Args:
+ organization_id (str): The organization to update.
+ payload (OrganizationUpdate): The data to update the organization with.
+
+ Returns:
+ Organization: The updated organization.
+
+ Raises:
+ Exception: If there is an error updating the organization.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDB).filter_by(id=uuid.UUID(organization_id))
+ )
+ organization = result.scalars().first()
+
+ if not organization:
+ raise NoResultFound(f"Organization with id {organization_id} not found")
+
+ for key, value in payload.model_dump(exclude_unset=True).items():
+ if hasattr(organization, key):
+ setattr(organization, key, value)
+
+ await session.commit()
+ await session.refresh(organization)
+ return organization
+
+
+async def delete_invitation(invitation_id: str) -> bool:
+ """
+ Delete an invitation from an organization.
+
+ Args:
+        invitation_id (str): The ID of the invitation to delete.
+
+ Returns:
+ bool: True if the invitation was successfully deleted, False otherwise.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(InvitationDB).filter_by(id=uuid.UUID(invitation_id))
+ )
+
+ try:
+ invitation = result.scalars().one_or_none()
+ except MultipleResultsFound as e:
+ log.error(
+ f"Critical error: Database returned two rows when retrieving invitation with ID {invitation_id} to delete from Invitations table. Error details: {str(e)}"
+ )
+ raise HTTPException(
+ 500,
+ {
+ "message": f"Error occured while trying to delete invitation with ID {invitation_id} from Invitations table. Error details: {str(e)}"
+ },
+ )
+
+        if not invitation:
+            raise NoResultFound(f"Invitation with id {invitation_id} not found")
+
+        project_result = await session.execute(
+            select(ProjectDB).filter_by(id=invitation.project_id)
+        )
+        project = project_result.scalars().one_or_none()
+
+ if not project:
+ log.error(f"Project with ID {invitation.project_id} not found.")
+ raise Exception(f"No project found with ID {invitation.project_id}")
+
+ await session.delete(invitation)
+
+ log.info(
+ "[scopes] invitation deleted",
+ organization_id=project.organization_id,
+ workspace_id=project.workspace_id,
+ project_id=invitation.project_id,
+ user_id=invitation.user_id,
+ membership_id=invitation.id,
+ )
+
+ await session.commit()
+
+ return True
+
+
+async def mark_invitation_as_used(
+ project_id: str, user_id: str, invitation: InvitationDB
+) -> bool:
+ """
+ Mark an invitation as used.
+
+ Args:
+ project_id (str): The ID of the project.
+ user_id (str): the ID of the user.
+ invitation (InvitationDB): The invitation to mark as used.
+
+ Returns:
+ bool: True if the invitation was successfully marked as used, False otherwise.
+
+ Raises:
+ HTTPException: If there is an error marking the invitation as used.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(InvitationDB).filter_by(
+ project_id=uuid.UUID(project_id), token=invitation.token
+ )
+ )
+ organization_invitation = result.scalars().first()
+ if not organization_invitation:
+ return False
+
+ organization_invitation.used = True
+ organization_invitation.user_id = uuid.UUID(user_id)
+
+ await session.commit()
+ return True
+
+
+async def get_org_details(organization: Organization) -> dict:
+ """
+ Retrieve details of an organization.
+
+ Args:
+ organization (Organization): The organization to retrieve details for.
+
+ Returns:
+ dict: A dictionary containing the organization's details.
+ """
+
+ default_workspace_db = await get_org_default_workspace(organization)
+ default_workspace = await get_workspace_details(default_workspace_db)
+ workspaces = await get_organization_workspaces(organization_id=str(organization.id))
+
+    organization_details = {
+ "id": str(organization.id),
+ "name": organization.name,
+ "description": organization.description,
+ "type": organization.type,
+ "owner": organization.owner,
+ "workspaces": [str(workspace.id) for workspace in workspaces],
+ "default_workspace": default_workspace,
+ "is_paying": organization.is_paying if is_ee() else None,
+ }
+    return organization_details
+
+
+async def get_workspace_details(workspace: WorkspaceDB) -> WorkspaceResponse:
+ """
+ Retrieve details of a workspace.
+
+ Args:
+        workspace (WorkspaceDB): The workspace to retrieve details for.
+
+    Returns:
+        WorkspaceResponse: The workspace's details.
+
+ Raises:
+ Exception: If there is an error retrieving the workspace details.
+ """
+
+ try:
+ workspace_response = await get_workspace_in_format(workspace)
+ return workspace_response
+ except Exception as e:
+ import traceback
+
+ traceback.print_exc()
+ raise e
+
+
+async def get_organization_invitations(organization_id: str):
+ """
+ Gets the organization invitations.
+
+ Args:
+ organization_id (str): The ID of the organization
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(InvitationDB).filter_by(organization_id=organization_id)
+ )
+ invitations = result.scalars().all()
+ return invitations
+
+
+async def get_project_invitations(project_id: str, **kwargs):
+ """
+ Gets the project invitations.
+
+ Args:
+        project_id (str): The ID of the project
+        invitation_used (bool, optional): If provided, only return invitations
+            whose used flag matches this value.
+    """
+
+ async with engine.core_session() as session:
+ stmt = select(InvitationDB).filter(
+ InvitationDB.project_id == uuid.UUID(project_id)
+ )
+ if kwargs.get("has_pending", False):
+ stmt = stmt.filter(InvitationDB.used == kwargs["invitation_used"])
+
+ result = await session.execute(stmt)
+ invitations = result.scalars().all()
+ return invitations
+
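+# Usage sketch: pass invitation_used to narrow by the used flag, or omit it
+# to fetch every invitation for the project.
+#
+#     pending = await get_project_invitations(project_id, invitation_used=False)
+#     everything = await get_project_invitations(project_id)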
+
+async def get_all_pending_invitations(email: str):
+ """
+ Gets all pending invitations for a given email.
+
+ Args:
+ email (str): The email address of the user.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(InvitationDB).filter(
+ InvitationDB.email == email,
+                InvitationDB.used.is_(False),
+ )
+ )
+ invitations = result.scalars().all()
+ return invitations
+
+
+async def get_project_invitation(
+ project_id: str, token: str, email: str
+) -> InvitationDB:
+ """Get project invitation by project ID, token and email.
+
+ Args:
+ project_id (str): The ID of the project.
+ token (str): The invitation token.
+ email (str): The email address of the invited user.
+
+ Returns:
+ InvitationDB: invitation object
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(InvitationDB).filter_by(
+ project_id=uuid.UUID(project_id), token=token, email=email
+ )
+ )
+ invitation = result.scalars().first()
+ return invitation
+
+
+async def get_project_members(project_id: str):
+ """Gets the members of a project.
+
+ Args:
+ project_id (str): The ID of the project
+ """
+
+ async with engine.core_session() as session:
+ members_query = await session.execute(
+ select(ProjectMemberDB)
+ .filter(ProjectMemberDB.project_id == uuid.UUID(project_id))
+ .options(joinedload(ProjectMemberDB.user))
+ )
+ project_members = members_query.scalars().all()
+ return project_members
+
+
+async def create_org_workspace_invitation(
+ workspace_role: str,
+ token: str,
+ email: str,
+ project_id: str,
+ expiration_date,
+) -> InvitationDB:
+ """
+ Create an organization invitation.
+
+ Args:
+        workspace_role (str): The role to assign the invited user in the project/workspace.
+        token (str): The token for the invitation.
+        email (str): The email address of the invited user.
+        project_id (str): The ID of the project the invitation belongs to.
+        expiration_date: The expiration date of the invitation.
+
+ Returns:
+ InvitationDB: The created invitation.
+
+ """
+
+ user = await db_manager.get_user_with_email(email=email)
+
+ user_id = None
+ if user:
+ user_id = user.id
+
+ project = await db_manager.fetch_project_by_id(
+ project_id=project_id,
+ )
+
+ if not project:
+ raise Exception(f"No project found with ID {project_id}")
+
+ async with engine.core_session() as session:
+ invitation = InvitationDB(
+ token=token,
+ email=email,
+ project_id=uuid.UUID(project_id),
+ expiration_date=expiration_date,
+ role=workspace_role,
+ used=False,
+ )
+
+ session.add(invitation)
+
+ log.info(
+ "[scopes] invitation created",
+ organization_id=project.organization_id,
+ workspace_id=project.workspace_id,
+ project_id=project_id,
+ user_id=user_id,
+ invitation_id=invitation.id,
+ )
+
+ await session.commit()
+
+ return invitation
+
+
+async def get_all_workspace_roles() -> List[WorkspaceRole]:
+ """
+ Retrieve all workspace roles.
+
+ Returns:
+ List[WorkspaceRole]: A list of all workspace roles in the DB.
+ """
+ workspace_roles = list(WorkspaceRole)
+ return workspace_roles
+
+
+# async def get_project_id_from_db_entity(
+# object_id: str, type: str, project_id: str
+# ) -> dict:
+# """
+# Get the project id of the object.
+
+# Args:
+# object_id (str): The ID of the object.
+# type (str): The type of the object.
+
+# Returns:
+# dict: The project_id of the object.
+
+# Raises:
+# ValueError: If the object type is unknown.
+# Exception: If there is an error retrieving the project_id.
+# """
+# try:
+# if type == "app":
+# app = await db_manager.fetch_app_by_id(object_id)
+# project_id = app.project_id
+
+# elif type == "app_variant":
+# app_variant = await db_manager.fetch_app_variant_by_id(object_id)
+# project_id = app_variant.project_id
+
+# elif type == "base":
+# base = await db_manager.fetch_base_by_id(object_id)
+# project_id = base.project_id
+
+# elif type == "deployment":
+# deployment = await db_manager.get_deployment_by_id(object_id)
+# project_id = deployment.project_id
+
+# elif type == "testset":
+# testset = await db_manager.fetch_testset_by_id(object_id)
+# project_id = testset.project_id
+
+# elif type == "evaluation":
+# evaluation = await db_manager.fetch_evaluation_by_id(object_id)
+# project_id = evaluation.project_id
+
+# elif type == "evaluation_scenario":
+# evaluation_scenario = await db_manager.fetch_evaluation_scenario_by_id(
+# object_id
+# )
+# project_id = evaluation_scenario.project_id
+
+# elif type == "evaluator_config":
+# evaluator_config = await db_manager.fetch_evaluator_config(object_id)
+# project_id = evaluator_config.project_id
+
+# elif type == "human_evaluation":
+# human_evaluation = await db_manager.fetch_human_evaluation_by_id(object_id)
+# project_id = human_evaluation.project_id
+
+# elif type == "human_evaluation_scenario":
+# human_evaluation_scenario = (
+# await db_manager.fetch_human_evaluation_scenario_by_id(object_id)
+# )
+# project_id = human_evaluation_scenario.project_id
+
+# elif type == "human_evaluation_scenario_by_evaluation_id":
+# human_evaluation_scenario_by_evaluation = (
+# await db_manager.fetch_human_evaluation_scenario_by_evaluation_id(
+# object_id
+# )
+# )
+# project_id = human_evaluation_scenario_by_evaluation.project_id
+
+# else:
+# raise ValueError(f"Unknown object type: {type}")
+
+# return str(project_id)
+
+# except Exception as e:
+# raise e
+
+
+async def add_user_to_organization(
+ organization_id: str,
+ user_id: str,
+ # is_demo: bool = False,
+) -> None:
+ async with engine.core_session() as session:
+ organization_member = OrganizationMemberDB(
+            user_id=uuid.UUID(user_id),
+            organization_id=uuid.UUID(organization_id),
+ )
+
+ session.add(organization_member)
+
+ log.info(
+ "[scopes] organization membership created",
+ organization_id=organization_id,
+ user_id=user_id,
+ membership_id=organization_member.id,
+ )
+
+ await session.commit()
+
+
+async def add_user_to_workspace(
+ workspace_id: str,
+ user_id: str,
+ role: str,
+ # is_demo: bool = False,
+) -> None:
+ async with engine.core_session() as session:
+        # fetch the workspace by its ID
+        stmt = select(WorkspaceDB).filter_by(id=uuid.UUID(workspace_id))
+        result = await session.execute(stmt)
+        workspace = result.scalars().first()
+
+        if not workspace:
+            raise Exception(f"No workspace found with ID {workspace_id}")
+
+        workspace_member = WorkspaceMemberDB(
+            user_id=uuid.UUID(user_id),
+            workspace_id=uuid.UUID(workspace_id),
+            role=role,
+        )
+
+ session.add(workspace_member)
+
+        log.info(
+ "[scopes] workspace membership created",
+ organization_id=workspace.organization_id,
+ workspace_id=workspace_id,
+ user_id=user_id,
+ membership_id=workspace_member.id,
+ )
+
+ await session.commit()
+
+
+async def add_user_to_project(
+ project_id: str,
+ user_id: str,
+ role: str,
+ is_demo: bool = False,
+) -> None:
+ project = await db_manager.fetch_project_by_id(
+ project_id=project_id,
+ )
+
+ if not project:
+ raise Exception(f"No project found with ID {project_id}")
+
+ async with engine.core_session() as session:
+ project_member = ProjectMemberDB(
+            user_id=uuid.UUID(user_id),
+            project_id=uuid.UUID(project_id),
+ role=role,
+ is_demo=is_demo,
+ )
+
+ session.add(project_member)
+
+ log.info(
+ "[scopes] project membership created",
+ organization_id=project.organization_id,
+ workspace_id=project.workspace_id,
+ project_id=project_id,
+ user_id=user_id,
+ membership_id=project_member.id,
+ )
+
+ await session.commit()
+
+
+async def fetch_evaluation_status_by_id(
+ project_id: str,
+ evaluation_id: str,
+) -> Optional[str]:
+ """Fetch only the status of an evaluation by its ID."""
+ assert evaluation_id is not None, "evaluation_id cannot be None"
+
+ async with engine.core_session() as session:
+ query = (
+ select(EvaluationDB)
+            .filter_by(project_id=uuid.UUID(project_id), id=uuid.UUID(evaluation_id))
+ .options(load_only(EvaluationDB.status))
+ )
+
+ result = await session.execute(query)
+ evaluation = result.scalars().first()
+ return evaluation.status if evaluation else None
+
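+# Sketch of the intended use: only EvaluationDB.status is selected via
+# load_only, so this stays cheap in a polling loop. The status literal is
+# hypothetical.
+#
+#     status = await fetch_evaluation_status_by_id(project_id, evaluation_id)
+#     if status == "EVALUATION_FINISHED":
+#         ...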
+
+async def fetch_evaluation_by_id(
+ project_id: str,
+ evaluation_id: str,
+) -> Optional[EvaluationDB]:
+ """Fetches a evaluation by its ID.
+
+ Args:
+ evaluation_id (str): The ID of the evaluation to fetch.
+
+ Returns:
+ EvaluationDB: The fetched evaluation, or None if no evaluation was found.
+ """
+
+ assert evaluation_id is not None, "evaluation_id cannot be None"
+ async with engine.core_session() as session:
+ base_query = select(EvaluationDB).filter_by(
+            project_id=uuid.UUID(project_id),
+ id=uuid.UUID(evaluation_id),
+ )
+ query = base_query.options(
+ joinedload(EvaluationDB.testset.of_type(TestSetDB)).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+
+ result = await session.execute(
+ query.options(
+ joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.revision), # type: ignore
+ joinedload(
+ EvaluationDB.aggregated_results.of_type(
+ EvaluationAggregatedResultDB
+ )
+ ).joinedload(EvaluationAggregatedResultDB.evaluator_config),
+ )
+ )
+ evaluation = result.unique().scalars().first()
+ return evaluation
+
+
+async def list_human_evaluations(app_id: str, project_id: str):
+ """
+ Fetches human evaluations belonging to an App.
+
+ Args:
+ app_id (str): The application identifier
+ """
+
+ async with engine.core_session() as session:
+ base_query = (
+ select(HumanEvaluationDB)
+ .filter_by(app_id=uuid.UUID(app_id), project_id=uuid.UUID(project_id))
+ .filter(HumanEvaluationDB.testset_id.isnot(None))
+ )
+ query = base_query.options(
+ joinedload(HumanEvaluationDB.testset.of_type(TestSetDB)).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+
+ result = await session.execute(query)
+ human_evaluations = result.scalars().all()
+ return human_evaluations
+
+
+async def create_human_evaluation(
+ app: AppDB,
+ status: str,
+ evaluation_type: str,
+ testset_id: str,
+ variants_ids: List[str],
+):
+ """
+ Creates a human evaluation.
+
+ Args:
+        app (AppDB): The app object
+ status (str): The status of the evaluation
+ evaluation_type (str): The evaluation type
+ testset_id (str): The ID of the evaluation testset
+ variants_ids (List[str]): The IDs of the variants for the evaluation
+ """
+
+ async with engine.core_session() as session:
+ human_evaluation = HumanEvaluationDB(
+ app_id=app.id,
+ project_id=app.project_id,
+ status=status,
+ evaluation_type=evaluation_type,
+ testset_id=testset_id,
+ )
+
+ session.add(human_evaluation)
+ await session.commit()
+ await session.refresh(human_evaluation, attribute_names=["testset"])
+
+ # create variants for human evaluation
+ await create_human_evaluation_variants(
+ human_evaluation_id=str(human_evaluation.id),
+ variants_ids=variants_ids,
+ )
+ return human_evaluation
+
+
+async def fetch_human_evaluation_variants(human_evaluation_id: str):
+ """
+ Fetches human evaluation variants.
+
+ Args:
+ human_evaluation_id (str): The human evaluation ID
+
+ Returns:
+ The human evaluation variants.
+ """
+
+ async with engine.core_session() as session:
+ base_query = select(HumanEvaluationVariantDB).filter_by(
+ human_evaluation_id=uuid.UUID(human_evaluation_id)
+ )
+ query = base_query.options(
+ joinedload(HumanEvaluationVariantDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(HumanEvaluationVariantDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.id, AppVariantRevisionsDB.revision), # type: ignore
+ )
+
+ result = await session.execute(query)
+ evaluation_variants = result.scalars().all()
+ return evaluation_variants
+
+
+async def create_human_evaluation_variants(
+ human_evaluation_id: str, variants_ids: List[str]
+):
+ """
+ Creates human evaluation variants.
+
+ Args:
+ human_evaluation_id (str): The human evaluation identifier
+ variants_ids (List[str]): The variants identifiers
+ """
+
+ variants_dict = {}
+ for variant_id in variants_ids:
+ variant = await db_manager.fetch_app_variant_by_id(app_variant_id=variant_id)
+ if variant:
+ variants_dict[variant_id] = variant
+
+ variants_revisions_dict = {}
+ for variant_id, variant in variants_dict.items():
+ variant_revision = await db_manager.fetch_app_variant_revision_by_variant(
+ app_variant_id=str(variant.id), project_id=str(variant.project_id), revision=variant.revision # type: ignore
+ )
+ if variant_revision:
+ variants_revisions_dict[variant_id] = variant_revision
+
+ if set(variants_dict.keys()) != set(variants_revisions_dict.keys()):
+ raise ValueError("Mismatch between variants and their revisions")
+
+ async with engine.core_session() as session:
+ for variant_id in variants_ids:
+ variant = variants_dict[variant_id]
+ variant_revision = variants_revisions_dict[variant_id]
+ human_evaluation_variant = HumanEvaluationVariantDB(
+ human_evaluation_id=uuid.UUID(human_evaluation_id),
+ variant_id=variant.id, # type: ignore
+ variant_revision_id=variant_revision.id, # type: ignore
+ )
+ session.add(human_evaluation_variant)
+
+ await session.commit()
+
+
+async def fetch_human_evaluation_by_id(
+ evaluation_id: str,
+) -> Optional[HumanEvaluationDB]:
+ """
+    Fetches a human evaluation by its ID.
+
+ Args:
+ evaluation_id (str): The ID of the evaluation to fetch.
+
+ Returns:
+        HumanEvaluationDB: The fetched evaluation, or None if no evaluation was found.
+ """
+
+ assert evaluation_id is not None, "evaluation_id cannot be None"
+ async with engine.core_session() as session:
+ base_query = select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ query = base_query.options(
+ joinedload(HumanEvaluationDB.testset.of_type(TestSetDB)).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+ result = await session.execute(query)
+ evaluation = result.scalars().first()
+ return evaluation
+
+
+async def update_human_evaluation(evaluation_id: str, values_to_update: dict):
+ """Updates human evaluation with the specified values.
+
+ Args:
+ evaluation_id (str): The evaluation ID
+ values_to_update (dict): The values to update
+
+ Exceptions:
+ NoResultFound: if human evaluation is not found
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ )
+ human_evaluation = result.scalars().first()
+ if not human_evaluation:
+ raise NoResultFound(f"Human evaluation with id {evaluation_id} not found")
+
+ for key, value in values_to_update.items():
+ if hasattr(human_evaluation, key):
+ setattr(human_evaluation, key, value)
+
+ await session.commit()
+ await session.refresh(human_evaluation)
+
+
+async def delete_human_evaluation(evaluation_id: str):
+ """Delete the evaluation by its ID.
+
+ Args:
+ evaluation_id (str): The ID of the evaluation to delete.
+ """
+
+ assert evaluation_id is not None, "evaluation_id cannot be None"
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationDB).filter_by(id=uuid.UUID(evaluation_id))
+ )
+ evaluation = result.scalars().first()
+ if not evaluation:
+ raise NoResultFound(f"Human evaluation with id {evaluation_id} not found")
+
+ await session.delete(evaluation)
+ await session.commit()
+
+
+async def create_human_evaluation_scenario(
+ inputs: List[HumanEvaluationScenarioInput],
+ project_id: str,
+ evaluation_id: str,
+ evaluation_extend: Dict[str, Any],
+):
+ """
+ Creates a human evaluation scenario.
+
+ Args:
+        inputs (List[HumanEvaluationScenarioInput]): The inputs.
+        project_id (str): The ID of the project.
+        evaluation_id (str): The evaluation identifier.
+        evaluation_extend (Dict[str, Any]): An extended required payload for the evaluation scenario. Contains score, vote, and correct_answer.
+ """
+
+ async with engine.core_session() as session:
+ evaluation_scenario = HumanEvaluationScenarioDB(
+ **evaluation_extend,
+ project_id=uuid.UUID(project_id),
+ evaluation_id=uuid.UUID(evaluation_id),
+ inputs=[input.model_dump() for input in inputs],
+ outputs=[],
+ )
+
+ session.add(evaluation_scenario)
+ await session.commit()
+
+
+async def update_human_evaluation_scenario(
+ evaluation_scenario_id: str, values_to_update: dict
+):
+ """Updates human evaluation scenario with the specified values.
+
+ Args:
+ evaluation_scenario_id (str): The evaluation scenario ID
+ values_to_update (dict): The values to update
+
+ Exceptions:
+ NoResultFound: if human evaluation scenario is not found
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationScenarioDB).filter_by(
+ id=uuid.UUID(evaluation_scenario_id)
+ )
+ )
+ human_evaluation_scenario = result.scalars().first()
+ if not human_evaluation_scenario:
+ raise NoResultFound(
+ f"Human evaluation scenario with id {evaluation_scenario_id} not found"
+ )
+
+ for key, value in values_to_update.items():
+ if hasattr(human_evaluation_scenario, key):
+ setattr(human_evaluation_scenario, key, value)
+
+ await session.commit()
+ await session.refresh(human_evaluation_scenario)
+
+
+async def fetch_human_evaluation_scenarios(evaluation_id: str):
+ """
+ Fetches human evaluation scenarios.
+
+ Args:
+ evaluation_id (str): The evaluation identifier
+
+ Returns:
+ The evaluation scenarios.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationScenarioDB)
+ .filter_by(evaluation_id=uuid.UUID(evaluation_id))
+ .order_by(asc(HumanEvaluationScenarioDB.created_at))
+ )
+ evaluation_scenarios = result.scalars().all()
+ return evaluation_scenarios
+
+
+async def fetch_evaluation_scenarios(evaluation_id: str, project_id: str):
+ """
+ Fetches evaluation scenarios.
+
+ Args:
+ evaluation_id (str): The evaluation identifier
+ project_id (str): The ID of the project
+
+ Returns:
+ The evaluation scenarios.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(EvaluationScenarioDB)
+ .filter_by(
+ evaluation_id=uuid.UUID(evaluation_id), project_id=uuid.UUID(project_id)
+ )
+ .options(joinedload(EvaluationScenarioDB.results))
+ )
+ evaluation_scenarios = result.unique().scalars().all()
+ return evaluation_scenarios
+
+
+async def fetch_evaluation_scenario_by_id(
+ evaluation_scenario_id: str,
+) -> Optional[EvaluationScenarioDB]:
+    """Fetches an evaluation scenario by its ID.
+
+ Args:
+ evaluation_scenario_id (str): The ID of the evaluation scenario to fetch.
+
+ Returns:
+ EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found.
+ """
+
+ assert evaluation_scenario_id is not None, "evaluation_scenario_id cannot be None"
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(EvaluationScenarioDB).filter_by(id=uuid.UUID(evaluation_scenario_id))
+ )
+ evaluation_scenario = result.scalars().first()
+ return evaluation_scenario
+
+
+async def fetch_human_evaluation_scenario_by_id(
+ evaluation_scenario_id: str,
+) -> Optional[HumanEvaluationScenarioDB]:
+    """Fetches a human evaluation scenario by its ID.
+
+ Args:
+ evaluation_scenario_id (str): The ID of the evaluation scenario to fetch.
+
+ Returns:
+ EvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found.
+ """
+
+ assert evaluation_scenario_id is not None, "evaluation_scenario_id cannot be None"
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationScenarioDB).filter_by(
+ id=uuid.UUID(evaluation_scenario_id)
+ )
+ )
+ evaluation_scenario = result.scalars().first()
+ return evaluation_scenario
+
+
+async def fetch_human_evaluation_scenario_by_evaluation_id(
+ evaluation_id: str,
+) -> Optional[HumanEvaluationScenarioDB]:
+    """Fetches a human evaluation scenario by its evaluation ID.
+
+    Args:
+        evaluation_id (str): The ID of the evaluation whose scenario should be fetched.
+
+    Returns:
+        HumanEvaluationScenarioDB: The fetched evaluation scenario, or None if no evaluation scenario was found.
+ """
+
+ evaluation = await fetch_human_evaluation_by_id(evaluation_id)
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(HumanEvaluationScenarioDB).filter_by(
+ evaluation_id=evaluation.id # type: ignore
+ )
+ )
+ human_eval_scenario = result.scalars().first()
+ return human_eval_scenario
+
+
+async def create_new_evaluation(
+ app: AppDB,
+ project_id: str,
+ testset: TestSetDB,
+ status: Result,
+ variant: str,
+ variant_revision: str,
+) -> EvaluationDB:
+    """Create a new evaluation.
+
+    Returns:
+        EvaluationDB: The created evaluation.
+ """
+
+ async with engine.core_session() as session:
+ evaluation = EvaluationDB(
+ app_id=app.id,
+ project_id=uuid.UUID(project_id),
+ testset_id=testset.id,
+ status=status.model_dump(),
+ variant_id=uuid.UUID(variant),
+ variant_revision_id=uuid.UUID(variant_revision),
+ )
+
+ session.add(evaluation)
+ await session.commit()
+ await session.refresh(
+ evaluation,
+ attribute_names=[
+ "testset",
+ "variant",
+ "variant_revision",
+ "aggregated_results",
+ ],
+ )
+
+ return evaluation
+
+
+async def list_evaluations(app_id: str, project_id: str):
+ """Retrieves evaluations of the specified app from the db.
+
+ Args:
+ app_id (str): The ID of the app
+ project_id (str): The ID of the project
+ """
+
+ async with engine.core_session() as session:
+ base_query = select(EvaluationDB).filter_by(
+ app_id=uuid.UUID(app_id), project_id=uuid.UUID(project_id)
+ )
+ query = base_query.options(
+ joinedload(EvaluationDB.testset.of_type(TestSetDB)).load_only(TestSetDB.id, TestSetDB.name), # type: ignore
+ )
+
+ result = await session.execute(
+ query.options(
+ joinedload(EvaluationDB.variant.of_type(AppVariantDB)).load_only(AppVariantDB.id, AppVariantDB.variant_name), # type: ignore
+ joinedload(EvaluationDB.variant_revision.of_type(AppVariantRevisionsDB)).load_only(AppVariantRevisionsDB.revision), # type: ignore
+ joinedload(
+ EvaluationDB.aggregated_results.of_type(
+ EvaluationAggregatedResultDB
+ )
+ ).joinedload(EvaluationAggregatedResultDB.evaluator_config),
+ )
+ )
+ evaluations = result.unique().scalars().all()
+ return evaluations
+
+
+async def fetch_evaluations_by_resource(
+ resource_type: str, project_id: str, resource_ids: List[str]
+):
+ """
+    Fetches evaluations by resource.
+
+ Args:
+ resource_type (str): The resource type
+ project_id (str): The ID of the project
+ resource_ids (List[str]): The resource identifiers
+
+ Returns:
+ The evaluations by resource.
+
+ Raises:
+        HTTPException: 400 if the resource_type is not supported.
+ """
+
+ ids = list(map(uuid.UUID, resource_ids))
+
+ async with engine.core_session() as session:
+ if resource_type == "variant":
+ result_evaluations = await session.execute(
+ select(EvaluationDB)
+ .filter(
+ EvaluationDB.variant_id.in_(ids),
+ EvaluationDB.project_id == uuid.UUID(project_id),
+ )
+ .options(load_only(EvaluationDB.id)) # type: ignore
+ )
+ result_human_evaluations = await session.execute(
+ select(HumanEvaluationDB)
+ .join(HumanEvaluationVariantDB)
+ .filter(
+ HumanEvaluationVariantDB.variant_id.in_(ids),
+ HumanEvaluationDB.project_id == uuid.UUID(project_id),
+ )
+ .options(load_only(HumanEvaluationDB.id)) # type: ignore
+ )
+ res_evaluations = result_evaluations.scalars().all()
+ res_human_evaluations = result_human_evaluations.scalars().all()
+ return res_evaluations + res_human_evaluations
+
+ elif resource_type == "testset":
+ result_evaluations = await session.execute(
+ select(EvaluationDB)
+ .filter(
+ EvaluationDB.testset_id.in_(ids),
+ EvaluationDB.project_id == uuid.UUID(project_id),
+ )
+ .options(load_only(EvaluationDB.id)) # type: ignore
+ )
+ result_human_evaluations = await session.execute(
+ select(HumanEvaluationDB)
+ .filter(
+ HumanEvaluationDB.testset_id.in_(ids),
+ HumanEvaluationDB.project_id
+ == uuid.UUID(project_id), # Fixed to match HumanEvaluationDB
+ )
+ .options(load_only(HumanEvaluationDB.id)) # type: ignore
+ )
+ res_evaluations = result_evaluations.scalars().all()
+ res_human_evaluations = result_human_evaluations.scalars().all()
+ return res_evaluations + res_human_evaluations
+
+ elif resource_type == "evaluator_config":
+ query = (
+ select(EvaluationDB)
+ .join(EvaluationDB.evaluator_configs)
+ .filter(
+ EvaluationEvaluatorConfigDB.evaluator_config_id.in_(ids),
+ EvaluationDB.project_id == uuid.UUID(project_id),
+ )
+ )
+ result = await session.execute(query)
+ res = result.scalars().all()
+ return res
+
+ raise HTTPException(
+ status_code=400,
+ detail=f"resource_type {resource_type} is not supported",
+ )
+
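+# Illustrative call (a sketch; identifiers are hypothetical). Supported
+# resource types are "variant", "testset" and "evaluator_config":
+#
+#     evaluations = await fetch_evaluations_by_resource(
+#         resource_type="testset",
+#         project_id=str(project.id),
+#         resource_ids=[str(testset.id)],
+#     )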
+
+async def delete_evaluations(evaluation_ids: List[str]) -> None:
+ """Delete evaluations based on the ids provided from the db.
+
+ Args:
+        evaluation_ids (List[str]): The IDs of the evaluations to delete.
+ """
+
+ async with engine.core_session() as session:
+ query = select(EvaluationDB).where(EvaluationDB.id.in_(evaluation_ids))
+ result = await session.execute(query)
+ evaluations = result.scalars().all()
+ for evaluation in evaluations:
+ await session.delete(evaluation)
+ await session.commit()
+
+
+async def create_new_evaluation_scenario(
+ project_id: str,
+ evaluation_id: str,
+ variant_id: str,
+ inputs: List[EvaluationScenarioInput],
+ outputs: List[EvaluationScenarioOutput],
+ correct_answers: Optional[List[CorrectAnswer]],
+ is_pinned: Optional[bool],
+ note: Optional[str],
+ results: List[EvaluationScenarioResult],
+) -> EvaluationScenarioDB:
+ """Create a new evaluation scenario.
+
+ Returns:
+ EvaluationScenarioDB: The created evaluation scenario.
+ """
+
+ async with engine.core_session() as session:
+ evaluation_scenario = EvaluationScenarioDB(
+ project_id=uuid.UUID(project_id),
+ evaluation_id=uuid.UUID(evaluation_id),
+ variant_id=uuid.UUID(variant_id),
+ inputs=[input.model_dump() for input in inputs],
+ outputs=[output.model_dump() for output in outputs],
+ correct_answers=(
+ [correct_answer.model_dump() for correct_answer in correct_answers]
+ if correct_answers is not None
+ else []
+ ),
+ is_pinned=is_pinned,
+ note=note,
+ )
+
+ session.add(evaluation_scenario)
+ await session.commit()
+ await session.refresh(evaluation_scenario)
+
+ # create evaluation scenario result
+ for result in results:
+ evaluation_scenario_result = EvaluationScenarioResultDB(
+ evaluation_scenario_id=evaluation_scenario.id,
+ evaluator_config_id=uuid.UUID(result.evaluator_config),
+ result=result.result.model_dump(),
+ )
+
+ session.add(evaluation_scenario_result)
+
+ await session.commit() # ensures that scenario results insertion is committed
+ await session.refresh(evaluation_scenario)
+
+ return evaluation_scenario
+
+
+async def update_evaluation_with_aggregated_results(
+ evaluation_id: str, aggregated_results: List[AggregatedResult]
+):
+ async with engine.core_session() as session:
+ for result in aggregated_results:
+ aggregated_result = EvaluationAggregatedResultDB(
+ evaluation_id=uuid.UUID(evaluation_id),
+ evaluator_config_id=uuid.UUID(result.evaluator_config),
+ result=result.result.model_dump(),
+ )
+ session.add(aggregated_result)
+
+ await session.commit()
+
+
+async def fetch_eval_aggregated_results(evaluation_id: str):
+ """
+ Fetches an evaluation aggregated results by evaluation identifier.
+
+ Args:
+ evaluation_id (str): The evaluation identifier
+
+ Returns:
+ The evaluation aggregated results by evaluation identifier.
+ """
+
+ async with engine.core_session() as session:
+ base_query = select(EvaluationAggregatedResultDB).filter_by(
+ evaluation_id=uuid.UUID(evaluation_id)
+ )
+ query = base_query.options(
+ joinedload(
+ EvaluationAggregatedResultDB.evaluator_config.of_type(EvaluatorConfigDB)
+ ).load_only(
+ EvaluatorConfigDB.id, # type: ignore
+ EvaluatorConfigDB.name, # type: ignore
+ EvaluatorConfigDB.evaluator_key, # type: ignore
+ EvaluatorConfigDB.settings_values, # type: ignore
+ EvaluatorConfigDB.created_at, # type: ignore
+ EvaluatorConfigDB.updated_at, # type: ignore
+ )
+ )
+
+ result = await session.execute(query)
+ aggregated_results = result.scalars().all()
+ return aggregated_results
+
+
+async def update_evaluation(
+ evaluation_id: str, project_id: str, updates: Dict[str, Any]
+) -> EvaluationDB:
+ """
+    Update an evaluation in the database with the provided id.
+
+    Arguments:
+        evaluation_id (str): The ID of the evaluation to be updated.
+        project_id (str): The ID of the project.
+        updates (Dict[str, Any]): The updates to apply to the evaluation.
+
+    Returns:
+        EvaluationDB: The updated evaluation object.
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(EvaluationDB).filter_by(
+ id=uuid.UUID(evaluation_id), project_id=uuid.UUID(project_id)
+ )
+ )
+ evaluation = result.scalars().first()
+ for key, value in updates.items():
+ if hasattr(evaluation, key):
+ setattr(evaluation, key, value)
+
+ await session.commit()
+ await session.refresh(evaluation)
+
+ return evaluation
+
+
+async def check_if_evaluation_contains_failed_evaluation_scenarios(
+ evaluation_id: str,
+) -> bool:
+ async with engine.core_session() as session:
+ EvaluationResultAlias = aliased(EvaluationScenarioResultDB)
+ query = (
+ select(func.count(EvaluationScenarioDB.id))
+ .join(EvaluationResultAlias, EvaluationScenarioDB.results)
+ .where(
+ EvaluationScenarioDB.evaluation_id == uuid.UUID(evaluation_id),
+ EvaluationResultAlias.result["type"].astext == "error",
+ )
+ )
+
+ result = await session.execute(query)
+ count = result.scalar()
+        return bool(count)
diff --git a/api/ee/src/services/email_helper.py b/api/ee/src/services/email_helper.py
new file mode 100644
index 0000000000..4316160ddf
--- /dev/null
+++ b/api/ee/src/services/email_helper.py
@@ -0,0 +1,51 @@
+import time
+
+import requests
+
+from oss.src.utils.env import env
+
+
+def add_contact_to_loops(email, max_retries=5, initial_delay=1):
+ """
+ Add a contact to Loops audience with retry and exponential backoff.
+
+ Args:
+ email (str): Email address of the contact to be added.
+ max_retries (int): Maximum number of retries in case of rate limiting.
+ initial_delay (int): Initial delay in seconds before retrying.
+
+ Raises:
+ ConnectionError: If max retries reached and unable to connect to Loops API.
+
+ Returns:
+ requests.Response: Response object from the Loops API.
+ """
+
+ # Endpoint URL
+ url = "https://app.loops.so/api/v1/contacts/create"
+
+ # Request headers
+ headers = {"Authorization": f"Bearer {env.LOOPS_API_KEY}"}
+
+ # Request payload/body
+ data = {"email": email}
+
+ retries = 0
+ delay = initial_delay
+
+ while retries < max_retries:
+ # Making the POST request
+ response = requests.post(url, json=data, headers=headers, timeout=20)
+
+ # If response code is 429, it indicates rate limiting
+ if response.status_code == 429:
+ print(f"Rate limit hit. Retrying in {delay} seconds...")
+ time.sleep(delay)
+ retries += 1
+ delay *= 2 # Double the delay for exponential backoff
+ else:
+ # If response is not 429, return it
+ return response
+
+ # If max retries reached, raise an exception or handle as needed
+ raise ConnectionError("Max retries reached. Unable to connect to Loops API.")
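+
+
+# Illustrative usage (a sketch with a placeholder address): with the
+# defaults, a persistently rate-limited call retries after 1, 2, 4, 8 and
+# 16 seconds before raising ConnectionError.
+#
+#     response = add_contact_to_loops("user@example.com")
+#     if response.status_code == 200:
+#         print("Contact added to Loops")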
diff --git a/api/ee/src/services/evaluation_service.py b/api/ee/src/services/evaluation_service.py
new file mode 100644
index 0000000000..e2cd9a2d8f
--- /dev/null
+++ b/api/ee/src/services/evaluation_service.py
@@ -0,0 +1,502 @@
+from typing import Dict, List, Any
+
+from fastapi import HTTPException
+
+from oss.src.utils.logging import get_module_logger
+from ee.src.services import converters
+from oss.src.services import db_manager
+from ee.src.services import db_manager_ee
+
+from oss.src.models.api.evaluation_model import (
+ Evaluation,
+ EvaluationType,
+ HumanEvaluation,
+ HumanEvaluationScenario,
+ HumanEvaluationUpdate,
+ EvaluationScenarioUpdate,
+ EvaluationStatusEnum,
+ NewHumanEvaluation,
+)
+from oss.src.models.db_models import AppDB
+from ee.src.models.db_models import (
+ EvaluationDB,
+ HumanEvaluationDB,
+ HumanEvaluationScenarioDB,
+)
+
+from oss.src.models.shared_models import (
+ HumanEvaluationScenarioInput,
+ HumanEvaluationScenarioOutput,
+ Result,
+)
+
+log = get_module_logger(__name__)
+
+
+class UpdateEvaluationScenarioError(Exception):
+ """Custom exception for update evaluation scenario errors."""
+
+ pass
+
+
+async def prepare_csvdata_and_create_evaluation_scenario(
+ csvdata: List[Dict[str, str]],
+ payload_inputs: List[str],
+ project_id: str,
+ evaluation_type: EvaluationType,
+ new_evaluation: HumanEvaluationDB,
+):
+ """
+ Prepares CSV data and creates evaluation scenarios based on the inputs, evaluation
+ type, and other parameters provided.
+
+ Args:
+ csvdata: A list of dictionaries representing the CSV data.
+ payload_inputs: A list of strings representing the names of the inputs in the variant.
+ project_id (str): The ID of the project
+ evaluation_type: The type of evaluation
+ new_evaluation: The instance of EvaluationDB
+ """
+
+ for datum in csvdata:
+ # Check whether the inputs in the test set match the inputs in the variant
+ try:
+ inputs = [
+ {"input_name": name, "input_value": datum[name]}
+ for name in payload_inputs
+ ]
+ except KeyError:
+ await db_manager_ee.delete_human_evaluation(
+ evaluation_id=str(new_evaluation.id)
+ )
+ msg = f"""
+ Columns in the test set should match the names of the inputs in the variant.
+            Input names in the variant are: {payload_inputs} while
+            columns in the test set are: {[col for col in datum.keys() if col != 'correct_answer']}
+ """
+ raise HTTPException(
+ status_code=400,
+ detail=msg,
+ )
+
+ # Prepare scenario inputs
+ list_of_scenario_input = []
+ for scenario_input in inputs:
+ eval_scenario_input_instance = HumanEvaluationScenarioInput(
+ input_name=scenario_input["input_name"],
+ input_value=scenario_input["input_value"],
+ )
+ list_of_scenario_input.append(eval_scenario_input_instance)
+
+ evaluation_scenario_extend_payload = {
+ **_extend_with_evaluation(evaluation_type),
+ **_extend_with_correct_answer(evaluation_type, datum),
+ }
+ await db_manager_ee.create_human_evaluation_scenario(
+ inputs=list_of_scenario_input,
+ project_id=project_id,
+ evaluation_id=str(new_evaluation.id),
+ evaluation_extend=evaluation_scenario_extend_payload,
+ )
+
+
+async def update_human_evaluation_service(
+    evaluation: HumanEvaluationDB, update_payload: HumanEvaluationUpdate
+) -> None:
+ """
+ Update an existing evaluation based on the provided payload.
+
+ Args:
+        evaluation (HumanEvaluationDB): The human evaluation instance.
+        update_payload (HumanEvaluationUpdate): The payload for the update.
+ """
+
+ # Update the evaluation
+ await db_manager_ee.update_human_evaluation(
+ evaluation_id=str(evaluation.id), values_to_update=update_payload.model_dump()
+ )
+
+
+async def fetch_evaluation_scenarios_for_evaluation(
+ evaluation_id: str, project_id: str
+):
+ """
+ Fetch evaluation scenarios for a given evaluation ID.
+
+ Args:
+ evaluation_id (str): The ID of the evaluation.
+ project_id (str): The ID of the project.
+
+ Returns:
+ List[EvaluationScenario]: A list of evaluation scenarios.
+ """
+
+ evaluation_scenarios = await db_manager_ee.fetch_evaluation_scenarios(
+ evaluation_id=evaluation_id, project_id=project_id
+ )
+ return [
+ await converters.evaluation_scenario_db_to_pydantic(
+ evaluation_scenario_db=evaluation_scenario, evaluation_id=evaluation_id
+ )
+ for evaluation_scenario in evaluation_scenarios
+ ]
+
+
+async def fetch_human_evaluation_scenarios_for_evaluation(
+ human_evaluation: HumanEvaluationDB,
+) -> List[HumanEvaluationScenario]:
+ """
+    Fetch the scenarios of a given human evaluation.
+
+    Args:
+        human_evaluation (HumanEvaluationDB): The human evaluation instance.
+
+    Returns:
+        List[HumanEvaluationScenario]: A list of human evaluation scenarios.
+ """
+ human_evaluation_scenarios = await db_manager_ee.fetch_human_evaluation_scenarios(
+ evaluation_id=str(human_evaluation.id)
+ )
+ eval_scenarios = [
+ converters.human_evaluation_scenario_db_to_pydantic(
+ evaluation_scenario_db=human_evaluation_scenario,
+ evaluation_id=str(human_evaluation.id),
+ )
+ for human_evaluation_scenario in human_evaluation_scenarios
+ ]
+ return eval_scenarios
+
+
+async def update_human_evaluation_scenario(
+ evaluation_scenario_db: HumanEvaluationScenarioDB,
+ evaluation_scenario_data: EvaluationScenarioUpdate,
+ evaluation_type: EvaluationType,
+) -> None:
+ """
+ Updates an evaluation scenario.
+
+ Args:
+ evaluation_scenario_db (EvaluationScenarioDB): The evaluation scenario instance.
+ evaluation_scenario_data (EvaluationScenarioUpdate): New data for the scenario.
+ evaluation_type (EvaluationType): Type of the evaluation.
+
+ Raises:
+ HTTPException: If evaluation scenario not found or access denied.
+ """
+
+ values_to_update = {}
+ payload = evaluation_scenario_data.model_dump(exclude_unset=True)
+
+ if "score" in payload and evaluation_type == EvaluationType.single_model_test:
+ values_to_update["score"] = str(payload["score"])
+
+ if "vote" in payload and evaluation_type == EvaluationType.human_a_b_testing:
+ values_to_update["vote"] = payload["vote"]
+
+ if "outputs" in payload:
+ new_outputs: List[Dict[str, Any]] = [
+ HumanEvaluationScenarioOutput(
+ variant_id=output["variant_id"],
+ variant_output=output["variant_output"],
+ ).model_dump()
+ for output in payload["outputs"]
+ ]
+ values_to_update["outputs"] = new_outputs # type: ignore
+
+ if "inputs" in payload:
+ new_inputs: List[Dict[str, Any]] = [
+ HumanEvaluationScenarioInput(
+ input_name=input_item["input_name"],
+ input_value=input_item["input_value"],
+ ).model_dump()
+ for input_item in payload["inputs"]
+ ]
+ values_to_update["inputs"] = new_inputs # type: ignore
+
+ if "is_pinned" in payload:
+ values_to_update["is_pinned"] = payload["is_pinned"]
+
+ if "note" in payload:
+ values_to_update["note"] = payload["note"]
+
+ if "correct_answer" in payload:
+ values_to_update["correct_answer"] = payload["correct_answer"]
+
+ await db_manager_ee.update_human_evaluation_scenario(
+ evaluation_scenario_id=str(evaluation_scenario_db.id),
+ values_to_update=values_to_update,
+ )
+
+
+def _extend_with_evaluation(evaluation_type: EvaluationType):
+ evaluation = {}
+ if evaluation_type == EvaluationType.single_model_test:
+ evaluation["score"] = ""
+
+ if evaluation_type == EvaluationType.human_a_b_testing:
+ evaluation["vote"] = ""
+ return evaluation
+
+
+def _extend_with_correct_answer(evaluation_type: EvaluationType, row: dict):
+ correct_answer = {"correct_answer": ""}
+ if row.get("correct_answer") is not None:
+ correct_answer["correct_answer"] = row["correct_answer"]
+ return correct_answer
+
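+# Illustrative sketch (hypothetical row): for a single-model test over a
+# row {"country": "Nauru", "correct_answer": "Yaren"}, the two helpers
+# above compose into the scenario extension payload:
+#
+#     {**_extend_with_evaluation(EvaluationType.single_model_test),
+#      **_extend_with_correct_answer(EvaluationType.single_model_test, row)}
+#     # -> {"score": "", "correct_answer": "Yaren"}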
+
+async def fetch_list_evaluations(app: AppDB, project_id: str) -> List[Evaluation]:
+ """
+ Fetches a list of evaluations based on the provided filtering criteria.
+
+ Args:
+ app (AppDB): An app to filter the evaluations.
+ project_id (str): The ID of the project
+
+ Returns:
+ List[Evaluation]: A list of evaluations.
+ """
+
+ evaluations_db = await db_manager_ee.list_evaluations(
+ app_id=str(app.id), project_id=project_id
+ )
+ return [
+ await converters.evaluation_db_to_pydantic(evaluation)
+ for evaluation in evaluations_db
+ ]
+
+
+async def fetch_list_human_evaluations(
+ app_id: str, project_id: str
+) -> List[HumanEvaluation]:
+ """
+    Fetches a list of human evaluations based on the provided filtering criteria.
+
+    Args:
+        app_id (str): The app ID to filter the evaluations.
+        project_id (str): The ID of the project.
+
+    Returns:
+        List[HumanEvaluation]: A list of human evaluations.
+ """
+
+ evaluations_db = await db_manager_ee.list_human_evaluations(
+ app_id=app_id, project_id=project_id
+ )
+ return [
+ await converters.human_evaluation_db_to_pydantic(evaluation)
+ for evaluation in evaluations_db
+ ]
+
+
+async def fetch_human_evaluation(human_evaluation_db) -> HumanEvaluation:
+ """
+    Converts a human evaluation DB instance into its API representation.
+
+ Args:
+ human_evaluation_db (HumanEvaluationDB): The evaluation instance.
+
+ Returns:
+        HumanEvaluation: The converted evaluation.
+ """
+
+ return await converters.human_evaluation_db_to_pydantic(human_evaluation_db)
+
+
+async def delete_human_evaluations(evaluation_ids: List[str]) -> None:
+ """
+ Delete evaluations by their IDs.
+
+    Args:
+        evaluation_ids (List[str]): A list of evaluation IDs.
+
+ Raises:
+ NoResultFound: If evaluation not found or access denied.
+ """
+
+ for evaluation_id in evaluation_ids:
+ await db_manager_ee.delete_human_evaluation(evaluation_id=evaluation_id)
+
+
+async def delete_evaluations(evaluation_ids: List[str]) -> None:
+ """
+ Delete evaluations by their IDs.
+
+ Args:
+ evaluation_ids (List[str]): A list of evaluation IDs.
+ """
+
+ await db_manager_ee.delete_evaluations(evaluation_ids=evaluation_ids)
+
+
+async def create_new_human_evaluation(payload: NewHumanEvaluation) -> HumanEvaluationDB:
+ """
+ Create a new evaluation based on the provided payload and additional arguments.
+
+ Args:
+ payload (NewHumanEvaluation): The evaluation payload.
+
+ Returns:
+ HumanEvaluationDB
+ """
+
+ app = await db_manager.fetch_app_by_id(app_id=payload.app_id)
+ if app is None:
+ raise HTTPException(
+ status_code=404,
+ detail=f"App with id {payload.app_id} does not exist",
+ )
+
+ human_evaluation = await db_manager_ee.create_human_evaluation(
+ app=app,
+ status=payload.status,
+ evaluation_type=payload.evaluation_type,
+ testset_id=payload.testset_id,
+ variants_ids=payload.variant_ids,
+ )
+ if human_evaluation is None:
+ raise HTTPException(
+            status_code=500, detail="Failed to create human evaluation"
+ )
+
+ await prepare_csvdata_and_create_evaluation_scenario(
+ human_evaluation.testset.csvdata,
+ payload.inputs,
+ str(app.project_id),
+ payload.evaluation_type,
+ human_evaluation,
+ )
+ return human_evaluation
+
+
+async def create_new_evaluation(
+ app_id: str,
+ project_id: str,
+ revision_id: str,
+ testset_id: str,
+) -> Evaluation:
+ """
+ Create a new evaluation in the db
+
+ Args:
+ app_id (str): The ID of the app.
+ project_id (str): The ID of the project.
+ revision_id (str): The ID of the variant revision.
+ testset_id (str): The ID of the testset.
+
+ Returns:
+ Evaluation: The newly created evaluation.
+ """
+
+ app = await db_manager.fetch_app_by_id(app_id=app_id)
+ testset = await db_manager.fetch_testset_by_id(testset_id=testset_id)
+ variant_revision = await db_manager.fetch_app_variant_revision_by_id(
+ variant_revision_id=revision_id
+ )
+
+ assert (
+ variant_revision and variant_revision.revision is not None
+ ), f"Variant revision with {revision_id} cannot be None"
+
+ evaluation_db = await db_manager_ee.create_new_evaluation(
+ app=app,
+ project_id=project_id,
+ testset=testset,
+ status=Result(
+ value=EvaluationStatusEnum.EVALUATION_INITIALIZED, type="status", error=None
+ ),
+ variant=str(variant_revision.variant_id),
+ variant_revision=str(variant_revision.id),
+ )
+ return await converters.evaluation_db_to_pydantic(evaluation_db)
+
+
+async def compare_evaluations_scenarios(evaluations_ids: List[str], project_id: str):
+ evaluation = await db_manager_ee.fetch_evaluation_by_id(
+ project_id=project_id,
+ evaluation_id=evaluations_ids[0],
+ )
+ testset = evaluation.testset
+ unique_testset_datapoints = remove_duplicates(testset.csvdata)
+ formatted_inputs = extract_inputs_values_from_testset(unique_testset_datapoints)
+    # formatted_inputs example: [{'input_name': 'country', 'input_value': 'Nauru'}]
+
+ all_scenarios = []
+
+ for evaluation_id in evaluations_ids:
+ eval_scenarios = await fetch_evaluation_scenarios_for_evaluation(
+ evaluation_id=evaluation_id, project_id=project_id
+ )
+ all_scenarios.append(eval_scenarios)
+
+ grouped_scenarios_by_inputs = find_scenarios_by_input(
+ formatted_inputs, all_scenarios
+ )
+
+ return grouped_scenarios_by_inputs
+
+
+def extract_inputs_values_from_testset(testset):
+ extracted_values = []
+
+ input_keys = testset[0].keys()
+
+ for entry in testset:
+ for key in input_keys:
+ if key != "correct_answer":
+ extracted_values.append({"input_name": key, "input_value": entry[key]})
+
+ return extracted_values
+
+
+def find_scenarios_by_input(formatted_inputs, all_scenarios):
+ results = []
+ flattened_scenarios = [
+ scenario for sublist in all_scenarios for scenario in sublist
+ ]
+
+ for formatted_input in formatted_inputs:
+ input_name = formatted_input["input_name"]
+ input_value = formatted_input["input_value"]
+
+ matching_scenarios = [
+ scenario
+ for scenario in flattened_scenarios
+ if any(
+ input_item.name == input_name and input_item.value == input_value
+ for input_item in scenario.inputs
+ )
+ ]
+
+ results.append(
+ {
+ "input_name": input_name,
+ "input_value": input_value,
+ "scenarios": matching_scenarios,
+ }
+ )
+
+ return {
+ "inputs": formatted_inputs,
+ "data": results,
+ }
+
+
+def remove_duplicates(csvdata):
+ unique_data = set()
+ unique_entries = []
+
+ for entry in csvdata:
+ entry_tuple = tuple(entry.items())
+ if entry_tuple not in unique_data:
+ unique_data.add(entry_tuple)
+ unique_entries.append(entry)
+
+ return unique_entries
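+
+
+# Illustrative sketch: duplicate testset rows are dropped while preserving
+# first-seen order, e.g.
+#
+#     remove_duplicates([{"a": 1}, {"a": 1}, {"a": 2}])
+#     # -> [{"a": 1}, {"a": 2}]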
diff --git a/api/ee/src/services/llm_apps_service.py b/api/ee/src/services/llm_apps_service.py
new file mode 100644
index 0000000000..15267ec378
--- /dev/null
+++ b/api/ee/src/services/llm_apps_service.py
@@ -0,0 +1,578 @@
+import json
+import asyncio
+import traceback
+import aiohttp
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.utils import common
+from oss.src.services import helpers
+from oss.src.services.auth_helper import sign_secret_token
+from oss.src.services.db_manager import get_project_by_id
+from oss.src.apis.fastapi.tracing.utils import make_hash_id
+from oss.src.models.shared_models import InvokationResult, Result, Error
+
+log = get_module_logger(__name__)
+
+
+def get_nested_value(d: dict, keys: list, default=None):
+ """
+ Helper function to safely retrieve nested values.
+ """
+ try:
+ for key in keys:
+ if isinstance(d, dict):
+ d = d.get(key, default)
+ else:
+ return default
+ return d
+ except Exception as e:
+ log.error(f"Error accessing nested value: {e}")
+ return default
+
+
+def extract_result_from_response(response: dict):
+ # Initialize default values
+ value = None
+ latency = None
+ cost = None
+ tokens = None
+
+ try:
+ # Validate input
+ if not isinstance(response, dict):
+ raise ValueError("The response must be a dictionary.")
+
+ # Handle version 3.0 response
+ if response.get("version") == "3.0":
+ value = response
+ # Ensure 'data' is a dictionary or convert it to a string
+ if not isinstance(value.get("data"), dict):
+ value["data"] = str(value.get("data"))
+
+ if "tree" in response:
+ trace_tree = response.get("tree", {}).get("nodes", [])[0]
+
+ duration_ms = get_nested_value(
+ trace_tree, ["metrics", "acc", "duration", "total"]
+ )
+ if duration_ms:
+ duration_seconds = duration_ms / 1000
+ else:
+ start_time = get_nested_value(trace_tree, ["time", "start"])
+ end_time = get_nested_value(trace_tree, ["time", "end"])
+
+ if start_time and end_time:
+ duration_seconds = (
+ datetime.fromisoformat(end_time)
+ - datetime.fromisoformat(start_time)
+ ).total_seconds()
+ else:
+ duration_seconds = None
+
+ latency = duration_seconds
+ cost = get_nested_value(
+ trace_tree, ["metrics", "acc", "costs", "total"]
+ )
+ tokens = get_nested_value(
+ trace_tree, ["metrics", "acc", "tokens", "total"]
+ )
+
+ # Handle version 2.0 response
+ elif response.get("version") == "2.0":
+ value = response
+ if not isinstance(value.get("data"), dict):
+ value["data"] = str(value.get("data"))
+
+ if "trace" in response:
+ latency = response["trace"].get("latency", None)
+ cost = response["trace"].get("cost", None)
+ tokens = response["trace"].get("tokens", None)
+
+ # Handle generic response (neither 2.0 nor 3.0)
+ else:
+ value = {"data": str(response.get("message", ""))}
+ latency = response.get("latency", None)
+ cost = response.get("cost", None)
+ tokens = response.get("tokens", None)
+
+ # Determine the type of 'value' (either 'text' or 'object')
+ kind = "text" if isinstance(value, str) else "object"
+
+ except ValueError as ve:
+ log.error(f"Input validation error: {ve}")
+ value = {"error": str(ve)}
+ kind = "error"
+
+ except KeyError as ke:
+ log.error(f"Missing key: {ke}")
+ value = {"error": f"Missing key: {ke}"}
+ kind = "error"
+
+ except TypeError as te:
+ log.error(f"Type error: {te}")
+ value = {"error": f"Type error: {te}"}
+ kind = "error"
+
+ except Exception as e:
+ log.error(f"Unexpected error: {e}")
+ value = {"error": f"Unexpected error: {e}"}
+ kind = "error"
+
+ return value, kind, cost, tokens, latency
+
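+# Illustrative sketch (hypothetical response): for a version "2.0" payload
+# such as
+#
+#     {"version": "2.0", "data": "Hello",
+#      "trace": {"latency": 1.2, "cost": 0.0003, "tokens": 42}}
+#
+# the helper returns (value, "object", 0.0003, 42, 1.2), where value is the
+# response dict with "data" coerced to a string.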
+
+async def make_payload(
+ datapoint: Any, parameters: Dict, openapi_parameters: List[Dict]
+) -> Dict:
+ """
+ Constructs the payload for invoking an app based on OpenAPI parameters.
+
+ Args:
+ datapoint (Any): The data to be sent to the app.
+ parameters (Dict): The parameters required by the app taken from the db.
+ openapi_parameters (List[Dict]): The OpenAPI parameters of the app.
+
+ Returns:
+ Dict: The constructed payload for the app.
+ """
+ payload = {}
+ inputs = {}
+ messages = []
+
+ for param in openapi_parameters:
+ if param["name"] == "ag_config":
+ payload["ag_config"] = parameters
+ elif param["type"] == "input":
+ item = datapoint.get(param["name"], parameters.get(param["name"], ""))
+ assert (
+ param["name"] != "ag_config"
+ ), "ag_config should be handled separately"
+ payload[param["name"]] = item
+
+ # in case of dynamic inputs (as in our templates)
+ elif param["type"] == "dict":
+            # get the list of dynamic input names
+            if (
+                param["name"] in parameters
+            ):  # the default list of inputs may have been edited in the playground (e.g. country_name)
+                input_names = [_["name"] for _ in parameters[param["name"]]]
+            else:  # otherwise, use the default from the OpenAPI schema
+                input_names = param["default"]
+
+ for input_name in input_names:
+ item = datapoint.get(input_name, "")
+ inputs[input_name] = item
+ elif param["type"] == "messages":
+ # TODO: Right now the FE is saving chats always under the column name chats. The whole logic for handling chats and dynamic inputs is convoluted and needs rework in time.
+ chat_data = datapoint.get("chat", "")
+ item = json.loads(chat_data)
+ payload[param["name"]] = item
+ elif param["type"] == "file_url":
+ item = datapoint.get(param["name"], "")
+ payload[param["name"]] = item
+ else:
+ if param["name"] in parameters: # hotfix
+ log.warn(
+ f"Processing other param type '{param['type']}': {param['name']}"
+ )
+ item = parameters[param["name"]]
+ payload[param["name"]] = item
+
+ try:
+ input_keys = helpers.find_key_occurrences(parameters, "input_keys") or []
+ inputs = {key: datapoint.get(key, None) for key in input_keys}
+
+ messages_data = datapoint.get("messages", "[]")
+ messages = json.loads(messages_data)
+ payload["messages"] = messages
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.warn(f"Error making payload: {e}")
+
+ payload["inputs"] = inputs
+
+ return payload
+
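+# A note on the resulting shape (a sketch, since it depends on the app's
+# OpenAPI schema): the payload carries the saved configuration under
+# "ag_config" when the schema declares it, the collected dynamic inputs
+# under "inputs", and any chat messages parsed from the datapoint under
+# "messages", alongside direct input/file_url parameters.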
+
+async def invoke_app(
+ uri: str,
+ datapoint: Any,
+ parameters: Dict,
+ openapi_parameters: List[Dict],
+ user_id: str,
+ project_id: str,
+ **kwargs,
+) -> InvokationResult:
+ """
+ Invokes an app for one datapoint using the openapi_parameters to determine
+ how to invoke the app.
+
+ Args:
+ uri (str): The URI of the app to invoke.
+ datapoint (Any): The data to be sent to the app.
+ parameters (Dict): The parameters required by the app taken from the db.
+ openapi_parameters (List[Dict]): The OpenAPI parameters of the app.
+
+ Returns:
+ InvokationResult: The output of the app.
+
+ Raises:
+ aiohttp.ClientError: If the POST request fails.
+ """
+
+ url = f"{uri}/test"
+ if "application_id" in kwargs:
+ url = url + f"?application_id={kwargs.get('application_id')}"
+
+ payload = await make_payload(datapoint, parameters, openapi_parameters)
+
+ project = await get_project_by_id(
+ project_id=project_id,
+ )
+
+ secret_token = await sign_secret_token(
+ user_id=str(user_id),
+ project_id=str(project_id),
+ workspace_id=str(project.workspace_id),
+ organization_id=str(project.organization_id),
+ )
+
+ headers = {}
+ if secret_token:
+ headers = {"Authorization": f"Secret {secret_token}"}
+ headers["ngrok-skip-browser-warning"] = "1"
+
+ async with aiohttp.ClientSession() as client:
+ app_response = {}
+
+ try:
+ log.info("Invoking workflow...", url=url)
+ response = await client.post(
+ url,
+ json=payload,
+ headers=headers,
+ timeout=900,
+ )
+ app_response = await response.json()
+ response.raise_for_status()
+
+ (
+ value,
+ kind,
+ cost,
+ tokens,
+ latency,
+ ) = extract_result_from_response(app_response)
+
+ trace_id = app_response.get("trace_id", None)
+ span_id = app_response.get("span_id", None)
+
+ return InvokationResult(
+ result=Result(
+ type=kind,
+ value=value,
+ error=None,
+ ),
+ latency=latency,
+ cost=cost,
+ tokens=tokens,
+ trace_id=trace_id,
+ span_id=span_id,
+ )
+
+ except aiohttp.ClientResponseError as e:
+ error_message = app_response.get("detail", {}).get(
+ "error", f"HTTP error {e.status}: {e.message}"
+ )
+ stacktrace = app_response.get("detail", {}).get(
+ "message"
+ ) or app_response.get("detail", {}).get(
+ "traceback", "".join(traceback.format_exception_only(type(e), e))
+ )
+ log.error(f"HTTP error occurred during request: {error_message}")
+ except aiohttp.ServerTimeoutError as e:
+ error_message = "Request timed out"
+ stacktrace = "".join(traceback.format_exception_only(type(e), e))
+ log.error(error_message)
+ except aiohttp.ClientConnectionError as e:
+ error_message = f"Connection error: {str(e)}"
+ stacktrace = "".join(traceback.format_exception_only(type(e), e))
+ log.error(error_message)
+ except json.JSONDecodeError as e:
+ error_message = "Failed to decode JSON from response"
+ stacktrace = "".join(traceback.format_exception_only(type(e), e))
+ log.error(error_message)
+ except Exception as e:
+ error_message = f"Unexpected error: {str(e)}"
+ stacktrace = "".join(traceback.format_exception_only(type(e), e))
+ log.error(error_message)
+
+ return InvokationResult(
+ result=Result(
+ type="error",
+ error=Error(
+ message=error_message,
+ stacktrace=stacktrace,
+ ),
+ )
+ )
+
+
+async def run_with_retry(
+ uri: str,
+ input_data: Any,
+ parameters: Dict,
+ max_retry_count: int,
+ retry_delay: int,
+ openapi_parameters: List[Dict],
+ user_id: str,
+ project_id: str,
+ **kwargs,
+) -> InvokationResult:
+ """
+ Runs the specified app with retry mechanism.
+
+ Args:
+ uri (str): The URI of the app.
+ input_data (Any): The input data for the app.
+ parameters (Dict): The parameters for the app.
+ max_retry_count (int): The maximum number of retries.
+ retry_delay (int): The delay between retries in seconds.
+ openapi_parameters (List[Dict]): The OpenAPI parameters for the app.
+
+ Returns:
+        InvokationResult: The invocation result.
+
+ """
+
+ if "references" in kwargs and "testcase_id" in input_data:
+ kwargs["references"]["testcase"] = {"id": input_data["testcase_id"]}
+
+ references = kwargs.get("references", None)
+ links = kwargs.get("links", None)
+ # hash_id = make_hash_id(references=references, links=links)
+
+ retries = 0
+ last_exception = None
+ while retries < max_retry_count:
+ try:
+ result = await invoke_app(
+ uri,
+ input_data,
+ parameters,
+ openapi_parameters,
+ user_id,
+ project_id,
+ **kwargs,
+ )
+ return result
+ except aiohttp.ClientError as e:
+ last_exception = e
+ log.error(f"Error in evaluation. Retrying in {retry_delay} seconds:", e)
+ await asyncio.sleep(retry_delay)
+ retries += 1
+ except Exception as e:
+ last_exception = e
+ log.warn(f"Error processing datapoint: {input_data}. {str(e)}")
+ log.warn("".join(traceback.format_exception_only(type(e), e)))
+ retries += 1
+
+    # If max retries are reached, or an unhandled exception ended the loop,
+    # return an error result carrying the last exception.
+ log.warn("Max retries reached")
+ exception_message = (
+ "Max retries reached"
+ if retries == max_retry_count
+ else f"Error processing {input_data} datapoint"
+ )
+
+ return InvokationResult(
+ result=Result(
+ type="error",
+ value=None,
+ error=Error(message=exception_message, stacktrace=str(last_exception)),
+ )
+ )
+
+
+async def batch_invoke(
+ uri: str,
+ testset_data: List[Dict],
+ parameters: Dict,
+ rate_limit_config: Dict,
+ user_id: str,
+ project_id: str,
+ **kwargs,
+) -> List[InvokationResult]:
+ """
+    Invokes the LLM app in batches, processing the testset data.
+
+    Args:
+        uri (str): The URI of the LLM app.
+        testset_data (List[Dict]): The testset data to be processed.
+        parameters (Dict): The parameters for the LLM app.
+        rate_limit_config (Dict): The rate limit configuration.
+
+ Returns:
+ List[InvokationResult]: The list of app outputs after running all batches.
+ """
+    batch_size = rate_limit_config[
+        "batch_size"
+    ]  # Number of testset rows to process in each batch
+    max_retries = rate_limit_config[
+        "max_retries"
+    ]  # Maximum number of times to retry a failed LLM call
+    retry_delay = rate_limit_config[
+        "retry_delay"
+    ]  # Delay before retrying a failed LLM call (in seconds)
+    delay_between_batches = rate_limit_config[
+        "delay_between_batches"
+    ]  # Delay between batches (in seconds)
+
+ list_of_app_outputs: List[
+ InvokationResult
+ ] = [] # Outputs after running all batches
+
+ project = await get_project_by_id(
+ project_id=project_id,
+ )
+
+ secret_token = await sign_secret_token(
+ user_id=str(user_id),
+ project_id=str(project_id),
+ workspace_id=str(project.workspace_id),
+ organization_id=str(project.organization_id),
+ )
+
+ headers = {}
+ if secret_token:
+ headers = {"Authorization": f"Secret {secret_token}"}
+ headers["ngrok-skip-browser-warning"] = "1"
+
+ openapi_parameters = None
+ max_recursive_depth = 5
+ runtime_prefix = uri
+ route_path = ""
+
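+    # Walk up the URI one path segment at a time (at most five levels),
+    # moving the stripped segments into route_path, until an /openapi.json
+    # is found. For a hypothetical uri "https://host/services/completion"
+    # this tries "https://host/services/completion/openapi.json" first,
+    # then "https://host/services/openapi.json" with route_path
+    # "/completion".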
+ while max_recursive_depth > 0 and not openapi_parameters:
+ try:
+ openapi_parameters = await get_parameters_from_openapi(
+ runtime_prefix + "/openapi.json",
+ route_path,
+ headers,
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ openapi_parameters = None
+
+ if not openapi_parameters:
+ max_recursive_depth -= 1
+ if not runtime_prefix.endswith("/"):
+ route_path = "/" + runtime_prefix.split("/")[-1] + route_path
+ runtime_prefix = "/".join(runtime_prefix.split("/")[:-1])
+ else:
+ route_path = ""
+ runtime_prefix = runtime_prefix[:-1]
+
+ # Final attempt to fetch OpenAPI parameters
+ openapi_parameters = await get_parameters_from_openapi(
+ runtime_prefix + "/openapi.json",
+ route_path,
+ headers,
+ )
+
+    # Process the testset in sequential batches
+ for start_idx in range(0, len(testset_data), batch_size):
+ tasks = []
+
+ end_idx = min(start_idx + batch_size, len(testset_data))
+ for index in range(start_idx, end_idx):
+ task = asyncio.ensure_future(
+ run_with_retry(
+ uri,
+ testset_data[index],
+ parameters,
+ max_retries,
+ retry_delay,
+ openapi_parameters,
+ user_id,
+ project_id,
+ **kwargs,
+ )
+ )
+ tasks.append(task)
+
+ results = await asyncio.gather(*tasks)
+
+ for result in results:
+ list_of_app_outputs.append(result)
+
+ # Delay between batches if more to come
+ if end_idx < len(testset_data):
+ await asyncio.sleep(delay_between_batches)
+
+ return list_of_app_outputs
+
+
+async def get_parameters_from_openapi(
+ runtime_prefix: str,
+ route_path: str,
+ headers: Optional[Dict[str, str]],
+) -> List[Dict]:
+ """
+    Parse the OpenAPI schema of an LLM app to return the list of parameters it takes, with each parameter's type determined by its x-parameter field.
+
+    Args:
+        runtime_prefix (str): The base URI under which /openapi.json is served.
+        route_path (str): The route path of the app's /test endpoint within the schema.
+        headers (Optional[Dict[str, str]]): Headers to send with the request.
+
+    Returns:
+        list: A list of parameters. Each is a dict with name, type, and default.
+        Type can be one of: input, text, choice, float, dict, bool, int, file_url, messages.
+
+ """
+
+    schema = await _get_openapi_json_from_uri(runtime_prefix, headers)
+
+ try:
+ body_schema_name = (
+ schema["paths"][route_path + "/test"]["post"]["requestBody"]["content"][
+ "application/json"
+ ]["schema"]["$ref"]
+ .split("/")
+ .pop()
+ )
+ except KeyError:
+ body_schema_name = ""
+
+ try:
+ properties = schema["components"]["schemas"][body_schema_name]["properties"]
+ except KeyError:
+ properties = {}
+
+ parameters = []
+ for name, param in properties.items():
+ parameters.append(
+ {
+ "name": name,
+ "type": param.get("x-parameter", "input"),
+ "default": param.get("default", []),
+ }
+ )
+ return parameters
+
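+# Illustrative sketch (hypothetical schema property): a property declared as
+#
+#     "temperature": {"x-parameter": "float", "default": 1.0}
+#
+# yields {"name": "temperature", "type": "float", "default": 1.0}; absent
+# fields fall back to type "input" and default [].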
+
+async def _get_openapi_json_from_uri(
+    uri: str,
+    headers: Optional[Dict[str, str]],
+):
+ if headers is None:
+ headers = {}
+ headers["ngrok-skip-browser-warning"] = "1"
+
+ async with aiohttp.ClientSession() as client:
+ resp = await client.get(uri, headers=headers, timeout=5)
+ resp_text = await resp.text()
+ json_data = json.loads(resp_text)
+ return json_data
diff --git a/api/ee/src/services/organization_service.py b/api/ee/src/services/organization_service.py
new file mode 100644
index 0000000000..7ee4fdb150
--- /dev/null
+++ b/api/ee/src/services/organization_service.py
@@ -0,0 +1,121 @@
+from urllib.parse import quote
+
+from ee.src.services import db_manager_ee
+from oss.src.services import email_service
+from oss.src.models.db_models import UserDB
+from ee.src.models.db_models import (
+ WorkspaceDB,
+ OrganizationDB,
+)
+from ee.src.models.api.organization_models import (
+ OrganizationUpdate,
+)
+
+from oss.src.utils.env import env
+
+
+async def update_an_organization(
+ org_id: str, payload: OrganizationUpdate
+) -> OrganizationDB:
+ org = await db_manager_ee.get_organization(org_id)
+ if org is not None:
+ await db_manager_ee.update_organization(str(org.id), payload)
+ return org
+ raise NotFound("Organization not found")
+
+
+class NotFound(Exception):
+    """Custom exception raised when an organization is not found."""
+
+ pass
+
+
+async def send_invitation_email(
+ email: str,
+ token: str,
+ project_id: str,
+ workspace: WorkspaceDB,
+ organization: OrganizationDB,
+ user: UserDB,
+):
+ """
+ Sends an invitation email to the specified email address, containing a link to accept the invitation.
+
+ Args:
+ email (str): The email address to send the invitation to.
+ token (str): The token to include in the invitation link.
+ project_id (str): The ID of the project that the user is being invited to join.
+        workspace (WorkspaceDB): The workspace that the user is being invited to join.
+        organization (OrganizationDB): The organization that owns the workspace.
+        user (UserDB): The user who is sending the invitation.
+
+ Returns:
+ bool: True if the email was sent successfully, False otherwise.
+ """
+
+ html_template = email_service.read_email_template("./templates/send_email.html")
+
+ token_param = quote(token, safe="")
+ email_param = quote(email, safe="")
+ org_param = quote(str(organization.id), safe="")
+ workspace_param = quote(str(workspace.id), safe="")
+ project_param = quote(project_id, safe="")
+
+ invite_link = (
+ f"{env.AGENTA_WEB_URL}/auth?token={token_param}&email={email_param}"
+ f"&org_id={org_param}&workspace_id={workspace_param}&project_id={project_param}"
+ )
+
+ html_content = html_template.format(
+ username_placeholder=user.username,
+ action_placeholder="invited you to join",
+ workspace_placeholder=workspace.name,
+        call_to_action=(
+            "Click the link below to accept the invitation:<br />"
+            f'<a href="{invite_link}">Accept Invitation</a>'
+        ),
+ )
+
+ await email_service.send_email(
+ from_email="account@hello.agenta.ai",
+ to_email=email,
+ subject=f"{user.username} invited you to join {workspace.name}",
+ html_content=html_content,
+ )
+ return True
+
+
+async def notify_org_admin_invitation(workspace: WorkspaceDB, user: UserDB) -> bool:
+ """
+ Sends an email notification to the owner of an organization when a new member joins.
+
+ Args:
+ workspace (WorkspaceDB): The workspace that the user has joined.
+ user (UserDB): The user who has joined the organization.
+
+ Returns:
+ bool: True if the email was sent successfully, False otherwise.
+ """
+
+ html_template = email_service.read_email_template("./templates/send_email.html")
+ html_content = html_template.format(
+ username_placeholder=user.username,
+ action_placeholder="joined your Workspace",
+ workspace_placeholder=f'"{workspace.name}"',
+        call_to_action=f'Click the link below to view your Workspace:<br /><a href="{env.AGENTA_WEB_URL}">View Workspace</a>',  # href target assumed
+ )
+
+ workspace_admins = await db_manager_ee.get_workspace_administrators(workspace)
+ for workspace_admin in workspace_admins:
+ await email_service.send_email(
+ from_email="account@hello.agenta.ai",
+ to_email=workspace_admin.email,
+ subject=f"New Member Joined {workspace.name}",
+ html_content=html_content,
+ )
+
+ return True
+
+
+async def get_organization_details(org_id: str) -> dict:
+ organization = await db_manager_ee.get_organization(org_id)
+ return await db_manager_ee.get_org_details(organization)
diff --git a/api/ee/src/services/results_service.py b/api/ee/src/services/results_service.py
new file mode 100644
index 0000000000..ca52151315
--- /dev/null
+++ b/api/ee/src/services/results_service.py
@@ -0,0 +1,116 @@
+import uuid
+from typing import Sequence, Dict, Any
+
+from ee.src.services import db_manager_ee
+from oss.src.models.api.evaluation_model import EvaluationType
+from ee.src.models.db_models import (
+ HumanEvaluationDB,
+ EvaluationScenarioDB,
+)
+
+
+async def fetch_results_for_evaluation(evaluation: HumanEvaluationDB):
+ evaluation_scenarios = await db_manager_ee.fetch_human_evaluation_scenarios(
+ evaluation_id=str(evaluation.id)
+ )
+
+ results: Dict[str, Any] = {}
+ if len(evaluation_scenarios) == 0:
+ return results
+
+ evaluation_variants = await db_manager_ee.fetch_human_evaluation_variants(
+ human_evaluation_id=str(evaluation.id)
+ )
+ results["variants"] = [
+ str(evaluation_variant.variant_id) for evaluation_variant in evaluation_variants
+ ]
+
+ variant_names: list[str] = []
+ for evaluation_variant in evaluation_variants:
+ variant_name = (
+ evaluation_variant.variant.variant_name
+ if isinstance(evaluation_variant.variant_id, uuid.UUID)
+ else str(evaluation_variant.variant_id)
+ )
+ variant_names.append(str(variant_name))
+
+ results["variant_names"] = variant_names
+ results["nb_of_rows"] = len(evaluation_scenarios)
+
+ if evaluation.evaluation_type == EvaluationType.human_a_b_testing: # type: ignore
+ results.update(
+ await _compute_stats_for_human_a_b_testing_evaluation(evaluation_scenarios)
+ )
+
+ return results
+
+
+async def _compute_stats_for_evaluation(evaluation_scenarios: list, classes: list):
+ results = {}
+ for cl in classes:
+ results[cl] = [
+ scenario for scenario in evaluation_scenarios if scenario.score == cl
+ ]
+ return results
+
+
+async def _compute_stats_for_human_a_b_testing_evaluation(
+ evaluation_scenarios: Sequence[EvaluationScenarioDB],
+):
+ results: Dict[str, Any] = {}
+ results["variants_votes_data"] = {}
+ results["flag_votes"] = {}
+ results["positive_votes"] = {}
+
+ flag_votes_nb = [
+ scenario for scenario in evaluation_scenarios if scenario.vote == "0"
+ ]
+
+ positive_votes_nb = [
+ scenario for scenario in evaluation_scenarios if scenario.vote == "1"
+ ]
+
+ results["positive_votes"]["number_of_votes"] = len(positive_votes_nb)
+ results["positive_votes"]["percentage"] = (
+ round(len(positive_votes_nb) / len(evaluation_scenarios) * 100, 2)
+ if len(evaluation_scenarios)
+ else 0
+ )
+
+ results["flag_votes"]["number_of_votes"] = len(flag_votes_nb)
+ results["flag_votes"]["percentage"] = (
+ round(len(flag_votes_nb) / len(evaluation_scenarios) * 100, 2)
+ if len(evaluation_scenarios)
+ else 0
+ )
+
+ for scenario in evaluation_scenarios:
+ if scenario.vote not in results["variants_votes_data"]:
+ results["variants_votes_data"][scenario.vote] = {}
+ results["variants_votes_data"][scenario.vote]["number_of_votes"] = 1
+ else:
+ results["variants_votes_data"][scenario.vote]["number_of_votes"] += 1
+
+ for key, value in results["variants_votes_data"].items():
+ value["percentage"] = round(
+ value["number_of_votes"] / len(evaluation_scenarios) * 100, 2
+ )
+ return results
+
+
+async def fetch_results_for_single_model_test(evaluation_id: str):
+ evaluation_scenarios = await db_manager_ee.fetch_human_evaluation_scenarios(
+ evaluation_id=str(evaluation_id)
+ )
+ scores_and_counts: Dict[str, Any] = {}
+ for evaluation_scenario in evaluation_scenarios:
+ score = evaluation_scenario.score
+ if isinstance(score, str):
+ if score.isdigit(): # Check if the string is a valid integer
+ score = int(score)
+ else:
+ continue # Skip if the string is not a valid integer
+
+ scores_and_counts[score] = scores_and_counts.get(score, 0) + 1
+
+ return scores_and_counts
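+
+
+# Illustrative sketch: for scenarios scored "1", "1" and "3", the returned
+# mapping is {1: 2, 3: 1}; non-numeric string scores are skipped.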
diff --git a/api/ee/src/services/selectors.py b/api/ee/src/services/selectors.py
new file mode 100644
index 0000000000..f8a10ceecb
--- /dev/null
+++ b/api/ee/src/services/selectors.py
@@ -0,0 +1,125 @@
+from typing import Dict, List, Union
+
+from sqlalchemy.future import select
+from sqlalchemy.exc import NoResultFound
+from sqlalchemy.orm import load_only, joinedload
+
+from oss.src.services import db_manager
+from oss.src.utils.logging import get_module_logger
+
+from oss.src.dbs.postgres.shared.engine import engine
+from ee.src.models.api.organization_models import Organization
+from ee.src.models.db_models import (
+ WorkspaceDB,
+ OrganizationDB,
+ WorkspaceMemberDB,
+ OrganizationMemberDB,
+)
+
+log = get_module_logger(__name__)
+
+
+async def get_user_org_and_workspace_id(user_uid: str) -> Dict[str, Union[str, List[str]]]:
+ """
+ Retrieves the user ID and organization IDs associated with a given user UID.
+
+ Args:
+ user_uid (str): The UID of the user.
+
+ Returns:
+        dict: A dictionary containing the user ID, UID, and the lists of workspace IDs and organization IDs associated with the user.
+        Raises NoResultFound if the user does not exist.
+
+ Example Usage:
+ result = await get_user_org_and_workspace_id("user123")
+ print(result)
+
+ Output:
+ { "id": "123", "uid": "user123", "organization_ids": [], "workspace_ids": []}
+ """
+
+ async with engine.core_session() as session:
+ user = await db_manager.get_user_with_id(user_id=user_uid)
+ if not user:
+ raise NoResultFound(f"User with uid {user_uid} not found")
+
+ user_org_result = await session.execute(
+ select(OrganizationMemberDB)
+ .filter_by(user_id=user.id)
+ .options(load_only(OrganizationMemberDB.organization_id)) # type: ignore
+ )
+ orgs = user_org_result.scalars().all()
+ organization_ids = [str(user_org.organization_id) for user_org in orgs]
+
+ member_in_workspaces_result = await session.execute(
+ select(WorkspaceMemberDB)
+ .filter_by(user_id=user.id)
+ .options(load_only(WorkspaceMemberDB.workspace_id)) # type: ignore
+ )
+ workspaces_ids = [
+ str(user_workspace.workspace_id)
+ for user_workspace in member_in_workspaces_result.scalars().all()
+ ]
+
+ return {
+ "id": str(user.id),
+ "uid": str(user.uid),
+ "workspace_ids": workspaces_ids,
+ "organization_ids": organization_ids,
+ }
+
+
+async def user_exists(user_email: str) -> bool:
+ """Check if user exists in the database.
+
+ Arguments:
+ user_email (str): The email address of the logged-in user
+
+ Returns:
+ bool: confirming if the user exists or not.
+ """
+
+ user = await db_manager.get_user_with_email(email=user_email)
+    return user is not None
+
+
+async def get_user_own_org(user_uid: str) -> OrganizationDB:
+    """Gets the user's default organization from the database.
+
+ Arguments:
+ user_uid (str): The uid of the user
+
+ Returns:
+        OrganizationDB: The user's default organization.
+ """
+
+ user = await db_manager.get_user_with_id(user_id=user_uid)
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(OrganizationDB).filter_by(
+ owner=str(user.id),
+ type="default",
+ )
+ )
+ org = result.scalars().first()
+ return org
+
+
+async def get_org_default_workspace(organization: Organization) -> WorkspaceDB:
+    """Gets the default workspace for an organization from the database.
+
+ Arguments:
+ organization (Organization): The organization
+
+ Returns:
+ WorkspaceDB: Instance of WorkspaceDB
+ """
+
+ async with engine.core_session() as session:
+ result = await session.execute(
+ select(WorkspaceDB)
+ .filter_by(organization_id=organization.id, type="default")
+ .options(joinedload(WorkspaceDB.members))
+ )
+ workspace = result.scalars().first()
+ return workspace
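+
+
+# Hypothetical usage sketch (illustrative only, not part of this patch): resolve
+# a user's default organization and then that organization's default workspace.
+#
+#     org = await get_user_own_org(user_uid)
+#     workspace = await get_org_default_workspace(org)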
diff --git a/api/ee/src/services/templates/send_email.html b/api/ee/src/services/templates/send_email.html
new file mode 100644
index 0000000000..7d124ffd8a
--- /dev/null
+++ b/api/ee/src/services/templates/send_email.html
@@ -0,0 +1,7 @@
+Hello,
+
+ {username_placeholder} has {action_placeholder} {workspace_placeholder} on
+ Agenta.
+
+{call_to_action}
+Thank you for using Agenta!
diff --git a/api/ee/src/services/utils.py b/api/ee/src/services/utils.py
new file mode 100644
index 0000000000..0eaedde4ff
--- /dev/null
+++ b/api/ee/src/services/utils.py
@@ -0,0 +1,21 @@
+# Stdlib Imports
+import asyncio
+from functools import partial
+from typing import Any, Callable
+
+
+async def run_in_separate_thread(func: Callable, *args, **kwargs) -> Any:
+ """
+ Run a synchronous function in a separate thread.
+
+ Args:
+ func (callable): The synchronous function to be executed.
+ args (tuple): Positional arguments to be passed to `func`.
+ kwargs (dict): Keyword arguments to be passed to `func`.
+
+ Returns:
+ The result of the synchronous function.
+ """
+
+    loop = asyncio.get_running_loop()
+ return await loop.run_in_executor(None, partial(func, *args, **kwargs))
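+
+
+# Hypothetical usage sketch (assumed caller, not part of this patch): offload a
+# blocking call from a coroutine so the event loop is not blocked.
+#
+#     import time
+#
+#     async def handler() -> None:
+#         await run_in_separate_thread(time.sleep, 1)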
diff --git a/api/ee/src/services/workspace_manager.py b/api/ee/src/services/workspace_manager.py
new file mode 100644
index 0000000000..d446729804
--- /dev/null
+++ b/api/ee/src/services/workspace_manager.py
@@ -0,0 +1,355 @@
+import asyncio
+
+from typing import List
+from fastapi import HTTPException
+from fastapi.responses import JSONResponse
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.services import db_manager
+from ee.src.services import db_manager_ee, converters
+from ee.src.models.db_models import (
+ WorkspaceDB,
+ OrganizationDB,
+)
+from oss.src.models.db_models import UserDB
+from ee.src.models.api.api_models import (
+ InviteRequest,
+ ReseendInviteRequest,
+)
+from ee.src.models.api.workspace_models import (
+ Permission,
+ WorkspaceRole,
+ WorkspaceResponse,
+ CreateWorkspace,
+ UpdateWorkspace,
+)
+from oss.src.models.db_models import InvitationDB
+from oss.src.services.organization_service import (
+ create_invitation,
+ check_existing_invitation,
+ check_valid_invitation,
+)
+from ee.src.services.organization_service import send_invitation_email
+
+log = get_module_logger(__name__)
+
+
+async def get_workspace(workspace_id: str) -> WorkspaceDB:
+ """
+ Get the workspace object based on the provided workspace ID.
+
+ Parameters:
+ - workspace_id (str): The ID of the workspace.
+
+ Returns:
+ - WorkspaceDB: The workspace object corresponding to the provided ID.
+
+ Raises:
+ - HTTPException: If the workspace with the provided ID is not found.
+
+ """
+
+ workspace = await db_manager.get_workspace(workspace_id)
+ if workspace is not None:
+ return workspace
+ raise HTTPException(
+        status_code=404, detail=f"Workspace with id {workspace_id} not found"
+ )
+
+
+async def create_new_workspace(
+ payload: CreateWorkspace, organization_id: str, user_uid: str
+) -> WorkspaceResponse:
+ """
+ Create a new workspace.
+
+ Args:
+ payload (CreateWorkspace): The workspace payload.
+ organization_id (str): The organization id.
+ user_uid (str): The user uid.
+
+ Returns:
+ WorkspaceResponse: The created workspace.
+ """
+
+ workspace = await db_manager_ee.create_workspace(payload, organization_id, user_uid)
+ return workspace
+
+
+async def update_workspace(
+ payload: UpdateWorkspace, workspace_id: str
+) -> WorkspaceResponse:
+ """
+ Update a workspace's details.
+
+ Args:
+ payload (UpdateWorkspace): The data to update the workspace with.
+ workspace_id (str): The ID of the workspace to update.
+
+ Returns:
+ WorkspaceResponse: The updated workspace.
+
+ Raises:
+ HTTPException: If the workspace with the given ID is not found.
+ """
+
+    # get_workspace raises a 404 HTTPException if the workspace does not exist
+    workspace = await get_workspace(workspace_id)
+    updated_workspace = await db_manager_ee.update_workspace(payload, workspace)
+    return updated_workspace
+
+
+async def get_all_workspace_roles() -> List[WorkspaceRole]:
+ """
+ Retrieve all workspace roles.
+
+ Returns:
+ List[WorkspaceRole]: A list of all workspace roles in the DB.
+ """
+
+ workspace_roles_from_db = await db_manager_ee.get_all_workspace_roles()
+ return workspace_roles_from_db
+
+
+async def get_all_workspace_permissions() -> List[Permission]:
+ """
+ Retrieve all workspace permissions.
+
+ Returns:
+ List[Permission]: A list of all workspace permissions in the DB.
+ """
+
+ workspace_permissions_from_db = await converters.get_all_workspace_permissions()
+ return workspace_permissions_from_db
+
+
+async def invite_user_to_workspace(
+ payload: List[InviteRequest],
+ org_id: str,
+ project_id: str,
+ workspace_id: str,
+ user_uid: str,
+) -> JSONResponse:
+ """
+ Invite a user to a workspace.
+
+ Args:
+ user_uid (str): The user uid.
+ org_id (str): The ID of the organization that the workspace belongs to.
+ project_id (str): The ID of the project that belongs to the workspace.
+ workspace_id (str): The ID of the workspace.
+        payload (List[InviteRequest]): The invites, each containing an email address and the roles to grant.
+
+ Returns:
+ JSONResponse: The response containing the invitation details.
+
+ Raises:
+ HTTPException: If there is an error retrieving the workspace.
+ """
+
+ try:
+ workspace = await get_workspace(workspace_id)
+ organization = await db_manager_ee.get_organization(org_id)
+ user_performing_action = await db_manager.get_user(user_uid)
+
+ for payload_invite in payload:
+ # Check that the user is not inviting themselves
+ if payload_invite.email == user_performing_action.email:
+ return JSONResponse(
+ status_code=400,
+ content={"error": "You cannot invite yourself to a workspace"},
+ )
+
+ # Check if the user is already a member of the workspace
+ if await db_manager_ee.check_user_in_workspace_with_email(
+ payload_invite.email, str(workspace.id)
+ ):
+ return JSONResponse(
+ status_code=400,
+ content={"error": "User is already a member of the workspace"},
+ )
+
+ # Check if the email address already has a valid, unused invitation for the workspace
+ existing_invitation, existing_role = await check_existing_invitation(
+ project_id, payload_invite.email
+ )
+ if not existing_invitation and not existing_role:
+ # Create a new invitation
+ invitation = await create_invitation(
+ payload_invite.roles[0], project_id, payload_invite.email
+ )
+
+ # Send the invitation email
+ send_email = await send_invitation_email(
+ payload_invite.email,
+ invitation.token, # type: ignore
+ project_id,
+ workspace,
+ organization,
+ user_performing_action,
+ )
+
+ if not send_email:
+ return JSONResponse(
+                        {"detail": "Failed to invite user to workspace"},
+ status_code=400,
+ )
+ else:
+ return JSONResponse(
+ status_code=200,
+ content={
+ "message": "Invitation already exists",
+ },
+ )
+
+ return JSONResponse(
+            {"message": "Invited users to workspace"}, status_code=200
+ )
+
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+async def resend_user_workspace_invite(
+ payload: ReseendInviteRequest,
+ project_id: str,
+ org_id: str,
+ workspace_id: str,
+ user_uid: str,
+) -> JSONResponse:
+ """
+ Resend an invitation to a user to a workspace.
+
+ Args:
+ org_id (str): The ID of the organization that the workspace belongs to.
+ project_id (str): The ID of the project.
+ workspace_id (str): The ID of the workspace.
+        payload (ReseendInviteRequest): The payload containing the email address of the user whose invitation is being resent.
+        user_uid (str): The uid of the user performing the action.
+
+ Returns:
+ JSONResponse: The response containing the invitation details.
+
+ Raises:
+ HTTPException: If there is an error retrieving the workspace.
+ """
+
+ try:
+ workspace = await get_workspace(workspace_id)
+ organization = await db_manager_ee.get_organization(org_id)
+ user_performing_action = await db_manager.get_user(user_uid)
+
+ # Check if the email address already has a valid, unused invitation for the workspace
+ existing_invitation, existing_role = await check_existing_invitation(
+ project_id, payload.email
+ )
+ if existing_invitation:
+ invitation = existing_invitation
+ elif existing_role:
+ # Create a new invitation
+ invitation = await create_invitation(
+ existing_role, project_id, payload.email
+ )
+ else:
+ raise HTTPException(
+ status_code=404,
+ detail="No existing invitation found for the user",
+ )
+
+ # Send the invitation email
+ send_email = await send_invitation_email(
+ payload.email,
+ invitation.token,
+ project_id,
+ workspace,
+ organization,
+ user_performing_action,
+ )
+
+ if send_email:
+ return JSONResponse(
+                {"message": "Invited user to workspace"}, status_code=200
+ )
+ else:
+ return JSONResponse(
+                {"detail": "Failed to invite user to workspace"}, status_code=400
+ )
+
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+async def accept_workspace_invitation(
+ token: str,
+ project_id: str,
+ organization: OrganizationDB,
+ workspace: WorkspaceDB,
+ user: UserDB,
+) -> bool:
+ """
+ Accept an invitation to a workspace.
+
+ Args:
+ token (str): The invitation token.
+ project_id (str): The ID of the project.
+        organization (OrganizationDB): The organization that the workspace belongs to.
+        workspace (WorkspaceDB): The workspace.
+        user (UserDB): The user accepting the invitation.
+
+ Returns:
+ bool: True if the user was successfully added to the workspace, False otherwise
+
+ Raises:
+ HTTPException: If there is an error retrieving the workspace.
+ """
+
+ try:
+ # Check if the user is already a member of the workspace
+ if await db_manager_ee.check_user_in_workspace_with_email(
+ user.email, str(workspace.id)
+ ):
+ raise HTTPException(
+ status_code=409,
+ detail="User is already a member of the workspace",
+ )
+
+ invitation = await check_valid_invitation(project_id, user.email, token)
+ if invitation is not None:
+ assert (
+ invitation.role is not None
+ ), "Invitation does not have any workspace role"
+ await db_manager_ee.add_user_to_workspace_and_org(
+ organization, workspace, user, project_id, invitation.role
+ )
+
+ await db_manager_ee.mark_invitation_as_used(
+ project_id, str(user.id), invitation
+ )
+ return True
+
+ else:
+ # Existing invitation is expired
+ raise Exception("Invitation has expired or does not exist")
+    except Exception:
+        raise
+
+
+async def remove_user_from_workspace(
+ workspace_id: str,
+ email: str,
+) -> WorkspaceResponse:
+ """
+ Remove a user from a workspace.
+
+ Args:
+ workspace_id (str): The ID of the workspace.
+        email (str): The email address of the user to remove.
+
+ Returns:
+ WorkspaceResponse: The updated workspace.
+ """
+
+ remove_user = await db_manager_ee.remove_user_from_workspace(workspace_id, email)
+ return remove_user
diff --git a/api/ee/src/tasks/__init__.py b/api/ee/src/tasks/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/tasks/evaluations/__init__.py b/api/ee/src/tasks/evaluations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/src/tasks/evaluations/batch.py b/api/ee/src/tasks/evaluations/batch.py
new file mode 100644
index 0000000000..cf65107b6a
--- /dev/null
+++ b/api/ee/src/tasks/evaluations/batch.py
@@ -0,0 +1,254 @@
+from typing import Dict, List, Optional
+from uuid import UUID
+import asyncio
+import traceback
+from json import dumps
+
+from celery import shared_task, states, Task
+
+from fastapi import Request
+
+from oss.src.utils.helpers import parse_url, get_slug_from_name_and_id
+from oss.src.utils.logging import get_module_logger
+from oss.src.services.auth_helper import sign_secret_token
+from ee.src.services import llm_apps_service
+from oss.src.models.shared_models import InvokationResult
+from oss.src.services.db_manager import (
+ fetch_app_by_id,
+ fetch_app_variant_by_id,
+ fetch_app_variant_revision_by_id,
+ get_deployment_by_id,
+ get_project_by_id,
+)
+from oss.src.core.secrets.utils import get_llm_providers_secrets
+from ee.src.utils.entitlements import check_entitlements, Counter
+
+from oss.src.dbs.postgres.queries.dbes import (
+ QueryArtifactDBE,
+ QueryVariantDBE,
+ QueryRevisionDBE,
+)
+from oss.src.dbs.postgres.testcases.dbes import (
+ TestcaseBlobDBE,
+)
+from oss.src.dbs.postgres.testsets.dbes import (
+ TestsetArtifactDBE,
+ TestsetVariantDBE,
+ TestsetRevisionDBE,
+)
+from oss.src.dbs.postgres.workflows.dbes import (
+ WorkflowArtifactDBE,
+ WorkflowVariantDBE,
+ WorkflowRevisionDBE,
+)
+
+from oss.src.dbs.postgres.tracing.dao import TracingDAO
+from oss.src.dbs.postgres.blobs.dao import BlobsDAO
+from oss.src.dbs.postgres.git.dao import GitDAO
+from oss.src.dbs.postgres.evaluations.dao import EvaluationsDAO
+
+from oss.src.core.tracing.service import TracingService
+from oss.src.core.queries.service import QueriesService
+from oss.src.core.testcases.service import TestcasesService
+from oss.src.core.testsets.service import TestsetsService
+from oss.src.core.testsets.service import SimpleTestsetsService
+from oss.src.core.workflows.service import WorkflowsService
+from oss.src.core.evaluators.service import EvaluatorsService
+from oss.src.core.evaluators.service import SimpleEvaluatorsService
+from oss.src.core.evaluations.service import EvaluationsService
+from oss.src.core.annotations.service import AnnotationsService
+
+# from oss.src.apis.fastapi.tracing.utils import make_hash_id
+from oss.src.apis.fastapi.tracing.router import TracingRouter
+from oss.src.apis.fastapi.testsets.router import SimpleTestsetsRouter
+from oss.src.apis.fastapi.evaluators.router import SimpleEvaluatorsRouter
+from oss.src.apis.fastapi.annotations.router import AnnotationsRouter
+
+from oss.src.core.annotations.types import (
+ AnnotationOrigin,
+ AnnotationKind,
+ AnnotationChannel,
+)
+from oss.src.apis.fastapi.annotations.models import (
+ AnnotationCreate,
+ AnnotationCreateRequest,
+)
+
+from oss.src.core.evaluations.types import (
+ EvaluationStatus,
+ EvaluationRun,
+ EvaluationRunCreate,
+ EvaluationRunEdit,
+ EvaluationScenarioCreate,
+ EvaluationScenarioEdit,
+ EvaluationResultCreate,
+ EvaluationMetricsCreate,
+)
+
+from oss.src.core.shared.dtos import Reference
+from oss.src.core.tracing.dtos import (
+ Filtering,
+ Windowing,
+ Formatting,
+ Format,
+ Focus,
+ TracingQuery,
+)
+from oss.src.core.workflows.dtos import (
+ WorkflowServiceData,
+ WorkflowServiceRequest,
+ WorkflowServiceResponse,
+ WorkflowServiceInterface,
+ WorkflowRevisionData,
+ WorkflowRevision,
+ WorkflowVariant,
+ Workflow,
+)
+
+from oss.src.core.queries.dtos import (
+ QueryRevision,
+ QueryVariant,
+ Query,
+)
+
+from oss.src.core.workflows.dtos import Tree
+
+from oss.src.core.evaluations.utils import get_metrics_keys_from_schema
+
+
+log = get_module_logger(__name__)
+
+
+# DBS --------------------------------------------------------------------------
+
+tracing_dao = TracingDAO()
+
+testcases_dao = BlobsDAO(
+ BlobDBE=TestcaseBlobDBE,
+)
+
+queries_dao = GitDAO(
+ ArtifactDBE=QueryArtifactDBE,
+ VariantDBE=QueryVariantDBE,
+ RevisionDBE=QueryRevisionDBE,
+)
+
+testsets_dao = GitDAO(
+ ArtifactDBE=TestsetArtifactDBE,
+ VariantDBE=TestsetVariantDBE,
+ RevisionDBE=TestsetRevisionDBE,
+)
+
+workflows_dao = GitDAO(
+ ArtifactDBE=WorkflowArtifactDBE,
+ VariantDBE=WorkflowVariantDBE,
+ RevisionDBE=WorkflowRevisionDBE,
+)
+
+evaluations_dao = EvaluationsDAO()
+
+# CORE -------------------------------------------------------------------------
+
+tracing_service = TracingService(
+ tracing_dao=tracing_dao,
+)
+
+queries_service = QueriesService(
+ queries_dao=queries_dao,
+)
+
+testcases_service = TestcasesService(
+ testcases_dao=testcases_dao,
+)
+
+testsets_service = TestsetsService(
+ testsets_dao=testsets_dao,
+ testcases_service=testcases_service,
+)
+
+simple_testsets_service = SimpleTestsetsService(
+ testsets_service=testsets_service,
+)
+
+workflows_service = WorkflowsService(
+ workflows_dao=workflows_dao,
+)
+
+evaluators_service = EvaluatorsService(
+ workflows_service=workflows_service,
+)
+
+simple_evaluators_service = SimpleEvaluatorsService(
+ evaluators_service=evaluators_service,
+)
+
+evaluations_service = EvaluationsService(
+ evaluations_dao=evaluations_dao,
+ tracing_service=tracing_service,
+ queries_service=queries_service,
+ testsets_service=testsets_service,
+ evaluators_service=evaluators_service,
+)
+
+# APIS -------------------------------------------------------------------------
+
+tracing_router = TracingRouter(
+ tracing_service=tracing_service,
+)
+
+simple_testsets_router = SimpleTestsetsRouter(
+ simple_testsets_service=simple_testsets_service,
+) # TODO: REMOVE/REPLACE ONCE TRANSFER IS MOVED TO 'core'
+
+simple_evaluators_router = SimpleEvaluatorsRouter(
+ simple_evaluators_service=simple_evaluators_service,
+) # TODO: REMOVE/REPLACE ONCE TRANSFER IS MOVED TO 'core'
+
+annotations_service = AnnotationsService(
+ tracing_router=tracing_router,
+ evaluators_service=evaluators_service,
+ simple_evaluators_service=simple_evaluators_service,
+)
+
+annotations_router = AnnotationsRouter(
+ annotations_service=annotations_service,
+) # TODO: REMOVE/REPLACE ONCE ANNOTATE IS MOVED TO 'core'
+
+# ------------------------------------------------------------------------------
+
+
+@shared_task(
+ name="src.tasks.evaluations.batch.evaluate_testsets",
+ queue="src.tasks.evaluations.batch.evaluate_testsets",
+ bind=True,
+)
+def evaluate_testsets(
+ self,
+ *,
+ project_id: UUID,
+ user_id: UUID,
+ #
+ run_id: UUID,
+):
+ pass
+
+
+@shared_task(
+ name="src.tasks.evaluations.batch.evaluate_queries",
+ queue="src.tasks.evaluations.batch.evaluate_queries",
+ bind=True,
+)
+def evaluate_queries(
+ self: Task,
+ *,
+ project_id: UUID,
+ user_id: UUID,
+ #
+ run_id: UUID,
+):
+ pass
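+
+
+# Hypothetical dispatch sketch (assumed caller-side usage, not in this patch):
+# the tasks above are enqueued by name onto their dedicated queues, e.g.
+#
+#     evaluate_queries.apply_async(
+#         kwargs={"project_id": project_id, "user_id": user_id, "run_id": run_id},
+#         queue="src.tasks.evaluations.batch.evaluate_queries",
+#     )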
diff --git a/api/ee/src/tasks/evaluations/legacy.py b/api/ee/src/tasks/evaluations/legacy.py
new file mode 100644
index 0000000000..50d211f713
--- /dev/null
+++ b/api/ee/src/tasks/evaluations/legacy.py
@@ -0,0 +1,1391 @@
+from typing import Dict, List, Optional
+from uuid import UUID
+from json import dumps
+from asyncio import get_event_loop
+
+from celery import shared_task, states
+
+from fastapi import Request
+
+from oss.src.utils.helpers import parse_url, get_slug_from_name_and_id
+from oss.src.utils.logging import get_module_logger
+from oss.src.services.auth_helper import sign_secret_token
+from ee.src.services import llm_apps_service
+from oss.src.models.shared_models import InvokationResult
+from oss.src.services.db_manager import (
+ fetch_app_by_id,
+ fetch_app_variant_by_id,
+ fetch_app_variant_revision_by_id,
+ fetch_evaluator_config,
+ get_deployment_by_id,
+ get_project_by_id,
+)
+from oss.src.core.secrets.utils import get_llm_providers_secrets
+from ee.src.utils.entitlements import check_entitlements, Counter
+
+from oss.src.dbs.postgres.queries.dbes import (
+ QueryArtifactDBE,
+ QueryVariantDBE,
+ QueryRevisionDBE,
+)
+from oss.src.dbs.postgres.testcases.dbes import (
+ TestcaseBlobDBE,
+)
+from oss.src.dbs.postgres.testsets.dbes import (
+ TestsetArtifactDBE,
+ TestsetVariantDBE,
+ TestsetRevisionDBE,
+)
+from oss.src.dbs.postgres.workflows.dbes import (
+ WorkflowArtifactDBE,
+ WorkflowVariantDBE,
+ WorkflowRevisionDBE,
+)
+
+from oss.src.dbs.postgres.tracing.dao import TracingDAO
+from oss.src.dbs.postgres.blobs.dao import BlobsDAO
+from oss.src.dbs.postgres.git.dao import GitDAO
+from oss.src.dbs.postgres.evaluations.dao import EvaluationsDAO
+
+from oss.src.core.tracing.service import TracingService
+from oss.src.core.queries.service import QueriesService
+from oss.src.core.testcases.service import TestcasesService
+from oss.src.core.testsets.service import TestsetsService, SimpleTestsetsService
+from oss.src.core.workflows.service import WorkflowsService
+from oss.src.core.evaluators.service import EvaluatorsService
+from oss.src.core.evaluators.service import SimpleEvaluatorsService
+from oss.src.core.evaluations.service import EvaluationsService
+from oss.src.core.annotations.service import AnnotationsService
+
+from oss.src.apis.fastapi.tracing.utils import make_hash_id
+from oss.src.apis.fastapi.tracing.router import TracingRouter
+from oss.src.apis.fastapi.testsets.router import SimpleTestsetsRouter
+from oss.src.apis.fastapi.evaluators.router import SimpleEvaluatorsRouter
+from oss.src.apis.fastapi.annotations.router import AnnotationsRouter
+
+from oss.src.core.annotations.types import (
+ AnnotationOrigin,
+ AnnotationKind,
+ AnnotationChannel,
+)
+from oss.src.apis.fastapi.annotations.models import (
+ AnnotationCreate,
+ AnnotationCreateRequest,
+)
+
+from oss.src.core.evaluations.types import (
+ EvaluationStatus,
+ EvaluationRunDataMappingStep,
+ EvaluationRunDataMappingColumn,
+ EvaluationRunDataMapping,
+ EvaluationRunDataStepInput,
+ EvaluationRunDataStep,
+ EvaluationRunData,
+ EvaluationRunFlags,
+ EvaluationRun,
+ EvaluationRunCreate,
+ EvaluationRunEdit,
+ EvaluationScenarioCreate,
+ EvaluationScenarioEdit,
+ EvaluationResultCreate,
+ EvaluationMetricsCreate,
+)
+
+from oss.src.core.shared.dtos import Reference
+from oss.src.core.workflows.dtos import (
+ WorkflowServiceData,
+ WorkflowServiceRequest,
+ WorkflowServiceResponse,
+ WorkflowServiceInterface,
+ WorkflowRevisionData,
+ WorkflowRevision,
+ WorkflowVariant,
+ Workflow,
+)
+
+from oss.src.core.queries.dtos import (
+ QueryRevision,
+ QueryVariant,
+ Query,
+)
+
+from oss.src.core.workflows.dtos import Tree
+
+from oss.src.core.evaluations.utils import get_metrics_keys_from_schema
+
+
+log = get_module_logger(__name__)
+
+
+# DBS --------------------------------------------------------------------------
+
+tracing_dao = TracingDAO()
+
+testcases_dao = BlobsDAO(
+ BlobDBE=TestcaseBlobDBE,
+)
+
+queries_dao = GitDAO(
+ ArtifactDBE=QueryArtifactDBE,
+ VariantDBE=QueryVariantDBE,
+ RevisionDBE=QueryRevisionDBE,
+)
+
+testsets_dao = GitDAO(
+ ArtifactDBE=TestsetArtifactDBE,
+ VariantDBE=TestsetVariantDBE,
+ RevisionDBE=TestsetRevisionDBE,
+)
+
+workflows_dao = GitDAO(
+ ArtifactDBE=WorkflowArtifactDBE,
+ VariantDBE=WorkflowVariantDBE,
+ RevisionDBE=WorkflowRevisionDBE,
+)
+
+evaluations_dao = EvaluationsDAO()
+
+# CORE -------------------------------------------------------------------------
+
+tracing_service = TracingService(
+ tracing_dao=tracing_dao,
+)
+
+queries_service = QueriesService(
+ queries_dao=queries_dao,
+)
+
+testcases_service = TestcasesService(
+ testcases_dao=testcases_dao,
+)
+
+testsets_service = TestsetsService(
+ testsets_dao=testsets_dao,
+ testcases_service=testcases_service,
+)
+
+simple_testsets_service = SimpleTestsetsService(
+ testsets_service=testsets_service,
+)
+
+workflows_service = WorkflowsService(
+ workflows_dao=workflows_dao,
+)
+
+evaluators_service = EvaluatorsService(
+ workflows_service=workflows_service,
+)
+
+simple_evaluators_service = SimpleEvaluatorsService(
+ evaluators_service=evaluators_service,
+)
+
+evaluations_service = EvaluationsService(
+ evaluations_dao=evaluations_dao,
+ tracing_service=tracing_service,
+ queries_service=queries_service,
+ testsets_service=testsets_service,
+ evaluators_service=evaluators_service,
+)
+
+# APIS -------------------------------------------------------------------------
+
+tracing_router = TracingRouter(
+ tracing_service=tracing_service,
+)
+
+simple_testsets_router = SimpleTestsetsRouter(
+ simple_testsets_service=simple_testsets_service,
+) # TODO: REMOVE/REPLACE ONCE TRANSFER IS MOVED TO 'core'
+
+simple_evaluators_router = SimpleEvaluatorsRouter(
+ simple_evaluators_service=simple_evaluators_service,
+) # TODO: REMOVE/REPLACE ONCE TRANSFER IS MOVED TO 'core'
+
+annotations_service = AnnotationsService(
+ tracing_router=tracing_router,
+ evaluators_service=evaluators_service,
+ simple_evaluators_service=simple_evaluators_service,
+)
+
+annotations_router = AnnotationsRouter(
+ annotations_service=annotations_service,
+) # TODO: REMOVE/REPLACE ONCE ANNOTATE IS MOVED TO 'core'
+
+# ------------------------------------------------------------------------------
+
+
+async def setup_evaluation(
+ *,
+ project_id: UUID,
+ user_id: UUID,
+ #
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ #
+ testset_id: Optional[str] = None,
+ query_id: Optional[str] = None,
+ #
+ revision_id: Optional[str] = None,
+ #
+ autoeval_ids: Optional[List[str]] = None,
+) -> Optional[EvaluationRun]:
+ request = Request(scope={"type": "http", "http_version": "1.1", "scheme": "http"})
+ request.state.project_id = project_id
+ request.state.user_id = user_id
+
+ run = None
+
+ # --------------------------------------------------------------------------
+ log.info("[SETUP] ", project_id=project_id, user_id=user_id)
+ log.info("[TESTSET] ", ids=[testset_id])
+ log.info("[QUERY] ", ids=[query_id])
+    log.info("[INVOCATION]", ids=[revision_id])
+ log.info("[ANNOTATION]", ids=autoeval_ids)
+ # --------------------------------------------------------------------------
+
+ try:
+ # create evaluation run ------------------------------------------------
+ runs_create = [
+ EvaluationRunCreate(
+ name=name,
+ description=description,
+ #
+ flags=(
+ EvaluationRunFlags(
+ is_closed=None,
+ is_live=True,
+ is_active=True,
+ )
+ if query_id
+ else None
+ ),
+ #
+ status=EvaluationStatus.PENDING,
+ )
+ ]
+
+ runs = await evaluations_service.create_runs(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ runs=runs_create,
+ )
+
+ assert len(runs) == 1, "Failed to create evaluation run."
+
+ run = runs[0]
+ # ----------------------------------------------------------------------
+
+ # just-in-time transfer of testset -------------------------------------
+ testset_input_steps_keys = list()
+
+ testset_references = dict()
+ testset = None
+
+ if testset_id:
+ testset_ref = Reference(id=UUID(testset_id))
+
+ testset_response = await simple_testsets_router.transfer_simple_testset(
+ request=request,
+ testset_id=UUID(testset_id),
+ )
+
+ assert (
+ testset_response.count != 0
+ ), f"Testset with id {testset_id} not found!"
+
+ testset = testset_response.testset
+ testcases = testset.data.testcases
+
+ testset_references["artifact"] = testset_ref
+
+ testset_input_steps_keys.append(
+ get_slug_from_name_and_id(testset.name, testset.id)
+ )
+ # ----------------------------------------------------------------------
+
+ # fetch query ----------------------------------------------------------
+ query_input_steps_keys = list()
+
+ query_references = dict()
+ query_revision = None
+
+ if query_id:
+ query_ref = Reference(id=UUID(query_id))
+
+ query = await queries_service.fetch_query(
+ project_id=project_id,
+ #
+ query_ref=query_ref,
+ )
+
+ assert query is not None, f"Query with id {query_id} not found!"
+
+ query_references["artifact"] = Reference(
+ id=query.id,
+ slug=query.slug,
+ )
+
+ query_revision = await queries_service.fetch_query_revision(
+ project_id=project_id,
+ #
+ query_ref=query_ref,
+ )
+
+ assert (
+ query_revision is not None
+ ), f"Query revision with id {query_id} not found!"
+
+ query_revision_ref = Reference(
+ id=query_revision.id,
+ slug=query_revision.slug,
+ )
+
+ query_references["revision"] = query_revision_ref
+
+ query_variant = await queries_service.fetch_query_variant(
+ project_id=project_id,
+ query_variant_ref=Reference(
+ id=query_revision.variant_id,
+ ),
+ )
+
+ assert (
+ query_variant is not None
+ ), f"Query variant with id {query_revision.variant_id} not found!"
+
+ query_variant_ref = Reference(
+ id=query_variant.id,
+ slug=query_variant.slug,
+ )
+
+ query_references["variant"] = query_variant_ref
+
+ query_input_steps_keys.append(query_revision.slug)
+ # ----------------------------------------------------------------------
+
+ # fetch application ----------------------------------------------------
+ invocation_steps_keys = list()
+
+ application_references = dict()
+
+ if revision_id:
+ revision = await fetch_app_variant_revision_by_id(revision_id)
+
+ assert (
+ revision is not None
+ ), f"App revision with id {revision_id} not found!"
+
+ application_references["revision"] = Reference(
+ id=UUID(str(revision.id)),
+ )
+
+ variant = await fetch_app_variant_by_id(str(revision.variant_id))
+
+ assert (
+ variant is not None
+ ), f"App variant with id {revision.variant_id} not found!"
+
+ application_references["variant"] = Reference(
+ id=UUID(str(variant.id)),
+ )
+
+ app = await fetch_app_by_id(str(variant.app_id))
+
+ assert app is not None, f"App with id {variant.app_id} not found!"
+
+ application_references["artifact"] = Reference(
+ id=UUID(str(app.id)),
+ )
+
+ deployment = await get_deployment_by_id(str(revision.base.deployment_id))
+
+ assert (
+ deployment is not None
+ ), f"Deployment with id {revision.base.deployment_id} not found!"
+
+ uri = parse_url(url=deployment.uri)
+
+ assert uri is not None, f"Invalid URI for deployment {deployment.id}!"
+
+ revision_parameters = revision.config_parameters
+
+ assert (
+ revision_parameters is not None
+ ), f"Revision parameters for variant {variant.id} not found!"
+
+ invocation_steps_keys.append(
+ get_slug_from_name_and_id(app.app_name, revision.id)
+ )
+ # ----------------------------------------------------------------------
+
+ # fetch evaluators -----------------------------------------------------
+ annotation_steps_keys = []
+
+ if autoeval_ids:
+ autoeval_configs = []
+
+ for autoeval_id in autoeval_ids:
+ autoeval_config = await fetch_evaluator_config(autoeval_id)
+
+ autoeval_configs.append(autoeval_config)
+
+ for autoeval_config in autoeval_configs:
+ annotation_steps_keys.append(
+ get_slug_from_name_and_id(autoeval_config.name, autoeval_config.id)
+ )
+ # ----------------------------------------------------------------------
+
+ # just-in-time transfer of evaluators ----------------------------------
+ annotation_metrics_keys = {key: {} for key in annotation_steps_keys}
+ evaluator_references = dict()
+
+        for jdx, autoeval_id in enumerate(autoeval_ids or []):
+ annotation_step_key = annotation_steps_keys[jdx]
+
+ evaluator_response = (
+ await simple_evaluators_router.transfer_simple_evaluator(
+ request=request,
+ evaluator_id=UUID(autoeval_id),
+ )
+ )
+
+ evaluator = evaluator_response.evaluator
+
+ assert evaluator is not None, f"Evaluator with id {autoeval_id} not found!"
+
+ evaluator_references[annotation_step_key] = {}
+
+ evaluator_references[annotation_step_key]["artifact"] = Reference(
+ id=evaluator.id,
+ slug=evaluator.slug,
+ )
+
+ metrics_keys = get_metrics_keys_from_schema(
+ schema=(evaluator.data.schemas.get("outputs")),
+ )
+
+ annotation_metrics_keys[annotation_step_key] = [
+ {
+ "path": metric_key.get("path", "").replace("outputs.", "", 1),
+ "type": metric_key.get("type", ""),
+ }
+ for metric_key in metrics_keys
+ ]
+ # ----------------------------------------------------------------------
+
+ # fetch evaluator workflows --------------------------------------------
+ evaluators = dict()
+
+ for annotation_step_key, references in evaluator_references.items():
+ evaluators[annotation_step_key] = {}
+
+ workflow_ref = references["artifact"]
+
+ workflow = await workflows_service.fetch_workflow(
+ project_id=project_id,
+ #
+ workflow_ref=workflow_ref,
+ )
+
+ evaluators[annotation_step_key]["workflow"] = workflow
+
+ workflow_revision = await workflows_service.fetch_workflow_revision(
+ project_id=project_id,
+ #
+ workflow_ref=workflow_ref,
+ )
+
+ assert (
+ workflow_revision is not None
+ ), f"Workflow revision with id {workflow_ref.id} not found!"
+
+ workflow_revision_ref = Reference(
+ id=workflow_revision.id,
+ slug=workflow_revision.slug,
+ )
+
+ evaluator_references[annotation_step_key][
+ "revision"
+ ] = workflow_revision_ref
+
+ evaluators[annotation_step_key]["revision"] = workflow_revision
+
+ workflow_variant = await workflows_service.fetch_workflow_variant(
+ project_id=project_id,
+ workflow_variant_ref=Reference(
+ id=workflow_revision.variant_id,
+ ),
+ )
+
+ assert (
+ workflow_variant is not None
+ ), f"Workflow variant with id {workflow_revision.variant_id} not found!"
+
+ workflow_variant_ref = Reference(
+ id=workflow_variant.id,
+ slug=workflow_variant.slug,
+ )
+
+ evaluator_references[annotation_step_key]["variant"] = workflow_variant_ref
+
+ evaluators[annotation_step_key]["variant"] = workflow_variant
+
+ # ----------------------------------------------------------------------
+
+ # initialize steps/mappings in run -------------------------------------
+ testset_input_step = (
+ EvaluationRunDataStep(
+ key=testset_input_steps_keys[0],
+ type="input",
+ origin="auto",
+ references={
+ "testset": testset_references["artifact"],
+ # "testset_variant":
+ # "testset_revision":
+ },
+ )
+ if testset and testset.id
+ else None
+ )
+
+ query_input_step = (
+ EvaluationRunDataStep(
+ key=query_input_steps_keys[0],
+ type="input",
+ origin="auto",
+ references={
+ "query": query_references["artifact"],
+ "query_variant": query_references["variant"],
+ "query_revision": query_references["revision"],
+ },
+ )
+ if query_id
+ else None
+ )
+
+ invocation_step = (
+ EvaluationRunDataStep(
+ key=invocation_steps_keys[0],
+ type="invocation",
+ origin="auto",
+ references={
+ "application": application_references["artifact"],
+ "application_variant": application_references["variant"],
+ "application_revision": application_references["revision"],
+ },
+ inputs=[
+ EvaluationRunDataStepInput(
+ key=testset_input_steps_keys[0],
+ ),
+ ],
+ )
+ if revision_id
+ else None
+ )
+
+ annotation_steps = [
+ EvaluationRunDataStep(
+ key=step_key,
+ type="annotation",
+ origin="auto",
+ references={
+ "evaluator": evaluator_references[step_key]["artifact"],
+ "evaluator_variant": evaluator_references[step_key]["variant"],
+ "evaluator_revision": evaluator_references[step_key]["revision"],
+ },
+ inputs=(
+ [
+ EvaluationRunDataStepInput(
+ key=testset_input_steps_keys[0],
+ ),
+ EvaluationRunDataStepInput(
+ key=invocation_steps_keys[0],
+ ),
+ ]
+ if testset_id and revision_id
+ else [
+ EvaluationRunDataStepInput(
+ key=query_input_steps_keys[0],
+ ),
+ ]
+ ),
+ )
+ for step_key in annotation_steps_keys
+ ]
+
+ steps: List[EvaluationRunDataStep] = list()
+
+ if testset_id and testset_input_step:
+ steps.append(testset_input_step)
+ if query_id and query_input_step:
+ steps.append(query_input_step)
+ if revision_id and invocation_step:
+ steps.append(invocation_step)
+
+ steps.extend(annotation_steps)
+
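+        # Illustrative mapping shapes (example values, not from this patch): a
+        # testset column "question" maps to step path "data.question"; an
+        # annotation metric "score" maps to "attributes.ag.data.outputs.score".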
+ testset_input_mappings = (
+ [
+ EvaluationRunDataMapping(
+ column=EvaluationRunDataMappingColumn(
+ kind="testset",
+ name=key,
+ ),
+ step=EvaluationRunDataMappingStep(
+ key=testset_input_steps_keys[0],
+ path=f"data.{key}",
+ ),
+ )
+ for key in testcases[0].data.keys()
+ ]
+        if testset_id and testcases
+ else []
+ )
+
+ query_input_mappings = (
+ [
+ EvaluationRunDataMapping(
+ column=EvaluationRunDataMappingColumn(
+ kind="query",
+ name="data",
+ ),
+ step=EvaluationRunDataMappingStep(
+ key=query_input_steps_keys[0],
+ path="attributes.ag.data",
+ ),
+ )
+ ]
+ if query_id
+ else []
+ )
+
+ invocation_mappings = (
+ [
+ EvaluationRunDataMapping(
+ column=EvaluationRunDataMappingColumn(
+ kind="invocation",
+ name="outputs",
+ ),
+ step=EvaluationRunDataMappingStep(
+ key=step_key,
+ path="attributes.ag.data.outputs",
+ ),
+ )
+ for step_key in invocation_steps_keys
+ ]
+ if invocation_steps_keys
+ else []
+ )
+
+ annotation_mappings = [
+ EvaluationRunDataMapping(
+ column=EvaluationRunDataMappingColumn(
+ kind="annotation",
+ name=metric_key["path"],
+ ),
+ step=EvaluationRunDataMappingStep(
+ key=step_key,
+ path=f"attributes.ag.data.outputs{'.' + metric_key['path'] if metric_key['path'] else ''}",
+ ),
+ )
+ for step_key in annotation_steps_keys
+ for metric_key in annotation_metrics_keys[step_key]
+ ]
+
+ mappings: List[EvaluationRunDataMapping] = (
+ testset_input_mappings
+ + query_input_mappings
+ + invocation_mappings
+ + annotation_mappings
+ )
+
+ run_edit = EvaluationRunEdit(
+ id=run.id,
+ #
+ name=run.name,
+ description=run.description,
+ #
+ flags=run.flags,
+ tags=run.tags,
+ meta=run.meta,
+ #
+ status=EvaluationStatus.RUNNING,
+ #
+ data=EvaluationRunData(
+ steps=steps,
+ mappings=mappings,
+ ),
+ )
+
+ run = await evaluations_service.edit_run(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ run=run_edit,
+ )
+
+ assert run, f"Failed to edit evaluation run {run_edit.id}!"
+ # ----------------------------------------------------------------------
+
+ log.info("[DONE] ", run_id=run.id)
+
+ except: # pylint: disable=bare-except
+ if run and run.id:
+ log.error("[FAIL] ", run_id=run.id, exc_info=True)
+
+ await evaluations_service.delete_run(
+ project_id=project_id,
+ #
+ run_id=run.id,
+ )
+ else:
+ log.error("[FAIL]", exc_info=True)
+
+ run = None
+
+ return run
+
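+# Hypothetical orchestration note (assumed, not shown in this patch): a caller
+# would typically await setup_evaluation(...) first and, if a run is returned,
+# dispatch the Celery task below with run.id as run_id.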
+
+@shared_task(
+ name="src.tasks.evaluations.legacy.annotate",
+ queue="src.tasks.evaluations.legacy.annotate",
+ bind=True,
+)
+def annotate(
+ self,
+ *,
+ project_id: UUID,
+ user_id: UUID,
+ #
+ run_id: UUID,
+ #
+ testset_id: str,
+ revision_id: str,
+ autoeval_ids: Optional[List[str]],
+ #
+ run_config: Dict[str, int],
+):
+ """
+ Annotates an application revision applied to a testset using auto evaluator(s).
+
+ Args:
+ self: The task instance.
+        project_id (UUID): The ID of the project.
+        user_id (UUID): The ID of the user.
+        run_id (UUID): The ID of the evaluation run.
+        testset_id (str): The ID of the testset.
+        revision_id (str): The ID of the application revision.
+        autoeval_ids (Optional[List[str]]): The IDs of the evaluator configurations.
+        run_config (Dict[str, int]): Rate-limit configuration for the evaluation run.
+
+ Returns:
+ None
+ """
+ request = Request(
+ scope={
+ "type": "http",
+ "http_version": "1.1",
+ "scheme": "http",
+ }
+ )
+ request.state.project_id = str(project_id)
+ request.state.user_id = str(user_id)
+
+ loop = get_event_loop()
+
+ run = None
+
+ try:
+ # ----------------------------------------------------------------------
+ log.info("[SCOPE] ", run_id=run_id, project_id=project_id, user_id=user_id)
+ log.info("[TESTSET] ", run_id=run_id, ids=[testset_id])
+        log.info("[INVOCATION]", run_id=run_id, ids=[revision_id])
+ log.info("[ANNOTATION]", run_id=run_id, ids=autoeval_ids)
+ # ----------------------------------------------------------------------
+
+ # fetch project --------------------------------------------------------
+ project = loop.run_until_complete(
+ get_project_by_id(
+ project_id=str(project_id),
+ ),
+ )
+ # ----------------------------------------------------------------------
+
+ # fetch secrets --------------------------------------------------------
+ secrets = loop.run_until_complete(
+ get_llm_providers_secrets(
+ project_id=str(project_id),
+ ),
+ )
+ # ----------------------------------------------------------------------
+
+ # prepare credentials --------------------------------------------------
+ secret_token = loop.run_until_complete(
+ sign_secret_token(
+ user_id=str(user_id),
+ project_id=str(project_id),
+ workspace_id=str(project.workspace_id),
+ organization_id=str(project.organization_id),
+ )
+ )
+
+ credentials = f"Secret {secret_token}"
+ # ----------------------------------------------------------------------
+
+ # fetch run ------------------------------------------------------------
+ run = loop.run_until_complete(
+ evaluations_service.fetch_run(
+ project_id=project_id,
+ #
+ run_id=run_id,
+ )
+ )
+
+ assert run, f"Evaluation run with id {run_id} not found!"
+
+ assert run.data, f"Evaluation run with id {run_id} has no data!"
+
+ assert run.data.steps, f"Evaluation run with id {run_id} has no steps!"
+
+ steps = run.data.steps
+
+ invocation_steps = [step for step in steps if step.type == "invocation"]
+ annotation_steps = [step for step in steps if step.type == "annotation"]
+
+ invocation_steps_keys = [step.key for step in invocation_steps]
+ annotation_steps_keys = [step.key for step in annotation_steps]
+
+ nof_annotations = len(annotation_steps)
+ # ----------------------------------------------------------------------
+
+ # fetch testset --------------------------------------------------------
+ testset_response = loop.run_until_complete(
+ simple_testsets_router.fetch_simple_testset(
+ request=request,
+ testset_id=testset_id,
+ )
+ )
+
+ assert testset_response.count != 0, f"Testset with id {testset_id} not found!"
+
+ testset = testset_response.testset
+
+ testcases = testset.data.testcases
+ testcases_data = [
+ {**testcase.data, "id": str(testcase.id)} for testcase in testcases
+ ] # INEFFICIENT: might want to have testcase_id in testset data (caution with hashing)
+ nof_testcases = len(testcases)
+
+ testset_step_key = get_slug_from_name_and_id(testset.name, testset.id)
+ # ----------------------------------------------------------------------
+
+ # fetch application ----------------------------------------------------
+ revision = loop.run_until_complete(
+ fetch_app_variant_revision_by_id(revision_id),
+ )
+
+ assert revision is not None, f"App revision with id {revision_id} not found!"
+
+ variant = loop.run_until_complete(
+ fetch_app_variant_by_id(str(revision.variant_id)),
+ )
+
+ assert (
+ variant is not None
+ ), f"App variant with id {revision.variant_id} not found!"
+
+ app = loop.run_until_complete(
+ fetch_app_by_id(str(variant.app_id)),
+ )
+
+ assert app is not None, f"App with id {variant.app_id} not found!"
+
+ deployment = loop.run_until_complete(
+ get_deployment_by_id(str(revision.base.deployment_id)),
+ )
+
+ assert (
+ deployment is not None
+ ), f"Deployment with id {revision.base.deployment_id} not found!"
+
+ uri = parse_url(url=deployment.uri)
+
+ assert uri is not None, f"Invalid URI for deployment {deployment.id}!"
+
+ revision_parameters = revision.config_parameters
+
+ assert (
+ revision_parameters is not None
+ ), f"Revision parameters for variant {variant.id} not found!"
+ # ----------------------------------------------------------------------
+
+ # fetch evaluators -----------------------------------------------------
+ evaluator_references = {step.key: step.references for step in annotation_steps}
+
+ evaluators = {
+ evaluator_key: loop.run_until_complete(
+ workflows_service.fetch_workflow_revision(
+ project_id=project_id,
+ #
+ workflow_revision_ref=evaluator_refs.get("evaluator_revision"),
+ )
+ )
+ for evaluator_key, evaluator_refs in evaluator_references.items()
+ }
+ # ----------------------------------------------------------------------
+
+ # prepare headers ------------------------------------------------------
+ headers = {}
+ if credentials:
+ headers = {"Authorization": credentials}
+ headers["ngrok-skip-browser-warning"] = "1"
+
+ openapi_parameters = None
+ max_recursive_depth = 5
+ runtime_prefix = uri
+ route_path = ""
+
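+        # Walk up the deployment URL looking for an OpenAPI schema: for an
+        # illustrative prefix "https://host/a/b" (not from this patch), try
+        # "https://host/a/b/openapi.json" first, then "https://host/a/openapi.json"
+        # with route_path "/b", and so on, up to max_recursive_depth levels.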
+ while max_recursive_depth > 0 and not openapi_parameters:
+ try:
+ openapi_parameters = loop.run_until_complete(
+ llm_apps_service.get_parameters_from_openapi(
+ runtime_prefix + "/openapi.json",
+ route_path,
+ headers,
+ ),
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ openapi_parameters = None
+
+ if not openapi_parameters:
+ max_recursive_depth -= 1
+ if not runtime_prefix.endswith("/"):
+ route_path = "/" + runtime_prefix.split("/")[-1] + route_path
+ runtime_prefix = "/".join(runtime_prefix.split("/")[:-1])
+ else:
+ route_path = ""
+ runtime_prefix = runtime_prefix[:-1]
+
+ openapi_parameters = loop.run_until_complete(
+ llm_apps_service.get_parameters_from_openapi(
+ runtime_prefix + "/openapi.json",
+ route_path,
+ headers,
+ ),
+ )
+ # ----------------------------------------------------------------------
+
+ # create scenarios -----------------------------------------------------
+ scenarios_create = [
+ EvaluationScenarioCreate(
+ run_id=run_id,
+ #
+ status=EvaluationStatus.RUNNING,
+ )
+ for _ in range(nof_testcases)
+ ]
+
+ scenarios = loop.run_until_complete(
+ evaluations_service.create_scenarios(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ scenarios=scenarios_create,
+ )
+ )
+
+ assert (
+ len(scenarios) == nof_testcases
+ ), f"Failed to create evaluation scenarios for run {run_id}!"
+ # ----------------------------------------------------------------------
+
+ # create input steps ---------------------------------------------------
+ steps_create = [
+ EvaluationResultCreate(
+ run_id=run_id,
+ scenario_id=scenario.id,
+ step_key=testset_step_key,
+ #
+ status=EvaluationStatus.SUCCESS,
+ #
+ testcase_id=testcases[idx].id,
+ )
+ for idx, scenario in enumerate(scenarios)
+ ]
+
+ steps = loop.run_until_complete(
+ evaluations_service.create_results(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ results=steps_create,
+ )
+ )
+
+ assert (
+ len(steps) == nof_testcases
+ ), f"Failed to create evaluation steps for run {run_id}!"
+ # ----------------------------------------------------------------------
+
+ # flatten testcases ----------------------------------------------------
+ _testcases = [testcase.model_dump(mode="json") for testcase in testcases]
+
+ log.info(
+ "[BATCH] ",
+ run_id=run_id,
+ ids=[testset_id],
+ count=len(_testcases),
+ size=len(dumps(_testcases).encode("utf-8")),
+ )
+ # ----------------------------------------------------------------------
+
+ # invoke application ---------------------------------------------------
+ invocations: List[InvokationResult] = loop.run_until_complete(
+ llm_apps_service.batch_invoke(
+ project_id=str(project_id),
+ user_id=str(user_id),
+ testset_data=testcases_data, # type: ignore
+ parameters=revision_parameters, # type: ignore
+ uri=uri,
+ rate_limit_config=run_config,
+ application_id=str(app.id), # DO NOT REMOVE
+ references={
+ "testset": {"id": testset_id},
+ "application": {"id": str(app.id)},
+ "application_variant": {"id": str(variant.id)},
+ "application_revision": {"id": str(revision.id)},
+ },
+ )
+ )
+ # ----------------------------------------------------------------------
+
+ # create invocation steps ----------------------------------------------
+ steps_create = [
+ EvaluationResultCreate(
+ run_id=run_id,
+ scenario_id=scenario.id,
+ step_key=invocation_steps_keys[0],
+ #
+ status=(
+ EvaluationStatus.SUCCESS
+ if not invocations[idx].result.error
+ else EvaluationStatus.FAILURE
+ ),
+ #
+ trace_id=invocations[idx].trace_id,
+ error=(
+ invocations[idx].result.error.model_dump(mode="json")
+ if invocations[idx].result.error
+ else None
+ ),
+ )
+ for idx, scenario in enumerate(scenarios)
+ ]
+
+ steps = loop.run_until_complete(
+ evaluations_service.create_results(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ results=steps_create,
+ )
+ )
+
+ assert (
+ len(steps) == nof_testcases
+ ), f"Failed to create evaluation steps for run {run_id}!"
+ # ----------------------------------------------------------------------
+
+ run_has_errors = 0
+ run_status = EvaluationStatus.SUCCESS
+
+ # run evaluators -------------------------------------------------------
+ for idx in range(nof_testcases):
+ scenario = scenarios[idx]
+ testcase = testcases[idx]
+ invocation = invocations[idx]
+
+ scenario_has_errors = 0
+ scenario_status = EvaluationStatus.SUCCESS
+
+ # skip the iteration if error in the invocation --------------------
+ if invocation.result.error:
+                log.error(
+                    f"Invocation {invocation.trace_id} has an error; skipping its evaluation"
+                )
+
+ scenario_has_errors += 1
+ run_has_errors += 1
+ scenario_status = EvaluationStatus.ERRORS
+ run_status = EvaluationStatus.ERRORS
+
+                error = invocation.result.error.model_dump(mode="json")
+ # ------------------------------------------------------------------
+
+ # proceed with the evaluation otherwise ----------------------------
+ else:
+ # run the evaluators if no error in the invocation -------------
+ for jdx in range(nof_annotations):
+ annotation_step_key = annotation_steps_keys[jdx]
+
+ step_has_errors = 0
+ step_status = EvaluationStatus.SUCCESS
+
+ references = {
+ **evaluator_references[annotation_step_key],
+ "testset": {"id": testset_id},
+ "testcase": {"id": str(testcase.id)},
+ }
+ links = {
+ invocation_steps_keys[0]: {
+ "trace_id": invocation.trace_id,
+ "span_id": invocation.span_id,
+ }
+ }
+
+ # invoke annotation workflow -------------------------------
+ workflow_revision = evaluators[annotation_step_key]
+
+ workflows_service_request = WorkflowServiceRequest(
+ version="2025.07.14",
+ flags={
+ "is_annotation": True,
+ "inline": True,
+ },
+ tags=None,
+ meta=None,
+ data=WorkflowServiceData(
+ inputs=testcase.data,
+ # trace=
+ trace_parameters=revision_parameters,
+ trace_outputs=invocation.result.value["data"],
+ tree=(
+ Tree(
+ version=invocation.result.value.get("version"),
+ nodes=invocation.result.value["tree"].get("nodes"),
+ )
+ if "tree" in invocation.result.value
+ else None
+ ),
+ ),
+ references=references,
+ links=links,
+ credentials=credentials,
+ secrets=secrets,
+ )
+
+ workflows_service_response = loop.run_until_complete(
+ workflows_service.invoke_workflow(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ request=workflows_service_request,
+ revision=workflow_revision,
+ )
+ )
+ # ----------------------------------------------------------
+
+ # run evaluator --------------------------------------------
+ trace_id = None
+ error = None
+
+ has_error = workflows_service_response.status.code != 200
+
+ # if error in evaluator, no annotation, only step ----------
+ if has_error:
+                        log.warning(
+ f"There is an error in annotation {annotation_step_key} for invocation {invocation.trace_id}."
+ )
+
+ step_has_errors += 1
+ scenario_has_errors += 1
+ run_has_errors += 1
+ step_status = EvaluationStatus.FAILURE
+ scenario_status = EvaluationStatus.ERRORS
+ run_status = EvaluationStatus.ERRORS
+
+ error = workflows_service_response.status.model_dump(
+ mode="json"
+ )
+
+ # ----------------------------------------------------------
+
+ # else, first annotation, then step ------------------------
+ else:
+ outputs = workflows_service_response.data.outputs or {}
+
+ annotation_create_request = AnnotationCreateRequest(
+ annotation=AnnotationCreate(
+ origin=AnnotationOrigin.AUTO,
+ kind=AnnotationKind.EVAL,
+ channel=AnnotationChannel.API, # hardcoded
+ #
+ data={"outputs": outputs},
+ #
+ references=references,
+ links=links,
+ )
+ )
+
+ annotation_response = loop.run_until_complete(
+ annotations_router.create_annotation(
+ request=request,
+ annotation_create_request=annotation_create_request,
+ )
+ )
+
+ assert (
+ annotation_response.count != 0
+ ), f"Failed to create annotation for invocation {invocation.trace_id} and evaluator {references.get('evaluator').get('id')}"
+
+ trace_id = annotation_response.annotation.trace_id
+ # ----------------------------------------------------------
+
+ steps_create = [
+ EvaluationResultCreate(
+ run_id=run_id,
+ scenario_id=scenario.id,
+ step_key=annotation_step_key,
+ #
+ status=step_status,
+ #
+ trace_id=trace_id,
+ error=error,
+ )
+ ]
+
+ steps = loop.run_until_complete(
+ evaluations_service.create_results(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ results=steps_create,
+ )
+ )
+
+ assert (
+ len(steps) == 1
+ ), f"Failed to create evaluation step for scenario with id {scenario.id}!"
+ # ------------------------------------------------------------------
+
+ scenario_edit = EvaluationScenarioEdit(
+ id=scenario.id,
+ tags=scenario.tags,
+ meta=scenario.meta,
+ status=scenario_status,
+ )
+
+ scenario = loop.run_until_complete(
+ evaluations_service.edit_scenario(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ scenario=scenario_edit,
+ )
+ )
+
+ assert (
+ scenario
+ ), f"Failed to edit evaluation scenario with id {scenario.id}!"
+
+ if scenario_status != EvaluationStatus.FAILURE:
+ try:
+ metrics = loop.run_until_complete(
+ evaluations_service.refresh_metrics(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ run_id=run_id,
+ scenario_id=scenario.id,
+ )
+ )
+
+ if not metrics:
+ log.warning(
+ f"Refreshing metrics failed for {run_id} | {scenario.id}"
+ )
+
+                except Exception:
+ log.warning(
+ f"Refreshing metrics failed for {run_id} | {scenario.id}",
+ exc_info=True,
+ )
+ # ----------------------------------------------------------------------
+
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.error(
+ f"An error occurred during evaluation: {e}",
+ exc_info=True,
+ )
+
+ self.update_state(state=states.FAILURE)
+
+ run_status = EvaluationStatus.FAILURE
+
+    if not run:
+        # The run was never fetched; there is nothing to update, so stop here.
+        log.info("[FAIL] ", run_id=run_id, project_id=project_id, user_id=user_id)
+        return
+
+ if run_status != EvaluationStatus.FAILURE:
+ try:
+ metrics = loop.run_until_complete(
+ evaluations_service.refresh_metrics(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ run_id=run_id,
+ )
+ )
+
+ if not metrics:
+ log.warning(f"Refreshing metrics failed for {run_id}")
+
+ self.update_state(state=states.FAILURE)
+
+ run_status = EvaluationStatus.FAILURE
+
+        except Exception:  # pylint: disable=broad-exception-caught
+ log.warning(f"Refreshing metrics failed for {run_id}", exc_info=True)
+
+ self.update_state(state=states.FAILURE)
+
+ run_status = EvaluationStatus.FAILURE
+
+ # edit evaluation run status -----------------------------------------------
+ run_edit = EvaluationRunEdit(
+ id=run_id,
+ #
+ name=run.name,
+ description=run.description,
+ #
+ tags=run.tags,
+ meta=run.meta,
+ #
+ status=run_status,
+ #
+ data=run.data,
+ )
+
+ loop.run_until_complete(
+ evaluations_service.edit_run(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ run=run_edit,
+ )
+ )
+
+    # edit meters to avoid counting failed evaluations --------------------------
+ if run_status == EvaluationStatus.FAILURE:
+ loop.run_until_complete(
+ check_entitlements(
+ organization_id=project.organization_id,
+ key=Counter.EVALUATIONS,
+ delta=-1,
+ )
+ )
+
+ log.info("[DONE] ", run_id=run_id, project_id=project_id, user_id=user_id)
+
+ return
diff --git a/api/ee/src/tasks/evaluations/live.py b/api/ee/src/tasks/evaluations/live.py
new file mode 100644
index 0000000000..0095206d42
--- /dev/null
+++ b/api/ee/src/tasks/evaluations/live.py
@@ -0,0 +1,771 @@
+from typing import List, Dict, Any
+from uuid import UUID
+import asyncio
+from datetime import datetime
+
+from celery import shared_task
+from fastapi import Request
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.services.auth_helper import sign_secret_token
+from oss.src.services.db_manager import get_project_by_id
+from oss.src.core.secrets.utils import get_llm_providers_secrets
+
+from oss.src.dbs.postgres.queries.dbes import (
+ QueryArtifactDBE,
+ QueryVariantDBE,
+ QueryRevisionDBE,
+)
+from oss.src.dbs.postgres.testcases.dbes import (
+ TestcaseBlobDBE,
+)
+from oss.src.dbs.postgres.testsets.dbes import (
+ TestsetArtifactDBE,
+ TestsetVariantDBE,
+ TestsetRevisionDBE,
+)
+from oss.src.dbs.postgres.workflows.dbes import (
+ WorkflowArtifactDBE,
+ WorkflowVariantDBE,
+ WorkflowRevisionDBE,
+)
+
+from oss.src.dbs.postgres.tracing.dao import TracingDAO
+from oss.src.dbs.postgres.blobs.dao import BlobsDAO
+from oss.src.dbs.postgres.git.dao import GitDAO
+from oss.src.dbs.postgres.evaluations.dao import EvaluationsDAO
+
+from oss.src.core.tracing.service import TracingService
+from oss.src.core.queries.service import QueriesService
+from oss.src.core.testcases.service import TestcasesService
+from oss.src.core.testsets.service import TestsetsService
+from oss.src.core.testsets.service import SimpleTestsetsService
+from oss.src.core.workflows.service import WorkflowsService
+from oss.src.core.evaluators.service import EvaluatorsService
+from oss.src.core.evaluators.service import SimpleEvaluatorsService
+from oss.src.core.evaluations.service import EvaluationsService
+from oss.src.core.annotations.service import AnnotationsService
+
+# from oss.src.apis.fastapi.tracing.utils import make_hash_id
+from oss.src.apis.fastapi.tracing.router import TracingRouter
+from oss.src.apis.fastapi.annotations.router import AnnotationsRouter
+
+from oss.src.core.annotations.types import (
+ AnnotationOrigin,
+ AnnotationKind,
+ AnnotationChannel,
+)
+from oss.src.apis.fastapi.annotations.models import (
+ AnnotationCreate,
+ AnnotationCreateRequest,
+)
+
+from oss.src.core.evaluations.types import (
+ EvaluationStatus,
+ EvaluationScenarioCreate,
+ EvaluationScenarioEdit,
+ EvaluationResultCreate,
+)
+from oss.src.core.shared.dtos import (
+ Reference,
+ Link,
+)
+from oss.src.core.tracing.dtos import (
+ Filtering,
+ Windowing,
+ Formatting,
+ Format,
+ Focus,
+ TracingQuery,
+ OTelSpansTree as Trace,
+ LogicalOperator,
+ SimpleTraceReferences,
+)
+from oss.src.core.workflows.dtos import (
+ WorkflowServiceData,
+ WorkflowServiceRequest,
+)
+from oss.src.core.queries.dtos import (
+ QueryRevision,
+)
+from oss.src.core.evaluators.dtos import (
+ EvaluatorRevision,
+)
+
+log = get_module_logger(__name__)
+
+
+# DBS --------------------------------------------------------------------------
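+
+# Module-level singletons: DAOs wrap Postgres access, services wrap the DAOs,
+# and routers wrap the services, so a worker builds this stack once at import
+# time and reuses it across task invocations.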
+
+tracing_dao = TracingDAO()
+
+testcases_dao = BlobsDAO(
+ BlobDBE=TestcaseBlobDBE,
+)
+
+queries_dao = GitDAO(
+ ArtifactDBE=QueryArtifactDBE,
+ VariantDBE=QueryVariantDBE,
+ RevisionDBE=QueryRevisionDBE,
+)
+
+testsets_dao = GitDAO(
+ ArtifactDBE=TestsetArtifactDBE,
+ VariantDBE=TestsetVariantDBE,
+ RevisionDBE=TestsetRevisionDBE,
+)
+
+workflows_dao = GitDAO(
+ ArtifactDBE=WorkflowArtifactDBE,
+ VariantDBE=WorkflowVariantDBE,
+ RevisionDBE=WorkflowRevisionDBE,
+)
+
+evaluations_dao = EvaluationsDAO()
+
+# CORE -------------------------------------------------------------------------
+
+tracing_service = TracingService(
+ tracing_dao=tracing_dao,
+)
+
+queries_service = QueriesService(
+ queries_dao=queries_dao,
+)
+
+testcases_service = TestcasesService(
+ testcases_dao=testcases_dao,
+)
+
+testsets_service = TestsetsService(
+ testsets_dao=testsets_dao,
+ testcases_service=testcases_service,
+)
+
+simple_testsets_service = SimpleTestsetsService(
+ testsets_service=testsets_service,
+)
+
+workflows_service = WorkflowsService(
+ workflows_dao=workflows_dao,
+)
+
+evaluators_service = EvaluatorsService(
+ workflows_service=workflows_service,
+)
+
+simple_evaluators_service = SimpleEvaluatorsService(
+ evaluators_service=evaluators_service,
+)
+
+evaluations_service = EvaluationsService(
+ evaluations_dao=evaluations_dao,
+ tracing_service=tracing_service,
+ queries_service=queries_service,
+ testsets_service=testsets_service,
+ evaluators_service=evaluators_service,
+)
+
+# APIS -------------------------------------------------------------------------
+
+tracing_router = TracingRouter(
+ tracing_service=tracing_service,
+)
+
+annotations_service = AnnotationsService(
+ tracing_router=tracing_router,
+ evaluators_service=evaluators_service,
+ simple_evaluators_service=simple_evaluators_service,
+)
+
+annotations_router = AnnotationsRouter(
+ annotations_service=annotations_service,
+) # TODO: REMOVE/REPLACE ONCE ANNOTATE IS MOVED TO 'core'
+
+# ------------------------------------------------------------------------------
+
+
+@shared_task(
+ name="src.tasks.evaluations.live.evaluate",
+ queue="src.tasks.evaluations.live.evaluate",
+ bind=True,
+)
+def evaluate(
+ self,
+ *,
+ project_id: UUID,
+ user_id: UUID,
+ #
+ run_id: UUID,
+ #
+ newest: datetime,
+ oldest: datetime,
+):
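+    """
+    Run one live-evaluation pass for an evaluation run.
+
+    Fetches the traces matched by each query step within [oldest, newest],
+    invokes each evaluator revision on every trace, stores the resulting
+    annotations, and records scenarios, results, and metrics for the run.
+    """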
+ request = Request(scope={"type": "http", "http_version": "1.1", "scheme": "http"})
+
+ request.state.project_id = str(project_id)
+ request.state.user_id = str(user_id)
+
+ loop = asyncio.get_event_loop()
+
+ # count in minutes
+ timestamp = oldest
+ interval = int((newest - oldest).total_seconds() / 60)
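+    # The window is an anchor timestamp plus a bucket width in whole minutes;
+    # the scenarios, results, and metric refreshes below are all stamped with
+    # this same (timestamp, interval) pair.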
+
+ try:
+ # ----------------------------------------------------------------------
+ log.info(
+ "[SCOPE] ",
+ run_id=run_id,
+ project_id=project_id,
+ user_id=user_id,
+ )
+
+ log.info(
+ "[RANGE] ",
+ run_id=run_id,
+ timestamp=timestamp,
+ interval=interval,
+ )
+ # ----------------------------------------------------------------------
+
+ # fetch project --------------------------------------------------------
+ project = loop.run_until_complete(
+ get_project_by_id(project_id=str(project_id)),
+ )
+ # ----------------------------------------------------------------------
+
+ # fetch provider keys from secrets -------------------------------------
+ secrets = loop.run_until_complete(
+ get_llm_providers_secrets(str(project_id)),
+ )
+ # ----------------------------------------------------------------------
+
+ # prepare credentials --------------------------------------------------
+ secret_token = loop.run_until_complete(
+ sign_secret_token(
+ user_id=str(user_id),
+ project_id=str(project_id),
+ workspace_id=str(project.workspace_id),
+ organization_id=str(project.organization_id),
+ )
+ )
+
+ credentials = f"Secret {secret_token}"
+ # ----------------------------------------------------------------------
+
+ # fetch evaluation run -------------------------------------------------
+ run = loop.run_until_complete(
+ evaluations_service.fetch_run(
+ project_id=project_id,
+ run_id=run_id,
+ )
+ )
+
+ assert run, f"Evaluation run with id {run_id} not found!"
+
+ assert run.data, f"Evaluation run with id {run_id} has no data!"
+
+ assert run.data.steps, f"Evaluation run with id {run_id} has no steps!"
+
+ steps = run.data.steps
+
+ input_steps = {
+ step.key: step for step in steps if step.type == "input" # --------
+ }
+ invocation_steps = {
+ step.key: step for step in steps if step.type == "invocation"
+ }
+ annotation_steps = {
+ step.key: step for step in steps if step.type == "annotation"
+ }
+
+ input_steps_keys = list(input_steps.keys())
+ invocation_steps_keys = list(invocation_steps.keys())
+ annotation_steps_keys = list(annotation_steps.keys())
+
+ nof_inputs = len(input_steps_keys)
+ nof_invocations = len(invocation_steps_keys)
+ nof_annotations = len(annotation_steps_keys)
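+
+        # Steps are partitioned by type: "input" steps carry query references
+        # and "annotation" steps carry evaluator references. Invocation steps
+        # are counted but not executed here, since live evaluation annotates
+        # traces that already exist.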
+ # ----------------------------------------------------------------------
+
+ # initialize query variables -------------------------------------------
+ query_revision_refs: Dict[str, Reference] = dict()
+ #
+ query_revisions: Dict[str, QueryRevision] = dict()
+ query_references: Dict[str, Dict[str, Reference]] = dict()
+ #
+ query_traces: Dict[str, Dict[str, Trace]] = dict()
+ # ----------------------------------------------------------------------
+
+ # initialize evaluator variables ---------------------------------------
+ evaluator_revision_refs: Dict[str, Reference] = dict()
+ #
+ evaluator_revisions: Dict[str, EvaluatorRevision] = dict()
+ evaluator_references: Dict[str, Dict[str, Reference]] = dict()
+ # ----------------------------------------------------------------------
+
+ # get query steps references -------------------------------------------
+ for input_step_key in input_steps_keys:
+ query_refs = input_steps[input_step_key].references
+ query_revision_ref = query_refs.get("query_revision")
+
+ if query_revision_ref:
+ query_revision_refs[input_step_key] = query_revision_ref
+
+ # ----------------------------------------------------------------------
+
+ # get evaluator steps references ---------------------------------------
+ for annotation_step_key in annotation_steps_keys:
+ evaluator_refs = annotation_steps[annotation_step_key].references
+ evaluator_revision_ref = evaluator_refs.get("evaluator_revision")
+
+ if evaluator_revision_ref:
+ evaluator_revision_refs[annotation_step_key] = evaluator_revision_ref
+ # ----------------------------------------------------------------------
+
+ # fetch query revisions ------------------------------------------------
+ for (
+ query_step_key,
+ query_revision_ref,
+ ) in query_revision_refs.items():
+ query_revision = loop.run_until_complete(
+ queries_service.fetch_query_revision(
+ project_id=project_id,
+ #
+ query_revision_ref=query_revision_ref,
+ )
+ )
+
+ if (
+ not query_revision
+ or not query_revision.id
+ or not query_revision.slug
+ or not query_revision.data
+ ):
+                log.warning(
+ f"Query revision with ref {query_revision_ref.model_dump(mode='json')} not found!"
+ )
+ continue
+
+ query_step = input_steps[query_step_key]
+
+ query_revisions[query_step_key] = query_revision
+ query_references[query_step_key] = query_step.references
+ # ----------------------------------------------------------------------
+
+ # fetch evaluator revisions --------------------------------------------
+ for (
+ evaluator_step_key,
+ evaluator_revision_ref,
+ ) in evaluator_revision_refs.items():
+ evaluator_revision = loop.run_until_complete(
+ evaluators_service.fetch_evaluator_revision(
+ project_id=project_id,
+ #
+ evaluator_revision_ref=evaluator_revision_ref,
+ )
+ )
+
+ if (
+ not evaluator_revision
+ or not evaluator_revision.id
+ or not evaluator_revision.slug
+ or not evaluator_revision.data
+ ):
+                log.warning(
+ f"Evaluator revision with ref {evaluator_revision_ref.model_dump(mode='json')} not found!"
+ )
+ continue
+
+ evaluator_step = annotation_steps[evaluator_step_key]
+
+ evaluator_revisions[evaluator_step_key] = evaluator_revision
+ evaluator_references[evaluator_step_key] = evaluator_step.references
+ # ----------------------------------------------------------------------
+
+ # run query revisions --------------------------------------------------
+ for query_step_key, query_revision in query_revisions.items():
+ formatting = Formatting(
+ focus=Focus.TRACE,
+ format=Format.AGENTA,
+ )
+ filtering = Filtering(
+ operator=LogicalOperator.AND,
+ conditions=list(),
+ )
+ windowing = Windowing(
+ oldest=oldest,
+ newest=newest,
+ next=None,
+ limit=None,
+ order="ascending",
+ interval=None,
+ rate=None,
+ )
+
+ if query_revision.data:
+ if query_revision.data.filtering:
+ filtering = query_revision.data.filtering
+
+ if query_revision.data.windowing:
+ windowing.rate = query_revision.data.windowing.rate
+
+ query = TracingQuery(
+ formatting=formatting,
+ filtering=filtering,
+ windowing=windowing,
+ )
+
+ tracing_response = loop.run_until_complete(
+ tracing_router.query_spans(
+ request=request,
+ #
+ query=query,
+ )
+ )
+
+ nof_traces = tracing_response.count
+
+ log.info(
+ "[TRACES] ",
+ run_id=run_id,
+ count=nof_traces,
+ )
+
+ query_traces[query_step_key] = tracing_response.traces or dict()
+ # ----------------------------------------------------------------------
+
+ # run online evaluation ------------------------------------------------
+ for query_step_key in query_traces.keys():
+ if not query_traces[query_step_key].keys():
+ continue
+
+ # create scenarios -------------------------------------------------
+
+ nof_traces = len(query_traces[query_step_key].keys())
+
+ scenarios_create = [
+ EvaluationScenarioCreate(
+ run_id=run_id,
+ timestamp=timestamp,
+ interval=interval,
+ #
+ status=EvaluationStatus.RUNNING,
+ )
+ for _ in range(nof_traces)
+ ]
+
+ scenarios = loop.run_until_complete(
+ evaluations_service.create_scenarios(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ scenarios=scenarios_create,
+ )
+ )
+
+ if len(scenarios) != nof_traces:
+ log.error(
+ "[LIVE] Could not create evaluation scenarios",
+ run_id=run_id,
+ )
+ continue
+ # ------------------------------------------------------------------
+
+ # create query steps -----------------------------------------------
+ query_trace_ids = list(query_traces[query_step_key].keys())
+ scenario_ids = [scenario.id for scenario in scenarios if scenario.id]
+
+ results_create = [
+ EvaluationResultCreate(
+ run_id=run_id,
+ scenario_id=scenario_id,
+ step_key=query_step_key,
+ repeat_idx=1,
+ timestamp=timestamp,
+ interval=interval,
+ #
+ status=EvaluationStatus.SUCCESS,
+ #
+ trace_id=query_trace_id,
+ )
+ for scenario_id, query_trace_id in zip(scenario_ids, query_trace_ids)
+ ]
+
+ results = loop.run_until_complete(
+ evaluations_service.create_results(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ results=results_create,
+ )
+ )
+
+ assert (
+ len(results) == nof_traces
+ ), f"Failed to create evaluation results for run {run_id}!"
+ # ------------------------------------------------------------------
+
+ scenario_has_errors: Dict[int, int] = dict()
+ scenario_status: Dict[int, EvaluationStatus] = dict()
+
+ # iterate over query traces ----------------------------------------
+ for idx, trace in enumerate(query_traces[query_step_key].values()):
+ scenario_has_errors[idx] = 0
+ scenario_status[idx] = EvaluationStatus.SUCCESS
+
+ scenario = scenarios[idx]
+ scenario_id = scenario_ids[idx]
+ query_trace_id = query_trace_ids[idx]
+
+ if not isinstance(trace.spans, dict):
+                    log.warning(
+ f"Trace with id {query_trace_id} has no root spans",
+ run_id=run_id,
+ )
+ scenario_has_errors[idx] += 1
+ scenario_status[idx] = EvaluationStatus.ERRORS
+ continue
+
+ root_span = list(trace.spans.values())[0]
+
+ if isinstance(root_span, list):
+                    log.warning(
+ f"More than one root span for trace with id {query_trace_id}",
+ run_id=run_id,
+ )
+ scenario_has_errors[idx] += 1
+ scenario_status[idx] = EvaluationStatus.ERRORS
+ continue
+
+ query_span_id = root_span.span_id
+
+ log.info(
+ "[TRACE] ",
+ run_id=run_id,
+ trace_id=query_trace_id,
+ )
+
+ # run evaluator revisions --------------------------------------
+ for (
+ evaluator_step_key,
+ evaluator_revision,
+ ) in evaluator_revisions.items():
+ step_has_errors = 0
+ step_status = EvaluationStatus.SUCCESS
+
+ references: dict = evaluator_references[evaluator_step_key]
+ links: dict = dict(
+ query_step_key=Link(
+ trace_id=query_trace_id,
+ span_id=query_span_id,
+ )
+ )
+
+ parameters: dict = (
+ evaluator_revision.data.parameters or {}
+ if evaluator_revision.data
+ else {}
+ )
+ inputs: dict = {}
+ outputs: Any = None
+
+ trace_attributes: dict = root_span.attributes or {}
+ trace_ag_attributes: dict = trace_attributes.get("ag", {})
+ trace_data: dict = trace_ag_attributes.get("data", {})
+ trace_parameters: dict = trace_data.get("parameters", {})
+ trace_inputs: dict = trace_data.get("inputs", {})
+ trace_outputs: Any = trace_data.get("outputs")
+
+ workflow_service_data = WorkflowServiceData(
+ #
+ parameters=parameters,
+ inputs=inputs,
+ #
+ trace_parameters=trace_parameters,
+ trace_inputs=trace_inputs,
+ trace_outputs=trace_outputs,
+ #
+ trace=trace,
+ )
+
+ workflow_service_request = WorkflowServiceRequest(
+ version="2025.07.14",
+ #
+ flags={
+ "is_annotation": True,
+ "inline": True,
+ },
+ tags=None,
+ meta=None,
+ #
+ data=workflow_service_data,
+ #
+ references=references,
+ links=links,
+ #
+ credentials=credentials,
+ secrets=secrets,
+ )
+
+ workflow_revision = evaluator_revision
+
+ workflows_service_response = loop.run_until_complete(
+ workflows_service.invoke_workflow(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ request=workflow_service_request,
+ revision=workflow_revision,
+ )
+ )
+
+ evaluator_trace_id = None
+ error = None
+
+ has_error = workflows_service_response.status.code != 200
+
+ # if error in evaluator, no annotation, only step ----------
+ if has_error:
+                        log.warning(
+ f"There is an error in evaluator {evaluator_step_key} for query {query_trace_id}."
+ )
+
+ step_has_errors += 1
+ step_status = EvaluationStatus.FAILURE
+ scenario_has_errors[idx] += 1
+ scenario_status[idx] = EvaluationStatus.ERRORS
+
+ error = workflows_service_response.status.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+ # ----------------------------------------------------------
+
+ # else, first annotation, then step ------------------------
+ else:
+ outputs = (
+ workflows_service_response.data.outputs
+ if workflows_service_response.data
+ else None
+ )
+
+ annotation_create_request = AnnotationCreateRequest(
+ annotation=AnnotationCreate(
+ origin=AnnotationOrigin.AUTO,
+ kind=AnnotationKind.EVAL,
+ channel=AnnotationChannel.API,
+ #
+ data={"outputs": outputs},
+ #
+ references=SimpleTraceReferences(**references),
+ links=links,
+ )
+ )
+
+ annotation_response = loop.run_until_complete(
+ annotations_router.create_annotation(
+ request=request,
+ annotation_create_request=annotation_create_request,
+ )
+ )
+
+ if (
+ not annotation_response.count
+ or not annotation_response.annotation
+ ):
+                            log.warning(
+ f"Failed to create annotation for query {query_trace_id} and evaluator {evaluator_revision.id}"
+ )
+ step_has_errors += 1
+ step_status = EvaluationStatus.FAILURE
+ scenario_has_errors[idx] += 1
+ scenario_status[idx] = EvaluationStatus.ERRORS
+ continue
+
+ evaluator_trace_id = annotation_response.annotation.trace_id
+ # ----------------------------------------------------------
+
+ results_create = [
+ EvaluationResultCreate(
+ run_id=run_id,
+ scenario_id=scenario_id,
+ step_key=evaluator_step_key,
+ repeat_idx=1,
+ timestamp=timestamp,
+ interval=interval,
+ #
+ status=step_status,
+ #
+ trace_id=evaluator_trace_id,
+ error=error,
+ )
+ ]
+
+ results = loop.run_until_complete(
+ evaluations_service.create_results(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ results=results_create,
+ )
+ )
+
+ assert (
+ len(results) == 1
+ ), f"Failed to create evaluation result for scenario with id {scenario.id}!"
+ # --------------------------------------------------------------
+
+ scenario_edit = EvaluationScenarioEdit(
+ id=scenario.id,
+ tags=scenario.tags,
+ meta=scenario.meta,
+ status=scenario_status[idx],
+ )
+
+ scenario = loop.run_until_complete(
+ evaluations_service.edit_scenario(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ scenario=scenario_edit,
+ )
+ )
+
+ if not scenario or not scenario.id:
+ log.error(
+ f"Failed to update evaluation scenario with id {scenario_id}!",
+ run_id=run_id,
+ )
+
+ loop.run_until_complete(
+ evaluations_service.refresh_metrics(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ run_id=run_id,
+ scenario_id=scenario_id,
+ )
+ )
+ # ------------------------------------------------------------------
+
+ loop.run_until_complete(
+ evaluations_service.refresh_metrics(
+ project_id=project_id,
+ user_id=user_id,
+ #
+ run_id=run_id,
+ timestamp=timestamp,
+ interval=interval,
+ )
+ )
+ except Exception as e: # pylint: disable=broad-exception-caught
+ log.error(e, exc_info=True)
+
+ log.info(
+ "[DONE] ",
+ run_id=run_id,
+ )
+
+ return
diff --git a/api/ee/src/utils/entitlements.py b/api/ee/src/utils/entitlements.py
new file mode 100644
index 0000000000..13360aad77
--- /dev/null
+++ b/api/ee/src/utils/entitlements.py
@@ -0,0 +1,169 @@
+from typing import Union, Optional, Callable
+from uuid import UUID
+
+from fastapi.responses import JSONResponse
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.utils.caching import get_cache, set_cache
+
+from ee.src.core.subscriptions.service import SubscriptionsService
+from ee.src.core.entitlements.types import (
+    Tracker,
+    Flag,
+    Counter,
+    Gauge,
+    Plan,
+    ENTITLEMENTS,
+)
+from ee.src.core.meters.service import MetersService
+from ee.src.core.meters.types import MeterDTO
+from ee.src.dbs.postgres.meters.dao import MetersDAO
+from ee.src.dbs.postgres.subscriptions.dao import SubscriptionsDAO
+
+log = get_module_logger(__name__)
+
+meters_service = MetersService(
+ meters_dao=MetersDAO(),
+)
+
+subscriptions_service = SubscriptionsService(
+ subscriptions_dao=SubscriptionsDAO(),
+ meters_service=meters_service,
+)
+
+
+class EntitlementsException(Exception):
+ pass
+
+
+def NOT_ENTITLED_RESPONSE(tracker: Optional[Tracker] = None) -> JSONResponse:
+    if tracker == Tracker.COUNTERS:
+        detail = "You have reached your monthly quota limit. Please upgrade your plan to continue."
+    elif tracker == Tracker.GAUGES:
+        detail = "You have reached your quota limit. Please upgrade your plan to continue."
+    elif tracker == Tracker.FLAGS:
+        detail = "You do not have access to this feature. Please upgrade your plan to continue."
+    else:
+        detail = "You do not have access to this feature."
+
+    return JSONResponse(status_code=403, content={"detail": detail})
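+
+# NOTE: presumably returned directly from FastAPI handlers once
+# check_entitlements() denies access to a tracked feature or quota.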
+
+
+async def check_entitlements(
+ organization_id: UUID,
+ key: Union[Flag, Counter, Gauge],
+ delta: Optional[int] = None,
+) -> tuple[bool, Optional[MeterDTO], Optional[Callable]]:
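+    """
+    Check whether an organization is entitled to `key` under its plan.
+
+    The key is resolved to a flag, counter, or gauge. Flags short-circuit
+    without metering; counters and gauges are adjusted by `delta` against the
+    plan's quota. Returns (allowed, meter, callback), where meter and callback
+    are None for flags.
+    """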
+ flag = None
+ try:
+ flag = Flag(key)
+ except ValueError:
+ pass
+
+ counter = None
+ try:
+ counter = Counter(key)
+ except ValueError:
+ pass
+
+ gauge = None
+ try:
+ gauge = Gauge(key)
+ except ValueError:
+ pass
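+
+    # The enums are expected to use disjoint values, so at most one of
+    # flag/counter/gauge resolves; anything else is an invalid key.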
+
+ if flag is None and counter is None and gauge is None:
+ raise EntitlementsException(f"Invalid key [{key}]")
+
+ cache_key = {
+ "organization_id": organization_id,
+ }
+
+ subscription_data = await get_cache(
+ namespace="entitlements:subscription",
+ key=cache_key,
+ )
+
+ if subscription_data is None:
+ subscription = await subscriptions_service.read(organization_id=organization_id)
+
+ if not subscription:
+ raise EntitlementsException(
+ f"No subscription found for organization [{organization_id}]"
+ )
+
+ subscription_data = {
+ "plan": subscription.plan.value,
+ "anchor": subscription.anchor,
+ }
+
+ await set_cache(
+ namespace="entitlements:subscription",
+ key=cache_key,
+ value=subscription_data,
+ )
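+
+        # The plan and billing anchor are cached per organization so repeated
+        # entitlement checks skip the subscription lookup.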
+
+ plan = Plan(subscription_data.get("plan"))
+ anchor = subscription_data.get("anchor")
+
+ if plan not in ENTITLEMENTS:
+ raise EntitlementsException(f"Missing plan [{plan}] in entitlements")
+
+ if flag:
+ if flag not in ENTITLEMENTS[plan][Tracker.FLAGS]:
+ raise EntitlementsException(f"Invalid flag: {flag} for plan [{plan}]")
+
+ check = ENTITLEMENTS[plan][Tracker.FLAGS][flag]
+
+ if flag.name != "RBAC":
+ # TODO: remove this line
+ log.info(
+ f"adjusting: {organization_id} | | {'allow' if check else 'deny '} | {flag.name}"
+ )
+
+ return check is True, None, None
+
+ quota = None
+
+ if counter:
+ if counter not in ENTITLEMENTS[plan][Tracker.COUNTERS]:
+ raise EntitlementsException(f"Invalid counter: {counter} for plan [{plan}]")
+
+ quota = ENTITLEMENTS[plan][Tracker.COUNTERS][counter]
+
+ if gauge:
+ if gauge not in ENTITLEMENTS[plan][Tracker.GAUGES]:
+ raise EntitlementsException(f"Invalid gauge: {gauge} for plan [{plan}]")
+
+ quota = ENTITLEMENTS[plan][Tracker.GAUGES][gauge]
+
+ if not quota:
+ raise EntitlementsException(f"No quota found for key [{key}] in plan [{plan}]")
+
+ meter = MeterDTO(
+ organization_id=organization_id,
+ key=key,
+ delta=delta,
+ )
+
+    check, meter, callback = await meters_service.adjust(
+        meter=meter,
+        quota=quota,
+        anchor=anchor,
+    )
+
+    # TODO: remove this line
+    month_str = f"{meter.month:02d}" if meter.month else "  "
+    year_str = str(meter.year) if meter.year else "    "
+    log.info(
+        f"adjusting: {organization_id} | {month_str}.{year_str} | "
+        f"{'allow' if check else 'deny '} | "
+        f"{meter.key}: {meter.value - meter.synced} [{meter.value}]"
+    )
+
+    return check is True, meter, callback
diff --git a/api/ee/src/utils/permissions.py b/api/ee/src/utils/permissions.py
new file mode 100644
index 0000000000..312bcb05b6
--- /dev/null
+++ b/api/ee/src/utils/permissions.py
@@ -0,0 +1,304 @@
+from typing import Dict, List, Union, Optional
+
+from fastapi import HTTPException
+from fastapi.responses import JSONResponse
+
+from oss.src.utils.logging import get_module_logger
+from oss.src.utils.caching import get_cache, set_cache
+
+from ee.src.models.db_models import (
+ OrganizationDB,
+ WorkspaceDB,
+ Permission,
+ WorkspaceRole,
+ ProjectDB,
+)
+from oss.src.services import db_manager
+from ee.src.services import db_manager_ee
+from ee.src.utils.entitlements import check_entitlements, Flag
+from ee.src.services.selectors import get_user_org_and_workspace_id
+
+
+log = get_module_logger(__name__)
+
+FORBIDDEN_EXCEPTION = HTTPException(
+ status_code=403,
+ detail="You do not have access to perform this action. Please contact your organization admin.",
+)
+
+
+async def check_user_org_access(
+ kwargs: dict, organization_id: str, check_owner=False
+) -> bool:
+ if check_owner: # Check that the user is the owner of the organization
+ user = await db_manager.get_user_with_id(user_id=kwargs["id"])
+ organization = await db_manager_ee.get_organization(organization_id)
+ if not organization:
+ log.error("Organization not found")
+ raise Exception("Organization not found")
+ return organization.owner == str(user.id) # type: ignore
+ else:
+ user_organizations: List = kwargs["organization_ids"]
+ user_exists_in_organizations = organization_id in user_organizations
+ return user_exists_in_organizations
+
+
+async def check_user_access_to_workspace(
+ user_org_workspace_data: Dict[str, Union[str, list]],
+ workspace: WorkspaceDB,
+ organization: OrganizationDB,
+) -> bool:
+ """
+ Check if a user has access to a specific workspace and the workspace organization.
+
+ Args:
+ user_org_workspace_data (Dict[str, Union[str, list]]): User-specific information.
+ workspace (WorkspaceDB): The workspace to check.
+ organization (OrganizationDB): The organization to check.
+
+ Returns:
+ bool: True if the user has access, False otherwise.
+
+ Raises:
+ ValueError: If the workspace does not belong to the organization.
+ """
+
+    if workspace is None or organization is None:
+        raise ValueError("Workspace does not belong to the provided organization")
+
+    workspace_organization_id = str(workspace.organization_id)
+    if workspace_organization_id != str(organization.id):
+        raise ValueError("Workspace does not belong to the provided organization")
+
+ # Check that the user belongs to the organization
+ has_organization_access = await check_user_org_access(
+ user_org_workspace_data, workspace_organization_id
+ )
+ if not has_organization_access:
+ log.error("User does not belong and have access to the organization")
+ return False
+
+ # Check that the user belongs to the workspace
+ user_id = user_org_workspace_data.get("id")
+ if user_id is None:
+ log.error("User ID is missing in user_org_workspace_data")
+ return False
+
+ workspace_members = workspace.get_all_members()
+ if user_id not in workspace_members:
+ log.error("User does not belong to the workspace")
+ return False
+
+ # Check that the workspace is in the user's workspaces
+ has_access_to_workspace = any(
+ str(workspace.id) == workspace_id
+ for workspace_id in user_org_workspace_data["workspace_ids"]
+ )
+ return has_access_to_workspace
+
+
+async def check_action_access(
+    user_uid: str,
+    project_id: Optional[str] = None,
+    permission: Optional[Permission] = None,
+    role: Optional[str] = None,
+) -> bool:
+    """
+    Check if a user belongs to a workspace and has a certain permission or role.
+
+    Args:
+        user_uid (str): The user's UID.
+        project_id (Optional[str]): The ID of the project to check against.
+        permission (Optional[Permission]): The permission to check.
+        role (Optional[str]): The role to check.
+
+    Returns:
+        bool: True if the user belongs to the workspace and has the specified permission or role, False otherwise.
+    """
+
+ if permission is None and role is None:
+ raise Exception("Either permission or role must be provided")
+ elif permission is not None and role is not None:
+ raise Exception("Only one of permission or role must be provided")
+
+ cache_key = {
+ "permission": permission.value if permission else None,
+ "role": role,
+ }
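+
+    # Results are cached per (project, user, permission/role) so membership
+    # and role lookups are not repeated on every request.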
+
+ has_permission = await get_cache(
+ project_id=project_id,
+ user_id=user_uid,
+ namespace="check_action_access",
+ key=cache_key,
+ )
+
+ if has_permission is not None:
+ return has_permission
+
+ user_org_workspace_data: dict = await get_user_org_and_workspace_id(user_uid)
+ has_permission = await check_rbac_permission(
+ user_org_workspace_data=user_org_workspace_data,
+ project_id=project_id,
+ role=role,
+ permission=permission,
+ )
+
+ await set_cache(
+ project_id=project_id,
+ user_id=user_uid,
+ namespace="check_action_access",
+ key=cache_key,
+ value=has_permission,
+ )
+
+ return has_permission
+
+
+# async def check_apikey_action_access(
+# api_key: str, user_id: str, permission: Permission
+# ):
+# """
+# Check if an api key belongs to a user for a workspace and has the right permission.
+
+# Args:
+# api_key (str): The api key
+# user_id (str): The user (owner) ID of the api_key
+# permission (Permission): The permission to check for.
+# """
+
+# api_key_prefix = api_key.split(".")[0]
+# api_key_db = await db_manager.get_user_api_key_by_prefix(
+# api_key_prefix=api_key_prefix, user_id=user_id
+# )
+# if api_key_db is None:
+# raise HTTPException(
+# 404, {"message": f"API Key with prefix {api_key_prefix} not found"}
+# )
+
+# project_db = await db_manager.get_project_by_id(
+# project_id=str(api_key_db.project_id)
+# )
+# if project_db is None:
+# raise HTTPException(
+# 404,
+# {"message": f"Project with ID {str(api_key_db.workspace_id)} not found"},
+# )
+
+# has_access = await check_project_has_role_or_permission(
+# project_db, str(api_key_db.created_by_id), None, permission
+# )
+# if not has_access:
+# raise HTTPException(
+# 403,
+# {
+# "message": "You do not have access to perform this action. Please contact your organization admin."
+# },
+# )
+
+
+async def check_rbac_permission(
+ user_org_workspace_data: Dict[str, Union[str, list]],
+    project_id: Optional[str] = None,
+    permission: Optional[Permission] = None,
+    role: Optional[str] = None,
+) -> bool:
+ """
+ Check if a user belongs to a workspace and has a certain permission.
+
+ Args:
+        user_org_workspace_data (Dict[str, Union[str, list]]): User-specific information containing the user's id, uid, organization IDs, and workspace IDs.
+ project_id (str): The ID of the project.
+ permission (Permission): The permission to check for.
+ role (str): The role to check for.
+
+ Returns:
+ bool: True if the user belongs to the workspace and has the specified permission, False otherwise.
+ """
+
+    assert (
+        project_id is not None
+    ), "project_id is required to check object-level permissions"
+
+    # Assert that either permission or role is provided, but not both
+    assert (permission is not None) or (
+        role is not None
+    ), "Either 'permission' or 'role' must be provided"
+ assert not (
+ (permission is not None) and (role is not None)
+ ), "'permission' and 'role' cannot both be provided at the same time"
+
+ if project_id is not None:
+ project = await db_manager.get_project_by_id(project_id)
+ if project is None:
+ raise Exception("Project not found")
+
+ workspace = await db_manager.get_workspace(str(project.workspace_id))
+ organization = await db_manager_ee.get_organization(
+ str(project.organization_id)
+ )
+
+ workspace_has_access = await check_user_access_to_workspace(
+ user_org_workspace_data=user_org_workspace_data,
+ workspace=workspace,
+ organization=organization,
+ )
+ if not workspace_has_access:
+ log.error("User does not have access to the workspace")
+ return False
+
+ user_id = user_org_workspace_data["id"]
+ assert isinstance(user_id, str), "User ID must be a string"
+ has_access = await check_project_has_role_or_permission(
+ project, user_id, role, permission
+ )
+ return has_access
+
+
+async def check_project_has_role_or_permission(
+ # organization_id: str,
+ project: ProjectDB,
+ user_id: str,
+ role: Optional[str] = None,
+ permission: Optional[str] = None,
+) -> bool:
+ """Check if a user has the provided role or permission in a project.
+
+ Args:
+ project (ProjectDB): The project to check if the user has permissions to
+ user_id (str): The ID of the user
+ role (Optional[str], optional): The role to check for. Defaults to None.
+        permission (Optional[str], optional): The permission to check for. Defaults to None.
+
+    Returns:
+        bool: True if RBAC is not enforced for the plan, the user owns the
+            project, or the user has the given role or permission.
+    """
+
+ check, _, _ = await check_entitlements(
+ organization_id=project.organization_id,
+ key=Flag.RBAC,
+ )
+
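+    # If the plan does not include the RBAC flag, fine-grained checks are
+    # skipped and access is granted by default.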
+ if not check:
+ return True
+
+ assert (
+ role is not None or permission is not None
+ ), "Either role or permission must be provided"
+
+ project_members = await db_manager_ee.get_project_members(
+ project_id=str(project.id)
+ )
+ if project.is_owner(user_id, project_members):
+ return True
+
+ if role is not None:
+ if role not in list(WorkspaceRole):
+ raise Exception("Invalid role specified")
+ return project.has_role(user_id, role, project_members)
+
+ if permission is not None:
+ if permission not in list(Permission):
+ raise Exception("Invalid permission specified")
+ return project.has_permission(user_id, permission, project_members)
+
+ return False
diff --git a/api/ee/tests/__init__.py b/api/ee/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/tests/manual/billing.http b/api/ee/tests/manual/billing.http
new file mode 100644
index 0000000000..6158dac23f
--- /dev/null
+++ b/api/ee/tests/manual/billing.http
@@ -0,0 +1,52 @@
+
+@host = http://localhost
+@base_url = {{host}}/api/billing
+@api_key = xxx.xxx
+###
+
+# @name open_portal
+POST {{base_url}}/stripe/portals/
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
+# @name open_checkout
+POST {{base_url}}/stripe/checkouts/?plan=cloud_v0_pro&success_url=http://localhost/
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
+# @name fetch_plans
+GET {{base_url}}/plans
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
+# @name switch_plans
+POST {{base_url}}/plans/switch?plan=cloud_v0_pro
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
+# @name fetch_subscription
+GET {{base_url}}/subscription
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
+# @name cancel_subscription
+POST {{base_url}}/subscription/cancel
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
+# @name fetch_usage
+GET {{base_url}}/usage
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
diff --git a/api/ee/tests/manual/evaluations/live.http b/api/ee/tests/manual/evaluations/live.http
new file mode 100644
index 0000000000..6a43280046
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/live.http
@@ -0,0 +1,131 @@
+@auth_key = {{$dotenv AGENTA_AUTH_KEY}} || change-me
+@api_url = {{$dotenv AGENTA_API_URL}}
+@api_key = {{$dotenv AGENTA_API_KEY}}
+
+
+###
+# @name create_account
+POST {{api_url}}/admin/account
+Content-Type: application/json
+Authorization: Access {{auth_key}}
+
+###
+@user_id = {{create_account.response.body.user.id}}
+# @authorization = {{create_account.response.body.scopes[0].credentials}}
+@authorization = ApiKey {{api_key}}
+
+###
+# @name list_queries
+POST {{api_url}}/preview/simple/queries/query
+Content-Type: application/json
+Authorization: {{authorization}}
+
+{}
+
+###
+# @name create_query
+POST {{api_url}}/preview/simple/queries/
+Content-Type: application/json
+Authorization: {{authorization}}
+
+{
+ "query": {
+ "slug": "{{$guid}}",
+ "name": "Test Query",
+ "description": "This is a test query",
+ "tags": {
+ "my_key": "my_value"
+ },
+ "data": {
+ "filtering": {
+ "conditions": [
+ {
+ "field": "attributes",
+ "key": "ag.type.trace",
+ "operator": "is",
+ "value": "invocation"
+ }
+ ]
+ }
+ }
+ }
+}
+
+###
+# @name fetch_query_revision
+POST {{api_url}}/preview/queries/revisions/retrieve
+Content-Type: application/json
+Authorization: {{authorization}}
+
+{
+ "query_ref": {
+ "id": "{{create_query.response.body.query.id}}"
+ }
+}
+
+###
+# @name list_evaluators
+POST {{api_url}}/preview/simple/evaluators/query
+Content-Type: application/json
+Authorization: {{authorization}}
+
+{}
+
+###
+# @name fetch_evaluator_revision
+POST {{api_url}}/preview/evaluators/revisions/retrieve
+Content-Type: application/json
+Authorization: {{authorization}}
+
+{
+ "evaluator_ref": {
+ "id": "{{list_evaluators.response.body.evaluators[2].id}}"
+ }
+}
+
+###
+# @name list_evaluations
+POST {{api_url}}/preview/simple/evaluations/query
+Content-Type: application/json
+Authorization: {{authorization}}
+
+{}
+
+###
+# @name create_evaluation
+POST {{api_url}}/preview/simple/evaluations/
+Content-Type: application/json
+Authorization: {{authorization}}
+
+{
+ "evaluation": {
+ "name": "Test JIT Evaluation",
+ "description": "This is a test jit evaluation",
+ "tags": {
+ "my_key": "my_value"
+ },
+ "flags": {
+ "is_live": true
+ },
+ "data": {
+ "query_steps": [
+ "{{fetch_query_revision.response.body.query_revision.id}}"
+ ],
+ "evaluator_steps": [
+ "{{fetch_evaluator_revision.response.body.evaluator_revision.evaluator_id}}"
+ ]
+ }
+ }
+}
+
+###
+# @name stop_evaluation
+POST {{api_url}}/preview/simple/evaluations/{{create_evaluation.response.body.evaluation.id}}/stop
+Content-Type: application/json
+Authorization: {{authorization}}
+
+###
+# @name start_evaluation
+POST {{api_url}}/preview/simple/evaluations/{{create_evaluation.response.body.evaluation.id}}/start
+Content-Type: application/json
+Authorization: {{authorization}}
diff --git a/api/ee/tests/manual/evaluations/sdk/client.py b/api/ee/tests/manual/evaluations/sdk/client.py
new file mode 100644
index 0000000000..c930eee323
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/sdk/client.py
@@ -0,0 +1,32 @@
+from os import getenv
+
+import requests
+
+BASE_TIMEOUT = 10
+
+AGENTA_API_KEY = getenv("AGENTA_API_KEY")
+AGENTA_API_URL = getenv("AGENTA_API_URL")
+
+
+def authed_api():
+ """
+ Preconfigured requests for authenticated endpoints (supports all methods).
+ """
+
+ api_url = AGENTA_API_URL
+ credentials = f"ApiKey {AGENTA_API_KEY}"
+
+ def _request(method: str, endpoint: str, **kwargs):
+ url = f"{api_url}{endpoint}"
+ headers = kwargs.pop("headers", {})
+ headers.setdefault("Authorization", credentials)
+
+ return requests.request(
+ method=method,
+ url=url,
+ headers=headers,
+ timeout=BASE_TIMEOUT,
+ **kwargs,
+ )
+
+ return _request
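+
+
+# Usage sketch (assumes AGENTA_API_URL and AGENTA_API_KEY are set in the
+# environment):
+#   api = authed_api()
+#   response = api("POST", "/preview/simple/evaluations/query", json={})
+#   response.raise_for_status()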
diff --git a/api/ee/tests/manual/evaluations/sdk/definitions.py b/api/ee/tests/manual/evaluations/sdk/definitions.py
new file mode 100644
index 0000000000..4768515ef3
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/sdk/definitions.py
@@ -0,0 +1,1818 @@
+from enum import Enum
+from uuid import UUID, uuid4
+from re import match
+from datetime import datetime
+from typing import Dict, List, Optional, Union, Literal, Callable, Any
+
+from pydantic import BaseModel, field_validator, Field
+
+# oss.src.core.shared.dtos -----------------------------------------------------
+
+from typing import Optional, Dict, List, Union, Literal
+from uuid import UUID
+from datetime import datetime
+from re import match
+
+from pydantic import BaseModel, field_validator
+
+from typing_extensions import TypeAliasType
+
+
+BoolJson: TypeAliasType = TypeAliasType( # type: ignore
+ "BoolJson",
+ Union[bool, Dict[str, "BoolJson"]], # type: ignore
+)
+
+StringJson: TypeAliasType = TypeAliasType( # type: ignore
+ "StringJson",
+ Union[str, Dict[str, "StringJson"]], # type: ignore
+)
+
+FullJson: TypeAliasType = TypeAliasType( # type: ignore
+ "FullJson",
+ Union[str, int, float, bool, None, Dict[str, "FullJson"], List["FullJson"]], # type: ignore
+)
+
+NumericJson: TypeAliasType = TypeAliasType( # type: ignore
+ "NumericJson",
+ Union[int, float, Dict[str, "NumericJson"]], # type: ignore
+)
+
+NoListJson: TypeAliasType = TypeAliasType( # type: ignore
+ "NoListJson",
+ Union[str, int, float, bool, None, Dict[str, "NoListJson"]], # type: ignore
+)
+
+Json = Dict[str, FullJson] # type: ignore
+
+Data = Dict[str, FullJson] # type: ignore
+
+Flags = Dict[str, bool | str]
+
+Tags = Dict[str, NoListJson] # type: ignore
+
+Meta = Dict[str, FullJson] # type: ignore
+
+Hashes = Dict[str, StringJson] # type: ignore
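+
+# Illustrative examples: BoolJson admits True or {"a": {"b": False}};
+# NoListJson admits scalars and nested dicts but not lists, which is why it
+# backs Tags; FullJson admits any JSON value, including lists.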
+
+
+class Metadata(BaseModel):
+ flags: Optional[Flags] = None # type: ignore
+ meta: Optional[Meta] = None # type: ignore
+ tags: Optional[Tags] = None # type: ignore
+
+
+class Windowing(BaseModel):
+ # RANGE
+ newest: Optional[datetime] = None
+ oldest: Optional[datetime] = None
+ # TOKEN
+ next: Optional[UUID] = None
+ # LIMIT
+ limit: Optional[int] = None
+ # ORDER
+ order: Optional[Literal["ascending", "descending"]] = None
+ # SAMPLES
+ rate: Optional[float] = None
+ # BUCKETS
+ interval: Optional[int] = None
+
+ @field_validator("rate")
+ def check_rate(cls, v):
+ if v is not None and (v < 0.0 or v > 1.0):
+ raise ValueError("Sampling rate must be between 0.0 and 1.0.")
+ return v
+
+ @field_validator("interval")
+ def check_interval(cls, v):
+ if v is not None and v <= 0:
+ raise ValueError("Bucket interval must be a positive integer.")
+ return v
+
+
+class Lifecycle(BaseModel):
+ created_at: Optional[datetime] = None
+ updated_at: Optional[datetime] = None
+ deleted_at: Optional[datetime] = None
+
+ created_by_id: Optional[UUID] = None
+ updated_by_id: Optional[UUID] = None
+ deleted_by_id: Optional[UUID] = None
+
+
+class TraceID(BaseModel):
+ trace_id: Optional[str] = None
+
+
+class SpanID(BaseModel):
+ span_id: Optional[str] = None
+
+
+class Identifier(BaseModel):
+ id: Optional[UUID] = None
+
+
+class Slug(BaseModel):
+ slug: Optional[str] = None
+
+ @field_validator("slug")
+ def check_url_safety(cls, v):
+ if v is not None:
+ if not match(r"^[a-zA-Z0-9_-]+$", v):
+ raise ValueError("slug must be URL-safe.")
+ return v
+
+
+class Version(BaseModel):
+ version: Optional[str] = None
+
+
+class Header(BaseModel):
+ name: Optional[str] = None
+ description: Optional[str] = None
+
+
+class Commit(BaseModel):
+ author: Optional[UUID] = None
+ date: Optional[datetime] = None
+ message: Optional[str] = None
+
+
+class Reference(Identifier, Slug, Version):
+ pass
+
+
+class Link(TraceID, SpanID):
+ pass
+
+
+def sync_alias(primary: str, alias: str, instance: BaseModel) -> None:
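+    # Mirror two model fields: whichever of `primary`/`alias` is set is copied
+    # onto the other, bypassing validation via object.__setattr__.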
+ primary_val = getattr(instance, primary)
+ alias_val = getattr(instance, alias)
+ if primary_val and alias_val is None:
+ object.__setattr__(instance, alias, primary_val)
+ elif alias_val and primary_val is None:
+ object.__setattr__(instance, primary, alias_val)
+
+
+class AliasConfig(BaseModel):
+ model_config = {
+ "populate_by_name": True,
+ "from_attributes": True,
+ }
+
+
+Metrics = Dict[str, NumericJson] # type: ignore
+
+
+class LegacyLifecycleDTO(BaseModel):
+ created_at: Optional[str] = None
+ updated_at: Optional[str] = None
+ updated_by_id: Optional[str] = None
+ # DEPRECATING
+ updated_by: Optional[str] = None # email
+
+
+class Status(BaseModel):
+ code: Optional[int] = 500
+ type: Optional[str] = None
+ message: Optional[str] = "An unexpected error occurred. Please try again later."
+ stacktrace: Optional[str] = None
+
+
+Mappings = Dict[str, str]
+
+Schema = Dict[str, FullJson] # type: ignore
+
+# ------------------------------------------------------------------------------
+
+# oss.src.core.git.dtos --------------------------------------------------------
+
+from typing import Optional, List
+from uuid import UUID
+
+from pydantic import BaseModel
+
+
+# artifacts --------------------------------------------------------------------
+
+
+class Artifact(Identifier, Slug, Lifecycle, Header, Metadata):
+ pass
+
+
+class ArtifactCreate(Slug, Header, Metadata):
+ pass
+
+
+class ArtifactEdit(Identifier, Header, Metadata):
+ pass
+
+
+class ArtifactQuery(Metadata):
+ pass
+
+
+# variants ---------------------------------------------------------------------
+
+
+class Variant(Identifier, Slug, Lifecycle, Header, Metadata):
+ artifact_id: Optional[UUID] = None
+
+
+class VariantCreate(Slug, Header, Metadata):
+ artifact_id: Optional[UUID] = None
+
+
+class VariantEdit(Identifier, Header, Metadata):
+ pass
+
+
+class VariantQuery(Metadata):
+ pass
+
+
+# revisions --------------------------------------------------------------------
+
+
+class Revision(Identifier, Slug, Version, Lifecycle, Header, Metadata, Commit):
+ data: Optional[Data] = None
+
+ artifact_id: Optional[UUID] = None
+ variant_id: Optional[UUID] = None
+
+
+class RevisionCreate(Slug, Header, Metadata):
+ artifact_id: Optional[UUID] = None
+ variant_id: Optional[UUID] = None
+
+
+class RevisionEdit(Identifier, Header, Metadata):
+ pass
+
+
+class RevisionQuery(Metadata):
+ authors: Optional[List[UUID]] = None
+
+
+class RevisionCommit(Slug, Header, Metadata):
+ data: Optional[Data] = None
+
+ message: Optional[str] = None
+
+ artifact_id: Optional[UUID] = None
+ variant_id: Optional[UUID] = None
+
+
+class RevisionsLog(BaseModel):
+ artifact_id: Optional[UUID] = None
+ variant_id: Optional[UUID] = None
+ revision_id: Optional[UUID] = None
+
+ depth: Optional[int] = None
+
+
+# forks ------------------------------------------------------------------------
+
+
+class RevisionFork(Slug, Header, Metadata):
+ data: Optional[Data] = None
+
+ message: Optional[str] = None
+
+
+class VariantFork(Slug, Header, Metadata):
+ pass
+
+
+class ArtifactFork(RevisionsLog):
+ variant: Optional[VariantFork] = None
+ revision: Optional[RevisionFork] = None
+
+
+# ------------------------------------------------------------------------------
+
+
+Origin = Literal["custom", "human", "auto"]
+# Target = Union[List[UUID], Dict[UUID, Origin], List[Callable]]
+Target = Union[
+ List[List[Dict[str, Any]]], # testcases_data
+ List[Callable], # workflow_handlers
+ List[UUID], # entity_ids
+ Dict[UUID, Origin], # entity_ids with origins
+]
+
+
+# oss.src.core.evaluations.types
+
+
+class EvaluationStatus(str, Enum):
+ PENDING = "pending"
+ QUEUED = "queued"
+ RUNNING = "running"
+ SUCCESS = "success"
+ FAILURE = "failure"
+ ERRORS = "errors"
+ CANCELLED = "cancelled"
+
+
+class EvaluationRunFlags(BaseModel):
+ is_closed: Optional[bool] = None # Indicates if the run is modifiable
+ is_live: Optional[bool] = None # Indicates if the run has live queries
+ is_active: Optional[bool] = None # Indicates if the run is currently active
+
+
+class SimpleEvaluationFlags(EvaluationRunFlags):
+ pass
+
+
+SimpleEvaluationStatus = EvaluationStatus
+
+
+class SimpleEvaluationData(BaseModel):
+ status: Optional[SimpleEvaluationStatus] = None
+
+ query_steps: Optional[Target] = None
+ testset_steps: Optional[Target] = None
+ application_steps: Optional[Target] = None
+ evaluator_steps: Optional[Target] = None
+
+ repeats: Optional[int] = None
+
+
+class EvaluationRun(BaseModel):
+ id: UUID
+
+
+class EvaluationScenario(BaseModel):
+ id: UUID
+
+ run_id: UUID
+
+
+class EvaluationResult(BaseModel):
+ id: UUID
+
+ run_id: UUID
+ scenario_id: UUID
+ step_key: str
+
+ testcase_id: Optional[UUID] = None
+ trace_id: Optional[UUID] = None
+ error: Optional[dict] = None
+
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
+
+
+class EvaluationMetrics(Identifier, Lifecycle):
+ flags: Optional[Dict[str, Any]] = None
+ tags: Optional[Dict[str, Any]] = None
+ meta: Optional[Dict[str, Any]] = None
+
+ status: Optional[EvaluationStatus] = None
+
+ timestamp: Optional[datetime] = None
+ interval: Optional[int] = None
+
+ data: Optional[Data] = None
+
+ scenario_id: Optional[UUID] = None
+
+ run_id: UUID
+
+
+# oss.src.core.tracing.dtos
+
+import random
+import string
+from enum import Enum
+from datetime import datetime, timezone
+from typing import List, Dict, Any, Union, Optional, Literal
+from uuid import UUID
+
+from pydantic import BaseModel, model_validator, Field
+
+
+class TraceType(Enum):
+ INVOCATION = "invocation"
+ ANNOTATION = "annotation"
+ #
+ UNKNOWN = "unknown"
+
+
+class SpanType(Enum):
+ AGENT = "agent"
+ CHAIN = "chain"
+ WORKFLOW = "workflow"
+ TASK = "task"
+ TOOL = "tool"
+ EMBEDDING = "embedding"
+ QUERY = "query"
+ LLM = "llm"
+ COMPLETION = "completion"
+ CHAT = "chat"
+ RERANK = "rerank"
+ #
+ UNKNOWN = "unknown"
+
+
+class AgMetricEntryAttributes(BaseModel):
+ # cumulative: 'cum' can't be used though
+ cumulative: Optional[Metrics] = None
+ # incremental 'inc' could be used, since 'unit' may be confusing
+ incremental: Optional[Metrics] = None
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+class AgMetricsAttributes(BaseModel):
+ duration: Optional[AgMetricEntryAttributes] = None
+ errors: Optional[AgMetricEntryAttributes] = None
+ tokens: Optional[AgMetricEntryAttributes] = None
+ costs: Optional[AgMetricEntryAttributes] = None
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+class AgTypeAttributes(BaseModel):
+ trace: Optional[TraceType] = TraceType.INVOCATION
+ span: Optional[SpanType] = SpanType.TASK
+
+
+class AgDataAttributes(BaseModel):
+ parameters: Optional[Dict[str, Any]] = None
+ inputs: Optional[Dict[str, Any]] = None
+ outputs: Optional[Any] = None
+ internals: Optional[Dict[str, Any]] = None
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+class AgAttributes(BaseModel):
+ type: AgTypeAttributes = Field(default_factory=AgTypeAttributes)
+ data: AgDataAttributes = Field(default_factory=AgDataAttributes)
+
+ metrics: Optional[AgMetricsAttributes] = None
+ flags: Optional[Flags] = None # type: ignore
+ tags: Optional[Tags] = None # type: ignore
+ meta: Optional[Meta] = None # type: ignore
+ exception: Optional[Data] = None # type: ignore
+ references: Optional[Dict[str, "OTelReference"]] = None
+ unsupported: Optional[Data] = None # type: ignore
+
+ model_config = {"ser_json_exclude_none": True}
+
+
+## --- SUB-ENTITIES --- ##
+
+
+class OTelStatusCode(Enum):
+ STATUS_CODE_UNSET = "STATUS_CODE_UNSET"
+ STATUS_CODE_OK = "STATUS_CODE_OK"
+ STATUS_CODE_ERROR = "STATUS_CODE_ERROR"
+
+
+class OTelSpanKind(Enum):
+ SPAN_KIND_UNSPECIFIED = "SPAN_KIND_UNSPECIFIED"
+ SPAN_KIND_INTERNAL = "SPAN_KIND_INTERNAL"
+ SPAN_KIND_SERVER = "SPAN_KIND_SERVER"
+ SPAN_KIND_CLIENT = "SPAN_KIND_CLIENT"
+ SPAN_KIND_PRODUCER = "SPAN_KIND_PRODUCER"
+ SPAN_KIND_CONSUMER = "SPAN_KIND_CONSUMER"
+
+
+OTelAttributes = Json # type: ignore
+OTelMetrics = Metrics # type: ignore
+OTelTags = Tags # type: ignore
+
+Attributes = OTelAttributes # type: ignore
+
+
+class OTelEvent(BaseModel):
+ name: str
+ timestamp: Union[datetime, int]
+
+ attributes: Optional[OTelAttributes] = None
+
+
+OTelEvents = List[OTelEvent]
+
+
+class OTelHash(Identifier):
+ attributes: Optional[OTelAttributes] = None
+
+
+OTelHashes = List[OTelHash]
+
+
+class OTelLink(TraceID, SpanID):
+ attributes: Optional[OTelAttributes] = None
+
+
+OTelLinks = List[OTelLink]
+
+
+class OTelReference(Reference):
+ attributes: Optional[OTelAttributes] = None
+
+
+OTelReferences = List[OTelReference]
+
+
+class OTelSpansTree(BaseModel):
+ spans: Optional["OTelNestedSpans"] = None
+
+
+OTelSpansTrees = List[OTelSpansTree]
+
+
+class OTelFlatSpan(Lifecycle):
+ trace_id: str
+ span_id: str
+ parent_id: Optional[str] = None
+
+ trace_type: Optional[TraceType] = None
+ span_type: Optional[SpanType] = None
+
+ span_kind: Optional[OTelSpanKind] = None
+ span_name: Optional[str] = None
+
+ start_time: Optional[Union[datetime, int]] = None
+ end_time: Optional[Union[datetime, int]] = None
+
+ status_code: Optional[OTelStatusCode] = None
+ status_message: Optional[str] = None
+
+ attributes: Optional[OTelAttributes] = None
+ references: Optional[OTelReferences] = None
+ links: Optional[OTelLinks] = None
+ hashes: Optional[OTelHashes] = None
+
+ exception: Optional[Data] = None # type: ignore
+
+ events: Optional[OTelEvents] = None
+
+ @model_validator(mode="after")
+ def set_defaults(self):
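+        # Backfill optional fields: default trace/span types, span kind, and
+        # status code; mirror a missing start/end time from its counterpart;
+        # fall back to a random 8-character span name.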
+ if self.trace_type is None:
+ self.trace_type = TraceType.INVOCATION
+ if self.span_type is None:
+ self.span_type = SpanType.TASK
+ if self.span_kind is None:
+ self.span_kind = OTelSpanKind.SPAN_KIND_UNSPECIFIED
+ if self.status_code is None:
+ self.status_code = OTelStatusCode.STATUS_CODE_UNSET
+ if self.end_time is None and self.start_time is not None:
+ self.end_time = self.start_time
+ if self.start_time is None and self.end_time is not None:
+ self.start_time = self.end_time
+ if self.start_time is None and self.end_time is None:
+ now = datetime.now(timezone.utc)
+ self.start_time = now
+ self.end_time = now
+ if self.span_name is None:
+ self.span_name = "".join(
+ random.choices(string.ascii_letters + string.digits, k=8)
+ )
+ return self
+
+
+class OTelSpan(OTelFlatSpan, OTelSpansTree):
+ pass
+
+
+OTelFlatSpans = List[OTelFlatSpan]
+OTelNestedSpans = Dict[str, Union[OTelSpan, List[OTelSpan]]]
+OTelTraceTree = Dict[str, OTelSpansTree]
+OTelTraceTrees = List[OTelTraceTree]
+OTelSpans = List[OTelSpan]
+
+
+class Fields(str, Enum):
+ TRACE_ID = "trace_id"
+ SPAN_ID = "span_id"
+ PARENT_ID = "parent_id"
+ SPAN_NAME = "span_name"
+ SPAN_KIND = "span_kind"
+ START_TIME = "start_time"
+ END_TIME = "end_time"
+ STATUS_CODE = "status_code"
+ STATUS_MESSAGE = "status_message"
+ ATTRIBUTES = "attributes"
+ EVENTS = "events"
+ LINKS = "links"
+ REFERENCES = "references"
+ CREATED_AT = "created_at"
+ UPDATED_AT = "updated_at"
+ DELETED_AT = "deleted_at"
+ CREATED_BY_ID = "created_by_id"
+ UPDATED_BY_ID = "updated_by_id"
+ DELETED_BY_ID = "deleted_by_id"
+ CONTENT = "content"
+
+
+class LogicalOperator(str, Enum):
+ AND = "and"
+ OR = "or"
+ NOT = "not"
+ NAND = "nand"
+ NOR = "nor"
+
+
+class ComparisonOperator(str, Enum):
+ IS = "is"
+ IS_NOT = "is_not"
+
+
+class NumericOperator(str, Enum):
+ EQ = "eq"
+ NEQ = "neq"
+ GT = "gt"
+ LT = "lt"
+ GTE = "gte"
+ LTE = "lte"
+ BETWEEN = "btwn"
+
+
+class StringOperator(str, Enum):
+ STARTSWITH = "startswith"
+ ENDSWITH = "endswith"
+ CONTAINS = "contains"
+ MATCHES = "matches"
+ LIKE = "like"
+
+
+class DictOperator(str, Enum):
+ HAS = "has"
+ HAS_NOT = "has_not"
+
+
+class ListOperator(str, Enum):
+ IN = "in"
+ NOT_IN = "not_in"
+
+
+class ExistenceOperator(str, Enum):
+ EXISTS = "exists"
+ NOT_EXISTS = "not_exists"
+
+
+class TextOptions(BaseModel):
+ case_sensitive: Optional[bool] = False
+ exact_match: Optional[bool] = False
+
+
+class ListOptions(BaseModel):
+ all: Optional[bool] = False
+
+
+class Condition(BaseModel):
+ field: str
+ key: Optional[str] = None
+ value: Optional[Union[str, int, float, bool, list, dict]] = None
+ operator: Optional[
+ Union[
+ ComparisonOperator,
+ NumericOperator,
+ StringOperator,
+ ListOperator,
+ DictOperator,
+ ExistenceOperator,
+ ]
+ ] = ComparisonOperator.IS
+ options: Optional[Union[TextOptions, ListOptions]] = None
+
+
+class Filtering(BaseModel):
+ operator: Optional[LogicalOperator] = LogicalOperator.AND
+ conditions: List[Union[Condition, "Filtering"]] = list()
+
+
+class Focus(str, Enum):
+ TRACE = "trace"
+ SPAN = "span"
+
+
+class Format(str, Enum):
+ AGENTA = "agenta"
+ OPENTELEMETRY = "opentelemetry"
+
+
+class Formatting(BaseModel):
+ focus: Optional[Focus] = Focus.SPAN
+ format: Optional[Format] = Format.AGENTA
+
+
+class TracingQuery(BaseModel):
+ formatting: Optional[Formatting] = None
+ windowing: Optional[Windowing] = None
+ filtering: Optional[Filtering] = None
+
+
+_C_OPS = list(ComparisonOperator)
+_N_OPS = list(NumericOperator)
+_S_OPS = list(StringOperator)
+_L_OPS = list(ListOperator)
+_D_OPS = list(DictOperator)
+_E_OPS = list(ExistenceOperator)
+
+
+class FilteringException(Exception):
+ pass
+
+
+class Analytics(BaseModel):
+ count: Optional[int] = 0
+ duration: Optional[float] = 0.0
+ costs: Optional[float] = 0.0
+ tokens: Optional[float] = 0.0
+
+ def plus(self, other: "Analytics") -> "Analytics":
+ self.count += other.count
+ self.duration += other.duration
+ self.costs += other.costs
+ self.tokens += other.tokens
+
+ return self
+
+
+class Bucket(BaseModel):
+ timestamp: datetime
+ interval: int
+ total: Analytics
+ errors: Analytics
+
+
+Trace = OTelSpansTree
+
+# oss.src.core.observability.dtos
+
+from enum import Enum
+from uuid import UUID
+from datetime import datetime
+from typing import List, Dict, Any, Union, Optional
+
+from pydantic import BaseModel
+
+
+## --- SUB-ENTITIES --- ##
+
+
+class RootDTO(BaseModel):
+ id: UUID
+
+
+class TreeType(Enum):
+ INVOCATION = "invocation"
+ ANNOTATION = "annotation"
+ #
+ UNKNOWN = "unknown"
+
+
+class TreeDTO(BaseModel):
+ id: UUID
+ type: Optional[TreeType] = None
+
+
+class NodeType(Enum):
+ # --- VARIANTS --- #
+ ## SPAN_KIND_SERVER
+ AGENT = "agent"
+ WORKFLOW = "workflow"
+ CHAIN = "chain"
+ ## SPAN_KIND_INTERNAL
+ TASK = "task"
+ ## SPAN_KIND_CLIENT
+ TOOL = "tool"
+ EMBEDDING = "embedding"
+ QUERY = "query"
+ COMPLETION = "completion" # LEGACY
+ CHAT = "chat"
+ RERANK = "rerank"
+ # --- VARIANTS --- #
+
+
+class NodeDTO(BaseModel):
+ id: UUID
+ name: str
+ type: Optional[NodeType] = None
+
+
+class ParentDTO(BaseModel):
+ id: UUID
+
+
+class TimeDTO(BaseModel):
+ start: datetime
+ end: datetime
+
+
+class StatusCode(Enum):
+ UNSET = "UNSET"
+ OK = "OK"
+ ERROR = "ERROR"
+
+
+class StatusDTO(BaseModel):
+ code: StatusCode
+ message: Optional[str] = None
+
+ class Config:
+ use_enum_values = True
+
+
+Attributes = Dict[str, Any]
+
+
+class ExceptionDTO(BaseModel):
+ timestamp: datetime
+ type: str
+ message: Optional[str] = None
+ stacktrace: Optional[str] = None
+ attributes: Optional[Attributes] = None
+
+ class Config:
+ json_encoders = {
+ UUID: lambda v: str(v), # pylint: disable=unnecessary-lambda
+ datetime: lambda dt: dt.isoformat(),
+ }
+
+
+Data = Dict[str, Any]
+Metrics = Dict[str, Any]
+Meta = Dict[str, Any]
+Refs = Dict[str, Any]
+
+
+class LinkDTO(BaseModel):
+ type: TreeType # Yes, this is correct
+ id: UUID # node_id, this is correct
+ tree_id: Optional[UUID] = None
+
+ class Config:
+ use_enum_values = True
+ json_encoders = {
+ UUID: lambda v: str(v), # pylint: disable=unnecessary-lambda
+ }
+
+
+class OTelSpanKind(Enum):
+ SPAN_KIND_UNSPECIFIED = "SPAN_KIND_UNSPECIFIED"
+ # INTERNAL
+ SPAN_KIND_INTERNAL = "SPAN_KIND_INTERNAL"
+ # SYNCHRONOUS
+ SPAN_KIND_SERVER = "SPAN_KIND_SERVER"
+ SPAN_KIND_CLIENT = "SPAN_KIND_CLIENT"
+ # ASYNCHRONOUS
+ SPAN_KIND_PRODUCER = "SPAN_KIND_PRODUCER"
+ SPAN_KIND_CONSUMER = "SPAN_KIND_CONSUMER"
+
+
+class OTelStatusCode(Enum):
+ STATUS_CODE_OK = "STATUS_CODE_OK"
+ STATUS_CODE_ERROR = "STATUS_CODE_ERROR"
+ STATUS_CODE_UNSET = "STATUS_CODE_UNSET"
+
+
+class OTelContextDTO(BaseModel):
+ trace_id: str
+ span_id: str
+
+
+class OTelEventDTO(BaseModel):
+ name: str
+ timestamp: str
+
+ attributes: Optional[Attributes] = None
+
+
+class OTelLinkDTO(BaseModel):
+ context: OTelContextDTO
+
+ attributes: Optional[Attributes] = None
+
+
+class OTelExtraDTO(BaseModel):
+ kind: Optional[str] = None
+
+ attributes: Optional[Attributes] = None
+ events: Optional[List[OTelEventDTO]] = None
+ links: Optional[List[OTelLinkDTO]] = None
+
+
+## --- ENTITIES --- ##
+
+
+class SpanDTO(BaseModel):
+ trace_id: str
+ span_id: str
+
+ lifecycle: Optional[LegacyLifecycleDTO] = None
+
+ root: RootDTO
+ tree: TreeDTO
+ node: NodeDTO
+
+ parent: Optional[ParentDTO] = None
+
+ time: TimeDTO
+ status: StatusDTO
+
+ exception: Optional[ExceptionDTO] = None
+
+ data: Optional[Data] = None
+ metrics: Optional[Metrics] = None
+ meta: Optional[Meta] = None
+ refs: Optional[Refs] = None
+
+ links: Optional[List[LinkDTO]] = None
+
+ otel: Optional[OTelExtraDTO] = None
+
+ nodes: Optional[Dict[str, Union["SpanDTO", List["SpanDTO"]]]] = None
+
+ model_config = {
+ "json_encoders": {
+ UUID: lambda v: str(v),
+ datetime: lambda dt: dt.isoformat(),
+ },
+ }
+
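+    # Recursively apply the json_encoders above to nested dicts and lists,
+    # so UUIDs and datetimes serialize as strings wherever they appear.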
+ def encode(self, data: Any) -> Any:
+ if isinstance(data, dict):
+ return {k: self.encode(v) for k, v in data.items()}
+ elif isinstance(data, list):
+ return [self.encode(item) for item in data]
+ for type_, encoder in self.model_config["json_encoders"].items(): # type: ignore
+ if isinstance(data, type_):
+ return encoder(data)
+ return data
+
+    def model_dump(self, *args, **kwargs) -> dict:
+        # Exclude None values by default, but let callers override it
+        # instead of raising a duplicate-keyword TypeError.
+        kwargs.setdefault("exclude_none", True)
+
+        return self.encode(super().model_dump(*args, **kwargs))
+
+
+class OTelSpanDTO(BaseModel):
+ context: OTelContextDTO
+
+ name: str
+ kind: OTelSpanKind = OTelSpanKind.SPAN_KIND_UNSPECIFIED
+
+ start_time: datetime
+ end_time: datetime
+
+ status_code: OTelStatusCode = OTelStatusCode.STATUS_CODE_UNSET
+ status_message: Optional[str] = None
+
+ attributes: Optional[Attributes] = None
+ events: Optional[List[OTelEventDTO]] = None
+
+ parent: Optional[OTelContextDTO] = None
+ links: Optional[List[OTelLinkDTO]] = None
+
+
+# oss.src.apis.fastapi.observability.models
+
+from typing import List, Optional
+from datetime import datetime
+
+
+class AgentaNodeDTO(SpanDTO):
+ pass
+
+
+class Tree(BaseModel):
+ version: str
+ nodes: List[AgentaNodeDTO]
+
+
+# oss.src.core.blobs.dtos
+
+
+class Blob(Identifier, Lifecycle):
+ flags: Optional[Flags] = None # type: ignore
+ tags: Optional[Tags] = None # type: ignore
+ meta: Optional[Meta] = None # type: ignore
+
+ data: Optional[Data] = None # type: ignore
+
+ set_id: Optional[UUID] = None
+
+
+# oss.src.core.testcases.dtos
+# oss.src.core.testsets.dtos
+
+
+class TestsetIdAlias(AliasConfig):
+ testset_id: Optional[UUID] = None
+ set_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="testset_id",
+ )
+
+
+class TestsetVariantIdAlias(AliasConfig):
+ testset_variant_id: Optional[UUID] = None
+ variant_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="testset_variant_id",
+ )
+
+
+class Testcase(Blob, TestsetIdAlias):
+ def model_post_init(self, __context) -> None:
+ sync_alias("testset_id", "set_id", self)
+
+
+class TestsetFlags(BaseModel):
+ has_testcases: Optional[bool] = None
+ has_traces: Optional[bool] = None
+
+
+class TestsetRevisionData(BaseModel):
+ testcase_ids: Optional[List[UUID]] = None
+ testcases: Optional[List[Testcase]] = None
+
+
+class SimpleTestset(
+ Identifier,
+ Slug,
+ Lifecycle,
+ Header,
+):
+ flags: Optional[TestsetFlags] = None
+ tags: Optional[Tags] = None # type: ignore
+ meta: Optional[Meta] = None # type: ignore
+
+ data: Optional[TestsetRevisionData] = None
+
+
+class Testset(Artifact):
+ flags: Optional[TestsetFlags] = None # type: ignore
+
+
+class TestsetRevision(
+ Revision,
+ TestsetIdAlias,
+ TestsetVariantIdAlias,
+):
+ flags: Optional[TestsetFlags] = None # type: ignore
+
+ data: Optional[TestsetRevisionData] = None # type: ignore
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("testset_id", "artifact_id", self)
+ sync_alias("testset_variant_id", "variant_id", self)
+
+
+class SimpleTestsetCreate(Slug, Header):
+ tags: Optional[Tags] = None # type: ignore
+ meta: Optional[Meta] = None # type: ignore
+ data: Optional[TestsetRevisionData] = None
+
+
+class SimpleTestsetEdit(
+ Identifier,
+ Header,
+):
+ # flags: Optional[TestsetFlags] = None
+ tags: Optional[Tags] = None # type: ignore
+ meta: Optional[Meta] = None # type: ignore
+
+ data: Optional[TestsetRevisionData] = None
+
+
+class TestsetResponse(BaseModel):
+ count: int = 0
+ testset: Optional[Testset] = None
+
+
+class TestsetRevisionResponse(BaseModel):
+ count: int = 0
+ testset_revision: Optional[TestsetRevision] = None
+
+
+class SimpleTestsetResponse(BaseModel):
+ count: int = 0
+ testset: Optional[SimpleTestset] = None
+
+
+# oss.src.core.workflows.dtos
+from typing import Optional, Dict, Any, Callable
+from uuid import UUID, uuid4
+from urllib.parse import urlparse
+
+from pydantic import (
+ BaseModel,
+ Field,
+ model_validator,
+ ValidationError,
+)
+
+from jsonschema import (
+ Draft202012Validator,
+ Draft201909Validator,
+ Draft7Validator,
+ Draft4Validator,
+ Draft6Validator,
+)
+from jsonschema.exceptions import SchemaError
+
+# aliases ----------------------------------------------------------------------
+
+
+class WorkflowIdAlias(AliasConfig):
+ workflow_id: Optional[UUID] = None
+ artifact_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="workflow_id",
+ )
+
+
+class WorkflowVariantIdAlias(AliasConfig):
+ workflow_variant_id: Optional[UUID] = None
+ variant_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="workflow_variant_id",
+ )
+
+
+class WorkflowRevisionIdAlias(AliasConfig):
+ workflow_revision_id: Optional[UUID] = None
+ revision_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="workflow_revision_id",
+ )
+
+
+# globals ----------------------------------------------------------------------
+
+
+class WorkflowFlags(BaseModel):
+ is_custom: Optional[bool] = None
+ is_evaluator: Optional[bool] = None
+ is_human: Optional[bool] = None
+
+
+# workflows --------------------------------------------------------------------
+
+
+class Workflow(Artifact):
+ flags: Optional[WorkflowFlags] = None
+
+
+class WorkflowCreate(ArtifactCreate):
+ flags: Optional[WorkflowFlags] = None
+
+
+class WorkflowEdit(ArtifactEdit):
+ flags: Optional[WorkflowFlags] = None
+
+
+# workflow variants ------------------------------------------------------------
+
+
+class WorkflowVariant(
+ Variant,
+ WorkflowIdAlias,
+):
+ flags: Optional[WorkflowFlags] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("workflow_id", "artifact_id", self)
+
+
+class WorkflowVariantCreate(
+ VariantCreate,
+ WorkflowIdAlias,
+):
+ flags: Optional[WorkflowFlags] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("workflow_id", "artifact_id", self)
+
+
+class WorkflowVariantEdit(VariantEdit):
+ flags: Optional[WorkflowFlags] = None
+
+
+class WorkflowVariantQuery(VariantQuery):
+ flags: Optional[WorkflowFlags] = None
+
+
+# workflow revisions -----------------------------------------------------------
+
+
+class WorkflowServiceVersion(BaseModel):
+ version: Optional[str] = None
+
+
+class WorkflowServiceInterface(WorkflowServiceVersion):
+ uri: Optional[str] = None # str (Enum) w/ validation
+ url: Optional[str] = None # str w/ validation
+ headers: Optional[Dict[str, Reference | str]] = None # either hardcoded or a secret
+ handler: Optional[Callable] = None
+
+ schemas: Optional[Dict[str, Schema]] = None # json-schema instead of pydantic
+ mappings: Optional[Mappings] = None # used in the workflow interface
+
+
+class WorkflowServiceConfiguration(WorkflowServiceInterface):
+ script: Optional[str] = None # str w/ validation
+ parameters: Optional[Data] = None # configuration values
+
+
+class WorkflowRevisionData(WorkflowServiceConfiguration):
+ # LEGACY FIELDS
+ service: Optional[dict] = None # url, schema, kind, etc
+ configuration: Optional[dict] = None # parameters, variables, etc
+
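+    # Collect all legacy-service validation errors (invalid JSON Schema in
+    # `format`, malformed `url`) and raise them together as one ValidationError.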
+ @model_validator(mode="after")
+ def validate_all(self) -> "WorkflowRevisionData":
+ errors = []
+
+ if self.service and self.service.get("agenta") and self.service.get("format"):
+ _format = self.service.get("format") # pylint: disable=redefined-builtin
+
+ try:
+ validator_class = self._get_validator_class_from_schema(_format) # type: ignore
+ validator_class.check_schema(_format) # type: ignore
+ except SchemaError as e:
+ errors.append(
+ {
+ "loc": ("format",),
+ "msg": f"Invalid JSON Schema: {e.message}",
+ "type": "value_error",
+ "ctx": {"error": str(e)},
+ "input": _format,
+ }
+ )
+
+ if self.service and self.service.get("agenta") and self.service.get("url"):
+ url = self.service.get("url")
+
+ if not self._is_valid_http_url(url):
+ errors.append(
+ {
+ "loc": ("url",),
+ "msg": "Invalid HTTP(S) URL",
+ "type": "value_error.url",
+ "ctx": {"error": "Invalid URL format"},
+ "input": url,
+ }
+ )
+
+ if errors:
+ raise ValidationError.from_exception_data(
+ self.__class__.__name__,
+ errors,
+ )
+
+ return self
+
+ @staticmethod
+ def _get_validator_class_from_schema(schema: dict):
+ """Detect JSON Schema draft from $schema or fallback to 2020-12."""
+ schema_uri = schema.get(
+ "$schema", "https://json-schema.org/draft/2020-12/schema"
+ )
+
+ if "2020-12" in schema_uri:
+ return Draft202012Validator
+ elif "2019-09" in schema_uri:
+ return Draft201909Validator
+ elif "draft-07" in schema_uri:
+ return Draft7Validator
+ elif "draft-06" in schema_uri:
+ return Draft6Validator
+ elif "draft-04" in schema_uri:
+ return Draft4Validator
+ else:
+ # fallback default if unknown $schema
+ return Draft202012Validator
+
+ @staticmethod
+ def _is_valid_http_url(url: str) -> bool:
+ parsed = urlparse(url)
+ return parsed.scheme in ("http", "https") and bool(parsed.netloc)
+
+
+class WorkflowRevision(
+ Revision,
+ WorkflowIdAlias,
+ WorkflowVariantIdAlias,
+):
+ flags: Optional[WorkflowFlags] = None
+
+ data: Optional[WorkflowRevisionData] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("workflow_id", "artifact_id", self)
+ sync_alias("workflow_variant_id", "variant_id", self)
+
+
+class WorkflowRevisionCreate(
+ RevisionCreate,
+ WorkflowIdAlias,
+ WorkflowVariantIdAlias,
+):
+ flags: Optional[WorkflowFlags] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("workflow_id", "artifact_id", self)
+ sync_alias("workflow_variant_id", "variant_id", self)
+
+
+class WorkflowRevisionEdit(RevisionEdit):
+ flags: Optional[WorkflowFlags] = None
+
+
+class WorkflowRevisionQuery(RevisionQuery):
+ flags: Optional[WorkflowFlags] = None
+
+
+class WorkflowRevisionCommit(
+ RevisionCommit,
+ WorkflowIdAlias,
+ WorkflowVariantIdAlias,
+):
+ flags: Optional[WorkflowFlags] = None
+
+ data: Optional[WorkflowRevisionData] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("workflow_id", "artifact_id", self)
+ sync_alias("workflow_variant_id", "variant_id", self)
+
+
+class WorkflowRevisionsLog(
+ RevisionsLog,
+ WorkflowIdAlias,
+ WorkflowVariantIdAlias,
+ WorkflowRevisionIdAlias,
+):
+ def model_post_init(self, __context) -> None:
+ sync_alias("workflow_id", "artifact_id", self)
+ sync_alias("workflow_variant_id", "variant_id", self)
+ sync_alias("workflow_revision_id", "revision_id", self)
+
+
+# forks ------------------------------------------------------------------------
+
+
+class WorkflowRevisionFork(RevisionFork):
+ flags: Optional[WorkflowFlags] = None
+
+ data: Optional[WorkflowRevisionData] = None
+
+
+class WorkflowRevisionForkAlias(AliasConfig):
+ workflow_revision: Optional[WorkflowRevisionFork] = None
+
+ revision: Optional[RevisionFork] = Field(
+ default=None,
+ exclude=True,
+ alias="workflow_revision",
+ )
+
+
+class WorkflowVariantFork(VariantFork):
+ flags: Optional[WorkflowFlags] = None
+
+
+class WorkflowVariantForkAlias(AliasConfig):
+ workflow_variant: Optional[WorkflowVariantFork] = None
+
+ variant: Optional[VariantFork] = Field(
+ default=None,
+ exclude=True,
+ alias="workflow_variant",
+ )
+
+
+class WorkflowFork(
+ ArtifactFork,
+ WorkflowIdAlias,
+ WorkflowVariantIdAlias,
+ WorkflowVariantForkAlias,
+ WorkflowRevisionIdAlias,
+ WorkflowRevisionForkAlias,
+):
+ def model_post_init(self, __context) -> None:
+ sync_alias("workflow_id", "artifact_id", self)
+ sync_alias("workflow_variant_id", "variant_id", self)
+ sync_alias("workflow_variant", "variant", self)
+ sync_alias("workflow_revision_id", "revision_id", self)
+ sync_alias("workflow_revision", "revision", self)
+
+
+# workflow services ------------------------------------------------------------
+
+
+class WorkflowServiceData(BaseModel):
+ parameters: Optional[Data] = None
+ inputs: Optional[Data] = None
+ outputs: Optional[Data | str] = None
+ #
+ trace_parameters: Optional[Data] = None
+ trace_inputs: Optional[Data] = None
+ trace_outputs: Optional[Data | str] = None
+ #
+ trace: Optional[Trace] = None
+ # LEGACY -- used for workflow execution traces
+ tree: Optional[Tree] = None
+
+
+class WorkflowServiceRequest(Version, Metadata):
+ tags: Optional[Tags] = None
+ meta: Optional[Meta] = None
+
+ data: Optional[WorkflowServiceData] = None
+
+ references: Optional[Dict[str, Reference]] = None
+ links: Optional[Dict[str, Link]] = None
+
+ credentials: Optional[str] = None # Fix typing
+ secrets: Optional[Dict[str, Any]] = None # Fix typing
+
+
+class WorkflowServiceResponse(Identifier, Version):
+ data: Optional[WorkflowServiceData] = None
+
+ links: Optional[Dict[str, Link]] = None
+
+ trace_id: Optional[str] = None
+
+ status: Status = Status()
+
+    def __init__(self, **data):
+        super().__init__(**data)
+
+        self.id = self.id or uuid4()
+        self.version = self.version or "2025.07.14"
+
+
+class SuccessStatus(Status):
+ code: int = 200
+
+
+class HandlerNotFoundStatus(Status):
+ code: int = 501
+ type: str = "https://docs.agenta.ai/errors#v1:uri:handler-not-found"
+
+ def __init__(self, uri: Optional[str] = None):
+ super().__init__()
+ self.message = f"The handler at '{uri}' is not implemented or not available."
+
+
+class RevisionDataNotFoundStatus(Status):
+ code: int = 404
+ type: str = "https://docs.agenta.ai/errors#v1:uri:revision-data-not-found"
+
+ def __init__(self, uri: Optional[str] = None):
+ super().__init__()
+ self.message = f"The revision data at '{uri}' could not be found."
+
+
+class RequestDataNotFoundStatus(Status):
+ code: int = 404
+ type: str = "https://docs.agenta.ai/errors#v1:uri:request-data-not-found"
+
+ def __init__(self, uri: Optional[str] = None):
+ super().__init__()
+ self.message = f"The request data at '{uri}' could not be found."
+
+
+ERRORS_BASE_URL = "https://docs.agenta.ai/errors"
+
+
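+# Raisable counterpart of the Status models above: service handlers raise
+# ErrorStatus, and the invoke_* helpers convert it into a response Status.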
+class ErrorStatus(Exception):
+ code: int
+ type: str
+ message: str
+ stacktrace: Optional[str] = None
+
+ def __init__(
+ self,
+ code: int,
+ type: str,
+ message: str,
+ stacktrace: Optional[str] = None,
+ ):
+ super().__init__()
+ self.code = code
+ self.type = type
+ self.message = message
+ self.stacktrace = stacktrace
+
+ def __str__(self):
+ return f"[EVAL] {self.code} - {self.message} ({self.type})" + (
+ f"\nStacktrace: {self.stacktrace}" if self.stacktrace else ""
+ )
+
+ def __repr__(self):
+ return f"ErrorStatus(code={self.code}, type='{self.type}', message='{self.message}')"
+
+
+# ------------------------------------------------------------------------------
+
+
+class EvaluatorRevision(BaseModel):
+ id: Optional[UUID] = None
+ slug: Optional[str] = None
+ version: Optional[str] = None
+
+ data: Optional[WorkflowRevisionData] = None
+
+
+class ApplicationServiceRequest(WorkflowServiceRequest):
+ pass
+
+
+class ApplicationServiceResponse(WorkflowServiceResponse):
+ pass
+
+
+class EvaluatorServiceRequest(WorkflowServiceRequest):
+ pass
+
+
+class EvaluatorServiceResponse(WorkflowServiceResponse):
+ pass
+
+
+# oss.src.core.evaluators.dtos
+
+
+class EvaluatorIdAlias(AliasConfig):
+ evaluator_id: Optional[UUID] = None
+ workflow_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="evaluator_id",
+ )
+
+
+class EvaluatorVariantIdAlias(AliasConfig):
+ evaluator_variant_id: Optional[UUID] = None
+ workflow_variant_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="evaluator_variant_id",
+ )
+
+
+class EvaluatorRevisionData(WorkflowRevisionData):
+ pass
+
+
+class EvaluatorFlags(WorkflowFlags):
+ def __init__(self, **data):
+ data["is_evaluator"] = True
+
+ super().__init__(**data)
+
+
+class SimpleEvaluatorFlags(EvaluatorFlags):
+ pass
+
+
+class SimpleEvaluatorData(EvaluatorRevisionData):
+ pass
+
+
+class Evaluator(Workflow):
+ flags: Optional[EvaluatorFlags] = None
+
+
+class SimpleEvaluatorRevision(
+ WorkflowRevision,
+ EvaluatorIdAlias,
+ EvaluatorVariantIdAlias,
+):
+ flags: Optional[EvaluatorFlags] = None
+
+ data: Optional[EvaluatorRevisionData] = None
+
+
+class SimpleEvaluator(Identifier, Slug, Lifecycle, Header, Metadata):
+ flags: Optional[SimpleEvaluatorFlags] = None
+
+ data: Optional[SimpleEvaluatorData] = None
+
+
+class SimpleEvaluatorCreate(Slug, Header, Metadata):
+ flags: Optional[SimpleEvaluatorFlags] = None
+
+ data: Optional[SimpleEvaluatorData] = None
+
+
+class SimpleEvaluatorEdit(Identifier, Header, Metadata):
+ flags: Optional[SimpleEvaluatorFlags] = None
+
+ data: Optional[SimpleEvaluatorData] = None
+
+
+class SimpleEvaluatorResponse(BaseModel):
+ count: int = 0
+ evaluator: Optional[SimpleEvaluator] = None
+
+
+class EvaluatorRevisionResponse(BaseModel):
+ count: int = 0
+ evaluator_revision: Optional[EvaluatorRevision] = None
+
+
+# oss.src.core.applications.dtos
+
+# aliases ----------------------------------------------------------------------
+
+
+class ApplicationIdAlias(AliasConfig):
+ application_id: Optional[UUID] = None
+ workflow_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="application_id",
+ )
+
+
+class ApplicationVariantIdAlias(AliasConfig):
+ application_variant_id: Optional[UUID] = None
+ workflow_variant_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="application_variant_id",
+ )
+
+
+class ApplicationRevisionIdAlias(AliasConfig):
+ application_revision_id: Optional[UUID] = None
+ workflow_revision_id: Optional[UUID] = Field(
+ default=None,
+ exclude=True,
+ alias="application_revision_id",
+ )
+
+
+# globals ----------------------------------------------------------------------
+
+
+class ApplicationFlags(WorkflowFlags):
+    def __init__(self, **data):
+        # Unlike EvaluatorFlags, applications are not evaluators.
+        data["is_evaluator"] = False
+
+        super().__init__(**data)
+
+
+# applications -------------------------------------------------------------------
+
+
+class Application(Workflow):
+ flags: Optional[ApplicationFlags] = None
+
+
+class ApplicationCreate(WorkflowCreate):
+ flags: Optional[ApplicationFlags] = None
+
+
+class ApplicationEdit(WorkflowEdit):
+ flags: Optional[ApplicationFlags] = None
+
+
+# application variants -----------------------------------------------------------
+
+
+class ApplicationVariant(
+ WorkflowVariant,
+ ApplicationIdAlias,
+):
+ flags: Optional[ApplicationFlags] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("application_id", "workflow_id", self)
+
+
+class ApplicationVariantCreate(
+ WorkflowVariantCreate,
+ ApplicationIdAlias,
+):
+ flags: Optional[ApplicationFlags] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("application_id", "workflow_id", self)
+
+
+class ApplicationVariantEdit(WorkflowVariantEdit):
+ flags: Optional[ApplicationFlags] = None
+
+
+# application revisions -----------------------------------------------------
+
+
+class ApplicationRevisionData(WorkflowRevisionData):
+ pass
+
+
+class ApplicationRevision(
+ WorkflowRevision,
+ ApplicationIdAlias,
+ ApplicationVariantIdAlias,
+):
+ flags: Optional[ApplicationFlags] = None
+
+ data: Optional[ApplicationRevisionData] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("application_id", "workflow_id", self)
+ sync_alias("application_variant_id", "workflow_variant_id", self)
+
+
+class ApplicationRevisionCreate(
+ WorkflowRevisionCreate,
+ ApplicationIdAlias,
+ ApplicationVariantIdAlias,
+):
+ flags: Optional[ApplicationFlags] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("application_id", "workflow_id", self)
+ sync_alias("application_variant_id", "workflow_variant_id", self)
+
+
+class ApplicationRevisionEdit(WorkflowRevisionEdit):
+ flags: Optional[ApplicationFlags] = None
+
+
+class ApplicationRevisionCommit(
+ WorkflowRevisionCommit,
+ ApplicationIdAlias,
+ ApplicationVariantIdAlias,
+):
+ flags: Optional[ApplicationFlags] = None
+
+ data: Optional[ApplicationRevisionData] = None
+
+ def model_post_init(self, __context) -> None:
+ sync_alias("application_id", "workflow_id", self)
+ sync_alias("application_variant_id", "workflow_variant_id", self)
+
+
+class ApplicationRevisionResponse(BaseModel):
+ count: int = 0
+ application_revision: Optional[ApplicationRevision] = None
+
+
+class ApplicationRevisionsResponse(BaseModel):
+ count: int = 0
+ application_revisions: List[ApplicationRevision] = []
+
+
+# simple applications ------------------------------------------------------------
+
+
+class LegacyApplicationFlags(WorkflowFlags):
+ pass
+
+
+class LegacyApplicationData(WorkflowRevisionData):
+ pass
+
+
+class LegacyApplication(Identifier, Slug, Lifecycle, Header, Metadata):
+ flags: Optional[LegacyApplicationFlags] = None
+
+ data: Optional[LegacyApplicationData] = None
+
+
+class LegacyApplicationCreate(Slug, Header, Metadata):
+ flags: Optional[LegacyApplicationFlags] = None
+
+ data: Optional[LegacyApplicationData] = None
+
+
+class LegacyApplicationEdit(Identifier, Header, Metadata):
+ flags: Optional[LegacyApplicationFlags] = None
+
+ data: Optional[LegacyApplicationData] = None
+
+
+class LegacyApplicationResponse(BaseModel):
+ count: int = 0
+ application: Optional[LegacyApplication] = None
+
+
+# end of oss.src.core.applications.dtos
diff --git a/api/ee/tests/manual/evaluations/sdk/entities.py b/api/ee/tests/manual/evaluations/sdk/entities.py
new file mode 100644
index 0000000000..12c714db95
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/sdk/entities.py
@@ -0,0 +1,447 @@
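+# Thin async wrappers over the preview REST API: each upsert_* helper edits
+# an existing entity or creates a new one, then resolves and returns the id
+# of its latest revision.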
+from typing import List, Dict, Any, Callable, Optional
+from uuid import uuid4, UUID
+
+from definitions import (
+ Testcase,
+ TestsetRevisionData,
+ TestsetRevision,
+ ApplicationRevision,
+ EvaluatorRevision,
+ #
+ SimpleTestsetCreate,
+ SimpleTestsetEdit,
+ #
+ SimpleTestsetResponse,
+ TestsetRevisionResponse,
+ #
+ Evaluator,
+ #
+ SimpleEvaluatorData,
+ SimpleEvaluatorCreate,
+ SimpleEvaluatorEdit,
+ #
+ EvaluatorRevisionData,
+ SimpleEvaluatorResponse,
+ EvaluatorRevisionResponse,
+ #
+ ApplicationRevisionResponse,
+ #
+ LegacyApplicationData,
+ LegacyApplicationCreate,
+ LegacyApplicationEdit,
+ #
+ LegacyApplicationResponse,
+)
+from services import (
+ REGISTRY,
+ register_handler,
+ retrieve_handler,
+)
+
+from client import authed_api
+
+
+client = authed_api()
+
+APPLICATION_REVISION_ID = uuid4()
+APPLICATION_REVISION = ApplicationRevision(
+ id=APPLICATION_REVISION_ID,
+ slug=str(APPLICATION_REVISION_ID)[-12:],
+ version="0",
+)
+
+EVALUATOR_REVISION_ID = uuid4()
+EVALUATOR_REVISION = EvaluatorRevision(
+ id=EVALUATOR_REVISION_ID,
+ slug=str(EVALUATOR_REVISION_ID)[-12:],
+ version="0",
+)
+
+
+async def _retrieve_testset(
+ testset_id: Optional[UUID] = None,
+ testset_revision_id: Optional[UUID] = None,
+) -> Optional[TestsetRevision]:
+ response = client(
+ method="POST",
+ endpoint="/preview/testsets/revisions/retrieve",
+ params={
+ "testset_id": testset_id,
+ "testset_revision_id": testset_revision_id,
+ },
+ )
+
+ response.raise_for_status()
+
+ testset_revision_response = TestsetRevisionResponse(**response.json())
+
+ testset_revision = testset_revision_response.testset_revision
+
+ return testset_revision
+
+
+async def retrieve_testset(
+ testset_revision_id: Optional[UUID] = None,
+) -> Optional[TestsetRevision]:
+ response = await _retrieve_testset(
+ testset_revision_id=testset_revision_id,
+ )
+
+ return response
+
+
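+# Create-or-update: if the testset (or one of its revisions) already exists,
+# PUT an edit; otherwise POST a new simple testset. Both paths return the id
+# of the resulting latest revision.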
+async def upsert_testset(
+ testcases_data: List[Dict[str, Any]],
+ #
+ testset_revision_id: Optional[UUID] = None,
+ #
+ testset_id: Optional[UUID] = None,
+ testset_name: Optional[str] = None,
+ testset_description: Optional[str] = None,
+) -> Optional[UUID]:
+ testset_revision_data = TestsetRevisionData(
+ testcases=[
+ Testcase(
+ data=testcase_data,
+ )
+ for testcase_data in testcases_data
+ ]
+ )
+
+ retrieve_response = None
+
+ if testset_revision_id:
+ retrieve_response = await _retrieve_testset(
+ testset_revision_id=testset_revision_id,
+ )
+ elif testset_id:
+ retrieve_response = await _retrieve_testset(
+ testset_id=testset_id,
+ )
+
+    if retrieve_response and retrieve_response.id:
+        # The caller may have passed only a revision id, so fall back to the
+        # testset id recorded on the retrieved revision.
+        testset_id = testset_id or retrieve_response.testset_id
+
+        testset_edit_request = SimpleTestsetEdit(
+            id=testset_id,
+            name=testset_name,
+            description=testset_description,
+            data=testset_revision_data,
+        )
+
+ response = client(
+ method="PUT",
+ endpoint=f"/preview/simple/testsets/{testset_id}",
+ json={
+ "testset": testset_edit_request.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+ },
+ )
+
+ try:
+ response.raise_for_status()
+ except Exception as e:
+ print(f"[ERROR]: Failed to update testset: {e}")
+ return None
+
+ else:
+ testset_create_request = SimpleTestsetCreate(
+ name=testset_name,
+ description=testset_description,
+ slug=uuid4().hex,
+ data=testset_revision_data,
+ )
+
+ response = client(
+ method="POST",
+ endpoint="/preview/simple/testsets/",
+ json={
+ "testset": testset_create_request.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+ },
+ )
+
+ try:
+ response.raise_for_status()
+ except Exception as e:
+ print(f"[ERROR]: Failed to create testset: {e}")
+ return None
+
+ testset_response = SimpleTestsetResponse(**response.json())
+
+ testset = testset_response.testset
+
+ if not testset or not testset.id:
+ return None
+
+ testset_revision = await _retrieve_testset(
+ testset_id=testset.id,
+ )
+
+ if not testset_revision or not testset_revision.id:
+ return None
+
+ return testset_revision.id
+
+
+async def _retrieve_application(
+ application_id: Optional[UUID] = None,
+ application_revision_id: Optional[UUID] = None,
+) -> Optional[ApplicationRevision]:
+ response = client(
+ method="POST",
+        endpoint="/preview/legacy/applications/revisions/retrieve",
+ params={
+ "application_id": application_id,
+ "application_revision_id": application_revision_id,
+ },
+ )
+ response.raise_for_status()
+
+ application_revision_response = ApplicationRevisionResponse(**response.json())
+
+ application_revision = application_revision_response.application_revision
+
+ if not application_revision or not application_revision.id:
+ return None
+
+ if not application_revision.data or not application_revision.data.uri:
+ return None
+
+ application_revision.data.handler = retrieve_handler(application_revision.data.uri)
+
+ return application_revision
+
+
+async def retrieve_application(
+ application_revision_id: Optional[UUID] = None,
+) -> Optional[ApplicationRevision]:
+ response = await _retrieve_application(
+ application_revision_id=application_revision_id,
+ )
+
+ return response
+
+
+async def upsert_application(
+ application_handler: Callable,
+ application_script: Optional[str] = None,
+ application_parameters: Optional[Dict[str, Any]] = None,
+ #
+ application_revision_id: Optional[UUID] = None,
+ #
+ application_id: Optional[UUID] = None,
+ application_name: Optional[str] = None,
+ application_description: Optional[str] = None,
+) -> Optional[UUID]:
+ legacy_application_data = LegacyApplicationData(
+ uri=register_handler(application_handler),
+ script=application_script,
+ parameters=application_parameters,
+ )
+
+ retrieve_response = None
+
+ if application_revision_id:
+ retrieve_response = await _retrieve_application(
+ application_revision_id=application_revision_id,
+ )
+ elif application_id:
+ retrieve_response = await _retrieve_application(
+ application_id=application_id,
+ )
+
+    if retrieve_response and retrieve_response.id:
+        # Fall back to the application id recorded on the retrieved revision
+        # when the caller passed only a revision id.
+        application_id = application_id or retrieve_response.application_id
+
+        application_edit_request = LegacyApplicationEdit(
+            id=application_id,
+            name=application_name,
+            description=application_description,
+            data=legacy_application_data,
+        )
+
+ response = client(
+ method="PUT",
+ endpoint=f"/preview/legacy/applications/{application_id}",
+ json={
+ "application": application_edit_request.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+ },
+ )
+
+ try:
+ response.raise_for_status()
+ except Exception as e:
+ print("[ERROR]: Failed to update application:", e)
+ return None
+
+ else:
+ application_create_request = LegacyApplicationCreate(
+ name=application_name,
+ description=application_description,
+ slug=uuid4().hex,
+ data=legacy_application_data,
+ )
+
+ response = client(
+ method="POST",
+ endpoint="/preview/legacy/applications/",
+ json={
+ "application": application_create_request.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+ },
+ )
+
+ try:
+ response.raise_for_status()
+ except Exception as e:
+ print("[ERROR]: Failed to create application:", e)
+ return None
+
+ application_response = LegacyApplicationResponse(**response.json())
+
+ application = application_response.application
+
+ if not application or not application.id:
+ return None
+
+ application_revision = await _retrieve_application(
+ application_id=application.id,
+ )
+
+ if not application_revision or not application_revision.id:
+ return None
+
+ return application_revision.id
+
+
+async def _retrieve_evaluator(
+ evaluator_id: Optional[UUID] = None,
+ evaluator_revision_id: Optional[UUID] = None,
+) -> Optional[EvaluatorRevision]:
+ response = client(
+ method="POST",
+        endpoint="/preview/evaluators/revisions/retrieve",
+ params={
+ "evaluator_id": evaluator_id,
+ "evaluator_revision_id": evaluator_revision_id,
+ },
+ )
+ response.raise_for_status()
+
+ evaluator_revision_response = EvaluatorRevisionResponse(**response.json())
+
+ evaluator_revision = evaluator_revision_response.evaluator_revision
+
+ return evaluator_revision
+
+
+async def retrieve_evaluator(
+ evaluator_revision_id: Optional[UUID] = None,
+) -> Optional[EvaluatorRevision]:
+ response = await _retrieve_evaluator(
+ evaluator_revision_id=evaluator_revision_id,
+ )
+
+ return response
+
+
+async def upsert_evaluator(
+ evaluator_handler: Callable,
+ evaluator_script: Optional[str] = None,
+ evaluator_parameters: Optional[Dict[str, Any]] = None,
+ #
+ evaluator_revision_id: Optional[UUID] = None,
+ #
+ evaluator_id: Optional[UUID] = None,
+ evaluator_name: Optional[str] = None,
+ evaluator_description: Optional[str] = None,
+) -> Optional[UUID]:
+ simple_evaluator_data = SimpleEvaluatorData(
+ uri=register_handler(evaluator_handler),
+ script=evaluator_script,
+ parameters=evaluator_parameters,
+ )
+
+ retrieve_response = None
+
+ if evaluator_revision_id:
+ retrieve_response = await _retrieve_evaluator(
+ evaluator_revision_id=evaluator_revision_id,
+ )
+ elif evaluator_id:
+ retrieve_response = await _retrieve_evaluator(
+ evaluator_id=evaluator_id,
+ )
+
+ if retrieve_response and retrieve_response.id:
+ evaluator_edit_request = SimpleEvaluatorEdit(
+ id=evaluator_id,
+ name=evaluator_name,
+ description=evaluator_description,
+ data=simple_evaluator_data,
+ )
+
+ response = client(
+ method="PUT",
+ endpoint=f"/preview/simple/evaluators/{evaluator_id}",
+ json={
+ "evaluator": evaluator_edit_request.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+ },
+ )
+
+ try:
+ response.raise_for_status()
+ except Exception as e:
+ print("[ERROR]: Failed to update evaluator:", e)
+ return None
+
+ else:
+ evaluator_create_request = SimpleEvaluatorCreate(
+ name=evaluator_name,
+ description=evaluator_description,
+ slug=uuid4().hex,
+ data=simple_evaluator_data,
+ )
+
+ response = client(
+ method="POST",
+ endpoint="/preview/simple/evaluators/",
+ json={
+ "evaluator": evaluator_create_request.model_dump(
+ mode="json",
+ exclude_none=True,
+ )
+ },
+ )
+
+ try:
+ response.raise_for_status()
+ except Exception as e:
+ print("[ERROR]: Failed to create evaluator:", e)
+ return None
+
+ evaluator_response = SimpleEvaluatorResponse(**response.json())
+
+ evaluator = evaluator_response.evaluator
+
+ if not evaluator or not evaluator.id:
+ return None
+
+ evaluator_revision = await _retrieve_evaluator(
+ evaluator_id=evaluator.id,
+ )
+
+ if not evaluator_revision or not evaluator_revision.id:
+ return None
+
+ return evaluator_revision.id
diff --git a/api/ee/tests/manual/evaluations/sdk/evaluate.py b/api/ee/tests/manual/evaluations/sdk/evaluate.py
new file mode 100644
index 0000000000..e312474144
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/sdk/evaluate.py
@@ -0,0 +1,340 @@
+from typing import Dict, List
+from uuid import UUID
+from copy import deepcopy
+
+from definitions import (
+ Origin,
+ Link,
+ Reference,
+ SimpleEvaluationFlags,
+ SimpleEvaluationStatus,
+ SimpleEvaluationData,
+ TestsetRevision,
+ ApplicationRevision,
+ EvaluatorRevision,
+ WorkflowServiceData,
+ ApplicationServiceRequest,
+ ApplicationServiceResponse,
+ EvaluatorServiceRequest,
+ EvaluatorServiceResponse,
+)
+from evaluations import (
+ create_run,
+ add_scenario,
+ log_result,
+ compute_metrics,
+ get_slug_from_name_and_id,
+)
+
+# from mock_entities import (
+# upsert_testset,
+# retrieve_testset,
+# upsert_application,
+# retrieve_application,
+# upsert_evaluator,
+# retrieve_evaluator,
+# )
+
+from entities import (
+ upsert_testset,
+ retrieve_testset,
+ upsert_application,
+ retrieve_application,
+ upsert_evaluator,
+ retrieve_evaluator,
+)
+
+from services import (
+ invoke_application,
+ invoke_evaluator,
+)
+
+EvaluateSpecs = SimpleEvaluationData
+
+
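+# Steps may arrive as lists of revision ids or as inline definitions
+# (testcase dicts, handler callables); the first phase below normalizes
+# each list into a {revision_id: origin} mapping before anything runs.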
+# @debug
+async def evaluate(
+ data: SimpleEvaluationData,
+):
+ data = deepcopy(data)
+
+ if data.testset_steps:
+ if isinstance(data.testset_steps, list):
+ testset_steps: Dict[str, Origin] = {}
+
+ if all(
+ isinstance(testset_revision_id, UUID)
+ for testset_revision_id in data.testset_steps
+ ):
+ for testset_revision_id in data.testset_steps:
+ if isinstance(testset_revision_id, UUID):
+ testset_steps[str(testset_revision_id)] = "custom"
+
+ elif all(
+ isinstance(testcases_data, List)
+ for testcases_data in data.testset_steps
+ ):
+ for testcases_data in data.testset_steps:
+ if isinstance(testcases_data, List):
+ if all(isinstance(step, Dict) for step in testcases_data):
+ testset_revision_id = await upsert_testset(
+ testcases_data=testcases_data,
+ )
+ testset_steps[str(testset_revision_id)] = "custom"
+
+ data.testset_steps = testset_steps
+
+ if not data.testset_steps or not isinstance(data.testset_steps, dict):
+ print("[failure] missing or invalid testset steps")
+ return None
+
+ if data.application_steps:
+ if isinstance(data.application_steps, list):
+ application_steps: Dict[str, Origin] = {}
+
+ if all(
+ isinstance(application_revision_id, UUID)
+ for application_revision_id in data.application_steps
+ ):
+ for application_revision_id in data.application_steps:
+ if isinstance(application_revision_id, UUID):
+ application_steps[str(application_revision_id)] = "custom"
+
+ elif all(
+ callable(application_handler)
+ for application_handler in data.application_steps
+ ):
+ for application_handler in data.application_steps:
+ if callable(application_handler):
+ application_revision_id = await upsert_application(
+ application_handler=application_handler,
+ )
+ application_steps[str(application_revision_id)] = "custom"
+
+ data.application_steps = application_steps
+
+ if not data.application_steps or not isinstance(data.application_steps, dict):
+ print("[failure] missing or invalid application steps")
+ return None
+
+ if data.evaluator_steps:
+ if isinstance(data.evaluator_steps, list):
+ evaluator_steps: Dict[str, Origin] = {}
+
+ if all(
+ isinstance(evaluator_revision_id, UUID)
+ for evaluator_revision_id in data.evaluator_steps
+ ):
+ for evaluator_revision_id in data.evaluator_steps:
+ if isinstance(evaluator_revision_id, UUID):
+ evaluator_steps[str(evaluator_revision_id)] = "custom"
+
+ elif all(
+ callable(evaluator_handler)
+ for evaluator_handler in data.evaluator_steps
+ ):
+ for evaluator_handler in data.evaluator_steps:
+ if callable(evaluator_handler):
+ evaluator_revision_id = await upsert_evaluator(
+ evaluator_handler=evaluator_handler,
+ )
+ evaluator_steps[str(evaluator_revision_id)] = "custom"
+
+ data.evaluator_steps = evaluator_steps
+
+ if not data.evaluator_steps or not isinstance(data.evaluator_steps, dict):
+ print("[failure] missing or invalid evaluator steps")
+ return None
+
+    testsets: Dict[str, TestsetRevision] = {}  # keyed by stringified revision id
+ for testset_revision_id, origin in data.testset_steps.items():
+ testset_revision = await retrieve_testset(
+ testset_revision_id=testset_revision_id,
+ )
+
+ if not testset_revision:
+ continue
+
+ testsets[testset_revision_id] = testset_revision
+
+    applications: Dict[str, ApplicationRevision] = {}  # keyed by stringified revision id
+ for application_revision_id, origin in data.application_steps.items():
+ application_revision = await retrieve_application(
+ application_revision_id=application_revision_id,
+ )
+
+ if not application_revision:
+ continue
+
+ applications[application_revision_id] = application_revision
+
+    evaluators: Dict[str, EvaluatorRevision] = {}  # keyed by stringified revision id
+ for evaluator_revision_id, origin in data.evaluator_steps.items():
+ evaluator_revision = await retrieve_evaluator(
+ evaluator_revision_id=evaluator_revision_id,
+ )
+
+ if not evaluator_revision:
+ continue
+
+ evaluators[evaluator_revision_id] = evaluator_revision
+
+ run = await create_run(
+ testset_steps=data.testset_steps,
+ application_steps=data.application_steps,
+ evaluator_steps=data.evaluator_steps,
+ )
+
+ if not run.id:
+ print("[failure] could not create evaluation")
+ return None
+
+ scenarios = list()
+
+ for testset_revision_id, testset_revision in testsets.items():
+ if not testset_revision.data or not testset_revision.data.testcases:
+ continue
+
+ testcases = testset_revision.data.testcases
+
+ print()
+ print(f"From testset_id={str(testset_revision.testset_id)}")
+
+ for testcase in testcases:
+ print(f"Evaluating testcase_id={str(testcase.id)}")
+ scenario = await add_scenario(
+ run_id=run.id,
+ )
+
+ results = dict()
+
+ result = await log_result(
+ run_id=run.id,
+ scenario_id=scenario.id,
+ step_key="testset-" + testset_revision.slug, # type: ignore
+ testcase_id=testcase.id,
+ )
+
+ results[testset_revision.slug] = result
+
+ for application_revision_id, application_revision in applications.items():
+ if not application_revision or not application_revision.data:
+ print("Missing or invalid application revision")
+ continue
+
+ application_request = ApplicationServiceRequest(
+ data=WorkflowServiceData(
+ parameters=application_revision.data.parameters,
+ inputs=testcase.data,
+ ),
+ references=dict(
+ testset_revision=Reference(
+ id=testset_revision.id,
+ slug=testset_revision.slug,
+ version=testset_revision.version,
+ ),
+ application_revision=Reference(
+ id=application_revision.id,
+ slug=application_revision.slug,
+ version=application_revision.version,
+ ),
+ ),
+ )
+
+ application_response = await invoke_application(
+ request=application_request,
+ revision=application_revision,
+ )
+
+ if (
+ not application_response
+ or not application_response.data
+ or not application_response.trace_id
+ ):
+ print("Missing or invalid application response")
+ continue
+
+ trace_id = application_response.trace_id
+
+ if not application_revision.id or not application_revision.name:
+ print("Missing application revision ID or name")
+ continue
+
+ application_slug = get_slug_from_name_and_id(
+ name=application_revision.name,
+ id=application_revision.id,
+ )
+
+ result = await log_result(
+ run_id=run.id,
+ scenario_id=scenario.id,
+ step_key="application-" + application_slug, # type: ignore
+ trace_id=trace_id,
+ )
+
+ results[application_slug] = result
+
+ for evaluator_revision_id, evaluator_revision in evaluators.items():
+ if not evaluator_revision or not evaluator_revision.data:
+ print("Missing or invalid evaluator revision")
+ continue
+
+ evaluator_request = EvaluatorServiceRequest(
+ data=WorkflowServiceData(
+ parameters=evaluator_revision.data.parameters,
+ inputs=testcase.data,
+ #
+ trace_outputs=application_response.data.outputs,
+ trace=application_response.data.trace,
+ ),
+ references=dict(
+ testset_revision=Reference(
+ id=testset_revision.id,
+ slug=testset_revision.slug,
+ version=testset_revision.version,
+ ),
+ evaluator_revision=Reference(
+ id=evaluator_revision.id,
+ slug=evaluator_revision.slug,
+ version=evaluator_revision.version,
+ ),
+ ),
+ links=application_response.links,
+ )
+
+ evaluator_response = await invoke_evaluator(
+ request=evaluator_request,
+ revision=evaluator_revision,
+ )
+
+ if not evaluator_response or not evaluator_response.data:
+ print("Missing or invalid evaluator response")
+ continue
+
+ trace_id = evaluator_response.trace_id
+
+ result = await log_result(
+ run_id=run.id,
+ scenario_id=scenario.id,
+ step_key="evaluator-" + evaluator_revision.slug, # type: ignore
+ trace_id=trace_id,
+ )
+
+ results[evaluator_revision.slug] = result
+
+ scenarios.append(
+ {
+ "scenario": scenario,
+ "results": results,
+ },
+ )
+
+ metrics = await compute_metrics(
+ run_id=run.id,
+ )
+
+ return dict(
+ run=run,
+ scenarios=scenarios,
+ metrics=metrics,
+ )
diff --git a/api/ee/tests/manual/evaluations/sdk/evaluations.py b/api/ee/tests/manual/evaluations/sdk/evaluations.py
new file mode 100644
index 0000000000..70720dc583
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/sdk/evaluations.py
@@ -0,0 +1,208 @@
+from typing import Optional, Dict, Any
+from uuid import uuid4, UUID
+
+import unicodedata
+import re
+
+from definitions import (
+ EvaluationRun,
+ EvaluationScenario,
+ EvaluationResult,
+ EvaluationMetrics,
+ Origin,
+ Target,
+)
+
+from client import authed_api
+
+
+client = authed_api()
+
+
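+# Low-level helpers over the preview evaluations API: create a run, attach
+# scenarios to it, log per-step results, and refresh the run's metrics.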
+async def create_run(
+ *,
+ flags: Optional[Dict[str, Any]] = None,
+ tags: Optional[Dict[str, Any]] = None,
+ meta: Optional[Dict[str, Any]] = None,
+ #
+ query_steps: Optional[Target] = None,
+ testset_steps: Optional[Target] = None,
+ application_steps: Optional[Target] = None,
+ evaluator_steps: Optional[Target] = None,
+ repeats: Optional[int] = None,
+) -> EvaluationRun:
+ payload = dict(
+ evaluation=dict(
+ flags=flags,
+ tags=tags,
+ meta=meta,
+ #
+ data=dict(
+ status="running",
+ query_steps=query_steps,
+ testset_steps=testset_steps,
+ application_steps=application_steps,
+ evaluator_steps=evaluator_steps,
+ repeats=repeats,
+ ),
+ )
+ )
+
+ response = client(
+ method="POST",
+        endpoint="/preview/simple/evaluations/",
+ json=payload,
+ )
+
+ try:
+ response.raise_for_status()
+    except Exception:
+ print(response.text)
+ raise
+
+ response = response.json()
+
+ run = EvaluationRun(id=UUID(response["evaluation"]["id"]))
+
+ return run
+
+
+async def add_scenario(
+ *,
+ flags: Optional[Dict[str, Any]] = None,
+ tags: Optional[Dict[str, Any]] = None,
+ meta: Optional[Dict[str, Any]] = None,
+ #
+ run_id: UUID,
+) -> EvaluationScenario:
+ payload = dict(
+ scenarios=[
+ dict(
+ flags=flags,
+ tags=tags,
+ meta=meta,
+ #
+ run_id=str(run_id),
+ )
+ ]
+ )
+
+ response = client(
+ method="POST",
+        endpoint="/preview/evaluations/scenarios/",
+ json=payload,
+ )
+
+ try:
+ response.raise_for_status()
+    except Exception:
+ print(response.text)
+ raise
+
+ response = response.json()
+
+ scenario = EvaluationScenario(**response["scenarios"][0])
+
+ return scenario
+
+
+async def log_result(
+ *,
+ flags: Optional[Dict[str, Any]] = None,
+ tags: Optional[Dict[str, Any]] = None,
+ meta: Optional[Dict[str, Any]] = None,
+ #
+ testcase_id: Optional[UUID] = None,
+ trace_id: Optional[str] = None,
+ error: Optional[dict] = None,
+ #
+ # timestamp: datetime,
+ # repeat_idx: str,
+ step_key: str,
+ run_id: UUID,
+ scenario_id: UUID,
+) -> EvaluationResult:
+ payload = dict(
+ results=[
+ dict(
+ flags=flags,
+ tags=tags,
+ meta=meta,
+ #
+ testcase_id=str(testcase_id) if testcase_id else None,
+ trace_id=trace_id,
+ error=error,
+ #
+ # interval=interval,
+ # timestamp=timestamp,
+ # repeat_idx=repeat_idx,
+ step_key=step_key,
+ run_id=str(run_id),
+ scenario_id=str(scenario_id),
+ )
+ ]
+ )
+
+ response = client(
+ method="POST",
+        endpoint="/preview/evaluations/results/",
+ json=payload,
+ )
+
+ try:
+ response.raise_for_status()
+    except Exception:
+ print(response.text)
+ raise
+
+ response = response.json()
+
+ result = EvaluationResult(**response["results"][0])
+
+ return result
+
+
+async def compute_metrics(
+ run_id: UUID,
+) -> EvaluationMetrics:
+ payload = dict(
+ run_id=str(run_id),
+ )
+
+ response = client(
+ method="POST",
+        endpoint="/preview/evaluations/metrics/refresh",
+ params=payload,
+ )
+
+ try:
+ response.raise_for_status()
+    except Exception:
+ print(response.text)
+ raise
+
+ response = response.json()
+
+ metrics = EvaluationMetrics(**response["metrics"][0])
+
+ return metrics
+
+
+def get_slug_from_name_and_id(
+ name: str,
+ id: UUID, # pylint: disable=redefined-builtin
+) -> str:
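+    # e.g. ("Café Évaluateur", id) -> "cafe-evaluateur-<last 12 hex chars of id>"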
+ # Normalize Unicode (e.g., é → e)
+ name = unicodedata.normalize("NFKD", name)
+ # Remove non-ASCII characters
+ name = name.encode("ascii", "ignore").decode("ascii")
+ # Lowercase and remove non-word characters except hyphens and spaces
+ name = re.sub(r"[^\w\s-]", "", name.lower())
+ # Replace any sequence of hyphens or whitespace with a single hyphen
+ name = re.sub(r"[-\s]+", "-", name)
+ # Trim leading/trailing hyphens
+ name = name.strip("-")
+ # Last 12 characters of the ID
+ slug = f"{name}-{id.hex[-12:]}"
+
+ return slug.lower()
diff --git a/api/ee/tests/manual/evaluations/sdk/loop.py b/api/ee/tests/manual/evaluations/sdk/loop.py
new file mode 100644
index 0000000000..9e166b5fde
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/sdk/loop.py
@@ -0,0 +1,97 @@
+import asyncio
+import random
+import json
+
+from evaluate import (
+ evaluate,
+ EvaluateSpecs,
+)
+from definitions import (
+ ApplicationRevision,
+ ApplicationServiceRequest,
+ EvaluatorRevision,
+ EvaluatorServiceRequest,
+)
+
+
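+# Toy dataset: my_application returns the expected capital only about half
+# the time, so the evaluator's success metric should hover around 50%.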
+dataset = [
+ {"country": "Germany", "capital": "Berlin"},
+ {"country": "France", "capital": "Paris"},
+ {"country": "Spain", "capital": "Madrid"},
+ {"country": "Italy", "capital": "Rome"},
+]
+
+
+async def my_application(
+ revision: ApplicationRevision,
+ request: ApplicationServiceRequest,
+ **kwargs,
+):
+ inputs: dict = request.data.inputs # type:ignore
+ chance = random.choice([True, False])
+ outputs = {
+ "capital": (inputs.get("capital") if chance else "Aloha"),
+ }
+
+ return outputs
+
+
+async def my_evaluator(
+ revision: EvaluatorRevision,
+ request: EvaluatorServiceRequest,
+ **kwargs,
+):
+ inputs: dict = request.data.inputs # type:ignore
+ trace_outputs: dict = request.data.trace_outputs # type:ignore
+ outputs = {
+ "success": trace_outputs.get("capital") == inputs.get("capital"),
+ }
+
+ return outputs
+
+
+async def run_evaluation():
+ specs = EvaluateSpecs(
+ testset_steps=[dataset],
+ application_steps=[my_application],
+ evaluator_steps=[my_evaluator],
+ )
+
+    evaluation = await evaluate(specs)
+
+    return evaluation
+
+
+# export AGENTA_API_URL=http://localhost/api
+# export AGENTA_API_KEY=xxxxxxxx
+
+if __name__ == "__main__":
+    evaluation = asyncio.run(run_evaluation())
+
+    if not evaluation:
+        exit(1)
+
+    print()
+    print("Displaying evaluation")
+    print(f"run_id={evaluation['run'].id}")  # type:ignore
+
+    for scenario in evaluation["scenarios"]:
+        print(" " f"scenario_id={scenario['scenario'].id}")  # type:ignore
+        for step_key, result in scenario["results"].items():  # type:ignore
+            if result.testcase_id:
+                print(
+                    " "
+                    f"step_key={str(step_key).ljust(32)}, testcase_id={result.testcase_id}",
+                )
+            elif result.trace_id:
+                print(
+                    " "
+                    f"step_key={str(step_key).ljust(32)}, trace_id={result.trace_id}",
+                )
+            else:
+                print(
+                    " "
+                    f"step_key={str(step_key).ljust(32)}, error={result.error}",
+                )
+
+    print(f"metrics={json.dumps(evaluation['metrics'].data, indent=4)}")  # type:ignore
diff --git a/api/ee/tests/manual/evaluations/sdk/mock_entities.py b/api/ee/tests/manual/evaluations/sdk/mock_entities.py
new file mode 100644
index 0000000000..8d1d9e5ab4
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/sdk/mock_entities.py
@@ -0,0 +1,90 @@
+from typing import List, Dict, Any, Callable
+from uuid import uuid4, UUID
+
+from definitions import (
+ Testcase,
+ TestsetRevisionData,
+ TestsetRevision,
+ ApplicationRevision,
+ ApplicationRevisionData,
+ EvaluatorRevision,
+ WorkflowRevisionData,
+)
+
+from services import register_handler
+
+TESTSET_REVISION_ID = uuid4()
+TESTSET_REVISION = TestsetRevision(
+ id=TESTSET_REVISION_ID,
+ slug=str(TESTSET_REVISION_ID)[-12:],
+ data=TestsetRevisionData(
+ testcases=[
+ Testcase(
+ id=uuid4(),
+ data={"country": "Germany", "capital": "Berlin"},
+ ),
+ Testcase(
+ id=uuid4(),
+ data={"country": "France", "capital": "Paris"},
+ ),
+ ]
+ ),
+)
+
+APPLICATION_REVISION_ID = uuid4()
+APPLICATION_REVISION = ApplicationRevision(
+ id=APPLICATION_REVISION_ID,
+ slug=str(APPLICATION_REVISION_ID)[-12:],
+ version="0",
+ data=ApplicationRevisionData(),
+)
+
+EVALUATOR_REVISION_ID = uuid4()
+EVALUATOR_REVISION = EvaluatorRevision(
+ id=EVALUATOR_REVISION_ID,
+ slug=str(EVALUATOR_REVISION_ID)[-12:],
+ version="0",
+ data=WorkflowRevisionData(),
+)
+
+MOCK_URI = None
+
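+# In-memory stand-ins for entities.py: upserts hand back the fixed revision
+# ids above and retrieves return the canned revisions, so evaluate() can be
+# exercised without hitting the API.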
+
+async def upsert_testset(
+ testcases_data: List[Dict[str, Any]],
+) -> UUID:
+ return TESTSET_REVISION_ID
+
+
+async def retrieve_testset(
+ testset_revision_id: UUID,
+) -> TestsetRevision:
+ return TESTSET_REVISION
+
+
+async def upsert_application(
+ application_handler: Callable,
+) -> UUID:
+ global MOCK_URI
+ MOCK_URI = register_handler(application_handler)
+ return APPLICATION_REVISION_ID
+
+
+async def retrieve_application(
+ application_revision_id: UUID,
+) -> ApplicationRevision:
+ application_revision = APPLICATION_REVISION
+ application_revision.data.uri = MOCK_URI
+ return application_revision
+
+
+async def upsert_evaluator(
+ evaluator_handler: Callable,
+) -> UUID:
+ return EVALUATOR_REVISION_ID
+
+
+async def retrieve_evaluator(
+ evaluator_revision_id: UUID,
+) -> EvaluatorRevision:
+ return EVALUATOR_REVISION
diff --git a/api/ee/tests/manual/evaluations/sdk/services.py b/api/ee/tests/manual/evaluations/sdk/services.py
new file mode 100644
index 0000000000..fee8836401
--- /dev/null
+++ b/api/ee/tests/manual/evaluations/sdk/services.py
@@ -0,0 +1,375 @@
+from typing import Callable, Dict, Optional, Any
+from uuid import uuid4, UUID
+
+from definitions import (
+ Status,
+ WorkflowServiceData,
+ ApplicationRevision,
+ ApplicationServiceRequest,
+ ApplicationServiceResponse,
+ EvaluatorRevision,
+ EvaluatorServiceRequest,
+ EvaluatorServiceResponse,
+ SuccessStatus,
+ HandlerNotFoundStatus,
+ ErrorStatus,
+ RevisionDataNotFoundStatus,
+ RequestDataNotFoundStatus,
+ Link,
+)
+
+from client import authed_api
+
+
+client = authed_api()
+
+
+REGISTRY: Dict[str, Dict[str, Dict[str, Dict[str, Callable]]]] = dict(
+ user=dict(
+ custom=dict(),
+ ),
+)
+
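+# Handlers are addressed by a four-part URI, "<scope>:<kind>:<key>:<tag>",
+# e.g. "user:custom:<module>.<fn>:latest", mirroring REGISTRY's nesting.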
+
+def register_handler(fn: Callable) -> str:
+ global REGISTRY
+
+ key = f"{fn.__module__}.{fn.__name__}"
+
+ if not REGISTRY["user"]["custom"].get(key):
+ REGISTRY["user"]["custom"][key] = dict()
+
+ REGISTRY["user"]["custom"][key]["latest"] = fn
+
+ uri = f"user:custom:{key}:latest"
+
+ return uri
+
+
+def retrieve_handler(uri: Optional[str] = None) -> Optional[Callable]:
+ if not uri:
+ return None
+
+ parts = uri.split(":")
+
+ return REGISTRY[parts[0]][parts[1]].get(parts[2], {}).get(parts[3], None)
+
+
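+# Run the registered handler for an application revision, then record the
+# call as an invocation so the result can be linked back to a trace; an
+# ErrorStatus raised by the handler becomes a response Status.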
+async def invoke_application(
+ *,
+ request: ApplicationServiceRequest,
+ revision: ApplicationRevision,
+) -> ApplicationServiceResponse:
+ try:
+ if not revision.data:
+ return ApplicationServiceResponse(
+ status=RevisionDataNotFoundStatus(),
+ )
+
+ if not request.data:
+ return ApplicationServiceResponse(
+ status=RequestDataNotFoundStatus(),
+ )
+
+ handler = retrieve_handler(revision.data.uri)
+
+ if not handler:
+ return ApplicationServiceResponse(
+ status=HandlerNotFoundStatus(
+ uri=revision.data.uri,
+ ),
+ )
+
+ outputs = await handler(
+ revision=revision,
+ request=request,
+ #
+ parameters=revision.data.parameters,
+ inputs=request.data.inputs,
+ #
+ trace_parameters=request.data.trace_parameters,
+ trace_inputs=request.data.trace_inputs,
+ trace_outputs=request.data.trace_outputs,
+ #
+ trace=request.data.trace,
+ tree=request.data.tree,
+ )
+
+ data = dict(
+ parameters=revision.data.parameters,
+ inputs=request.data.inputs,
+ outputs=outputs,
+ )
+
+ references = (
+ {
+ k: ref.model_dump(
+ mode="json",
+ )
+ for k, ref in request.references.items()
+ }
+ if request.references
+ else None
+ )
+
+ links = (
+ {
+ k: ref.model_dump(
+ mode="json",
+ )
+ for k, ref in request.links.items()
+ }
+ if request.links
+ else None
+ )
+
+ link = None
+
+ try:
+ link = await _invocations_create(
+ tags=request.tags,
+ meta=request.meta,
+ #
+ data=data,
+ #
+ references=references,
+ links=links,
+ )
+ except Exception as ex:
+ print(ex)
+
+ response = ApplicationServiceResponse(
+ status=SuccessStatus(message=""),
+ data=WorkflowServiceData(
+ outputs=outputs,
+ ),
+ trace_id=link.trace_id if link else None,
+ links=({revision.slug or uuid4().hex: link} if link else {}),
+ )
+
+ return response
+
+ except ErrorStatus as error:
+ return ApplicationServiceResponse(
+ status=Status(
+ code=error.code,
+ type=error.type,
+ message=error.message,
+ stacktrace=error.stacktrace,
+ ),
+ )
+
+ except Exception as ex:
+ return ApplicationServiceResponse(
+ status=Status(
+ code=500,
+ message=str(ex),
+ ),
+ )
+
+
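+# Same flow as invoke_application, but the evaluator call is recorded as an
+# annotation rather than an invocation.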
+async def invoke_evaluator(
+ *,
+ request: EvaluatorServiceRequest,
+ revision: EvaluatorRevision,
+) -> EvaluatorServiceResponse:
+ try:
+ if not revision.data:
+ return EvaluatorServiceResponse(
+ status=RevisionDataNotFoundStatus(),
+ )
+
+ if not request.data:
+ return EvaluatorServiceResponse(
+ status=RequestDataNotFoundStatus(),
+ )
+
+ handler = retrieve_handler(revision.data.uri)
+
+ if not handler:
+ return EvaluatorServiceResponse(
+ status=HandlerNotFoundStatus(
+ uri=revision.data.uri,
+ ),
+ )
+
+ outputs = await handler(
+ revision=revision,
+ request=request,
+ #
+ parameters=revision.data.parameters,
+ inputs=request.data.inputs,
+ #
+ trace_parameters=request.data.trace_parameters,
+ trace_inputs=request.data.trace_inputs,
+ trace_outputs=request.data.trace_outputs,
+ #
+ trace=request.data.trace,
+ tree=request.data.tree,
+ )
+
+ data = dict(
+ parameters=revision.data.parameters,
+ inputs=request.data.inputs,
+ outputs=outputs,
+ )
+
+ references = (
+ {
+ k: ref.model_dump(
+ mode="json",
+ )
+ for k, ref in request.references.items()
+ }
+ if request.references
+ else None
+ )
+
+ links = (
+ {
+ k: ref.model_dump(
+ mode="json",
+ )
+ for k, ref in request.links.items()
+ }
+ if request.links
+ else None
+ )
+
+ link = None
+
+ try:
+ link = await _annotations_create(
+ tags=request.tags,
+ meta=request.meta,
+ #
+ data=data,
+ #
+ references=references,
+ links=links,
+ )
+ except Exception as ex:
+ print(ex)
+
+ response = EvaluatorServiceResponse(
+ status=SuccessStatus(message=""),
+ data=WorkflowServiceData(
+ outputs=outputs,
+ ),
+ trace_id=link.trace_id if link else None,
+ links=({revision.slug or uuid4().hex: link} if link else {}),
+ )
+
+ return response
+
+ except ErrorStatus as error:
+ return EvaluatorServiceResponse(
+ status=Status(
+ code=error.code,
+ type=error.type,
+ message=error.message,
+ stacktrace=error.stacktrace,
+ ),
+ )
+
+ except Exception as ex:
+ return EvaluatorServiceResponse(
+ status=Status(
+ code=500,
+ message=str(ex),
+ ),
+ )
+
+
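+# Record a span via the preview API and return a Link (trace_id/span_id)
+# pointing at it, or None when the response lacks either id.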
+async def _invocations_create(
+ tags: Optional[Dict[str, Any]] = None,
+ meta: Optional[Dict[str, Any]] = None,
+ data: Optional[Dict[str, Any]] = None,
+ references: Optional[Dict[str, Any]] = None,
+ links: Optional[Dict[str, Any]] = None,
+) -> Optional[Link]:
+ response = client(
+ method="POST",
+        endpoint="/preview/invocations/",
+ json=dict(
+ invocation=dict(
+ origin="custom",
+ kind="eval",
+ channel="api",
+ data=data,
+ tags=tags,
+ meta=meta,
+ references=references,
+ links=links,
+ )
+ ),
+ )
+
+ try:
+ response.raise_for_status()
+    except Exception:
+ print(response.text)
+ raise
+
+ response = response.json()
+
+ trace_id = response.get("invocation", {}).get("trace_id", None)
+ span_id = response.get("invocation", {}).get("span_id", None)
+
+ link = (
+ Link(
+ trace_id=trace_id,
+ span_id=span_id,
+ )
+ if trace_id and span_id
+ else None
+ )
+
+ return link
+
+
+async def _annotations_create(
+ tags: Optional[Dict[str, Any]] = None,
+ meta: Optional[Dict[str, Any]] = None,
+ data: Optional[Dict[str, Any]] = None,
+ references: Optional[Dict[str, Any]] = None,
+ links: Optional[Dict[str, Any]] = None,
+) -> Optional[Link]:
+ response = client(
+ method="POST",
+        endpoint="/preview/annotations/",
+ json=dict(
+ annotation=dict(
+ origin="custom",
+ kind="eval",
+ channel="api",
+ data=data,
+ tags=tags,
+ meta=meta,
+ references=references,
+ links=links,
+ )
+ ),
+ )
+
+ try:
+ response.raise_for_status()
+    except Exception:
+ print(response.text)
+ raise
+
+ response = response.json()
+
+ trace_id = response.get("annotation", {}).get("trace_id", None)
+ span_id = response.get("annotation", {}).get("span_id", None)
+
+ link = (
+ Link(
+ trace_id=trace_id,
+ span_id=span_id,
+ )
+ if trace_id and span_id
+ else None
+ )
+
+ return link
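+
+
+# Both helpers above assume the API echoes the created entity, e.g. (sketch):
+#
+#     {"invocation": {"trace_id": "...", "span_id": "..."}}   # or "annotation"
+#
+# When trace_id or span_id is missing, None is returned instead of a Link.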
diff --git a/api/ee/tests/manual/evaluators/human-evaluator.http b/api/ee/tests/manual/evaluators/human-evaluator.http
new file mode 100644
index 0000000000..8c02962cf8
--- /dev/null
+++ b/api/ee/tests/manual/evaluators/human-evaluator.http
@@ -0,0 +1,77 @@
+
+@host = http://localhost
+@base_url = {{host}}/api/human-evaluators
+@api_key = xxxxxx.xxxxxxxxxxxxxxxx
+###
+
+# @name add_human_evaluator
+POST {{base_url}}/
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+{
+ "slug": "my-human-evaluator",
+ "header": {"name": "a/b accuracy", "description": "this is a description"},
+ "revision": {
+ "kind": "HUMAN_EVALUATOR",
+ "body": {
+ "data": {"metrics": ["accuracy"], "notes": "Evaluator for accuracy"},
+ "tags": ["human", "evaluation"]
+ },
+ "commit": {
+ "parent_id": null,
+ "message": "Initial commit",
+ "author": "01964312-ad5a-7bb1-b21e-4f055c9f988b",
+ "date": "2025-04-18T12:25:59.609Z"
+ }
+ }
+}
+
+###
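+
+# The requests below chain on the response above via the REST Client convention
+# {{add_human_evaluator.response.body.<path>}} (assumption: VS Code REST Client
+# or a compatible runner).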
+
+# @name fetch_human_evaluator
+POST {{base_url}}/{{add_human_evaluator.response.body.variant_ref.id}}
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
+# @name edit_human_evaluator
+PUT {{base_url}}/{{add_human_evaluator.response.body.variant_ref.id}}
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+{
+ "slug": "my-human-evaluator-updated-another-another",
+ "body": {
+ "data": {"metrics": ["accuracy"], "notes": "Evaluator for accuracy"},
+ "tags": ["human", "evaluation"]
+ },
+ "commit": {
+ "parent_id": null,
+ "message": "Second commit",
+ "author": "01964312-ad5a-7bb1-b21e-4f055c9f988b",
+ "date": "2025-04-18T13:10:55.658Z"
+ }
+}
+
+###
+
+# @name query_human_evaluators
+GET {{base_url}}/query?revision_id={{add_human_evaluator.response.body.id}}&depth=1
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
+# @name delete_human_evaluator
+DELETE {{base_url}}/{{add_human_evaluator.response.body.variant_ref.id}}
+Content-Type: application/json
+Authorization: ApiKey {{api_key}}
+
+###
+
diff --git a/api/ee/tests/pytest/__init__.py b/api/ee/tests/pytest/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/ee/tests/requirements.txt b/api/ee/tests/requirements.txt
new file mode 100644
index 0000000000..510e3b3b6f
--- /dev/null
+++ b/api/ee/tests/requirements.txt
@@ -0,0 +1 @@
+-r ../../oss/tests/requirements.txt
\ No newline at end of file
diff --git a/api/oss/tests/manual/tracing/windowing.http b/api/oss/tests/manual/tracing/windowing.http
index 5956e7d6a2..cad4ae83ad 100644
--- a/api/oss/tests/manual/tracing/windowing.http
+++ b/api/oss/tests/manual/tracing/windowing.http
@@ -1,6 +1,6 @@
@host = http://localhost
@base_url = {{host}}/api/preview/tracing
-@api_key = UGZaImq8.a94d2c99eab827b1cd27678358016a61f2e92c2cdea8f33b1cf3cc2afb7065e8
+@api_key = ...
###
diff --git a/api/pyproject.toml b/api/pyproject.toml
index 4f58361a5b..76fc53afd4 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "api"
-version = "0.57.2"
+version = "0.58.0"
description = "Agenta API"
authors = [
{ name = "Mahmoud Mabrouk", email = "mahmoud@agenta.ai" },
diff --git a/docs/docs/prompt-engineering/playground/02-adding-custom-providers.mdx b/docs/docs/prompt-engineering/playground/02-adding-custom-providers.mdx
index c2b3121621..898cefceb1 100644
--- a/docs/docs/prompt-engineering/playground/02-adding-custom-providers.mdx
+++ b/docs/docs/prompt-engineering/playground/02-adding-custom-providers.mdx
@@ -78,7 +78,7 @@ To add Azure OpenAI models, you'll need the following information:
### Configuration Example
```plaintext
-API Key: c98d7a8s7d6a5s4d3a2s1d...
+API Key: xxxxxxxxxx
API Version: 2023-05-15
API base url: Use here your endpoint URL (e.g., https://accountnameinstance.openai.azure.com
Deployment Name: Use here the deployment name in Azure (e.g., gpt-4-turbo)
@@ -103,7 +103,7 @@ Refer to these tutorials for detailed instructions:
```plaintext
Access Key ID: xxxxxxxxxx
-Secret Access Key: xxxxxxxxxxxxxxxxxxxxxxx
+Secret Access Key: xxxxxxxxxx
Region: (e.g eu-central-1)
Model name: (e.g anthropic.claude-3-sonnet-20240229-v1:0)
```
diff --git a/ee/LICENSE b/ee/LICENSE
new file mode 100644
index 0000000000..ae7a2f38f4
--- /dev/null
+++ b/ee/LICENSE
@@ -0,0 +1,37 @@
+Agenta Enterprise License (the “Enterprise License”)
+Copyright (c) 2023–2025
+Agentatech UG (haftungsbeschränkt), doing business as “Agenta” (“Agenta”)
+
+With regard to the Agenta Software:
+
+This software and associated documentation files (the "Software") may only be
+used in production, if you (and any entity that you represent) have agreed to,
+and are in compliance with, the Agenta Subscription Terms of Service, available
+at https://agenta.ai/terms (the “Enterprise Terms”), or other
+agreement governing the use of the Software, as agreed by you and Agenta,
+and otherwise have a valid Agenta Enterprise License.
+
+Subject to the foregoing sentence, you are free to modify this Software and
+publish patches to the Software. You agree that Agenta and/or its licensors
+(as applicable) retain all right, title and interest in and to all such
+modifications and/or patches, and all such modifications and/or patches may
+only be used, copied, modified, displayed, distributed, or otherwise exploited
+with a valid Agenta Enterprise License. Notwithstanding the foregoing, you may
+copy and modify the Software for development and testing purposes, without
+requiring a subscription. You agree that Agenta and/or its licensors (as
+applicable) retain all right, title and interest in and to all such
+modifications. You are not granted any other rights beyond what is expressly
+stated herein. Subject to the foregoing, it is forbidden to copy, merge,
+publish, distribute, sublicense, and/or sell the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+For all third party components incorporated into the Agenta Software, those
+components are licensed under the original license provided by the owner of the
+applicable component.
diff --git a/hooks/setup.sh b/hooks/setup.sh
new file mode 100755
index 0000000000..dfa7669995
--- /dev/null
+++ b/hooks/setup.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+echo "🔧 Setting up Git hooks with pre-commit + gitleaks..."
+
+# --- check dependencies ---
+if ! command -v python3 >/dev/null 2>&1; then
+ echo "❌ Python3 is required but not installed."
+ exit 1
+fi
+if ! command -v pip3 >/dev/null 2>&1; then
+ echo "❌ pip3 is required but not installed."
+ exit 1
+fi
+
+# --- install pre-commit globally if missing ---
+if ! command -v pre-commit >/dev/null 2>&1; then
+ echo "📦 Installing pre-commit..."
+ pip3 install pre-commit
+fi
+
+# --- install gitleaks globally if missing ---
+if ! command -v gitleaks >/dev/null 2>&1; then
+ echo "📦 Installing gitleaks..."
+ if command -v brew >/dev/null 2>&1; then
+ brew install gitleaks
+ else
+ # fallback: go install (requires Go installed)
+ go install github.com/gitleaks/gitleaks/v8@latest
+ export PATH="$PATH:$(go env GOPATH)/bin"
+ fi
+fi
+
+# --- install hooks into .git/hooks/ ---
+echo "⚙️ Installing pre-commit hooks..."
+pre-commit install --install-hooks
+pre-commit install --hook-type pre-push
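+
+# Hooks can also be run on demand across the repository (standard pre-commit
+# usage):
+#   pre-commit run --all-files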
+
+# --- one-time full scans ---
+echo "🔍 Running one-time gitleaks scans..."
+
+gitleaks --config .gitleaks.toml --exit-code 1 --verbose detect --no-git --source . || {
+ echo "❌ Gitleaks detected potential secrets in the working directory."
+ exit 1
+}
+
+echo "✅ Setup complete! Hooks installed and initial scan passed. You are safe to commit."
diff --git a/hosting/docker-compose/ee/.dockerignore b/hosting/docker-compose/ee/.dockerignore
new file mode 100644
index 0000000000..3a6d70aca2
--- /dev/null
+++ b/hosting/docker-compose/ee/.dockerignore
@@ -0,0 +1,6 @@
+node_modules
+.git
+docker/
+db.schema
+tests/
+poetry.lock
\ No newline at end of file
diff --git a/hosting/docker-compose/ee/LICENSE b/hosting/docker-compose/ee/LICENSE
new file mode 100644
index 0000000000..ae7a2f38f4
--- /dev/null
+++ b/hosting/docker-compose/ee/LICENSE
@@ -0,0 +1,37 @@
+Agenta Enterprise License (the “Enterprise License”)
+Copyright (c) 2023–2025
+Agentatech UG (haftungsbeschränkt), doing business as “Agenta” (“Agenta”)
+
+With regard to the Agenta Software:
+
+This software and associated documentation files (the "Software") may only be
+used in production, if you (and any entity that you represent) have agreed to,
+and are in compliance with, the Agenta Subscription Terms of Service, available
+at https://agenta.ai/terms (the “Enterprise Terms”), or other
+agreement governing the use of the Software, as agreed by you and Agenta,
+and otherwise have a valid Agenta Enterprise License.
+
+Subject to the foregoing sentence, you are free to modify this Software and
+publish patches to the Software. You agree that Agenta and/or its licensors
+(as applicable) retain all right, title and interest in and to all such
+modifications and/or patches, and all such modifications and/or patches may
+only be used, copied, modified, displayed, distributed, or otherwise exploited
+with a valid Agenta Enterprise License. Notwithstanding the foregoing, you may
+copy and modify the Software for development and testing purposes, without
+requiring a subscription. You agree that Agenta and/or its licensors (as
+applicable) retain all right, title and interest in and to all such
+modifications. You are not granted any other rights beyond what is expressly
+stated herein. Subject to the foregoing, it is forbidden to copy, merge,
+publish, distribute, sublicense, and/or sell the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+For all third party components incorporated into the Agenta Software, those
+components are licensed under the original license provided by the owner of the
+applicable component.
diff --git a/hosting/docker-compose/ee/docker-compose.dev.yml b/hosting/docker-compose/ee/docker-compose.dev.yml
new file mode 100644
index 0000000000..8d540070d9
--- /dev/null
+++ b/hosting/docker-compose/ee/docker-compose.dev.yml
@@ -0,0 +1,382 @@
+name: agenta-ee-dev
+
+services:
+ .api:
+ image: agenta-ee-dev-api:latest
+ build:
+ context: ../../../api
+ dockerfile: ee/docker/Dockerfile.dev
+ command: ["true"] # exits immediately
+
+ .web:
+ image: agenta-ee-dev-web:latest
+ build:
+ context: ../../../web
+ dockerfile: ee/docker/Dockerfile.dev
+ command: ["true"] # exits immediately
+
+ web:
+ profiles:
+ - with-web
+
+ image: agenta-ee-dev-web:latest
+
+ volumes:
+ - ../../../web/ee/src:/app/ee/src
+ - ../../../web/ee/public:/app/ee/public
+ - ../../../web/oss/src:/app/oss/src
+ - ../../../web/oss/public:/app/oss/public
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+
+ ports:
+ - "3000:3000"
+
+ restart: always
+
+ networks:
+ - agenta-network
+ labels:
+ - "traefik.http.routers.agenta-web.rule= PathPrefix(`/`)"
+ - "traefik.http.routers.agenta-web.entrypoints=web"
+ - "traefik.http.services.agenta-web.loadbalancer.server.port=3000"
+
+ command: sh -c "pnpm dev-ee"
+
+ api:
+ image: agenta-ee-dev-api:latest
+
+ volumes:
+ - ../../../api:/app
+ # - ../../../sdk:/sdk
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+
+ labels:
+ - "traefik.http.routers.api.rule=PathPrefix(`/api/`)"
+ - "traefik.http.routers.api.entrypoints=web"
+ - "traefik.http.middlewares.api-strip.stripprefix.prefixes=/api"
+ - "traefik.http.middlewares.api-strip.stripprefix.forceslash=true"
+ - "traefik.http.routers.api.middlewares=api-strip"
+ - "traefik.http.services.api.loadbalancer.server.port=8000"
+ - "traefik.http.routers.api.service=api"
+
+ restart: always
+
+ networks:
+ - agenta-network
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+
+ command:
+ [
+ "uvicorn",
+ "entrypoint:app",
+ "--host",
+ "0.0.0.0",
+ "--port",
+ "8000",
+ "--reload",
+ "--root-path",
+ "/api",
+ ]
+
+ depends_on:
+ postgres:
+ condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
+ supertokens:
+ condition: service_healthy
+
+ worker:
+ image: agenta-ee-dev-api:latest
+
+ volumes:
+ - ../../../api:/app
+ # - ../../../sdk:/sdk
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+
+ depends_on:
+ - postgres
+ - rabbitmq
+ - redis
+
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+ command: >
+ watchmedo auto-restart --directory=/app/ --pattern=*.py --recursive -- celery -A entrypoint.celery_app worker --concurrency=1 --max-tasks-per-child=1 --prefetch-multiplier=1 --loglevel=DEBUG
+
+ cron:
+ image: agenta-ee-dev-api:latest
+
+ volumes:
+ - ../../../api/ee/src/crons/meters.sh:/meters.sh
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+
+ depends_on:
+ - postgres
+ - api
+
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+ command: cron -f
+
+ alembic:
+ image: agenta-ee-dev-api:latest
+
+ volumes:
+ - ../../../api:/app
+ # - ../../../sdk:/sdk
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+
+ depends_on:
+ postgres:
+ condition: service_healthy
+ networks:
+ - agenta-network
+
+ command: sh -c "python -m ee.databases.postgres.migrations.runner"
+
+ completion:
+ build:
+ context: ../../../services/completion
+ dockerfile: oss/docker/Dockerfile.dev
+
+ volumes:
+ - ../../../services/completion:/app
+ - ../../../sdk:/sdk
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ labels:
+ - "traefik.http.routers.completion.rule=PathPrefix(`/services/completion/`)"
+ - "traefik.http.routers.completion.entrypoints=web"
+ - "traefik.http.middlewares.completion-strip.stripprefix.prefixes=/services/completion"
+ - "traefik.http.middlewares.completion-strip.stripprefix.forceslash=true"
+ - "traefik.http.routers.completion.middlewares=completion-strip"
+ - "traefik.http.services.completion.loadbalancer.server.port=80"
+ - "traefik.http.routers.completion.service=completion"
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+ command: ["python", "oss/src/main.py"]
+
+ chat:
+ build:
+ context: ../../../services/chat
+ dockerfile: oss/docker/Dockerfile.dev
+
+ volumes:
+ - ../../../services/chat:/app
+ - ../../../sdk:/sdk
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ labels:
+ - "traefik.http.routers.chat.rule=PathPrefix(`/services/chat/`)"
+ - "traefik.http.routers.chat.entrypoints=web"
+ - "traefik.http.middlewares.chat-strip.stripprefix.prefixes=/services/chat"
+ - "traefik.http.middlewares.chat-strip.stripprefix.forceslash=true"
+ - "traefik.http.routers.chat.middlewares=chat-strip"
+ - "traefik.http.services.chat.loadbalancer.server.port=80"
+ - "traefik.http.routers.chat.service=chat"
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+ command: ["python", "oss/src/main.py"]
+
+ postgres:
+ image: postgres:16.2
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+ ports:
+ - "5432:5432"
+
+ restart: always
+
+ networks:
+ - agenta-network
+ volumes:
+ - postgres-data:/var/lib/postgresql/data/
+ - ../../../api/ee/databases/postgres/init-db-ee.sql:/docker-entrypoint-initdb.d/init-db.sql
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U username -d agenta_ee_core"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ rabbitmq:
+ image: rabbitmq:3-management
+
+ ports:
+ - "5672:5672"
+ - "15672:15672"
+ volumes:
+ - rabbitmq-data:/var/lib/rabbitmq
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+ redis:
+ image: redis:latest
+
+ restart: always
+
+ networks:
+ - agenta-network
+ volumes:
+ - redis-data:/data
+
+ cache:
+ image: redis:latest
+
+ command: >
+ redis-server
+ --port 6378
+ --appendonly no
+ --appendfsync no
+ --save ""
+ --maxmemory 128mb
+ --maxmemory-policy allkeys-lru
+
+ ports:
+ - "6378:6378"
+
+ volumes:
+ - cache-data:/data
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+ labels:
+ - "traefik.enable=false"
+
+ healthcheck:
+ test: ["CMD", "redis-cli", "-p", "6378", "ping"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ start_period: 5s
+
+ traefik:
+ image: traefik:v2.10
+
+ command: --api.dashboard=true --api.insecure=true --providers.docker --entrypoints.web.address=:${AGENTA_PORT:-80}
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ ports:
+ - "${AGENTA_PORT:-80}:${AGENTA_PORT:-80}"
+ - "8080:8080"
+ - "443:443"
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+ supertokens:
+ image: registry.supertokens.io/supertokens/supertokens-postgresql
+
+ depends_on:
+ postgres:
+ condition: service_healthy
+ alembic:
+ condition: service_completed_successfully
+
+ ports:
+ - 3567:3567
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+
+ environment:
+ POSTGRESQL_CONNECTION_URI: ${POSTGRES_URI_SUPERTOKENS}
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+ healthcheck:
+ test: >
+ bash -c 'exec 3<>/dev/tcp/127.0.0.1/3567 && echo -e "GET /hello HTTP/1.1\r\nhost: 127.0.0.1:3567\r\nConnection: close\r\n\r\n" >&3 && cat <&3 | grep "Hello"'
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ stripe:
+ image: stripe/stripe-cli:latest
+
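+    # `stripe listen` forwards the listed webhook events to the local billing
+    # endpoint; this is the standard Stripe CLI development flow.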
+ command: [
+ listen,
+ --forward-to,
+ http://api:8000/billing/stripe/events/,
+ --events,
+ "customer.subscription.created,customer.subscription.deleted,invoice.updated,invoice.upcoming,invoice.payment_failed,invoice.payment_succeeded"
+ ]
+
+ env_file:
+ - ${ENV_FILE:-./.env.ee.dev}
+
+ restart: always
+
+ networks:
+ - agenta-network
+
+networks:
+ agenta-network:
+
+volumes:
+ postgres-data:
+ rabbitmq-data:
+ redis-data:
+ cache-data:
+ nextjs_cache:
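+
+# Typical dev bring-up (assumption: run from this directory with a populated
+# .env.ee.dev):
+#   docker compose -f docker-compose.dev.yml --profile with-web up --build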
diff --git a/hosting/docker-compose/ee/env.ee.dev.example b/hosting/docker-compose/ee/env.ee.dev.example
new file mode 100644
index 0000000000..c42666965b
--- /dev/null
+++ b/hosting/docker-compose/ee/env.ee.dev.example
@@ -0,0 +1,91 @@
+# First-party (required)
+AGENTA_LICENSE=ee
+AGENTA_STAGE=dev
+AGENTA_PROVIDER=local
+AGENTA_WEB_URL=http://localhost
+AGENTA_API_URL=http://localhost/api
+AGENTA_SERVICES_URL=http://localhost/services
+AGENTA_AUTH_KEY=change-me
+AGENTA_CRYPT_KEY=change-me
+AGENTA_API_IMAGE_NAME=agenta-api
+AGENTA_API_IMAGE_TAG=latest
+AGENTA_WEB_IMAGE_NAME=agenta-web
+AGENTA_WEB_IMAGE_TAG=latest
+
+# First-party (registry & service)
+DOCKER_NETWORK_MODE=bridge
+POSTGRES_USERNAME=username
+POSTGRES_PASSWORD=password
+
+# First-party (optional)
+AGENTA_AUTO_MIGRATIONS=true
+AGENTA_PRICING=
+AGENTA_DEMOS=
+AGENTA_RUNTIME_PREFIX=
+AGENTA_API_INTERNAL_URL=
+AGENTA_LITELLM_MOCK=
+POSTGRES_USERNAME_ADMIN=
+POSTGRES_PASSWORD_ADMIN=
+AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true
+AGENTA_OTLP_MAX_BATCH_BYTES=10485760
+
+# Third-party (required)
+TRAEFIK_DOMAIN=
+TRAEFIK_PROTOCOL=
+TRAEFIK_PORT=
+
+REDIS_URL=redis://redis:6379/0
+RABBITMQ_DEFAULT_PASS=guest
+RABBITMQ_DEFAULT_USER=guest
+
+CELERY_BROKER_URL=amqp://guest@rabbitmq//
+CELERY_RESULT_BACKEND=redis://redis:6379/0
+
+POSTGRES_URI_SUPERTOKENS="postgresql://username:password@postgres:5432/agenta_ee_supertokens"
+POSTGRES_URI_CORE="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_core"
+POSTGRES_URI_TRACING="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_tracing"
+
+ALEMBIC_CFG_PATH_CORE=/app/ee/databases/postgres/migrations/core/alembic.ini
+ALEMBIC_CFG_PATH_TRACING=/app/ee/databases/postgres/migrations/tracing/alembic.ini
+
+SUPERTOKENS_CONNECTION_URI=http://supertokens:3567
+
+# Third-party (optional)
+AWS_ECR_URL=
+AWS_RDS_SECRET=
+
+POSTHOG_API_KEY=phc_3urGRy5TL1HhaHnRYL0JSHxJxigRVackhphHtozUmdp
+
+GITHUB_OAUTH_CLIENT_ID=
+GITHUB_OAUTH_CLIENT_SECRET=
+GOOGLE_OAUTH_CLIENT_ID=
+GOOGLE_OAUTH_CLIENT_SECRET=
+
+SUPERTOKENS_API_KEY=replace-me
+
+NEW_RELIC_LICENSE_KEY=
+NRIA_LICENSE_KEY=
+
+LOOPS_API_KEY=
+
+SENDGRID_API_KEY=
+
+CRISP_WEBSITE_ID=
+
+STRIPE_API_KEY=
+STRIPE_WEBHOOK_SECRET=
+STRIPE_TARGET=
+
+# Third-party - LLM (optional)
+ALEPHALPHA_API_KEY=
+ANTHROPIC_API_KEY=
+ANYSCALE_API_KEY=
+COHERE_API_KEY=
+DEEPINFRA_API_KEY=
+GEMINI_API_KEY=
+GROQ_API_KEY=
+MISTRAL_API_KEY=
+OPENAI_API_KEY=
+OPENROUTER_API_KEY=
+PERPLEXITYAI_API_KEY=
+TOGETHERAI_API_KEY=
diff --git a/hosting/docker-compose/ee/env.ee.gh.example b/hosting/docker-compose/ee/env.ee.gh.example
new file mode 100644
index 0000000000..5cba09c18c
--- /dev/null
+++ b/hosting/docker-compose/ee/env.ee.gh.example
@@ -0,0 +1,80 @@
+# First-party (required)
+AGENTA_LICENSE=ee
+AGENTA_STAGE=dev
+AGENTA_PROVIDER=local
+AGENTA_API_URL=http://localhost/api
+AGENTA_WEB_URL=http://localhost
+AGENTA_SERVICES_URL=http://localhost/services
+AGENTA_AUTH_KEY=change-me
+AGENTA_CRYPT_KEY=change-me
+
+# First-party (registry & service)
+DOCKER_NETWORK_MODE=bridge
+POSTGRES_PASSWORD=password
+POSTGRES_USERNAME=username
+
+# First-party (optional)
+AGENTA_AUTO_MIGRATIONS=true
+AGENTA_PRICING=
+AGENTA_DEMOS=
+AGENTA_RUNTIME_PREFIX=
+AGENTA_API_INTERNAL_URL=
+AGENTA_SERVICE_MIDDLEWARE_CACHE_ENABLED=true
+AGENTA_OTLP_MAX_BATCH_BYTES=10485760
+
+# Third-party (required)
+TRAEFIK_DOMAIN=
+TRAEFIK_PROTOCOL=
+TRAEFIK_PORT=
+
+REDIS_URL=redis://redis:6379/0
+RABBITMQ_DEFAULT_PASS=guest
+RABBITMQ_DEFAULT_USER=guest
+
+CELERY_BROKER_URL=amqp://guest@rabbitmq//
+CELERY_RESULT_BACKEND=redis://redis:6379/0
+
+POSTGRES_URI_SUPERTOKENS="postgresql://username:password@postgres:5432/agenta_ee_supertokens"
+POSTGRES_URI_CORE="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_core"
+POSTGRES_URI_TRACING="postgresql+asyncpg://username:password@postgres:5432/agenta_ee_tracing"
+
+ALEMBIC_CFG_PATH_CORE=/app/ee/databases/postgres/migrations/core/alembic.ini
+ALEMBIC_CFG_PATH_TRACING=/app/ee/databases/postgres/migrations/tracing/alembic.ini
+
+SUPERTOKENS_API_KEY=replace-me
+SUPERTOKENS_CONNECTION_URI=http://supertokens:3567
+
+# Third-party (optional)
+POSTHOG_API_KEY=phc_3urGRy5TL1HhaHnRYL0JSHxJxigRVackhphHtozUmdp
+
+GITHUB_OAUTH_CLIENT_ID=
+GITHUB_OAUTH_CLIENT_SECRET=
+
+GOOGLE_OAUTH_CLIENT_ID=
+GOOGLE_OAUTH_CLIENT_SECRET=
+
+NEW_RELIC_LICENSE_KEY=
+NRIA_LICENSE_KEY=
+
+LOOPS_API_KEY=
+
+SENDGRID_API_KEY=
+
+CRISP_WEBSITE_ID=
+
+STRIPE_API_KEY=
+STRIPE_WEBHOOK_SECRET=
+
+# Third-party - LLM (optional)
+ALEPHALPHA_API_KEY=
+ANTHROPIC_API_KEY=
+ANYSCALE_API_KEY=
+COHERE_API_KEY=
+DEEPINFRA_API_KEY=
+GEMINI_API_KEY=
+GROQ_API_KEY=
+MISTRAL_API_KEY=
+OPENAI_API_KEY=
+OPENROUTER_API_KEY=
+PERPLEXITYAI_API_KEY=
+TOGETHERAI_API_KEY=
\ No newline at end of file
diff --git a/hosting/docker-compose/oss/.env.oss.dev.example b/hosting/docker-compose/oss/env.oss.dev.example
similarity index 100%
rename from hosting/docker-compose/oss/.env.oss.dev.example
rename to hosting/docker-compose/oss/env.oss.dev.example
diff --git a/hosting/docker-compose/oss/.env.oss.gh.example b/hosting/docker-compose/oss/env.oss.gh.example
similarity index 100%
rename from hosting/docker-compose/oss/.env.oss.gh.example
rename to hosting/docker-compose/oss/env.oss.gh.example
diff --git a/hosting/aws/agenta_instance.tf b/hosting/old/aws/agenta_instance.tf
similarity index 100%
rename from hosting/aws/agenta_instance.tf
rename to hosting/old/aws/agenta_instance.tf
diff --git a/hosting/aws/agenta_instance_sg.tf b/hosting/old/aws/agenta_instance_sg.tf
similarity index 100%
rename from hosting/aws/agenta_instance_sg.tf
rename to hosting/old/aws/agenta_instance_sg.tf
diff --git a/hosting/aws/instance-setup.sh b/hosting/old/aws/instance-setup.sh
similarity index 100%
rename from hosting/aws/instance-setup.sh
rename to hosting/old/aws/instance-setup.sh
diff --git a/hosting/aws/main.tf b/hosting/old/aws/main.tf
similarity index 100%
rename from hosting/aws/main.tf
rename to hosting/old/aws/main.tf
diff --git a/hosting/gcp/agenta-instance.tf b/hosting/old/gcp/agenta-instance.tf
similarity index 100%
rename from hosting/gcp/agenta-instance.tf
rename to hosting/old/gcp/agenta-instance.tf
diff --git a/hosting/old/gcp/credentials.json b/hosting/old/gcp/credentials.json
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/hosting/gcp/main.tf b/hosting/old/gcp/main.tf
similarity index 100%
rename from hosting/gcp/main.tf
rename to hosting/old/gcp/main.tf
diff --git a/sdk/pyproject.toml b/sdk/pyproject.toml
index 323d0515f4..6fb0d58a0a 100644
--- a/sdk/pyproject.toml
+++ b/sdk/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "agenta"
-version = "0.57.2"
+version = "0.58.0"
description = "The SDK for agenta is an open-source LLMOps platform."
readme = "README.md"
authors = [
diff --git a/sdk/tests/legacy/baggage/config.toml b/sdk/tests/legacy/baggage/config.toml
index f32346649b..d5a5f01895 100644
--- a/sdk/tests/legacy/baggage/config.toml
+++ b/sdk/tests/legacy/baggage/config.toml
@@ -1,4 +1,4 @@
app_name = "baggage"
app_id = "0193b67a-b673-7919-85c2-0b5b0a2183d3"
backend_host = "http://localhost"
-api_key = "XELnjVve.c1f177c87250b603cf1ed2a69ebdfc1cec3124776058e7afcbba93890c515e74"
+api_key = "XELnjVve.xxxx"
diff --git a/sdk/tests/legacy/debugging/simple-app/config.toml b/sdk/tests/legacy/debugging/simple-app/config.toml
index 389b22a2bf..7c2a204758 100644
--- a/sdk/tests/legacy/debugging/simple-app/config.toml
+++ b/sdk/tests/legacy/debugging/simple-app/config.toml
@@ -1,6 +1,6 @@
app_name = "asdf"
app_id = "0193bbaa-4f2b-7510-9170-9bdf95249ca0"
backend_host = "https://cloud.agenta.ai"
-api_key = "dWdKluoL.fc56608c5e0ce7b262e9e9a795b6a5e9371200c573cafbd975ebb6b4368b6032"
+api_key = "dWdKluoL.xxxx"
variants = []
variant_ids = []
diff --git a/services/chat/ee/LICENSE b/services/chat/ee/LICENSE
new file mode 100644
index 0000000000..ae7a2f38f4
--- /dev/null
+++ b/services/chat/ee/LICENSE
@@ -0,0 +1,37 @@
+Agenta Enterprise License (the “Enterprise License”)
+Copyright (c) 2023–2025
+Agentatech UG (haftungsbeschränkt), doing business as “Agenta” (“Agenta”)
+
+With regard to the Agenta Software:
+
+This software and associated documentation files (the "Software") may only be
+used in production, if you (and any entity that you represent) have agreed to,
+and are in compliance with, the Agenta Subscription Terms of Service, available
+at https://agenta.ai/terms (the “Enterprise Terms”), or other
+agreement governing the use of the Software, as agreed by you and Agenta,
+and otherwise have a valid Agenta Enterprise License.
+
+Subject to the foregoing sentence, you are free to modify this Software and
+publish patches to the Software. You agree that Agenta and/or its licensors
+(as applicable) retain all right, title and interest in and to all such
+modifications and/or patches, and all such modifications and/or patches may
+only be used, copied, modified, displayed, distributed, or otherwise exploited
+with a valid Agenta Enterprise License. Notwithstanding the foregoing, you may
+copy and modify the Software for development and testing purposes, without
+requiring a subscription. You agree that Agenta and/or its licensors (as
+applicable) retain all right, title and interest in and to all such
+modifications. You are not granted any other rights beyond what is expressly
+stated herein. Subject to the foregoing, it is forbidden to copy, merge,
+publish, distribute, sublicense, and/or sell the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+For all third party components incorporated into the Agenta Software, those
+components are licensed under the original license provided by the owner of the
+applicable component.
diff --git a/services/chat/ee/__init__.py b/services/chat/ee/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/services/chat/ee/docker/Dockerfile.gh b/services/chat/ee/docker/Dockerfile.gh
new file mode 100644
index 0000000000..7e2351a555
--- /dev/null
+++ b/services/chat/ee/docker/Dockerfile.gh
@@ -0,0 +1,20 @@
+FROM python:3.10-slim
+
+ARG ROOT_PATH=/
+ENV ROOT_PATH=${ROOT_PATH}
+
+WORKDIR /app/
+
+RUN pip install --upgrade pip
+
+COPY ./requirements.txt /app/requirements.txt
+
+RUN pip install -r requirements.txt
+
+COPY ./oss /app/oss/
+
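+# Note: /sdk is not copied into this image; it is expected to be provided at
+# runtime (e.g. a bind mount from the compose file).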
+ENV PYTHONPATH=/sdk:$PYTHONPATH
+
+EXPOSE 80
diff --git a/services/completion/ee/LICENSE b/services/completion/ee/LICENSE
new file mode 100644
index 0000000000..ae7a2f38f4
--- /dev/null
+++ b/services/completion/ee/LICENSE
@@ -0,0 +1,37 @@
+Agenta Enterprise License (the “Enterprise License”)
+Copyright (c) 2023–2025
+Agentatech UG (haftungsbeschränkt), doing business as “Agenta” (“Agenta”)
+
+With regard to the Agenta Software:
+
+This software and associated documentation files (the "Software") may only be
+used in production, if you (and any entity that you represent) have agreed to,
+and are in compliance with, the Agenta Subscription Terms of Service, available
+at https://agenta.ai/terms (the “Enterprise Terms”), or other
+agreement governing the use of the Software, as agreed by you and Agenta,
+and otherwise have a valid Agenta Enterprise License.
+
+Subject to the foregoing sentence, you are free to modify this Software and
+publish patches to the Software. You agree that Agenta and/or its licensors
+(as applicable) retain all right, title and interest in and to all such
+modifications and/or patches, and all such modifications and/or patches may
+only be used, copied, modified, displayed, distributed, or otherwise exploited
+with a valid Agenta Enterprise License. Notwithstanding the foregoing, you may
+copy and modify the Software for development and testing purposes, without
+requiring a subscription. You agree that Agenta and/or its licensors (as
+applicable) retain all right, title and interest in and to all such
+modifications. You are not granted any other rights beyond what is expressly
+stated herein. Subject to the foregoing, it is forbidden to copy, merge,
+publish, distribute, sublicense, and/or sell the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+For all third party components incorporated into the Agenta Software, those
+components are licensed under the original license provided by the owner of the
+applicable component.
diff --git a/services/completion/ee/__init__.py b/services/completion/ee/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/services/completion/ee/docker/Dockerfile.gh b/services/completion/ee/docker/Dockerfile.gh
new file mode 100644
index 0000000000..7e2351a555
--- /dev/null
+++ b/services/completion/ee/docker/Dockerfile.gh
@@ -0,0 +1,18 @@
+FROM python:3.10-slim
+
+ARG ROOT_PATH=/
+ENV ROOT_PATH=${ROOT_PATH}
+
+WORKDIR /app/
+
+RUN pip install --upgrade pip
+
+COPY ./requirements.txt /app/requirements.txt
+
+RUN pip install -r requirements.txt
+
+COPY ./oss /app/oss/
+
+ENV PYTHONPATH=/sdk:$PYTHONPATH
+
+EXPOSE 80
diff --git a/web/ee/.gitignore b/web/ee/.gitignore
new file mode 100644
index 0000000000..6d61ed9526
--- /dev/null
+++ b/web/ee/.gitignore
@@ -0,0 +1,37 @@
+# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.js
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# local env files
+.env*.local
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
+
+
diff --git a/web/ee/LICENSE b/web/ee/LICENSE
new file mode 100644
index 0000000000..ae7a2f38f4
--- /dev/null
+++ b/web/ee/LICENSE
@@ -0,0 +1,37 @@
+Agenta Enterprise License (the “Enterprise License”)
+Copyright (c) 2023–2025
+Agentatech UG (haftungsbeschränkt), doing business as “Agenta” (“Agenta”)
+
+With regard to the Agenta Software:
+
+This software and associated documentation files (the "Software") may only be
+used in production, if you (and any entity that you represent) have agreed to,
+and are in compliance with, the Agenta Subscription Terms of Service, available
+at https://agenta.ai/terms (the “Enterprise Terms”), or other
+agreement governing the use of the Software, as agreed by you and Agenta,
+and otherwise have a valid Agenta Enterprise License.
+
+Subject to the foregoing sentence, you are free to modify this Software and
+publish patches to the Software. You agree that Agenta and/or its licensors
+(as applicable) retain all right, title and interest in and to all such
+modifications and/or patches, and all such modifications and/or patches may
+only be used, copied, modified, displayed, distributed, or otherwise exploited
+with a valid Agenta Enterprise License. Notwithstanding the foregoing, you may
+copy and modify the Software for development and testing purposes, without
+requiring a subscription. You agree that Agenta and/or its licensors (as
+applicable) retain all right, title and interest in and to all such
+modifications. You are not granted any other rights beyond what is expressly
+stated herein. Subject to the foregoing, it is forbidden to copy, merge,
+publish, distribute, sublicense, and/or sell the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+For all third party components incorporated into the Agenta Software, those
+components are licensed under the original license provided by the owner of the
+applicable component.
diff --git a/web/ee/docker/Dockerfile.dev b/web/ee/docker/Dockerfile.dev
new file mode 100644
index 0000000000..719331462b
--- /dev/null
+++ b/web/ee/docker/Dockerfile.dev
@@ -0,0 +1,43 @@
+FROM node:20.18-slim
+
+ENV TURBO_TELEMETRY_DISABLED=1
+
+WORKDIR /app
+
+# Install jq for JSON parsing
+RUN apt-get update && apt-get install -y jq
+
+# Install dependencies based on the preferred package manager
+COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* .npmrc* ./
+
+# Extract PNPM version and install it
+RUN PNPM_VERSION=$(jq -r '.packageManager | split("@")[1]' package.json) && \
+    npm install -g pnpm@${PNPM_VERSION}
+
+COPY ee/package.json ./ee/yarn.lock* ./ee/package-lock.json* ./ee/pnpm-lock.yaml* ./ee/.npmrc* ./ee/
+COPY oss/package.json ./oss/yarn.lock* ./oss/package-lock.json* ./oss/pnpm-lock.yaml* ./oss/.npmrc* ./oss/
+COPY ./pnpm-workspace.yaml ./turbo.json ./
+
+COPY ./entrypoint.sh /app/entrypoint.sh
+
+RUN pnpm i
+
+COPY ee/src ./ee/src
+COPY ee/public ./ee/public
+COPY oss/src ./oss/src
+COPY oss/public ./oss/public
+COPY tsconfig.json .
+COPY ee/tsconfig.json ./ee
+COPY oss/tsconfig.json ./oss
+
+COPY ee/postcss.config.mjs ./ee/postcss.config.mjs
+COPY oss/postcss.config.mjs ./oss/postcss.config.mjs
+
+COPY ee/next.config.ts ./ee/next.config.ts
+COPY oss/next.config.ts ./oss/next.config.ts
+
+COPY ee/tailwind.config.ts ./ee/tailwind.config.ts
+COPY oss/tailwind.config.ts ./oss/tailwind.config.ts
+
+ENTRYPOINT ["./entrypoint.sh"]
+EXPOSE 3000
\ No newline at end of file
diff --git a/web/ee/docker/Dockerfile.gh b/web/ee/docker/Dockerfile.gh
new file mode 100644
index 0000000000..c362aa886f
--- /dev/null
+++ b/web/ee/docker/Dockerfile.gh
@@ -0,0 +1,43 @@
+FROM node:20.18.0-slim AS base
+
+ENV TURBO_TELEMETRY_DISABLED=1
+
+ENV PNPM_HOME="/pnpm"
+ENV PATH="$PNPM_HOME:$PATH"
+
+RUN apt-get update && apt-get install -y jq
+
+COPY . .
+RUN PNPM_VERSION=$(jq -r '.packageManager | split("@")[1]' package.json) && \
+    npm install -g pnpm@${PNPM_VERSION}
+
+RUN pnpm add -g turbo
+RUN turbo prune @agenta/ee --docker
+
+# BUILDER ---------------------------------------------------------------------
+
+FROM base AS builder
+
+WORKDIR /app
+
+COPY --from=base ./out/json/ .
+COPY ./.husky /app/.husky
+
+RUN --mount=type=cache,id=pnpm,target=/pnpm/store yes | pnpm install --frozen-lockfile --filter=@agenta/ee
+COPY --from=base /out/full/ .
+
+RUN npx next telemetry disable
+
+RUN pnpm turbo run build --filter=@agenta/ee
+
+# RUNNER ----------------------------------------------------------------------
+
+FROM base AS runner
+
+WORKDIR /app
+
+COPY --from=builder /app/ee/.next/standalone /app
+COPY ./entrypoint.sh /app/entrypoint.sh
+
+ENTRYPOINT ["/app/entrypoint.sh"]
+EXPOSE 3000
diff --git a/web/ee/next.config.ts b/web/ee/next.config.ts
new file mode 100644
index 0000000000..7de0509f9c
--- /dev/null
+++ b/web/ee/next.config.ts
@@ -0,0 +1,77 @@
+import path from "path"
+import {createRequire} from "module"
+
+import ossConfig from "@agenta/oss/next.config"
+
+const require = createRequire(import.meta.url)
+const reduxToolkitCjsEntry = path.join(
+ path.dirname(require.resolve("@reduxjs/toolkit/package.json")),
+ "dist/cjs/index.js",
+)
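+
+// Resolving @reduxjs/toolkit to its CJS entry keeps a single copy of the store
+// runtime when the EE app consumes @agenta/oss (assumption, based on the
+// webpack alias below).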
+
+const config = {
+ ...ossConfig,
+ outputFileTracingRoot: path.resolve(__dirname, ".."),
+ turbopack: {
+ // root: path.resolve(__dirname, ".."),
+ resolveAlias: {
+ "@/oss/*": ["@/agenta-oss-common/*"],
+ },
+ },
+ experimental: {
+ optimizePackageImports: ["@agenta/oss"],
+ },
+ transpilePackages: ["jotai-devtools"],
+ typescript: {
+ ignoreBuildErrors: true,
+ },
+ webpack: (webpackConfig: any, options: any) => {
+ const baseConfig =
+ typeof ossConfig.webpack === "function"
+ ? ossConfig.webpack(webpackConfig, options)
+ : webpackConfig
+
+ baseConfig.resolve ??= {}
+ baseConfig.resolve.alias = {
+ ...(baseConfig.resolve.alias ?? {}),
+ "@reduxjs/toolkit": reduxToolkitCjsEntry,
+ }
+
+ return baseConfig
+ },
+ async redirects() {
+ return [
+ {
+ source: "/apps",
+ destination: "/w",
+ permanent: true,
+ },
+ {
+ source: "/apps/:app_id",
+ destination: "/w",
+ permanent: true,
+ },
+ {
+ source: "/apps/:app_id/:path*",
+ destination: "/w",
+ permanent: true,
+ },
+ {
+ source: "/",
+ destination: "/w",
+ permanent: true,
+ },
+ {
+ source: "/:workspace_id/apps/:app_id",
+ destination: "/:workspace_id/apps/:app_id/overview/",
+ permanent: true,
+ },
+ ]
+ },
+}
+
+export default config
diff --git a/web/ee/package.json b/web/ee/package.json
new file mode 100644
index 0000000000..e98d5b2bed
--- /dev/null
+++ b/web/ee/package.json
@@ -0,0 +1,94 @@
+{
+ "name": "@agenta/ee",
+ "version": "0.58.0",
+ "private": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "scripts": {
+ "dev": "next dev --turbopack",
+ "dev:local": "ENV_FILE=.local.env next dev",
+ "dev:turbo": "ENV_FILE=.local.env next dev --turbo",
+ "build": "next build && cp -r public/. ./.next/standalone/ee/public && cp -r .next/static/. ./.next/standalone/ee/.next/static",
+ "start": "next start",
+ "lint": "next lint",
+ "lint-fix": "next lint --fix",
+ "format": "prettier --check .",
+ "format-fix": "prettier --write .",
+ "types:check": "tsc",
+ "types:watch": "tsc -w"
+ },
+ "dependencies": {
+ "@ag-grid-community/client-side-row-model": "^32.3.4",
+ "@ag-grid-community/core": "^32.3.4",
+ "@ag-grid-community/csv-export": "^32.3.4",
+ "@ag-grid-community/react": "^32.3.4",
+ "@ag-grid-community/styles": "^32.3.4",
+ "@agenta/oss": "workspace:../oss",
+ "@ant-design/colors": "^7.2.0",
+ "@ant-design/cssinjs": "^1.22.1",
+ "@ant-design/icons": "^5.5.2",
+ "@ant-design/v5-patch-for-react-19": "^1.0.3",
+ "@lexical/code-shiki": "^0.35.0",
+ "@monaco-editor/react": "^4.7.0-rc.0",
+ "@phosphor-icons/react": "^2.1.10",
+ "@tanstack/query-core": "^5.87.1",
+ "@tanstack/react-query": "^5.87.1",
+ "@tremor/react": "^3.18.7",
+ "@types/js-yaml": "^4.0.9",
+ "@types/lodash": "^4.17.18",
+ "@types/react": "^19.0.10",
+ "@types/react-dom": "^19.0.4",
+ "@types/react-resizable": "^3.0.7",
+ "@types/react-window": "^1.8.8",
+ "@types/recharts": "^2.0.1",
+ "@types/uuid": "^10.0.0",
+ "antd": "^5.26.1",
+ "autoprefixer": "10.4.20",
+ "axios": "^1.12.2",
+ "classnames": "^2.3.2",
+ "clsx": "^2.1.1",
+ "crisp-sdk-web": "^1.0.25",
+ "dayjs": "^1.11.10",
+ "dotenv": "^16.5.0",
+ "fast-deep-equal": "^3.1.3",
+ "immer": "^10.1.1",
+ "jotai": "^2.13.1",
+ "jotai-devtools": "^0.12.0",
+ "jotai-eager": "^0.2.3",
+ "jotai-immer": "^0.4.1",
+ "jotai-tanstack-query": "^0.11.0",
+ "js-yaml": "^4.1.0",
+ "jsonrepair": "^3.13.0",
+ "lodash": "^4.17.21",
+ "postcss": "^8.5.6",
+ "postcss-antd-fixes": "^0.2.0",
+ "posthog-js": "^1.223.3",
+ "rc-virtual-list": "^3.19.1",
+ "react": "^19.0.0",
+ "react-dom": "^19.0.0",
+ "react-jss": "^10.10.0",
+ "react-resizable": "^3.0.5",
+ "react-window": "^1.8.11",
+ "recharts": "^3.1.0",
+ "shiki": "^3.12.2",
+ "supertokens-auth-react": "^0.47.0",
+ "supertokens-node": "^21.0.0",
+ "swc-loader": "^0.2.6",
+ "swr": "^2.3.0",
+ "tailwindcss": "^3.4.4",
+ "typescript": "5.8.3",
+ "use-animation-frame": "^0.2.1",
+ "usehooks-ts": "^3.1.0",
+ "uuid": "^11.1.0"
+ },
+ "devDependencies": {
+ "@agenta/web-tests": "workspace:../tests",
+ "@swc-jotai/debug-label": "^0.2.0",
+ "@swc-jotai/react-refresh": "^0.3.0",
+ "@types/node": "^20.8.10",
+ "@types/prismjs": "^1.26.5",
+ "node-mocks-http": "^1.17.2",
+ "tailwind-scrollbar": "^3"
+ }
+}
diff --git a/web/ee/postcss.config.mjs b/web/ee/postcss.config.mjs
new file mode 100644
index 0000000000..d286a2562d
--- /dev/null
+++ b/web/ee/postcss.config.mjs
@@ -0,0 +1,3 @@
+import ossConfig from "@agenta/oss/postcss.config.mjs"
+
+export default ossConfig
diff --git a/web/ee/public/assets/On-boarding.png b/web/ee/public/assets/On-boarding.png
new file mode 100644
index 0000000000000000000000000000000000000000..00ec79f65327bfce14d6fae17cd03c4cc9256cb3
GIT binary patch
literal 6151628
[base85-encoded binary image payload omitted; 6151628 bytes decoded, not human-readable]
z5~sHZ{r?a3nX2`_E0YU72~J)Zr#}i1CeEDmx-Me`nbEx4e(nw)*VcpNExn;T4cyMt
zY#WVv*(EThnBacPkCwOGwa2Acjiilutx^M(wMBvy$>v=w-e#jumwbX`uD9n2Z{&Mu
zxjn1-E_bf_k%uO})>_0SOm+C`kbod1Gio^Vtpu!RU@7JF2^)sU#{~VAZ{2gz>{$|z
zZU#&(8UkDQMy_v5)s)cvj!)$NLB*AY)8ga}SFu8VW6wR2(n3c>?bGT^zvaNoGHj-y
z*_gERKczB4ta>f=N^&wib6zz~pN!(S<+0ybU0ngir#_T_cvNN)UJN#FDJrrJYb$5N2p908&%WIDXGhe$5m??D|98B$b(s_E*#Dze;?kkyv&SGp`?Y#w_VOk?NVA1
z07H&r)}hy=Y>_0VAv#nF|7C1M
zWO0W|fwo!pzaH{1;~osy5a6o0FIW_Y^|iQ`g&OMij))rWksxdrMaV@-MoOwkxYnkl
z!u?mpCW6QzWuO564?)vOas`VlC`v&&-jC8Jt;ek4I^SL(xtid;lu%)V>|1WmxP=ZG
zZt;0%_i!0gb4GM%?Q-92UL_)_pRm}p=A|tcj`p3*?xqjo`h{)B`|XQ@o9SkuuUb5^
zZO52Xj@31_R)*4CR&PT|-JzKTdJxg8+od@o=3;yFBDxEUPBhvF1fan`uhEn0VJjFO
z*G@c9lOwt~S#j2zTZ-C}f@61z3QyV|hlB~B9xMS`KcF6zxrsQGp&sc3!9W#x-6X-N
zP*7f76xQM#O}9%j$JfJ%#BhB?R2d8t_53o`f5OJ-<^1zpWp94z=f`%{Wp53$$PohY5GHdQcNX$+u!bDv^
zCzCL%!ze^XW$9hL+vv>`5>9U<0t+&muWXIPn!rk@GIxLqC9!O@kDjo=mH-CmWWx?h
zu7w=h|M_VTb7C0w;Wj{0U%t$%Sr@<4lAV7yDUhwuxJm%;_Z+$2eXpyyd;;^>zVykk
z9fOwZ=Y{X#ar!;OJsgIG!QJzoLDLoC=Iww;>~6iapEw_Ta+|z_q>X`DG#?KpEqN!@
zZBpu3O-cOKC)qdE*;g03bmhQ?z&y~+qO$OWc3&?2fgU{y{
zrS0HoyHI{9J-Dfc%t9SpIlZ%PGgI6@aA7;CD|ExYd1ArM+qDa!SW_}(n{5rT_|bM@
zsts~cm$Otc$Q@o+um-NYG3D=FGagTh-6DCc25-ALcU$bc=esHBt$W{aQWRMAc~sM;
zusV-uuN^H{8Le>oC1K&5RY-f^w%0LyU}1@nFv4grW2XH5d4rML*lu=B*!_}D;G8#eZyp2dPOKn*Lm!bc;G|Lzz2u{F@BCw
z!db5%)dxnFdoQDO;XwKc(sm@CZLKyuUbsUpw(f&gnG4lOURlBZil@5L*N0E#MXPg-
zvaV}b)z9GhAg8>-kW>
zl=Ip*;i4t|-@chT*^Q-tn3l?Pi^W1{#GV*b1i!z(&T3gKl_8dt-Sd+b;B}+GH>xO*
z1>SvXkN;@rCw=E!Pj|jb+*6Tvyi=E)Vjklr8`CV1`bh+u=VuXwyDlxAV8g0SuTe0*
z)3j2PRqXb>R$TcFk5Ne80v0M~{BYteACJ{XxF5w!NbQ}f!JMCJ5Tp_d;MIGYuNG4W
zn!3W{>lsTP)-SHEqy)(7R5s9!gX2l?ZTNL&^?HbhN*g48j@XE?mq`;IqVwze+}xWa
z9-Ym4HAn@KjejOU1Wnh#azOoup@8#H$;O-+T{;5@uLrjg@&k@pub8KS3@#h|eOkDZa?v9G@W&scJ<D
z6GMUvDY4nZp5&~mXh5W?qFFRU>D>`OprNV&YB&At`;H7vGljNtenVuaMBHfvioHgl
zGY6c#{HQhc?M>+3z3)6>Lpkg&u>+0ueonQ_r$xG{-S*86v=8`@{YA3YtEh~#e}yN?
z+{;^h_JRm^B`QCrsEkAaW1DOojd#9`duZ6x8E;3`y8>K8mUNf;`EYQq`DRx|nRqKy
ziiV>Pg1yhIf<<*;cVvG6Gr|6+^gI-InX~@Hb3YWOQ9_6_ogFQwWO~vKiLSON<%h07
z*krl8-KL~Q-EtqtmD9)V@ZrQw=W_z{K41u1_$K>+aRLC>S5UX}jEK?>teE(t${yV)
z)1woUYXBShECJc|=i7&ie`zHiP}bpazp|J#&(^Lbm6udEy4W9hK8KQi;3gd$|B5#b
z>In!czbn~zD8#Nq#ZWAF*wYCeaBTQbZ!$f6Cb6Q@R-j4&;M>f(CuOjakhhP>AFVhP
zK(y|1EExC+!*<#wjoK6#=PdW(RnJ_7S;Zc3(moe+zLKi}kNEW7YubdE2W@N9HBllS
zkEORsTZubx+!~eA+m360`A(jJJR=2JcVuM!FcSjiknYaALU>HvG$9l4+#sGVEG_Mk
zW+21k=+(-zse=GsE2U|>O$Q?OYlvK#woCc(RGWK8oGEdtXcw)c{n}BXsg8Ak>toxk
zddV!0>#z1gzEppZwjgV*zt`PPPS~}I68&5Tjv4`HKDx1K8;c5S1UxFzi~NK?YD^1_
z;qpH%jKNu0GW;Ln3B{fo+BN=vQxTG9&pv5(lB_6a)|4(t&k>J}&x)&&(*yL&>q)|g
zuJ%~4O3h@;gNj^Bk-ty;nYA%ZgB-7tC;FD^rciLBLWDQUbrQhM;?Glhg_FVG}owDZiV
z3W;0*E*2HE7qXJx8JOIZV3KU)_}2MEl+$xWriirtj?SeP{$Og;_@q4iOODIjm(QZ}
zT_wE*ibKX%JYF45KNDvcx3xUo=k^ncB-2wl
zxq|a#BC=&`+Qt|_kO=~|Y%eReS7^P@5UWSORkHuG)3jGV`0@6_?H_kr(1&iuFLGlj
z_^<#U1lIOAEWn2y%pLh{Cj}!uZ6D?){lRkdwQ0;D+4--Bhq;!&cY+8s(GpsK#Cq@;
zq~2Pkyy?!C{&HJmm-7X?P!obzO5+W3qZOPo-d(UT*_Zdi^GbQ!2f4}2r+UauPx3%8
zBpw2bthG_xX)9VC8`+XCQm%Xg4w^c{Gx;HO!}z}3S}bdaClcIHhp&pO_2m+be
z8!Qa#K~X;dQgg&Ba}dlAfUM-8L)c39wb_8TJaOX+sR{&$jLT$Mn0;x=l>6%2YnAI+
z@1!H^n*Q02vXQ|0PYv@6PA_a;OT_emT;<^sQA2L7zyj{u7;hoj?6dASFR@Excsj}0
zc}}NWC1*(aoL1EnTytpe{`P&MSe>4s+RySj-Pxf5DlmwrnhiNfVWZGWz#wBfwNnh}
zt;>(v0TUV@vn@lY>X5@{7(MDeQ?-l=BiW1Q)&91
zjqvBPT)pl`&&?`r0LwF*4>=^vc%9r!kJ62}SyK1-f`GB49P@oWnTno2w4?cX20pLG
z4{5Fjjpq08^vX^SorOU{R7%7VGcwS>fk?1PxxTr#YqiWHsAHTe~_h%36|AES!~EA#p{cJCEqMIy*zfw98fV~v9<@!8P9P7cMD-DS}REoi;*%!dkd6jO_o=KcakPE42J
zy*Xcx0bAydD$JuqiBlHsZ8t00Y+UHgUqwy)S09fNa`^3fxcwGHqaM#-gK@Qnc3p0#O>obI88?+xJ=C`-R=#
z@M~EGx(`9$@H8m>1DeD@3x&g=`@y!`rvDl0<%sf*!BZaHyiiJ2;iI%%>85u!)w;XV
z)W{z5E}IVS_T=iyzyakh%7jg?w~{%q=q^QYoLazxi?%41>~9UngIbrs%sOKL#&4+WbuHm@y>er7
zZa2CC*U)mIwD-qT(l$obcA;j2@K0*-X+Jzhao2l^EM0-ssNFpcq0BPW;|_rO;OwP%
zQ%N&Wo$gvbW-~ZWiq&AoD=>=4f6yl6X|37@hR7^4>(0~>8v(XsJHad_l7U``_Cg<9
z-Il6H4uoNLwW486u}DMkYQZXg-3&Wz8x3
z*@H{o0`?YopI}!1T*toE)=rD9T?}3^2c~^{Ckmmeh3-LL&
zWR{({hi~N|`n2e}+A;yRU$NXzy*_5ZpstQ=Gm=zkK30>jX7_R-*|OR@`C2VVW-++p
zeRZ8OH-nLYIVaAob0IyAMRQEO+f>I)H?zWzBAlUaRWM=n40l>Enh7A(#y!_@wI}V-
zu51ihd@*4XLtZzJdvZKesgHc#Zm~TN
z4~{j+Vlf>Hz(GwU#Mv%(ZLvZm;B}XAjG$eLPgBUg>j@uHDT2C>fvSf!cl`i3_&Go7
zdQP9t;+%r9Cj%e1-stAoCW`L^-kHEZ
zuw9kIKskNHi{x4p$O~g8A>H8f?05>)lr!2Ls{^-~i4?9xrXq&Cv?vfH$+8^;odaEp
zyBICQqk7k|Z6|N-vB@(~2x)+fq(A~S-e7=IW6Mc9D@e%RXQK5Pku=I)HzRVFMhg{SadXo*g@y2~RkhJ~#-GvG&oj&s
z8CI*yjCQ%_T02`>Ek(fIJoMunWnnt&(31M9EQz%2%tdMoXVIOZ4$H842GK{#tbz1U
zsU6>X5?y=XZ4@=ioeqd@qQY57u-1GeCzOP<%}jS&k=O&+5ARKT{u4q$iftwT)QG?B
z$(!w$C8CxydUdWPv$gOQKCkA(rw5be+c@+p-9JLh(tIxiK3zgK98WIx&Bdgd{xKn|
zzZzs6ofu~w&96MOc7u=`!s{SG()yV3V=m6SMs1#jPJuSiA4?!4#k{>&WiNHJ`uW7?
z?-{I83(Cuo958qUs}K9Z|tcK%aJ
zEIKHnAcby1H&&~}bta+uiXHq|bZT4W%$m~%>a&|t>jwCp-2WW>(s#D4+F*Q#3`}U4
zQOQwm{rJ*+o%%%#$5!*DIF`6xTiV~tezY~wz?_3PVTmgr5{37ehaGHTQC=IF#z+P?m5bEd3iD&{-Cb)l1F
zwd0CW*X_San*hwmSIIHPLSr>2(&<(m>L>-IK>xR!yhO{WqB}oO83vL+f_}jh&$e`e!&NT4!Dx^-q16wg5g$=HSKYX+2u7E;vV`Tqmtt%ZQt1o
z6>*;CFaFk>H|@7hBr@*M(r_mkBBhufbAMPRi|%E;O15Mu`D3g4d%n$${>9{zfy&Uy1_<5w^H}lr+xPJ1J9kf@2Zoc6Z`5(qT-jc3AIl?g((6MbVapi
zi$UUC$OGaz6G>h=P!rH5JAl*Z!lK)*dm}JqzUs1KsCM4M!((Rkie(q^o9*q&-IC}u
z2iI+DWi5f&vUL+2M?nRc<}Kf<@Lf2gm`jCHYpvj9G4t>itC=1&Qe_`FOm+I^2#xIe
zTz+~&T-4}N0}NTYy_|u|_-pD~fx_MiRe3;+$z1V0xJR41=fJ6TSXh&CCUFT^iG`^E
z_9*L{vJjd?7w7@(K8%C9=X@(xD;qWeMn$6r>Lfs`#4>RkE}~ID1n^ufZFYmS)q{l`ia{dY-?182TvG!*DSDDRqfv5ONSJJ4n~ix)I3zkl=$`
zXgvJ~L{FB|oXLlyB*5C>3?+=wOlgMCDI(*zrC3y0V3
zi7c>8zGiUACRjcLlMH%+dqfnCOqkXO{e`9YUSIP@gl<9_z2wJ$;CkR*?yY=FyO0zS
z#47t~K6-^@ai5w<_xy+B?{MohfadfnfWhzxdR>W5U9hbXaoz-~_K;9;$_4ULES02A
z2@Riajc3sqgDK4sIWZe95B7!Bj&55UQ0lL-n9WQ!1C+N)jlhj|U7qqg)0(kXJWPrV
z;*>T$a!7Vpt-kYE5QN^pGVCm^PiNTshd^(jvH+9E{>{eOKqZbPbm~|T#G0CGi$wg!
zN3`QS5BOD!=t-5mkU@UQw)|emx|hXkvud_jzM33E$%(A#fC7#-#m7`xYzP)8l>Llb
zeiPNyLZ7|%=i;lL47mGgwdm3PZZb_)<^7nB0IPQW`Fv}&q1+`O`Ry||EUFybL^r|n
zA?Xzj7E(Q98=81>fFfj^;zm+>P-(jr>9a2`DQjgDwg)#bJhC`;?7^8UeXGs4ve%G{
z0X%b+?1rI4@>8e66NX^r9!~{&PuxvC%w!tW*xqrz1#MdO=l962JEc;~z5LGyAC(~n
zQ7sy7(#~kdtHV;!Zs&fQ!)xJO?b0d{%)wM+pBUBdE0sv;#TCrLylI6rZ0uQudGvIv
z3N~iiJPe}0oDatE(G+e#`B=H};lhxNy(KNXDoCGSZ!{cxt^|O#QxXv1M
z)Vm7>OA|3{RX+@XmYyJnhjbp#X9YjEJDC8D^qUnYSO_V<7;Uqo$
zbx34J1H7eIhJj8JXsy%++r>6*OwQf;${9BTj~A_-LYdAb0Z+5cfnB-6Q(zENrM
z7Dxn^inI{PF59KhWQc7qtyHuu#LT9;k5&b2WuF#H12FXZpR!X+ZolHapGYVV14fI(
zjP^F}LTl2C16w{fGG5m!_J=QS1&*#`%3)M;^tdQD7#>h4P(24FzUfZZ?t`4kfH!da
zGAg`9C7Hfle6GYp6P^o2EByt**T8}!AHgnqwiSd+%b8Y
zTNme-wrzzLbYkKn+nLTr=@oCVo)%a|AuZY-R_M#>(q8BHu-~E0*yYXjjUhO$hAIw<
zA`pn}o1I^@J_u$WglGrAwRfsnZq-YtJ=!;=`_=Z}CRznKOP@Sy-ce?jtPl39*sPAq
ztk;h)=sl<(3)&~ZGx5X9q_mh1pY9Ek8m1NYXH{+h%9(XO1F5j{Ho8p=%lfbZomI5%
z%E;2qCs<_{kn_)X^YSC6Li_HaBbTwx97gfm=FQw)sP*eyx%pg8K6
zm8?Xg3;kd1S-`t7=XXO&x1?(B@*q$&xq<1fhwxgJ#%z^yB=d^;A{GtuN)AkrQ9=O8
z2H8_g&dA+4cj9TJibG9fujEihNsb;A2BR?wnsC*GT7#bB;ha)&Ag8Z*8QIVvN4f|b!|F?;C
z=tmqd0D=CbAcOu^D-8!zfB);HpCf=%_2~MFhCx6(Pt8xXJksZDfP)x|Q5fP#*E+MV
za^v1|uBR3ma*HdAs*h~MK}YP1cdAUu`lT!~%9AtO1ac475dd1}u1sRAdf_#WsUR;lxFzjCc~)^p;+DpZ
z5b|_uCCRhf7&{Iz%b22Q8S*Yj0hfk_->(6~{||b929iKO{;fJ^*W~ViW&g%?N@hm%
zdfODU+4?p%G8U`NA(bMhKQzTp>RP(IBAl$_INWGnyJt}!YW@}@{Jq-sR5_Uwl$@cP
z-hZ>|z1PzgbFh^xo@7Q(HL=`D6*spH9QxK1srrT^PBcx5U{H{UZ4{2+{E^>N?BJd<
z^|rrXU_ZNe+t!Z|WZ|!_N$&?WQ2y@Q!g@xmJHH$896zr-A&Sz^3#iJJDRD8n`95Tn
zAqd-NQ*1dU%X}NyHe)RO$#U+&dKTzLowlizy>3(Q7~S2Ee3
zl-x~ak3U{hXZx{`h6`<`yOeln`6LzNvqsk5LY>}w$}pq(EIMs|=STfy*W&pKsKO()
z41FX;ySY-`L8T=0kX|OqHCb6?@}Q}FvF%hUnmRKLa|(2dD8j_P%;K@YIbUkQnx0#HHDqqC
z(N`HX<5H1|g6UR(hbJdnx892sfrF)uy@g@KEKn(lv$E0?xPdkA|pV5wx1
zlE-|A7fw|{<3e#bKUKse(KRQw_s$QJRP)v`ARCBW7y6U^*BF
zoRhCxixVzSDSAhaMEn8UPSv^17%2=zf=wulFA)Xc1`tCFeN@ALfM}ygSBUg3?q744
z{|Az^+eJ!7^lk13C3U}ipaUHGSsnD5{^xZ%`4Q{_h337iLUgZXH~zLvD{{}Q3%EbU
z2vJ(KVtZCuwDnBG^X9q98P85S#M;(^fGLeU)*$H8jK9C|S}5ZYXuMjsyMAjoHYdKj
zL~~2ZYU}9hdp%W`y*;)_%`5@+RDb#Yo}e8QuZQe@?qIyaB%Iq?mt()Tu!0)b+KObF70UtZ(FrjZy}#GJ
zR$RroqMnvjFGcb$2Vc}eC1oRaVDD;4iY{ml&-q^B1;vl8N+wQ&WL)$i5Ww{NBf1
z1$|}Hjol2Gkx*gTp3pMd5SWjOr_|(#6w=oH1;djXP>xaVqfhB3BK;_gL(EsQsHJL)
zWOC}n4wuijPHR6-il=*rHlWczwDc<~-K2F5uU&8RnI3F_7K}LQYEUo=!kI#aAFEh&
zsZ??j%Afwf^R59}djDuqbZa;nG00RlUd;`BfHpihE1y$=(L6C9eaf)b(gH@;1Xbhf
z1iW^I5d-hnc3M<4?8}qcU$rrIc?3~g^oADB4>HVXT#}V*r8k3%OW|+hMI^Q+f!o%N
z;JEh6#rG$yU26+(lvJ_av
z!#i%8SZJpeXY{lI@KbgnuH7@FI&{D!HG%5&Fb9Xv~OU@Jzw
zyk*hUmF6Y9@&`}h$`lN2nd|e^KG}AYVSR>%2T{PHf#p_b6&U6xCA{52CE#>EGZ7wI
zHZ_4hny6O{5M24mzb4cVIL(*IO??*h0Jh@q!9bn<3+F_tt2>i7oLwDN0GUTNwc%%4
zabuV7M`X`*2fOm6qcn=2XvtBtYm?)%_Bh$Y9?%g-Cs^8E+i(8sw>G44{%@G=7f8W~
zgTd=zx(74^0{RC5V8ZhGwy@HIGwVDHn41Sg2WhH1`lX}k?-}7@nhC!+=SCP
z=iQWPD#0`ahlW>i@Z5!z%<0WPdy6+7QPfe`t3vru;$@8zp586n}ngS
zn&eAFBI
zRK9|0&EB1W5iAC7)&G4LoVZsfDnAz}{m#8wT4$yojX}Ov!Ys3X0`7C{*(N)`OrJjN4!uJNrNHtGDut)Fc`>1UIXE<3cJ7_n!sHyu@I
zVwODVI}UbLFi<)hA1j##4?Mz~&H7XTzThXX|HE*b{hZCrU(PHiO3YQ5XG2s^qQA>x
z61JLqB%6?oT%CXdZxuqM#QB1_%1W5=hhRSV@@bi;NA)I;S&hs(=OPIfepN{EljZ$b#t(ku}@joq9dEC;`R
z2ooK1z&y2MzJ+;8xID|LD=%uqa1l1J;LZE`{Jm5E`?Jq>Z{FdbOaI-mKVihqmL&+c
zx6)4eI-Ae+0
zF@&F1t}JjGj*uO)J-;=1_0h4}YXXo=uA;<
z*r|6g6pO*lkMh{;WAHhX@=K?{YTgS1^-`z5nO%J@6=cdhd|JB_$NT-n
zPzB$&6ZnNx7SbyLwqwJFlW#V-UJu?EItO}~`Wnjp!2Zp*>xorxlSt;-=C^m)yTl3w
z(%-B_$(r$AE`CkC={piq_j7z^rslkm8*t6w;xQo-KKt$W?28~cmbcfW-|Y2JNT{;u5F7g
z0%(35cJ^Zc;7R}5M7S}3>J5CVZ-4lyZGVGS6C=_0CZ~9d_EP6wJf8hFXQ7EOHdm$0
zERYj1(=Cv-&%n->jlC_dc!FYW!UvW`yrY_>W-c}E@qQm!d?HhV%0_#)?Xb_fm#5Oc
z>%1>Pm5Kp%sm!Cq=noFBY|w6(53WF(64YW_`i9@@Dj+}%&1?N|4|o%H`0n8&=+y0l
zk2*Wh_xiHU)ATRWs!t4e3ycDmHxsPip!+z|QCgDOW5M->6+L%m{GNn9J8iT$d|%F5
zBkacD(_|OIyLA~~-8zucLi-)FEeB}JnhoQ&GzCg5LQ9=h%X*?85?r+`jhhzKO|JrR
zQ|fi6v@0h&cnzeh+-;nn)*Didcfa0|L(xz!G1+T0Mr!Z!l_R~0LW6uhb}F;4Bu)Th
z5_m`%k8<{!SgAx!XiwKxDA#IuhECD#(%nxcf@qqnhgS9
zOEZJ)Y*a?RCbIOLmKtqJa!9vy_OtYxS9t9FlutiP5QB<(%A$P5XU&8BzNviV*qht8{3&)alB$Xn>E`-E}{W<)csf**A!^E=;_sdtq=7asl
z^$>YcpLE(~i=V9ANas-<7xZy@^#)$_M&U1HJm*$DFQN7_S>$zoLfX0k_m+hCCtV}`
zL`H*~r^Y(78f(w!Pc8K%8;;yUv$$IjOGED-yY(h6gHJQxEtT>7?aJaS>|6g
zrWt&5I4`R0dtayGsx6xQ%GCVUfD@Het)QC^2tPHWssuA0LPW3#CClAPl)FHu^w~)d
zZUh{kRSH!9LtL()D;d*~9m8&NCi{sn(XeTuPA^j$1blcQT-)(#uV3c=)oR=vxgV|9THPpR$&!(MWX36*R0*@I@zOKD!4!`OZK5l-o?#y-w38D2u@61ux
zP36F`jdRO^ePk_^-uY^yz(AWVtJj+&B)h>U=im*Ox|03OUkujX4DBPG_^M7Q58n%E
z-W9pcduMN%coP*-Hb0{9fD%b8?>KSmRdi8|O5ly+2d9P{s?6Vy$@iOqiUp0gg9`Ne
zQc&64HfY4{MSs>Y&6yl$jTxgLKoA4-5#273r4b$J2iTSQh4kEH`gIy924pn#ADDwT
z0cQ{Q9Gneov?6Z$D>mrA;T~}a{MF5a6JlEC;M6$xdvg|}GS!hwy}2Sk0O7(gt0ZW@
ze!kM*5k&kbrY>?WDbXL%D~f){-&>yC(&>GyV0~=*vqq@ouD;%uZ9r`4OtOgED3~zaWhYk0J~Rn^hdiRXQo
zqLX}5Oh@DeX~;Mu;*_DG$jaGX>LfOm>*`Cdbiq}hM!jbWqg?lV{yNbezPDNLA}Z2s
z4~DuIy%ZSP@e`VmEEIe6jP#vN{INO+g0jtj=EQMT+iXeVFTK0zu1}uy^e{W#{n9JJ
zuH>mUnWja#(zx^E*ri3o^S5pXRWCf4Ez@~(i|q#MRr{-_o$^m{ziyIF`Xl{w@_oYT
zBVxRlO0)Fyf#vKMltwZTc20tfp{%cfpKy5e6)YiJz2qGF`KQU=Hg1is(^vgUVu7i4
zxvx$w6;`Q2AM{t4vTk##5Gm_|;$Ko%K$r$pcGOcF2zMvD)9S7H@Vyluc2$@K=`Zh+
z3~(!}*^LX0$x_S&e8jf)-tCiYHMaqEUFSuov{x%JLZPFv(8R@M=V9+3bBP=!pt2gh
z1_$ip>5z3_)JJYs*v7byQ4B9zP%PYgrCF{0%-9{3$gYFr
zvfeAoyQ`VIV8qqT-?~+l_zPzLgGtzKyco5wSxxmffPaDW|JTK!7n^`z+WXaS6%EDx
zKfb;@tf_SSdd7wY>C&ZFQF^b^I|v8}0-+b_T}p`3kzS-1rAm!-sgW9*AT{(T0U}*G
zAtd?2j5EJ`XP*20!*k9{auOcS+I#J_*M84F*yf|*Va>5=#Gcd+Qb(8lQM_n!zr{~$
z-LoFsTp}S`ft`>{Q$A_Wz_7M$DPlWTKogCBWAjwL=Ug+x`C`bbXk(VZ$*5eQ>2?g?
z3A7A&s2p;obkI;}HSR0#^nKd12T$l6~?Dcw`0UREO3O_{@NOWwf*#i&UT%=#9PxNgz;X6
zkPoT3$vvK#KD;UwRBIbmahju}!g?J4>90S@xvv=KbeKO~Ixs#fz8-auB%%0KJx1Bp!D+*o
zFj-q8md1-&@~u*N1Jk|m51YGTdIy6`mbub_WZJ2fDuchRm})5TM7yYmY=T)n8R~P&
zim?x6#H_17f23d9x2W{YeFt>TxU*bOWxx{f_FPB_)A@4)`hxH<|LL_UADsHOYmrL$
zlRG4|?AfBz{v6WJNYyn%T}@wo)S+-IePY&1^;#^eGKBZhOxeP8^<-78lI>Sp4D8nF
zU~BHo&FziT0LCQ|_i9aA!hNHX0K_?X=cn{1^!#0YnP
z>*QU{Ub%$r7$>Hnxk*`Wu8qsOxWfYlz()SBd!7a-a%hcz3E!)I5cU}!C7}Gj5?CCl2cP|LoAsBRwcNcQH%0pk
zW)KV3evt1azr_Uvya)UI`3~I03SjnYEl^JARVRR!%lK|;EhOk^R^>JgRsE_pBWiHz
zW3zOFR5>k2>}$c3cf%nk!Jt-N`0}!7&Y-Qy&7dtiU7+BT{0FC~X>By|is8xbmZ%dL
zILuUU}mKO&)w4L*k0RpL0mE(g~=glMWx!0a=lvDet|+
zX14r-u=6~O7Bt+Nd9%9o6~byN^R14#^n!1^xigaQ7g)uacEg)k6WCXKG_ug?ya{7&5do~
z+B*`mS$*>==}M?P$qj|T)nS6L*Bd^P*vq=i?Rj&qsBnnyGU>eh^^&~X-x>MK93R6_@hkPWtt026Ko
zxwc1roC~-TT*))+hx_(*I$p?`a0!kc!4*2Reb}|TzOuYFs_^vhW+*
z+C!7zQfBt5N=EX==qiN`u)0=>FYUmemgRU=@RLUaZV42eHA4=K1*fjnwt<*g21tg~&zj3&B(5`GcTob=LIB6p>
z>2H*`kyKh6kgS1}9m1|;B~AJNhE%I6&*pya8c81|6jAQw%P@E(Ua}M5U|HuEU?-rN
zE^RDC{7)`6P#|#uj{0Yo`0L(A)j!sXUlWa~eO#|h#{j!UY9ds9Jg
z3YD0Nv>v5eTGFy)tTjz3hqY))na~7Fu)!OIlJ|Ny;^iCtFwVwXG(ZE=?Tyk9=Jm-J
zE%VM~S02=4zuq@d79{(UqACBLO<~GOc=7SIn9m=-Ytfw&r(}-zHYMkcq$+}Tj0WyZ
zYdaM`gmHTnt+LHeg~oW21w9~4e)}$UPG!`(9fnet8z+>=WS66na(x%f0xTj1I2ar>
zFdyu74M+<=`e4C*buGm)JNW!|p*u_ay|vQ@e6=*vHj)`}HjlVpzuy;`?5vl+H1+~~
zxLosap;DyVQ3(Cg<$@ZSD59C96Z`SK%m6wG%a;qi&f@H@o3OKseXWR71DHSa$+%}<
zd5`_+SS6CnNoLuufSIwf20q*bF7O62?y(*j8{|GjY{+68O-?aXE4X
z(wD^odeWV9>Mh&hI(+;HEes)`>Z8p(Hv3cq@^f`NZ3d4mI|TPbBu-6)FGKh^6#_ER
zDX7|yD>=WsVJm@`Av_LrNbvbDEdr;RFnA`0l!?9MPkiUr7sm6(>jP(yr|`=QH0u6A
zp4_p&{Q^S4K4?&Y2DjesFn$RGWk4KG13g4fgUi^=!CwG;GI)mHmk*alP_#PUqsOmv
zBc-3gZAxBp-JD;|Ej#W=agM#_Bm?j`FS(AIGq%MuK~yu}F3D^O1X%JWK3uhO3?6H;
z%H5`6NUWyjZvGO7!AV%BY%dftHpI9#Pob`oPl|Vabwg~2-Z3VQdrNR1bf0kK-DHgY
zV3FSbmy#l4dlDiZ!W9z|fUZ_*+cSs`w8=92p~Hi?tOHfvUG#qGf@@M
zg@bHssrZauVO*x7Y3V}!xVS{h=zHol^XZsz$$FnEg)8?O`eeN@&ZN(oke(}+uACpGwdnWT
zcwqAeRm)SUe4o~t*%9$j;FOZ9#3i@Z*u=-Ca`Ixnv4*}=sE-W{_0$&D!lK4-
zia#{9(nMH4JhQ8+UTP{zb6ZL*G~!pK1o9Na+!Q@{WHiMbdI53Luc-eH^shnxiLI#$Y;~;qq*Lssc
z$BhT^`^BBSRPWn2g|ddc)$%E|!@m}UENA5|H?=Y@L2fP%qPb^e$s;EBK0H&!Ro}y~
zc`C{vGc$k%R7}TT_bN=0OBhhin>(^KqXNM^8w{ag=cr=(RR!RhTuye(79)FRLAKf
zxJ4wrHDAblVqyob4hMR!mG)&DgR2dD1i#SnjQs4vn0FViA9M7uO5|(bbnOWUb4;iI
zl1gWlD__UPxPvruMQfikFH602a5PQT`hkP>CP~p+yIH}O9?)cZcf~#9pEy3qTi2&k
z{$CtUp);1NXLSe@cafBg{TA$v_KkXg>GJ?+t=uR8zOvXn7CCf4n%lD5?iBb8_;
zYDRwpJ+hGve{|`>-i`6x8od<>bFPizA!T{Ek0hinY^KNSN%Vg7oM)L0K??juO9EdH
zQ0(pL!j8mbw1R>>yINXUDEGlt8``4zDEUTtK^>yUaUbw0PunRp4*mT}33KMP*Tvt{
z$6h|!@IHL$!kj;4w8PFfl_3p>o}92!^=3?nL`&Owhd-Niv>G%VV)}ONHCKy9!A5J|
z1j(Dwq$$5m=#;Ug2sb2q9nlpanWEqI1>Y>*$!Zli
zZaVFut{rmv4nY90ONxu
z`5V-z`Ytw3y#P@nR?7SFUlMNmz
zuvUfcB!zJIY>b-ECXsw>tt^+)6f
zQwzGgWw-i$&(=yCTf!wCUfrOVUl_>xeJY5Ar#mTNUr$<>UkgdxUGUO~d8%uUz<2k(
z(5qxGNw}rfv6j-P(lK*`DQ_<1C#;5h%+1Ftb8#oKVe9n!6PMxcI_s=$Xj{^_|Ncm{
zx`S$K(}%6VnuV=7$GXKkhg@UtXKh_vCNWj4e5G83)kBAZ+*0|~(?fC32bTQGgu|f!bXCMsCu}$B;
z6WeZ?vVGXPA%rU@IitY)<2+P%2^@n|j^UNgVxTX9WL*VQyW;+}p-a
zslGpGQI&c0%FwQ71OQss7_WLTuS8R5Z2C&;T6cpb)sS+rcI>O^TQ#?~wwy~J^l~20
z=r1U8UU7aO!W`G+P=Qe$ZdJ_b*SJ3M;FITqU4=#fW>ighMCEJ>i$SS%-1scL!zP1-L-ipr>xI`hC*K08^p
za($%g+dNv-{NAP?t{Y3AewbM3W%^DpmQW*LiN2)6BDKy9MVJDfJeRP=D1FlzdoP*c
zq^EAi%)-5A4w5?W=nH{eh^;K_7(pC5($CwfPmE9WWWjE77)<&e(DXM0@c2>lJA}rv
zr&4EatAsIRW3AjBB8x>_iWq;S!InP`0H@GDIPWo@to{u-*jDc^A}sme{kifg(#w>5
zs`EGR#jF05f4mP);p<_C$8S$A_6q_O2*FpCgVZQ`Z^tEuXwSNGJ7@;L-?`a+p~GR8
zF8?HXZwDo76%vPzZxFk9FK;m$l~0&vSJo!_)UAA3w@+GP$C>Y>i_V}pEZ|_NR2fB4
z2Ve-=sy$s!LvL%x&mBCaMW4q5;I_qa7_0#r_f}nyi}3B5i@c=bM-*}AQc
zzsCdn8$yOZ=d{1p&1Q8zHO7^Yj;M6-29iFK)xXB7E|VQ0tt_djr|!@{sP-*W<B!JT)a1fsWie>Rqk-vWJC8iLx5HbNxcSvjq*~OsX8HR2MzXm8b7RA&Kh)
zwIh#w{h*_n1l>$@NJ_-YA&Rl@gI6ub4V&5vG12EX;f&h4-f6Wl5`6vE7R)6*sVL;+VBtfMUtUJ6d^XL~>qKP*
zjWs}SSrfj_%vhVz%^+GlrXZ5}(yAji7n<4oa&3TL`DzUPr^0A>nbn57&cHdW2s!sZ
zG4}`JKy>YmIm@=CXX8zWh=Q2mqeI&SCy6P5VL1Vt;U1aQ7MRGy!+^aQX~lrnL??
zclt_y>KEHmN1Yc{*Wu8|;MqakPD%ZQ>c$T`<>vybL@j009cxy^tz&o4(`(srsSY$e
zu#}wwsBhW{)c9luwmn9S-cMB*e7p%f!D+TSHqTZCYr5j-y7?t?6&?na-a*pL4|19ONXzh`wEl)`T4>s3CPB@i
z$uKc4C}GZ;VxZ~zo|^NNel+(R?O2IrjO`DL2dWlgxtj7d)xk98h7Q?Zog7o|zf>o%
z_DWs#;er+mI;{QF-sX}68#otg{gY*)LWCUtx%M}bup>`O2UTJ}77LQP{$2yy$}2}p
z+GQ#nVBPi*t>$8~p=$qu_NW)3A$Y
zQzA>nB?ha$Dxwz;HbT@++pMz}z08F}xpd1Ahiz8BHSGClP_6nk)1BrM-7s`twal0H
z3TuM+O~{0l6RHflg6`5PU~gL$s<+vNISGn0EPM$%t;*zJb>;%1+eNiffq6g4Cv6*E
zJJm(A_G#UbXG=B=RX-@D)Ym8-d>
zukMg+NFnOh>6Ff7Dm!6@!&EJ-(p@w>xubQN0e6gOTB6GL5ic2)s+iJ_pPkS0cS)ma
z8)o@Sd7F$T5vD&hV>1ocjTX^?Z9&||cRJrziPnt1MeJp@V#CFkO-sz@
zkfs!)3y+{F4imr(YmfT=?p&LSxtAt4S>BL5=N@Ucw+`9fasYYz>L`&{j-|#fRU1xt
z2In;!H}nbBM7c91u0eb=A)c4)XfWsR7Hs?+TR7+Ow~L%K@mWmw-O+vVBA@>;-fRZb5Iux9YsF
zd_)fUe$X}zHwZ5ML>V}G9wi}MGkjVRj`h>lW9YvHH%7_AOZ~xc!`hiP&ziTNs(X#H
zF423q{~xyiC`RbJO~zkN#PEioNap^$NiGbjjao^v~8BYVl8B%pcwI|7geI_=ORW
z?(MTgOgAGooAV{A#v7`^AR+-CE|
zgC}1~ywkyk@p);y(A^%}bQgwjXF3N8kC3hm*X6?L4XNb|@g|+K!6I
zdbS@GN6-7`$Z|C|BKA|2YatoNPqI0NTPWzWHvHPkL(PVDg%~_v4|=H~2(`t%5JNiG
z>7Z5i1P->sTBQTLEOxvJdW&TX^FP`DMTK?WdkX#?9R3kCI$>4Iy%>^i*o(TvKN$@2ZPGLVE9ORn*o
zTbtN;Zsl{0eaYxM>DFGJUj&GQS-tw|pQ2-2*Q6#6(;O&Y4=SvB0JY;Qx%;jm_DUW>
zlAke-(JVk~PoO21`K#KXoDgj+xY;4XaXHX;bXw$1+E~3&|3GWI!G^<^DpAC#-faz|
z2)YvDDIR3K@rXAZ0zE*_^5g?%)bJ0~<2C72ysvvu9T{+b($-JWopPXk>p$XyCJh1-
z{(tHJ58-P5uhxq+0RocS9QUWUjs{oz7o~8x$T*C%5x{!?2MPbx#mC7BV~0fw@C{BW
z(F(p1-@Z-Eq#ACU6GXlXS%gDXTygm`liv6(CZuqCIM?Hj5zW25^rO*bAu$=RMX>})
zI%jaGy6>!5f+%1h;oz>fp;q3c#UZo;2SA6^mfc5B^8_sbJB!2A1y6d)WcEMR;4{$x
zGz8tPB(am4v30TR^`mXpJFx>*-l2y+x*M00YI5Ls#J1XKTL5ap0m3dkh<;NUe19NA
zxA`Xdps0)}U?=Hxpz9fp18L-hym-#Hh4Zbi4oy}S+x2_cgC8Ts4X>%ZY*sypQ`bCv
z6h@V)Gg*J68EuD&oZq2UEO-yD&4MdRx@3o~C-6LjtrXcRH;k@5bA}(hmLB)O;
z&r0Rn9nv<@4K(aRYfA+ez1!G5Sa2s~A9ivM^P{>B!1X3{GR!56(Q_V!Oka4W1wXX$4)QWezyrbA<@mg=pQ>6yw4q7_?td1N&07(Z6?fZ&hqjZtj<4WVM-m|ZO-)pMTk0#HmtU}Jhcus~k0h#@i
zd|}j6+`B-DYsEFvSoi*7$wPIQ+Zzj=7rSM!9XZri9i(t9&$w*&=TLAvq{YRLi1cb)Yv>>PlS1Vl)B8_Vgy9IP7Z{o!Xv
zZ`{M$#mcv}NjMajG9hG2Jx0bgU*zB2Rj~gd)r6VN0KVOnxzmmR1{Sxh9co?pM)oFk
zi$2fog+v{dDNAm=jc6H|&BPs4i`~F5F=7hcT-qql+iO2rGB-te;?UT7;gDd_MW{o%
zr}j&os=NIvEuNlOlJ@7PzD%NLn_G3jOubBGnNtWL0${dP@{yyiba)t0a*Qj@46CX<
zdJiKzRVy0BeKq@t8{_IMIuo)YoRY_>#eSkcPdR=FGh0S#3_Y&{fXLx`!OP%oTD9jl
zaT@{sd!NJo4s>kV8i%C1h2XgH+zv@x|G?>MUj-QygLMI)K>;9Qk9hAUoasDqPkH9J
zk{>QUYP^m+z22E~hS`>jhYR<=W@6YJ777VOG>hKGZtNYIn1avDtvWu6o<(NIw|wlZ
zGom(FCxQE3a6E|&xY%`MmVuRV)L7-#H$cG`yw;2)H!g%@NO<9<+{=)Jdq-iuar^rZ
zN2EHV#MaP<-iiAVlHIlEX6DZ{_8AZN^LnXq;=MQiv!n2*lJb9skiRv`|MFY?FNYP*
zpAQTFfZ~e?iYd}HIJ4dpKGBDsfi@AiJmqC96c&5|%3jBCOf6~uep)W9f^H~yeiAV(
z`|~3bJ)<{EkRUmbAp`ej6p8Iv86XZCFwXwY;53DbrGxE&0#Nhx)_W2m9J>#p%SUU2
zLTwkjQ@eOwY``?%tXT@2-x8^71C~Dd1d)=d-IMKLIO$L1AV2JBTsWCNw(~#V{935f
zgA|!y2+ZWL0m;mpnV8?)!HMiNs$yN?QTxZ7MV!3bu&988mxNKPAK{R3EN)@2+j_A%
z5r~D*WPOqHA5ovj;gZKxY1e*pxMdl=$N=GQ^LI17>TR3L`S=`xvmTM_;1_cbae*>I
z_;YZ>GEyPaKI_ZS&gDBetFwMt#>jy)H9zf(_!(8o0JUZa!`u=WWUY;Gb_kZtjdB#u
z?ee@;AS%~lKLa`-Z$l^IezZMnz6+on)sIuL_rZk(*M_~OE2*b0iUbr@KUz2r_`ay}
zKb8i`lIRo-z>
z(~qdpZC>c{;V5;-$LN)o)DT8kXIS!qQI+$)x7+X$n%{OgMNimEl;hp#w`T&{Iu0Ih
zoW(yP3U%wIpb~UWoUW+*HSA&`?7r~V3OrHt6eKTDmZ^3Z<#
zFMm8CsqWJTUwA|^Yc@h6}btcP1J>-1`)ga
zkFCa3ZlPe(E7bd~0qKSKRu1;;3fxCLU
zTRp-p6Z{;|F6{#tjAIPZmpZ9`44*Rc0{Y
zGPb)gRnj^<*l_LpgVKRds)0+*Z-dpN&`ahw#qSwYvu*{LVa~EA1JH<==dPmLRbDnm
zUyzmk7n2ZriJL2Zl6tcvBfqU#?UoZM!I?xOgKwr(gH>M(t-d`JzwK&ExTtx~vbi~w
z8Q94q<~|!n#h1sM(bS3!uz@@L0_=gmQJ4arQ
zXV}31rmL_M{eeOzA1*b;izu5PPXKUO%~1ioiC!K>D#(U>48%!1$l#`<67T`!X$r;*(+P;x~wLR1^`L_$p||EKOsb9|6Ki5
zpstQxA$1~+fBRE4i!qi{ur8G(*6=vCsl$Sg`azD902OOM36mB@Ld;IdGaJMFnJ7}v
z?20Gxj7z68^?JpnNQEW0Wk25IkC-PE>7Fxs6RuOpo#e3Zqx*URkP;NF2VI
zzXNJ;F}DIsJ{bu`|fWOIq-MQ&12$H@vBIVX(vPvXX<0ee(4l82%4I>2G>~6K1r(*c+qv
z50pEul9`!}dbIPAk?u5bubHVIS|68Ow%I9D8^EaFRCAIQCBb!d?eL@)$E*gK4I)mp
z?H~qxw!n0F9~@wCU23S+fo$6kE}~-bx#W*&!w;|W>x=R$irQz6-luRNY+`KjL*r*~
z7DIL2&kvG_$=AN8>k0=4@Z?8PC+G+AeEIg8`%4;L+-dYOV#zH@iqYY?$7&~_OtkjB
z>c*&3gISr-gHbyljBmI%tv-Q+#EnOwM|U%N4By{P^XxC4L`^SR{`@)^K#tVtu|)>T
zE@?WK6Q(4=>X?PIl4SlFky>Z8>53}EMKUl7R9_sQ-98~-+$g}q}8lgRp@`J#q$G0;R%
zC;x$hwQ${%(Qu4so&AuK2{P8H?v@z8Q9_|C$D7-)tw?@*@m$E|Y^#?Abo2GQ?;is3
z<6(Z!WfAiHI&SWAUhhZk1@&r#G-|%YuBi5jtlf(oxTA@m7~}0ChlFs-cJNPXnRo}D
z4`uU-DwxJ9R^uITn6MsGc?NS`t;7<-ok*OtWR6qu#Y?Em*?xn1-EUUX;|&$$gB1u@
zn?Z{_dUI28N3pwgZiP%4yg~kf*5DDosS`u@T*BA&K;QTPUj=_E0nPC3!cMREjV3_F
zaJS=>1!Qq;QJyRebCehbXW;GU?_I=wi^U_
zm#CSUgHptt^HJxwGR!Oev3VAQ%NqgkynJZeB~
z;{^YQMec(vKykQOq#N%o1TOI&uCrhW>wqCdZ*;agw1yD-o-8|Weio()YCMI)7TKG^
zA!32x7Ev7{o3f6bI(PFLg^+@x(;-&Z;6PTsLtwbf8MajjwA5H9t5OtII^7BoP^&4%
zfDNaB`XM}CM!&7`>qI8kDQKmH#T=x1mS`dG*$;(gKFG0c&8@Ra2oB!MU&5}9(~K%*
zN_r$HVX`BWs!d}-ZQg@W>9lneiKje6wps`OtGke=64@O1W@5`T&h?W(ot?Gq>zHh&
z$K=k;(2>TCld|AIz$PF
zzZYgXVLN7e7=b5zaenhpP3AjfOqxEFDVKXSaHUg@g?=>TmsXL3Kb%V=WHH8fXk3)W
zYZnPysNu*cLAim7VsaAlu$Qp2Z_3J_iByJA!OKf#`o>cxbtHtb`c`^aXLKLe(N$(q
z;IxhW&uSJgF~zhUCp%d?Imbi*Z>W)&{bd>`HsU=uCctk7nG}S#0hGX=7?G-LAvQxq
zCp%UL&njQ=$}@eP3abm&32Zl69ji~-J!RMJy{R2ctY5P*P``0}XDs*HU1#LAWXVR3
zO7(W$cJvhNB{q8j9B>1ZD_9y=0&6|cK08_mGOK{=`_o}Y`sYSwo
zU&8kR0lSj_k<%P5R4>o66xXtTWiE)@r9%gA$P>a&Ui`5J(!1<%z{WnG6|DEf;|igL
zRl$+d6u49Ui-O`aNZAUmSQeXeH)l-a+JCYe
z*`Hk@>^R177lW>O@gy!tePvq%mVRsiueFIS5;Orhd>Wdg)~?{W
zu6&&?OVWLE>v&2a&!ch9Jur!uH^49YU*h5G|IcWCF!JM!WQ%=2)F`H$Vy;{ZDMfeB
z#6?(!vL2Cy^vRDS2Tfk#v&$Uh>cr?c`{
zY1Bms1!RL=fDUG?AnNNQNm+>B*q2;BPe)rMdY@3Wh^v?No@DKB^HTRSna~GJ^Fs8k
zexDS+A>}{J8{HUu&t{2-*%>YHdkh@u^pty*3uC!X$!CP@iSmqZALk3C21qZ1D
z7LfOQv`oeij$`R=GRsTfVo^JNUTT-}dmWkD{6f(B%#7PY07yCOrkHDP=WW=ePoK<0
z+0Zo;5_zr$SKN-S=KjaGcFx5ef~t|T6yc=`Ew3$~P7Bkp=owQH-QV2V<YNZW>oM
zlEiEqaWtI>ZgKQ$x5e+(750vD7vG9lWqNmSee#nmr)%dTT+E%WoD4
z?&tE3yGLx%N!4sNtLsDBL3Sa`x8CKk)d~YdLumJ)cIb7$moa&@?WHd;(c53E(-qug
z_ip#@GX_jR~8W~2CAHBU(cA{U}_3YEKljxoA^`2z?lZ7vQ
z3qgX30!oJ-$H41WpPq66VP8y;*Y!RNLovdfX)^Z{V{6BoqY-OYJSr=6N#k%Bt
zgnZG1rY6cffW()@oNooKFpo?~KOctEpgV&Ca=8cUgSJF0s??y|{DSQ06
zSrRmUfU`O$x#fpE3CXHhhYpXqer|mzs6Dfv2pl$JW3b)*l5-fr?{L;6_`d%w8}GAd
z-ckn`-1B8R2@Y&L)fO9OmHGwYM&ahMcgkS-~{(ES|evd_ffTbI*L8R>T2Wz|xYCU$AYNBY0
z?_?`B=D_JiSY0|T9U7~c!)X)ceZ?!}aSj*3gv8Z6#zba%)gHSvG@$a(vc|=$j9&pi
z(QXG4M}D03$vK^9iDJeoTL@bbMGnf!ymmU^&t@l{6ggnZg*;Iiv>WeD46DC?b}Qus
z<`K@CkBWvF4Idj=Ah8$^hGJs@CDD>fuir=C_X5hi;pqW($F238;_y%*o*BPM+7^$5
zew=eqhlL|}bUOT6d$MP_M04uAWG4HTNq
z)FQlZe_f{S)2xr8z>|{=QNpyh+=9I^p}!xnzd9ogPkOME@=dR@RQ6i>Iua~JYAi_N
zes0}EVL!w#7wKUzlp&e)l1!fSrp!h|v|>zVnz}FZwZ^tKs$J%r?1g%2s^cGKq30Eu
z>={7$#oHhQ>3+3n0*m~*F5DoU7Xj`#XYoWWjuSEoP<|p
z%cq$aX^43^{B+QeEz}Zrzn~ejGEi;r%w<1Vy5SsGK#7w7$&;s
zRA0?^q8rOuZv~%^>>ZyYoq0|(6`AeJW=#6uc0O>o6#
z{5OEI+K1D%g)sFo&1GD`9y=gbBlryO>g97@0mRr;_zuBX7#%j-40e7|3A`3{Nh4@*
zk-u#Izf2XE1C|AioqreGVtvwfuD_xT}g*iQh
zG1|iF=|S5|svYSq*G>(pql%K!g|5Aaw;mEKEcCm$%@i|AWO5=9x
z^Cu81v^>ATa>$YJasApMWB3#ak~z{2wj(-{;yGeXO_)X(A4xZWy2A<;F!lS9qn^(}
zKla#~-@`hSe)AOVAbyyPpYAgG#G;$3_4e*)olhHOl)802Uj04m%gU9Kw-C1uUB|2x
zl8Kj1G}baqWLA7Bd!8d<$emZ4RU_bi+>N1sYSqDFEkETR
z|1Y^JJotm7qV1x?`T$FkwSJc2j#+vM+9yE1WYX)2LG`-6x%0a|7UL4+O5tE#DN-n?
z2DWqkdv>QTtFDrhWFDRgzWj0C8pG#}>2#lL>>`nfLHIncegaO_O1VnXzkk|d<@I