diff --git a/.codecov.yml b/.codecov.yml
index a3f9e9e6dd06..b3d0129de992 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -25,12 +25,12 @@ component_management:
branches:
- "!master"
individual_components:
- - component_id: api
- paths:
- - api/**
- component_id: pkg_aws_library
paths:
- packages/aws-library/**
+ - component_id: pkg_celery_library
+ paths:
+ - packages/celery-library/**
- component_id: pkg_dask_task_models_library
paths:
- packages/dask-task-models-library/**
@@ -130,6 +130,7 @@ comment:
ignore:
+ - "api/tests"
- "test_*.py"
- "**/generated_models/*.py"
- "**/generated_code/*.py"
diff --git a/.env-devel b/.env-devel
index 1842a982b854..cd60f2e9d365 100644
--- a/.env-devel
+++ b/.env-devel
@@ -17,12 +17,13 @@ AGENT_VOLUMES_CLEANUP_S3_ENDPOINT=http://172.17.0.1:9001
AGENT_VOLUMES_CLEANUP_S3_PROVIDER=MINIO
AGENT_VOLUMES_CLEANUP_S3_REGION=us-east-1
AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY=12345678
-AGENT_TRACING=null
+AGENT_TRACING={}
+API_SERVER_CELERY_CONCURRENCY=50
API_SERVER_DEV_FEATURES_ENABLED=0
API_SERVER_LOGLEVEL=INFO
API_SERVER_PROFILING=1
-API_SERVER_TRACING=null
+API_SERVER_TRACING={}
TRAEFIK_API_SERVER_INFLIGHTREQ_AMOUNT=25
AUTOSCALING_DASK=null
@@ -35,7 +36,7 @@ AUTOSCALING_LOGLEVEL=INFO
AUTOSCALING_NODES_MONITORING=null
AUTOSCALING_POLL_INTERVAL="00:00:10"
AUTOSCALING_SSM_ACCESS=null
-AUTOSCALING_TRACING=null
+AUTOSCALING_TRACING={}
AWS_S3_CLI_S3=null
@@ -47,13 +48,15 @@ CATALOG_PORT=8000
CATALOG_PROFILING=1
CATALOG_SERVICES_DEFAULT_RESOURCES='{"CPU": {"limit": 0.1, "reservation": 0.1}, "RAM": {"limit": 2147483648, "reservation": 2147483648}}'
CATALOG_SERVICES_DEFAULT_SPECIFICATIONS='{}'
-CATALOG_TRACING=null
+CATALOG_TRACING={}
CELERY_RESULT_EXPIRES=P7D
CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH='{"type":"tls","tls_ca_file":"/home/scu/.dask/dask-crt.pem","tls_client_cert":"/home/scu/.dask/dask-crt.pem","tls_client_key":"/home/scu/.dask/dask-key.pem"}'
CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DOCKER_IMAGE_TAG=master-github-latest
+CLUSTERS_KEEPER_DASK_NPROCS=1
CLUSTERS_KEEPER_DASK_NTHREADS=0
+CLUSTERS_KEEPER_DASK_NTHREADS_MULTIPLIER=1
CLUSTERS_KEEPER_DASK_WORKER_SATURATION=inf
CLUSTERS_KEEPER_EC2_ACCESS=null
CLUSTERS_KEEPER_SSM_ACCESS=null
@@ -63,7 +66,7 @@ CLUSTERS_KEEPER_MAX_MISSED_HEARTBEATS_BEFORE_CLUSTER_TERMINATION=5
CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES=null
CLUSTERS_KEEPER_TASK_INTERVAL=00:00:30
CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES=null
-CLUSTERS_KEEPER_TRACING=null
+CLUSTERS_KEEPER_TRACING={}
DASK_SCHEDULER_HOST=dask-scheduler
DASK_SCHEDULER_PORT=8786
@@ -83,7 +86,7 @@ DIRECTOR_PUBLISHED_HOST_NAME="127.0.0.1:9081"
DIRECTOR_REGISTRY_CACHING_TTL=00:15:00
DIRECTOR_REGISTRY_CACHING=True
DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS=null
-DIRECTOR_TRACING=null
+DIRECTOR_TRACING={}
DOCKER_API_PROXY_HOST=docker-api-proxy
DOCKER_API_PROXY_PASSWORD=admin
@@ -98,11 +101,11 @@ EFS_GROUP_NAME=efs-group
EFS_DNS_NAME=fs-xxx.efs.us-east-1.amazonaws.com
EFS_MOUNTED_PATH=/tmp/efs
EFS_PROJECT_SPECIFIC_DATA_DIRECTORY=project-specific-data
-EFS_GUARDIAN_TRACING=null
+EFS_GUARDIAN_TRACING={}
EFS_DEFAULT_USER_SERVICE_SIZE_BYTES=10000
# DATCORE_ADAPTER
-DATCORE_ADAPTER_TRACING=null
+DATCORE_ADAPTER_TRACING={}
# DIRECTOR_V2 ----
COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH='{"type":"tls","tls_ca_file":"/home/scu/.dask/dask-crt.pem","tls_client_cert":"/home/scu/.dask/dask-crt.pem","tls_client_key":"/home/scu/.dask/dask-key.pem"}'
@@ -128,25 +131,28 @@ DYNAMIC_SIDECAR_LOG_LEVEL=DEBUG
DYNAMIC_SIDECAR_PROMETHEUS_MONITORING_NETWORKS=[]
DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS={}
DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT=01:00:00
-DIRECTOR_V2_TRACING=null
+DIRECTOR_V2_TRACING={}
+DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED=1
# DYNAMIC_SCHEDULER ----
DYNAMIC_SCHEDULER_LOGLEVEL=INFO
DYNAMIC_SCHEDULER_PROFILING=1
DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER=0
DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT=01:00:00
-DYNAMIC_SCHEDULER_TRACING=null
+DYNAMIC_SCHEDULER_TRACING={}
DYNAMIC_SCHEDULER_UI_STORAGE_SECRET=adminadmin
FUNCTION_SERVICES_AUTHORS='{"UN": {"name": "Unknown", "email": "unknown@osparc.io", "affiliation": "unknown"}}'
WEBSERVER_LICENSES={}
+WEBSERVER_FOGBUGZ={}
LICENSES_ITIS_VIP_SYNCER_ENABLED=false
LICENSES_ITIS_VIP_SYNCER_PERIODICITY=1D00:00:00
LICENSES_ITIS_VIP_API_URL=https://replace-with-itis-api/{category}
LICENSES_ITIS_VIP_CATEGORIES='{"HumanWholeBody": "Humans", "HumanBodyRegion": "Humans (Region)", "AnimalWholeBody": "Animal"}'
LICENSES_SPEAG_PHANTOMS_API_URL=https://replace-with-speag-api/{category}
LICENSES_SPEAG_PHANTOMS_CATEGORIES='{"ComputationalPhantom": "Phantom of the Opera"}'
+LONG_RUNNING_TASKS_NAMESPACE_SUFFIX=development
# Can use 'docker run -it itisfoundation/invitations:latest simcore-service-invitations generate-dotenv --auto-password'
INVITATIONS_DEFAULT_PRODUCT=osparc
@@ -158,13 +164,13 @@ INVITATIONS_PORT=8000
INVITATIONS_SECRET_KEY='REPLACE_ME_with_result__Fernet_generate_key='
INVITATIONS_SWAGGER_API_DOC_ENABLED=1
INVITATIONS_USERNAME=admin
-INVITATIONS_TRACING=null
+INVITATIONS_TRACING={}
LOG_FORMAT_LOCAL_DEV_ENABLED=1
-LOG_FILTER_MAPPING='{}'
+LOG_FILTER_MAPPING='{"gunicorn.access":[" /v0/ ", " /v0/health "], "uvicorn.access":[" / ", " /v0/ "]}'
NOTIFICATIONS_LOGLEVEL=INFO
-NOTIFICATIONS_TRACING=null
+NOTIFICATIONS_TRACING={}
PAYMENTS_ACCESS_TOKEN_EXPIRE_MINUTES=30
PAYMENTS_ACCESS_TOKEN_SECRET_KEY=2c0411810565e063309be1457009fb39ce023946f6a354e6935107b57676
@@ -186,15 +192,17 @@ PAYMENTS_STRIPE_API_SECRET='REPLACE_ME_with_api_secret'
PAYMENTS_STRIPE_URL=https://api.stripe.com
PAYMENTS_SWAGGER_API_DOC_ENABLED=1
PAYMENTS_USERNAME=admin
-PAYMENTS_TRACING=null
+PAYMENTS_TRACING={}
POSTGRES_DB=simcoredb
-POSTGRES_ENDPOINT=postgres:5432
POSTGRES_HOST=postgres
POSTGRES_PASSWORD=adminadmin
POSTGRES_PORT=5432
POSTGRES_USER=scu
-
+POSTGRES_MINSIZE=1
+POSTGRES_MAXSIZE=50
+POSTGRES_MAX_POOLSIZE=10
+POSTGRES_MAX_OVERFLOW=20
POSTGRES_READONLY_PASSWORD=readonly
POSTGRES_READONLY_USER=postgres_readonly
@@ -227,7 +235,7 @@ RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_CHECK_ENABLED=1
RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL=6
RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC=300
RESOURCE_USAGE_TRACKER_S3=null
-RESOURCE_USAGE_TRACKER_TRACING=null
+RESOURCE_USAGE_TRACKER_TRACING={}
# NOTE: 172.17.0.1 is the docker0 interface, which redirect from inside a container onto the host network interface.
R_CLONE_OPTION_BUFFER_SIZE=16M
@@ -259,7 +267,7 @@ STORAGE_HOST=storage
STORAGE_LOGLEVEL=INFO
STORAGE_PORT=8080
STORAGE_PROFILING=1
-STORAGE_TRACING=null
+STORAGE_TRACING={}
# STORAGE ----
SWARM_STACK_NAME=master-simcore
@@ -269,11 +277,18 @@ VENDOR_DEV_MANUAL_IMAGE=containous/whoami
VENDOR_DEV_MANUAL_REPLICAS=1
VENDOR_DEV_MANUAL_SUBDOMAIN=manual
-## VENDOR DEVELOPMENT SERVICES ---
+## WEBSERVER SERVICES VARIANTS ---
WB_API_WEBSERVER_HOST=wb-api-server
WB_API_WEBSERVER_PORT=8080
+WB_AUTH_DIAGNOSTICS={}
+WB_AUTH_LOGLEVEL=INFO
+WB_AUTH_PROFILING=1
+WB_AUTH_TRACING={}
+WB_AUTH_WEBSERVER_HOST=wb-auth
+WB_AUTH_WEBSERVER_PORT=8080
+
WB_GC_ACTIVITY=null
WB_GC_ANNOUNCEMENTS=0
WB_GC_CATALOG=null
@@ -300,7 +315,7 @@ WB_GC_SOCKETIO=1
WB_GC_STATICWEB=null
WB_GC_STUDIES_DISPATCHER=null
WB_GC_TAGS=0
-WB_GC_TRACING=null
+WB_GC_TRACING={}
WB_GC_USERS={}
WB_GC_WALLETS=0
@@ -330,7 +345,7 @@ WB_DB_EL_STATICWEB=null
WB_DB_EL_STORAGE=null
WB_DB_EL_STUDIES_DISPATCHER=null
WB_DB_EL_TAGS=0
-WB_DB_EL_TRACING=null
+WB_DB_EL_TRACING={}
WB_DB_EL_USERS={}
WB_DB_EL_WALLETS=0
@@ -395,11 +410,12 @@ WEBSERVER_PROJECTS={}
WEBSERVER_PROMETHEUS_API_VERSION=v1
WEBSERVER_PROMETHEUS_URL=http://prometheus:9090
WEBSERVER_PUBLICATIONS=1
+WEBSERVER_REALTIME_COLLABORATION='{"RTC_MAX_NUMBER_OF_USERS":3}'
WEBSERVER_SCICRUNCH={}
WEBSERVER_SESSION_SECRET_KEY='REPLACE_ME_with_result__Fernet_generate_key='
WEBSERVER_SOCKETIO=1
WEBSERVER_STATICWEB={}
WEBSERVER_STUDIES_DISPATCHER={}
WEBSERVER_TAGS=1
-WEBSERVER_TRACING=null
+WEBSERVER_TRACING={}
WEBSERVER_USERS={}
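The tracing variables above switch from null to {} because they configure nested JSON settings: an empty object instantiates the tracing sub-settings with their defaults (feature enabled), while unset or null leaves the optional field empty (feature disabled). A minimal sketch of that pattern, with hypothetical class and field names rather than the repo's actual settings classes:

# Minimal sketch (hypothetical names, not the repo's actual settings classes): shows why
# WEBSERVER_TRACING={} enables tracing with defaults while an unset/null value disables it.
from pydantic import BaseModel
from pydantic_settings import BaseSettings


class TracingSettings(BaseModel):
    # assumed example fields, for illustration only
    TRACING_COLLECTOR_ENDPOINT: str = "http://opentelemetry-collector"
    TRACING_COLLECTOR_PORT: int = 4318


class ApplicationSettings(BaseSettings):
    # pydantic-settings parses complex fields from the environment as JSON:
    #   WEBSERVER_TRACING={}          -> TracingSettings() with all defaults (tracing enabled)
    #   WEBSERVER_TRACING unset/null  -> None (tracing disabled)
    WEBSERVER_TRACING: TracingSettings | None = None


if __name__ == "__main__":
    settings = ApplicationSettings()
    print("tracing enabled:", settings.WEBSERVER_TRACING is not None)

The other empty-JSON entries in this file (for example WEBSERVER_FOGBUGZ and WB_AUTH_TRACING) presumably follow the same convention.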
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index d8684350361c..b0fde245e8a2 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -13,6 +13,7 @@ Makefile @pcrespov @sanderegg
/api/ @sanderegg @pcrespov @matusdrobuliak66
/ci/ @sanderegg @pcrespov
/docs/ @pcrespov
+/packages/celery-library/ @giancarloromeo
/packages/common-library/ @giancarloromeo
/packages/models-library/ @sanderegg @pcrespov @matusdrobuliak66 @giancarloromeo
/packages/postgres-database/ @matusdrobuliak66
diff --git a/.github/ISSUE_TEMPLATE/1_bug_report.yml b/.github/ISSUE_TEMPLATE/1_bug_report.yml
index a0af793a09d3..841ae3ec33ac 100644
--- a/.github/ISSUE_TEMPLATE/1_bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/1_bug_report.yml
@@ -1,7 +1,20 @@
name: 🐛 Bug
description: File a bug/issue
-labels: ["bug", "t:bug"]
-assignees: ["pcrespov"]
+title: "[Bug]: "
+labels:
+ - bug
+ - "t:bug"
+assignees:
+ - bisgaard-itis
+ - giancarloromeo
+ - GitHK
+ - matusdrobuliak66
+ - odeimaiz
+ - pcrespov
+ - sanderegg
+projects:
+ - "ITISFoundation/projects/15"
+type: "Bug"
body:
- type: checkboxes
attributes:
@@ -20,7 +33,7 @@ body:
- "production aws (e.g. osparc.io)"
- "staging aws (e.g. staging.osparc.io)"
- "production on-premise (dalco)"
- - "stating on-premise (dalco)"
+ - "staging on-premise (dalco)"
- "development (master)"
- "other (e.g. local)"
validations:
diff --git a/.github/ISSUE_TEMPLATE/2_feature_request.yml b/.github/ISSUE_TEMPLATE/2_feature_request.yml
index 2c392adce857..69e05bc0f83f 100644
--- a/.github/ISSUE_TEMPLATE/2_feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/2_feature_request.yml
@@ -1,7 +1,17 @@
name: ✨ Feature request
-description: Suggest an idea to implement in the simcore plaform
-labels: ["t:enhancement"]
-assignees: ["pcrespov"]
+description: Suggest an idea to implement in the simcore platform
+title: "[Feature]: "
+labels:
+ - t:enhancement
+assignees:
+ - bisgaard-itis
+ - giancarloromeo
+ - GitHK
+ - matusdrobuliak66
+ - odeimaiz
+ - pcrespov
+ - sanderegg
+type: "Feature"
body:
- type: checkboxes
attributes:
diff --git a/.github/ISSUE_TEMPLATE/3_maintenance_issue.yml b/.github/ISSUE_TEMPLATE/3_maintenance_issue.yml
index e4a0060c764c..65e5c262ec2d 100644
--- a/.github/ISSUE_TEMPLATE/3_maintenance_issue.yml
+++ b/.github/ISSUE_TEMPLATE/3_maintenance_issue.yml
@@ -1,7 +1,19 @@
name: 🏗️ Maintenance
description: A change in the code to overcome technical debt
-labels: ["t:maintenance"]
-assignees: ["pcrespov"]
+title: "[Maintenance]: "
+labels:
+ - "t:maintenance"
+assignees:
+ - bisgaard-itis
+ - giancarloromeo
+ - GitHK
+ - matusdrobuliak66
+ - odeimaiz
+ - pcrespov
+ - sanderegg
+projects:
+ - "ITISFoundation/projects/9"
+type: "Task"
body:
- type: checkboxes
attributes:
diff --git a/.github/ISSUE_TEMPLATE/4_pre_release.yml b/.github/ISSUE_TEMPLATE/4_pre_release.yml
index 1502fd0b34ff..14500b1a36b2 100644
--- a/.github/ISSUE_TEMPLATE/4_pre_release.yml
+++ b/.github/ISSUE_TEMPLATE/4_pre_release.yml
@@ -1,8 +1,13 @@
name: 🚀 Pre-release to staging (developers-only)
description: Issue to plan and log pre-release from master to staging deploy (including staging hotfixes)
title: "🚀 Pre-release master -> staging_ Dear Support team
- We have received the following request form for an account in {{ product.display_name }} from {{ host }}
+ We have received the following request form for an account in :
+
+
diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.txt
index 0eb9d7d4a641..67b1801b9123 100644
--- a/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.txt
+++ b/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.txt
@@ -1,6 +1,8 @@
Dear Support team,
-We have received the following request form for an account in {{ product.display_name }} from **{{ host }}**:
+We have received the following request form for an account in :
+- Product: **{{ product.display_name }}**
+- Host: **{{ host }}**
{{ dumps(request_form) }}
diff --git a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.html
index 2bfd9404271e..5e9121137d98 100644
--- a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.html
+++ b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.html
@@ -3,9 +3,9 @@
{% block content %}
Dear {{ user.first_name or user.user_name }},
-Great news! {{ sharer.user_name }} has shared a {{ product.ui.project_alias }} with you on {{ product.display_name }}.
+Great news! {{ sharer.user_name }} has shared a project with you on {{ product.display_name }}.
-To view the {{ product.ui.project_alias }} and accept the sharing, follow below:
+To view the project and accept the sharing, follow below:
{% if sharer.message %}
diff --git a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.txt
index 2fae91408f5c..f1ad3335cdef 100644
--- a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.txt
+++ b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.txt
@@ -1,8 +1,8 @@
Dear {{ user.first_name or user.user_name }},
-Great news! {{ sharer.user_name }} has shared a {{ product.ui.project_alias }} with you on {{ product.display_name }}.
+Great news! {{ sharer.user_name }} has shared a project with you on {{ product.display_name }}.
-To view the {{ product.ui.project_alias }} and accept the sharing, follow below:
+To view the project and accept the sharing, follow below:
{{ sharer.message }}
{{ accept_link }}
diff --git a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.subject.txt
index 0a7f2157a39a..59d89b0a8ede 100644
--- a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.subject.txt
+++ b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.subject.txt
@@ -1 +1 @@
-A {{ product.ui.project_alias }} was shared with you on {{ host }}
+A project was shared with you on {{ host }}
diff --git a/packages/notifications-library/tests/conftest.py b/packages/notifications-library/tests/conftest.py
index 006b7ed1a7b3..c2575aab6d07 100644
--- a/packages/notifications-library/tests/conftest.py
+++ b/packages/notifications-library/tests/conftest.py
@@ -66,13 +66,12 @@ def product_data(
product_ui = ProductUIData(
logo_url=vendor_ui.get("logo_url"),
strong_color=vendor_ui.get("strong_color"),
- project_alias=vendor_ui["project_alias"],
)
return ProductData( # type: ignore
product_name=product_name,
display_name=product["display_name"],
- vendor_display_inline=f"{vendor.get('name','')}, {vendor.get('address','')}",
+ vendor_display_inline=f"{vendor.get('name', '')}, {vendor.get('address', '')}",
support_email=product["support_email"],
homepage_url=vendor.get("url"),
ui=product_ui,
diff --git a/packages/notifications-library/tests/with_db/conftest.py b/packages/notifications-library/tests/with_db/conftest.py
index 9dda5da676d3..0ddf0d9f464e 100644
--- a/packages/notifications-library/tests/with_db/conftest.py
+++ b/packages/notifications-library/tests/with_db/conftest.py
@@ -16,11 +16,14 @@
from models_library.users import UserID
from notifications_library._templates import get_default_named_templates
from pydantic import validate_call
+from pytest_simcore.helpers.postgres_tools import insert_and_get_row_lifespan
+from pytest_simcore.helpers.postgres_users import (
+ insert_and_get_user_and_secrets_lifespan,
+)
from simcore_postgres_database.models.jinja2_templates import jinja2_templates
from simcore_postgres_database.models.payments_transactions import payments_transactions
from simcore_postgres_database.models.products import products
from simcore_postgres_database.models.products_to_templates import products_to_templates
-from simcore_postgres_database.models.users import users
from sqlalchemy.engine.row import Row
from sqlalchemy.ext.asyncio.engine import AsyncEngine
@@ -50,16 +53,11 @@ async def user(
and injects a user in db
"""
assert user_id == user["id"]
- pk_args = users.c.id, user["id"]
-
- # NOTE: creation of primary group and setting `groupid`` is automatically triggered after creation of user by postgres
- async with sqlalchemy_async_engine.begin() as conn:
- row: Row = await _insert_and_get_row(conn, users, user, *pk_args)
-
- yield row._asdict()
-
- async with sqlalchemy_async_engine.begin() as conn:
- await _delete_row(conn, users, *pk_args)
+ async with insert_and_get_user_and_secrets_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
+ sqlalchemy_async_engine,
+ **user,
+ ) as row:
+ yield row
@pytest.fixture
@@ -82,15 +80,14 @@ async def product(
# NOTE: osparc product is already in db. This is another product
assert product["name"] != "osparc"
- pk_args = products.c.name, product["name"]
-
- async with sqlalchemy_async_engine.begin() as conn:
- row: Row = await _insert_and_get_row(conn, products, product, *pk_args)
-
- yield row._asdict()
-
- async with sqlalchemy_async_engine.begin() as conn:
- await _delete_row(conn, products, *pk_args)
+ async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
+ sqlalchemy_async_engine,
+ table=products,
+ values=product,
+ pk_col=products.c.name,
+ pk_value=product["name"],
+ ) as row:
+ yield row
@pytest.fixture
diff --git a/packages/postgres-database/docker/Dockerfile b/packages/postgres-database/docker/Dockerfile
index cc5be3400397..09cb7f30c7bc 100644
--- a/packages/postgres-database/docker/Dockerfile
+++ b/packages/postgres-database/docker/Dockerfile
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
# we docker image is built based on debian
FROM python:${PYTHON_VERSION}-slim-bookworm AS base
@@ -32,10 +32,9 @@ COPY --from=uv_build /uv /uvx /bin/
# NOTE: python virtualenv is used here such that installed packages may be moved to production image easily by copying the venv
RUN uv venv "${VIRTUAL_ENV}"
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
+# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
+ENV UV_COMPILE_BYTECODE=1 \
+ UV_LINK_MODE=copy
ARG GIT_BRANCH
ARG GIT_REPOSITORY
@@ -46,8 +45,6 @@ RUN git clone --single-branch --branch ${GIT_BRANCH} ${GIT_REPOSITORY} osparc-si
FROM base AS production
ENV PYTHONOPTIMIZE=TRUE
-# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
-ENV UV_COMPILE_BYTECODE=1
WORKDIR /home/scu
# ensure home folder is read/writable for user scu
@@ -58,11 +55,5 @@ COPY entrypoint.bash /home/entrypoint.bash
RUN chmod +x /home/entrypoint.bash
-ENV POSTGRES_USER=scu \
- POSTGRES_PASSWORD=adminadmin \
- POSTGRES_HOST=postgres \
- POSTGRES_PORT=5432 \
- POSTGRES_DB=simcoredb
-
ENTRYPOINT [ "/bin/bash", "/home/entrypoint.bash" ]
CMD [ "sc-pg", "upgrade" ]
diff --git a/packages/postgres-database/requirements/_base.in b/packages/postgres-database/requirements/_base.in
index c5aa128b710f..0294edf9114f 100644
--- a/packages/postgres-database/requirements/_base.in
+++ b/packages/postgres-database/requirements/_base.in
@@ -6,7 +6,6 @@
--requirement ../../../packages/common-library/requirements/_base.in
alembic
-opentelemetry-instrumentation-asyncpg
pydantic
sqlalchemy[postgresql_psycopg2binary,postgresql_asyncpg] # SEE extras in https://github.com/sqlalchemy/sqlalchemy/blob/main/setup.cfg#L43
yarl
diff --git a/packages/postgres-database/requirements/_base.txt b/packages/postgres-database/requirements/_base.txt
index b16bdd318cfa..ad96d677f661 100644
--- a/packages/postgres-database/requirements/_base.txt
+++ b/packages/postgres-database/requirements/_base.txt
@@ -4,17 +4,11 @@ annotated-types==0.7.0
# via pydantic
asyncpg==0.30.0
# via sqlalchemy
-deprecated==1.2.18
- # via
- # opentelemetry-api
- # opentelemetry-semantic-conventions
greenlet==3.1.1
# via sqlalchemy
idna==3.10
# via yarl
-importlib-metadata==8.5.0
- # via opentelemetry-api
-mako==1.3.9
+mako==1.3.10
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
@@ -23,40 +17,25 @@ markupsafe==3.0.2
# via mako
multidict==6.1.0
# via yarl
-opentelemetry-api==1.30.0
- # via
- # opentelemetry-instrumentation
- # opentelemetry-instrumentation-asyncpg
- # opentelemetry-semantic-conventions
-opentelemetry-instrumentation==0.51b0
- # via opentelemetry-instrumentation-asyncpg
-opentelemetry-instrumentation-asyncpg==0.51b0
- # via -r requirements/_base.in
-opentelemetry-semantic-conventions==0.51b0
- # via
- # opentelemetry-instrumentation
- # opentelemetry-instrumentation-asyncpg
orjson==3.10.15
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/common-library/requirements/_base.in
-packaging==24.2
- # via opentelemetry-instrumentation
propcache==0.3.0
# via yarl
psycopg2-binary==2.9.10
# via sqlalchemy
-pydantic==2.10.6
+pydantic==2.11.7
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/common-library/requirements/_base.in
# -r requirements/_base.in
# pydantic-extra-types
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
-pydantic-extra-types==2.10.2
+pydantic-extra-types==2.10.5
# via -r requirements/../../../packages/common-library/requirements/_base.in
sqlalchemy==1.4.54
# via
@@ -64,17 +43,14 @@ sqlalchemy==1.4.54
# -c requirements/../../../requirements/constraints.txt
# -r requirements/_base.in
# alembic
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# alembic
# pydantic
# pydantic-core
# pydantic-extra-types
-wrapt==1.17.2
- # via
- # deprecated
- # opentelemetry-instrumentation
+ # typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
yarl==1.18.3
# via -r requirements/_base.in
-zipp==3.21.0
- # via importlib-metadata
diff --git a/packages/postgres-database/requirements/_migration.txt b/packages/postgres-database/requirements/_migration.txt
index a9f890849e77..c472ad904b09 100644
--- a/packages/postgres-database/requirements/_migration.txt
+++ b/packages/postgres-database/requirements/_migration.txt
@@ -8,7 +8,7 @@ certifi==2025.1.31
# requests
charset-normalizer==3.4.1
# via requests
-click==8.1.8
+click==8.2.1
# via -r requirements/_migration.in
docker==7.1.0
# via -r requirements/_migration.in
@@ -20,7 +20,7 @@ idna==3.10
# via
# -c requirements/_base.txt
# requests
-mako==1.3.9
+mako==1.3.10
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
@@ -29,7 +29,7 @@ markupsafe==3.0.2
# via
# -c requirements/_base.txt
# mako
-requests==2.32.3
+requests==2.32.4
# via docker
sqlalchemy==1.4.54
# via
@@ -38,11 +38,11 @@ sqlalchemy==1.4.54
# alembic
tenacity==9.0.0
# via -r requirements/_migration.in
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# alembic
-urllib3==2.3.0
+urllib3==2.5.0
# via
# -c requirements/../../../requirements/constraints.txt
# -r requirements/_migration.in
diff --git a/packages/postgres-database/requirements/_test.txt b/packages/postgres-database/requirements/_test.txt
index da15f704e3e4..23620af98a03 100644
--- a/packages/postgres-database/requirements/_test.txt
+++ b/packages/postgres-database/requirements/_test.txt
@@ -19,33 +19,37 @@ greenlet==3.1.1
# sqlalchemy
iniconfig==2.0.0
# via pytest
-mypy==1.15.0
+mypy==1.16.1
# via sqlalchemy
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via mypy
packaging==24.2
+ # via pytest
+pathspec==0.12.1
+ # via mypy
+pluggy==1.5.0
# via
- # -c requirements/_base.txt
# pytest
-pluggy==1.5.0
- # via pytest
+ # pytest-cov
psycopg2-binary==2.9.10
# via
# -c requirements/_base.txt
# aiopg
# sqlalchemy
-pytest==8.3.5
+pygments==2.19.2
+ # via pytest
+pytest==8.4.1
# via
# -r requirements/_test.in
# pytest-asyncio
# pytest-cov
# pytest-docker
# pytest-instafail
-pytest-asyncio==0.26.0
+pytest-asyncio==1.0.0
# via -r requirements/_test.in
-pytest-cov==6.0.0
+pytest-cov==6.2.1
# via -r requirements/_test.in
-pytest-docker==3.2.0
+pytest-docker==3.2.3
# via -r requirements/_test.in
pytest-instafail==0.5.0
# via -r requirements/_test.in
@@ -76,7 +80,7 @@ types-python-dateutil==2.9.0.20241206
# via arrow
types-requests==2.32.0.20250301
# via types-docker
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# -c requirements/_migration.txt
@@ -84,7 +88,7 @@ typing-extensions==4.12.2
# sqlalchemy2-stubs
tzdata==2025.1
# via faker
-urllib3==2.3.0
+urllib3==2.5.0
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_migration.txt
diff --git a/packages/postgres-database/requirements/_tools.txt b/packages/postgres-database/requirements/_tools.txt
index f896126c0b04..3f4b1c1e0fe7 100644
--- a/packages/postgres-database/requirements/_tools.txt
+++ b/packages/postgres-database/requirements/_tools.txt
@@ -8,7 +8,7 @@ bump2version==1.0.1
# via -r requirements/../../../requirements/devenv.txt
cfgv==3.4.0
# via pre-commit
-click==8.1.8
+click==8.2.1
# via
# black
# pip-tools
@@ -26,11 +26,11 @@ isort==6.0.1
# pylint
mccabe==0.7.0
# via pylint
-mypy==1.15.0
+mypy==1.16.1
# via
# -c requirements/_test.txt
# -r requirements/../../../requirements/devenv.txt
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via
# -c requirements/_test.txt
# black
@@ -39,12 +39,14 @@ nodeenv==1.9.1
# via pre-commit
packaging==24.2
# via
- # -c requirements/_base.txt
# -c requirements/_test.txt
# black
# build
pathspec==0.12.1
- # via black
+ # via
+ # -c requirements/_test.txt
+ # black
+ # mypy
pip==25.0.1
# via pip-tools
pip-tools==7.4.1
@@ -69,11 +71,11 @@ pyyaml==6.0.2
# pre-commit
ruff==0.9.9
# via -r requirements/../../../requirements/devenv.txt
-setuptools==75.8.2
+setuptools==80.9.0
# via pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/packages/postgres-database/scripts/erd/Dockerfile b/packages/postgres-database/scripts/erd/Dockerfile
index 1adbe09416aa..2e427c9373b1 100644
--- a/packages/postgres-database/scripts/erd/Dockerfile
+++ b/packages/postgres-database/scripts/erd/Dockerfile
@@ -2,7 +2,7 @@
# Define arguments in the global scope
ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
# we docker image is built based on debian
FROM python:${PYTHON_VERSION}-slim-bookworm AS base
@@ -20,11 +20,6 @@ RUN apt-get update \
&& apt-get clean
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
-
# devenv
COPY requirements.txt requirements.txt
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/06eafd25d004_add_state_type_unknown.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/06eafd25d004_add_state_type_unknown.py
new file mode 100644
index 000000000000..449529aff44f
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/06eafd25d004_add_state_type_unknown.py
@@ -0,0 +1,53 @@
+"""add state type unknown
+
+Revision ID: 06eafd25d004
+Revises: ec4f62595e0c
+Create Date: 2025-09-01 12:25:25.617790+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "06eafd25d004"
+down_revision = "ec4f62595e0c"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.execute("ALTER TYPE statetype ADD VALUE 'UNKNOWN'")
+
+
+def downgrade() -> None:
+ # NOTE: PostgreSQL doesn't support removing enum values directly
+ # This downgrade only ensures that StateType.UNKNOWN is no longer used
+ #
+
+ # Find all tables and columns that use statetype enum
+ result = op.get_bind().execute(
+ sa.DDL(
+ """
+ SELECT t.table_name, c.column_name, c.column_default
+ FROM information_schema.columns c
+ JOIN information_schema.tables t ON c.table_name = t.table_name
+ WHERE c.udt_name = 'statetype'
+ AND t.table_schema = 'public'
+ """
+ )
+ )
+
+ tables_columns = result.fetchall()
+
+ # Update UNKNOWN states to FAILED in all affected tables
+ for table_name, column_name, _ in tables_columns:
+ op.execute(
+ sa.DDL(
+ f"""
+ UPDATE {table_name}
+ SET {column_name} = 'FAILED'
+ WHERE {column_name} = 'UNKNOWN'
+ """
+ )
+ )
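The new UNKNOWN value covers computational tasks whose final outcome was lost; the downgrade above folds it back into FAILED since enum values cannot be removed. A hedged sketch of how calling code might classify it, assuming the enum's existing SUCCESS member alongside the FAILED and ABORTED members visible in this diff:

# Sketch only: treats UNKNOWN as a terminal state, mirroring the downgrade's UNKNOWN -> FAILED
# mapping. The helper and the SUCCESS member are assumptions, not code from this PR.
from simcore_postgres_database.models.comp_pipeline import StateType

_TERMINAL_STATES = {
    StateType.SUCCESS,
    StateType.FAILED,
    StateType.ABORTED,
    StateType.UNKNOWN,  # outcome lost, e.g. the worker disappeared mid-run
}


def is_terminal(state: StateType) -> bool:
    return state in _TERMINAL_STATES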
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/42ec7816c0b4_computational_collection_runs.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/42ec7816c0b4_computational_collection_runs.py
new file mode 100644
index 000000000000..75bc05590371
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/42ec7816c0b4_computational_collection_runs.py
@@ -0,0 +1,121 @@
+"""computational collection runs
+
+Revision ID: 42ec7816c0b4
+Revises: d159ac30983c
+Create Date: 2025-07-01 13:30:02.736058+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = "42ec7816c0b4"
+down_revision = "d159ac30983c"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "comp_runs_collections",
+ sa.Column(
+ "collection_run_id",
+ postgresql.UUID(as_uuid=True),
+ server_default=sa.text("gen_random_uuid()"),
+ nullable=False,
+ ),
+ sa.Column("client_or_system_generated_id", sa.String(), nullable=False),
+ sa.Column(
+ "client_or_system_generated_display_name", sa.String(), nullable=False
+ ),
+ sa.Column("is_generated_by_system", sa.Boolean(), nullable=False),
+ sa.Column(
+ "created",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.Column(
+ "modified",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint("collection_run_id"),
+ )
+ op.create_index(
+ "ix_comp_runs_collections_client_or_system_generated_id",
+ "comp_runs_collections",
+ ["client_or_system_generated_id"],
+ unique=False,
+ )
+ op.add_column(
+ "comp_runs", sa.Column("collection_run_id", sa.String(), nullable=True)
+ )
+ op.create_unique_constraint(
+ "comp_runs_project_collection_run_id_unique_constraint",
+ "comp_runs",
+ ["project_uuid", "collection_run_id"],
+ )
+
+ # Data migration: Create collection run records for existing comp_runs
+ op.execute(
+ """
+ INSERT INTO comp_runs_collections (
+ collection_run_id,
+ client_or_system_generated_id,
+ client_or_system_generated_display_name,
+ is_generated_by_system
+ )
+ SELECT DISTINCT
+ gen_random_uuid(),
+ 'migration-generated-' || run_id::text,
+ 'Migration Generated Collection Run',
+ TRUE
+ FROM comp_runs
+ WHERE collection_run_id IS NULL
+ """
+ )
+
+ # Update comp_runs to reference the newly created collection runs
+ op.execute(
+ """
+ UPDATE comp_runs
+ SET collection_run_id = (
+ SELECT collection_run_id::text
+ FROM comp_runs_collections
+ WHERE client_or_system_generated_id = 'migration-generated-' || comp_runs.run_id::text
+ )
+ WHERE collection_run_id IS NULL
+ """
+ )
+
+ op.alter_column(
+ "comp_runs",
+ "collection_run_id",
+ existing_type=sa.String(),
+ nullable=False,
+ )
+
+ op.create_index(
+ "ix_comp_runs_collection_run_id",
+ "comp_runs",
+ ["collection_run_id"],
+ unique=False,
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index("ix_comp_runs_collection_run_id", table_name="comp_runs")
+ op.drop_column("comp_runs", "collection_run_id")
+ op.drop_index(
+ "ix_comp_runs_collections_client_or_system_generated_id",
+ table_name="comp_runs_collections",
+ )
+ op.drop_table("comp_runs_collections")
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/4f6fd2586491_add_functions_api_access_rights.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4f6fd2586491_add_functions_api_access_rights.py
new file mode 100644
index 000000000000..8dede1708eb5
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4f6fd2586491_add_functions_api_access_rights.py
@@ -0,0 +1,72 @@
+"""Add functions api access rights
+
+Revision ID: 4f6fd2586491
+Revises: afb1ba08f3c2
+Create Date: 2025-06-13 12:14:59.317685+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "4f6fd2586491"
+down_revision = "afb1ba08f3c2"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ "funcapi_group_api_access_rights",
+ sa.Column("group_id", sa.BigInteger(), nullable=False),
+ sa.Column("product_name", sa.String(), nullable=False),
+ sa.Column("read_functions", sa.Boolean(), nullable=True),
+ sa.Column("write_functions", sa.Boolean(), nullable=True),
+ sa.Column("execute_functions", sa.Boolean(), nullable=True),
+ sa.Column("read_function_jobs", sa.Boolean(), nullable=True),
+ sa.Column("write_function_jobs", sa.Boolean(), nullable=True),
+ sa.Column("execute_function_jobs", sa.Boolean(), nullable=True),
+ sa.Column("read_function_job_collections", sa.Boolean(), nullable=True),
+ sa.Column("write_function_job_collections", sa.Boolean(), nullable=True),
+ sa.Column("execute_function_job_collections", sa.Boolean(), nullable=True),
+ sa.Column(
+ "created",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.Column(
+ "modified",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.ForeignKeyConstraint(
+ ["group_id"],
+ ["groups.gid"],
+ name="fk_func_access_to_groups_group_id",
+ onupdate="CASCADE",
+ ondelete="CASCADE",
+ ),
+ sa.ForeignKeyConstraint(
+ ["product_name"],
+ ["products.name"],
+ name="fk_func_access_to_products_product_name",
+ onupdate="CASCADE",
+ ondelete="CASCADE",
+ ),
+ sa.PrimaryKeyConstraint(
+ "group_id",
+ "product_name",
+ name="pk_func_group_product_name_to_api_access_rights",
+ ),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table("funcapi_group_api_access_rights")
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5679165336c8_new_users_secrets.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5679165336c8_new_users_secrets.py
new file mode 100644
index 000000000000..1187c800a65c
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5679165336c8_new_users_secrets.py
@@ -0,0 +1,77 @@
+"""new users secrets
+
+Revision ID: 5679165336c8
+Revises: 61b98a60e934
+Create Date: 2025-07-17 17:07:20.200038+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "5679165336c8"
+down_revision = "61b98a60e934"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.create_table(
+ "users_secrets",
+ sa.Column("user_id", sa.BigInteger(), nullable=False),
+ sa.Column("password_hash", sa.String(), nullable=False),
+ sa.Column(
+ "modified",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["users.id"],
+ name="fk_users_secrets_user_id_users",
+ onupdate="CASCADE",
+ ondelete="CASCADE",
+ ),
+ sa.PrimaryKeyConstraint("user_id", name="users_secrets_pkey"),
+ )
+
+ # Copy password data from users table to users_secrets table
+ op.execute(
+ sa.DDL(
+ """
+ INSERT INTO users_secrets (user_id, password_hash, modified)
+ SELECT id, password_hash, created_at
+ FROM users
+ WHERE password_hash IS NOT NULL
+ """
+ )
+ )
+
+ op.drop_column("users", "password_hash")
+
+
+def downgrade():
+ # Add column as nullable first
+ op.add_column(
+ "users",
+ sa.Column("password_hash", sa.VARCHAR(), autoincrement=False, nullable=True),
+ )
+
+ # Copy password data back from users_secrets table to users table
+ op.execute(
+ sa.DDL(
+ """
+ UPDATE users
+ SET password_hash = us.password_hash
+ FROM users_secrets us
+ WHERE users.id = us.user_id
+ """
+ )
+ )
+
+ # Now make the column NOT NULL
+ op.alter_column("users", "password_hash", nullable=False)
+
+ op.drop_table("users_secrets")
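After this migration, password hashes live only in users_secrets, so credential lookups join that table to users. A sketch under assumptions: the users_secrets model module and the users.email column are not shown in this diff.

# Sketch under assumptions: the users_secrets model module and the users.email column are
# not part of this diff; the statement only illustrates the join introduced by the migration.
import sqlalchemy as sa

from simcore_postgres_database.models.users import users
from simcore_postgres_database.models.users_secrets import users_secrets


def get_password_hash_stmt(email: str):
    return (
        sa.select(users_secrets.c.password_hash)
        .select_from(
            users.join(users_secrets, users.c.id == users_secrets.c.user_id)
        )
        .where(users.c.email == email)
    )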
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5b998370916a_introduce_data_deleted_in_projects_to_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5b998370916a_introduce_data_deleted_in_projects_to_.py
new file mode 100644
index 000000000000..95457ee0c4cd
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5b998370916a_introduce_data_deleted_in_projects_to_.py
@@ -0,0 +1,40 @@
+"""introduce data_deleted in projects_to_jobs table
+
+Revision ID: 5b998370916a
+Revises: 5679165336c8
+Create Date: 2025-08-11 13:58:38.424398+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "5b998370916a"
+down_revision = "5679165336c8"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column(
+ "projects_to_jobs",
+ sa.Column("storage_assets_deleted", sa.Boolean(), nullable=True),
+ )
+
+ op.execute("UPDATE projects_to_jobs SET storage_assets_deleted = false")
+
+ op.alter_column(
+ "projects_to_jobs",
+ "storage_assets_deleted",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column("projects_to_jobs", "storage_assets_deleted")
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/61b98a60e934_computational_collection_uniquencess.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/61b98a60e934_computational_collection_uniquencess.py
new file mode 100644
index 000000000000..1de7d9da7f89
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/61b98a60e934_computational_collection_uniquencess.py
@@ -0,0 +1,35 @@
+"""computational collection uniquencess
+
+Revision ID: 61b98a60e934
+Revises: df61d1b2b967
+Create Date: 2025-07-08 15:40:12.714684+00:00
+
+"""
+
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "61b98a60e934"
+down_revision = "df61d1b2b967"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_unique_constraint(
+ "client_or_system_generated_id_uniqueness",
+ "comp_runs_collections",
+ ["client_or_system_generated_id"],
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(
+ "client_or_system_generated_id_uniqueness",
+ "comp_runs_collections",
+ type_="unique",
+ )
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/7e92447558e0_update_api_keys_uniqueness_constraint.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7e92447558e0_update_api_keys_uniqueness_constraint.py
new file mode 100644
index 000000000000..e3a42a641256
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7e92447558e0_update_api_keys_uniqueness_constraint.py
@@ -0,0 +1,37 @@
+"""Update api-keys uniqueness constraint
+
+Revision ID: 7e92447558e0
+Revises: 06eafd25d004
+Create Date: 2025-09-12 09:56:45.164921+00:00
+
+"""
+
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "7e92447558e0"
+down_revision = "06eafd25d004"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint("display_name_userid_uniqueness", "api_keys", type_="unique")
+ op.create_unique_constraint(
+ "display_name_userid_product_name_uniqueness",
+ "api_keys",
+ ["display_name", "user_id", "product_name"],
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(
+ "display_name_userid_product_name_uniqueness", "api_keys", type_="unique"
+ )
+ op.create_unique_constraint(
+ "display_name_userid_uniqueness", "api_keys", ["display_name", "user_id"]
+ )
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/b566f1b29012_modify_conversations.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b566f1b29012_modify_conversations.py
new file mode 100644
index 000000000000..8d211f483c28
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b566f1b29012_modify_conversations.py
@@ -0,0 +1,61 @@
+"""modify conversations
+
+Revision ID: b566f1b29012
+Revises: 5b998370916a
+Create Date: 2025-08-14 15:02:54.784186+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = "b566f1b29012"
+down_revision = "5b998370916a"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column(
+ "conversations",
+ sa.Column(
+ "extra_context",
+ postgresql.JSONB(astext_type=sa.Text()),
+ server_default=sa.text("'{}'::jsonb"),
+ nullable=False,
+ ),
+ )
+ op.add_column(
+ "products",
+ sa.Column("support_standard_group_id", sa.BigInteger(), nullable=True),
+ )
+ op.create_foreign_key(
+ "fk_products_support_standard_group_id",
+ "products",
+ "groups",
+ ["support_standard_group_id"],
+ ["gid"],
+ onupdate="CASCADE",
+ ondelete="SET NULL",
+ )
+
+ op.execute(
+ """
+ ALTER TYPE conversationtype ADD VALUE 'SUPPORT';
+ """
+ )
+
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(
+ "fk_products_support_standard_group_id", "products", type_="foreignkey"
+ )
+ op.drop_column("products", "support_standard_group_id")
+ op.drop_column("conversations", "extra_context")
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/d159ac30983c_make_func_api_access_non_nullable.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d159ac30983c_make_func_api_access_non_nullable.py
new file mode 100644
index 000000000000..788945d77944
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d159ac30983c_make_func_api_access_non_nullable.py
@@ -0,0 +1,191 @@
+"""Make func api access non-nullable
+
+Revision ID: d159ac30983c
+Revises: 4f6fd2586491
+Create Date: 2025-07-01 08:50:29.095068+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "d159ac30983c"
+down_revision = "4f6fd2586491"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET read_functions = false
+ WHERE read_functions IS NULL
+ """
+ )
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET write_functions = false
+ WHERE write_functions IS NULL
+ """
+ )
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET execute_functions = false
+ WHERE execute_functions IS NULL
+ """
+ )
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET read_function_jobs = false
+ WHERE read_function_jobs IS NULL
+ """
+ )
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET write_function_jobs = false
+ WHERE write_function_jobs IS NULL
+ """
+ )
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET execute_function_jobs = false
+ WHERE execute_function_jobs IS NULL
+ """
+ )
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET read_function_job_collections = false
+ WHERE read_function_job_collections IS NULL
+ """
+ )
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET write_function_job_collections = false
+ WHERE write_function_job_collections IS NULL
+ """
+ )
+ op.execute(
+ """
+ UPDATE funcapi_group_api_access_rights
+ SET execute_function_job_collections = false
+ WHERE execute_function_job_collections IS NULL
+ """
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "write_functions",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "execute_functions",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "read_function_jobs",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "write_function_jobs",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "execute_function_jobs",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "read_function_job_collections",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "write_function_job_collections",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "execute_function_job_collections",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "execute_function_job_collections",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "write_function_job_collections",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "read_function_job_collections",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "execute_function_jobs",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "write_function_jobs",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "read_function_jobs",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "execute_functions",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "write_functions",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "read_functions",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/df61d1b2b967_computational_collection_runs_2.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/df61d1b2b967_computational_collection_runs_2.py
new file mode 100644
index 000000000000..e4b986921b9f
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/df61d1b2b967_computational_collection_runs_2.py
@@ -0,0 +1,38 @@
+"""computational collection runs 2
+
+Revision ID: df61d1b2b967
+Revises: 42ec7816c0b4
+Create Date: 2025-07-02 16:04:02.458800+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "df61d1b2b967"
+down_revision = "42ec7816c0b4"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "read_functions",
+ existing_type=sa.BOOLEAN(),
+ nullable=False,
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.alter_column(
+ "funcapi_group_api_access_rights",
+ "read_functions",
+ existing_type=sa.BOOLEAN(),
+ nullable=True,
+ )
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/ec4f62595e0c_add_support_fogbugz_fields.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ec4f62595e0c_add_support_fogbugz_fields.py
new file mode 100644
index 000000000000..f5b22003fce4
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ec4f62595e0c_add_support_fogbugz_fields.py
@@ -0,0 +1,38 @@
+"""add support fogbugz fields
+
+Revision ID: ec4f62595e0c
+Revises: b566f1b29012
+Create Date: 2025-08-26 13:06:10.879081+00:00
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "ec4f62595e0c"
+down_revision = "b566f1b29012"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column(
+ "products",
+ sa.Column("support_assigned_fogbugz_person_id", sa.BigInteger(), nullable=True),
+ )
+ op.add_column(
+ "products",
+ sa.Column(
+ "support_assigned_fogbugz_project_id", sa.BigInteger(), nullable=True
+ ),
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column("products", "support_assigned_fogbugz_project_id")
+ op.drop_column("products", "support_assigned_fogbugz_person_id")
+ # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/_common.py b/packages/postgres-database/src/simcore_postgres_database/models/_common.py
index 47bfeb6ebf08..6b2405548547 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/_common.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/_common.py
@@ -16,24 +16,28 @@ class RefActions:
NO_ACTION: Final[str] = "NO ACTION"
-def column_created_datetime(*, timezone: bool = True) -> sa.Column:
+def column_created_datetime(
+ *, timezone: bool = True, doc="Timestamp auto-generated upon creation"
+) -> sa.Column:
return sa.Column(
"created",
sa.DateTime(timezone=timezone),
nullable=False,
server_default=sa.sql.func.now(),
- doc="Timestamp auto-generated upon creation",
+ doc=doc,
)
-def column_modified_datetime(*, timezone: bool = True) -> sa.Column:
+def column_modified_datetime(
+ *, timezone: bool = True, doc="Timestamp with last row update"
+) -> sa.Column:
return sa.Column(
"modified",
sa.DateTime(timezone=timezone),
nullable=False,
server_default=sa.sql.func.now(),
onupdate=sa.sql.func.now(),
- doc="Timestamp with last row update",
+ doc=doc,
)
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/api_keys.py b/packages/postgres-database/src/simcore_postgres_database/models/api_keys.py
index 2c3f12eca3ab..02a2fc58bbc2 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/api_keys.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/api_keys.py
@@ -75,7 +75,10 @@
"If set to NULL then the key does not expire.",
),
sa.UniqueConstraint(
- "display_name", "user_id", name="display_name_userid_uniqueness"
+ "display_name",
+ "user_id",
+ "product_name",
+ name="display_name_userid_product_name_uniqueness",
),
)
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/comp_pipeline.py b/packages/postgres-database/src/simcore_postgres_database/models/comp_pipeline.py
index a4e5645860c0..ac2387084e13 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/comp_pipeline.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/comp_pipeline.py
@@ -1,6 +1,5 @@
-""" Computational Pipeline Table
+"""Computational Pipeline Table"""
-"""
import enum
import uuid
@@ -24,6 +23,7 @@ class StateType(enum.Enum):
ABORTED = "ABORTED"
WAITING_FOR_RESOURCES = "WAITING_FOR_RESOURCES"
WAITING_FOR_CLUSTER = "WAITING_FOR_CLUSTER"
+ UNKNOWN = "UNKNOWN"
def _new_uuid():
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py b/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py
index efc1716cf106..13157505041c 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py
@@ -105,6 +105,13 @@
server_default=sa.text("'{}'::jsonb"),
nullable=False,
),
+ sa.Column(
+ "collection_run_id",
+ sa.String,
+ nullable=False,
+ ),
sa.UniqueConstraint("project_uuid", "user_id", "iteration"),
sa.Index("ix_comp_runs_user_id", "user_id"),
+ sa.Index("ix_comp_runs_collection_run_id", "collection_run_id"),
+ sa.UniqueConstraint("project_uuid", "collection_run_id"),
)
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/comp_runs_collections.py b/packages/postgres-database/src/simcore_postgres_database/models/comp_runs_collections.py
new file mode 100644
index 000000000000..31439acc458d
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/models/comp_runs_collections.py
@@ -0,0 +1,41 @@
+import sqlalchemy as sa
+from sqlalchemy.dialects.postgresql import UUID
+
+from ._common import column_created_datetime, column_modified_datetime
+from .base import metadata
+
+comp_runs_collections = sa.Table(
+ "comp_runs_collections",
+ metadata,
+ sa.Column(
+ "collection_run_id",
+ UUID(as_uuid=True),
+ server_default=sa.text("gen_random_uuid()"),
+ primary_key=True,
+ ),
+ sa.Column(
+ "client_or_system_generated_id",
+ sa.String,
+ nullable=False,
+ doc="Unique identifier for the collection run, generated by the client (e.g. a third party using our public API) or by the system (e.g. the osparc webserver)",
+ ),
+ sa.Column(
+ "client_or_system_generated_display_name",
+ sa.String,
+ nullable=False,
+ ),
+ sa.Column(
+ "is_generated_by_system",
+ sa.Boolean,
+ nullable=False,
+ ),
+ column_created_datetime(timezone=True),
+ column_modified_datetime(timezone=True),
+ sa.Index(
+ "ix_comp_runs_collections_client_or_system_generated_id",
+ "client_or_system_generated_id",
+ ),
+ sa.UniqueConstraint(
+ "client_or_system_generated_id", name="client_or_system_generated_id_uniqueness"
+ ),
+)
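A hedged usage sketch for the table above: collection_run_id is generated server-side by gen_random_uuid(), so an insert retrieves it with RETURNING. The helper name and the async-connection plumbing are assumptions, not code from this PR.

# Sketch only: helper name and async plumbing are assumptions; the table itself is from this PR.
import sqlalchemy as sa
from sqlalchemy.ext.asyncio import AsyncConnection

from simcore_postgres_database.models.comp_runs_collections import comp_runs_collections


async def create_collection_run(
    conn: AsyncConnection, *, client_id: str, display_name: str, by_system: bool
) -> str:
    # collection_run_id is filled in server-side via gen_random_uuid()
    result = await conn.execute(
        sa.insert(comp_runs_collections)
        .values(
            client_or_system_generated_id=client_id,
            client_or_system_generated_display_name=display_name,
            is_generated_by_system=by_system,
        )
        .returning(comp_runs_collections.c.collection_run_id)
    )
    return f"{result.scalar_one()}"

Since client_or_system_generated_id carries a uniqueness constraint, inserting the same client-generated id twice raises an integrity error instead of creating a second collection.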
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/conversations.py b/packages/postgres-database/src/simcore_postgres_database/models/conversations.py
index 3072d91dda55..a301a7ea70a8 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/conversations.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/conversations.py
@@ -1,7 +1,7 @@
import enum
import sqlalchemy as sa
-from sqlalchemy.dialects.postgresql import UUID
+from sqlalchemy.dialects.postgresql import JSONB, UUID
from ._common import RefActions, column_created_datetime, column_modified_datetime
from .base import metadata
@@ -12,6 +12,7 @@
class ConversationType(enum.Enum):
PROJECT_STATIC = "PROJECT_STATIC" # Static conversation for the project
PROJECT_ANNOTATION = "PROJECT_ANNOTATION" # Something like sticky note, can be located anywhere in the pipeline UI
+ SUPPORT = "SUPPORT" # Support conversation
conversations = sa.Table(
@@ -70,6 +71,13 @@ class ConversationType(enum.Enum):
nullable=False,
doc="Product name identifier. If None, then the item is not exposed",
),
+ sa.Column(
+ "extra_context",
+ JSONB,
+ nullable=False,
+ server_default=sa.text("'{}'::jsonb"),
+ doc="Free JSON to store extra context",
+ ),
column_created_datetime(timezone=True),
column_modified_datetime(timezone=True),
)
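The new SUPPORT conversation type plus the schemaless extra_context column lets support-specific metadata (such as a ticket reference) travel with a conversation without adding columns. A query sketch under assumptions: the conversations.type column and the fogbugz_case_id key are illustrative and not part of this diff.

# Sketch under assumptions: the conversations.type column and the fogbugz_case_id key are
# illustrative only; extra_context and ConversationType.SUPPORT come from this PR.
import sqlalchemy as sa

from simcore_postgres_database.models.conversations import ConversationType, conversations


def list_support_conversations_stmt(product_name: str, fogbugz_case_id: str):
    return (
        sa.select(conversations)
        .where(conversations.c.product_name == product_name)
        .where(conversations.c.type == ConversationType.SUPPORT)
        # JSONB containment (@>): rows whose extra_context includes this key/value pair
        .where(
            conversations.c.extra_context.contains({"fogbugz_case_id": fogbugz_case_id})
        )
    )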
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/funcapi_api_access_rights_table.py b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_api_access_rights_table.py
new file mode 100644
index 000000000000..04957c1c9b26
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_api_access_rights_table.py
@@ -0,0 +1,96 @@
+"""Function api access rights of groups (read, write, execute)"""
+
+import sqlalchemy as sa
+from simcore_postgres_database.models._common import (
+ RefActions,
+ column_created_datetime,
+ column_modified_datetime,
+)
+
+from .base import metadata
+
+funcapi_api_access_rights_table = sa.Table(
+ "funcapi_group_api_access_rights",
+ metadata,
+ sa.Column(
+ "group_id",
+ sa.ForeignKey(
+ "groups.gid",
+ name="fk_func_access_to_groups_group_id",
+ onupdate=RefActions.CASCADE,
+ ondelete=RefActions.CASCADE,
+ ),
+ nullable=False,
+ ),
+ sa.Column(
+ "product_name",
+ sa.ForeignKey(
+ "products.name",
+ name="fk_func_access_to_products_product_name",
+ onupdate=RefActions.CASCADE,
+ ondelete=RefActions.CASCADE,
+ ),
+ nullable=False,
+ ),
+ sa.Column(
+ "read_functions",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ sa.Column(
+ "write_functions",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ sa.Column(
+ "execute_functions",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ sa.Column(
+ "read_function_jobs",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ sa.Column(
+ "write_function_jobs",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ sa.Column(
+ "execute_function_jobs",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ sa.Column(
+ "read_function_job_collections",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ sa.Column(
+ "write_function_job_collections",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ sa.Column(
+ "execute_function_job_collections",
+ sa.Boolean,
+ default=False,
+ nullable=False,
+ ),
+ column_created_datetime(),
+ column_modified_datetime(),
+ sa.PrimaryKeyConstraint(
+ "group_id",
+ "product_name",
+ name="pk_func_group_product_name_to_api_access_rights",
+ ),
+)
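
A small sketch of how the per-group, per-product flags might be queried; the function name and connection wiring are assumptions, not part of this PR.

    import sqlalchemy as sa
    from sqlalchemy.ext.asyncio import AsyncConnection

    from simcore_postgres_database.models.funcapi_api_access_rights_table import (
        funcapi_api_access_rights_table as access_rights,
    )

    async def can_group_execute_functions(
        conn: AsyncConnection, *, group_id: int, product_name: str
    ) -> bool:
        # a missing row means no rights were granted for this (group, product) pair
        value = await conn.scalar(
            sa.select(access_rights.c.execute_functions).where(
                (access_rights.c.group_id == group_id)
                & (access_rights.c.product_name == product_name)
            )
        )
        return bool(value)
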
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/funcapi_functions_access_rights_table.py b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_functions_access_rights_table.py
index b01f8d287790..a4a60b2dffb0 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/funcapi_functions_access_rights_table.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_functions_access_rights_table.py
@@ -22,7 +22,6 @@
ondelete=RefActions.CASCADE,
),
nullable=False,
- doc="Unique identifier of the function",
),
sa.Column(
"group_id",
@@ -33,7 +32,6 @@
ondelete=RefActions.CASCADE,
),
nullable=False,
- doc="Group id",
),
sa.Column(
"product_name",
@@ -44,25 +42,21 @@
ondelete=RefActions.CASCADE,
),
nullable=False,
- doc="Name of the product",
),
sa.Column(
"read",
sa.Boolean,
default=False,
- doc="Read access right for the function",
),
sa.Column(
"write",
sa.Boolean,
default=False,
- doc="Write access right for the function",
),
sa.Column(
"execute",
sa.Boolean,
default=False,
- doc="Execute access right for the function",
),
column_created_datetime(),
column_modified_datetime(),
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/products.py b/packages/postgres-database/src/simcore_postgres_database/models/products.py
index bdb8e080d23e..414e7e4b2c06 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/products.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/products.py
@@ -32,7 +32,6 @@
class VendorUI(TypedDict, total=True):
logo_url: str # vendor logo url
strong_color: str # vendor main color
- project_alias: str # project alias for the product (e.g. "project" or "study")
class Vendor(TypedDict, total=False):
@@ -269,5 +268,32 @@ class ProductLoginSettingsDict(TypedDict, total=False):
nullable=True,
doc="Group associated to this product",
),
+ sa.Column(
+ "support_standard_group_id",
+ sa.BigInteger,
+ sa.ForeignKey(
+ groups.c.gid,
+ name="fk_products_support_standard_group_id",
+ ondelete=RefActions.SET_NULL,
+ onupdate=RefActions.CASCADE,
+ ),
+ unique=False,
+ nullable=True,
+ doc="Group associated to this product support",
+ ),
+ sa.Column(
+ "support_assigned_fogbugz_person_id",
+ sa.BigInteger,
+ unique=False,
+ nullable=True,
+ doc="Fogbugz person ID to assign support case",
+ ),
+ sa.Column(
+ "support_assigned_fogbugz_project_id",
+ sa.BigInteger,
+ unique=False,
+ nullable=True,
+ doc="Fogbugz project ID to assign support case",
+ ),
sa.PrimaryKeyConstraint("name", name="products_pk"),
)
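
For illustration only, products with a support group configured could be listed with a query like the following sketch (not part of the diff).

    import sqlalchemy as sa

    from simcore_postgres_database.models.products import products

    # products that have the new support group and fogbugz routing columns set
    stmt = sa.select(
        products.c.name,
        products.c.support_standard_group_id,
        products.c.support_assigned_fogbugz_project_id,
    ).where(products.c.support_standard_group_id.is_not(None))
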
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_to_jobs.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_jobs.py
index 4f3859fb36e5..4013cf9e4355 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/projects_to_jobs.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_jobs.py
@@ -28,6 +28,12 @@
"the relative resource name is shelves/shelf1/jobs/job2, "
"the parent resource name is shelves/shelf1.",
),
+ sa.Column(
+ "storage_assets_deleted",
+ sa.Boolean,
+ nullable=False,
+ doc="Indicates whether the job's S3 assets have been actively deleted.",
+ ),
# Composite key (project_uuid, job_parent_resource_name) uniquely identifies every row
sa.UniqueConstraint(
"project_uuid",
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/users.py b/packages/postgres-database/src/simcore_postgres_database/models/users.py
index 7be2161ff864..62dffd58c66d 100644
--- a/packages/postgres-database/src/simcore_postgres_database/models/users.py
+++ b/packages/postgres-database/src/simcore_postgres_database/models/users.py
@@ -67,15 +67,6 @@
"NOTE: new policy (NK) is that the same phone can be reused therefore it does not has to be unique",
),
#
- # User Secrets ------------------
- #
- sa.Column(
- "password_hash",
- sa.String(),
- nullable=False,
- doc="Hashed password",
- ),
- #
# User Account ------------------
#
sa.Column(
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/users_secrets.py b/packages/postgres-database/src/simcore_postgres_database/models/users_secrets.py
new file mode 100644
index 000000000000..1a1ae04ec637
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/models/users_secrets.py
@@ -0,0 +1,34 @@
+import sqlalchemy as sa
+
+from ._common import RefActions, column_modified_datetime
+from .base import metadata
+
+__all__: tuple[str, ...] = ("users_secrets",)
+
+users_secrets = sa.Table(
+ "users_secrets",
+ metadata,
+ #
+ # User Secrets ------------------
+ #
+ sa.Column(
+ "user_id",
+ sa.BigInteger(),
+ sa.ForeignKey(
+ "users.id",
+ name="fk_users_secrets_user_id_users",
+ onupdate=RefActions.CASCADE,
+ ondelete=RefActions.CASCADE,
+ ),
+ nullable=False,
+ ),
+ sa.Column(
+ "password_hash",
+ sa.String(),
+ nullable=False,
+ doc="Hashed password",
+ ),
+ column_modified_datetime(timezone=True, doc="Last password modification timestamp"),
+ # ---------------------------
+ sa.PrimaryKeyConstraint("user_id", name="users_secrets_pkey"),
+)
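
Since the password hash moves out of users, a lookup by email becomes a join between the two tables. A minimal sketch; the email value is illustrative.

    import sqlalchemy as sa

    from simcore_postgres_database.models.users import users
    from simcore_postgres_database.models.users_secrets import users_secrets

    # fetch the password hash for a given email via users -> users_secrets
    stmt = (
        sa.select(users_secrets.c.password_hash)
        .select_from(
            users.join(users_secrets, users.c.id == users_secrets.c.user_id)
        )
        .where(users.c.email == "user@example.com")
    )
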
diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_projects.py b/packages/postgres-database/src/simcore_postgres_database/utils_projects.py
index 577f9441004b..ee6a8a132e89 100644
--- a/packages/postgres-database/src/simcore_postgres_database/utils_projects.py
+++ b/packages/postgres-database/src/simcore_postgres_database/utils_projects.py
@@ -7,7 +7,7 @@
from sqlalchemy.ext.asyncio import AsyncConnection
from .models.projects import projects
-from .utils_repos import transaction_context
+from .utils_repos import pass_or_acquire_connection, transaction_context
class DBBaseProjectError(OsparcErrorMixin, Exception):
@@ -22,6 +22,23 @@ class ProjectsRepo:
def __init__(self, engine):
self.engine = engine
+ async def exists(
+ self,
+ project_uuid: uuid.UUID,
+ *,
+ connection: AsyncConnection | None = None,
+ ) -> bool:
+ async with pass_or_acquire_connection(self.engine, connection) as conn:
+ return (
+ await conn.scalar(
+ sa.select(1)
+ .select_from(projects)
+ .where(projects.c.uuid == f"{project_uuid}")
+ .limit(1)
+ )
+ is not None
+ )
+
async def get_project_last_change_date(
self,
project_uuid: uuid.UUID,
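
A usage sketch of the new ProjectsRepo.exists helper; the guard function and the error raised are assumptions for illustration only.

    import uuid

    from sqlalchemy.ext.asyncio import AsyncEngine

    from simcore_postgres_database.utils_projects import ProjectsRepo

    async def ensure_project_exists(
        engine: AsyncEngine, project_uuid: uuid.UUID
    ) -> None:
        repo = ProjectsRepo(engine)
        if not await repo.exists(project_uuid):
            msg = f"project {project_uuid} not found"
            raise ValueError(msg)
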
diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py b/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py
index 6fc72990b30e..4bb6855b0bff 100644
--- a/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py
+++ b/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py
@@ -1,21 +1,21 @@
import datetime
import uuid
from dataclasses import dataclass
-from typing import Any
+from typing import Annotated, Any
import asyncpg.exceptions # type: ignore[import-untyped]
-import sqlalchemy
import sqlalchemy.exc
from common_library.async_tools import maybe_await
+from common_library.basic_types import DEFAULT_FACTORY
from common_library.errors_classes import OsparcErrorMixin
from pydantic import BaseModel, ConfigDict, Field
-from simcore_postgres_database.utils_aiosqlalchemy import map_db_exception
from sqlalchemy.dialects.postgresql import insert as pg_insert
from ._protocols import DBConnection
from .aiopg_errors import ForeignKeyViolation, UniqueViolation
from .models.projects_node_to_pricing_unit import projects_node_to_pricing_unit
from .models.projects_nodes import projects_nodes
+from .utils_aiosqlalchemy import map_db_exception
#
@@ -47,7 +47,9 @@ class ProjectNodesDuplicateNodeError(BaseProjectNodesError):
class ProjectNodeCreate(BaseModel):
node_id: uuid.UUID
- required_resources: dict[str, Any] = Field(default_factory=dict)
+ required_resources: Annotated[dict[str, Any], Field(default_factory=dict)] = (
+ DEFAULT_FACTORY
+ )
key: str
version: str
label: str
@@ -56,6 +58,7 @@ class ProjectNodeCreate(BaseModel):
input_access: dict[str, Any] | None = None
input_nodes: list[str] | None = None
inputs: dict[str, Any] | None = None
+ inputs_required: list[str] | None = None
inputs_units: dict[str, Any] | None = None
output_nodes: list[str] | None = None
outputs: dict[str, Any] | None = None
@@ -100,17 +103,18 @@ async def add(
"""
if not nodes:
return []
+
+ values = [
+ {
+ "project_uuid": f"{self.project_uuid}",
+ **node.model_dump(mode="json"),
+ }
+ for node in nodes
+ ]
+
insert_stmt = (
projects_nodes.insert()
- .values(
- [
- {
- "project_uuid": f"{self.project_uuid}",
- **node.model_dump(exclude_unset=True, mode="json"),
- }
- for node in nodes
- ]
- )
+ .values(values)
.returning(
*[
c
@@ -126,14 +130,17 @@ async def add(
rows = await maybe_await(result.fetchall())
assert isinstance(rows, list) # nosec
return [ProjectNode.model_validate(r) for r in rows]
+
except ForeignKeyViolation as exc:
# this happens when the project does not exist, as we first check the node exists
raise ProjectNodesProjectNotFoundError(
project_uuid=self.project_uuid
) from exc
+
except UniqueViolation as exc:
# this happens if the node already exists on creation
raise ProjectNodesDuplicateNodeError from exc
+
except sqlalchemy.exc.IntegrityError as exc:
raise map_db_exception(
exc,
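
Because the insert now serializes nodes with a plain model_dump(mode="json") instead of exclude_unset, model defaults such as required_resources={} and the new inputs_required=None are always part of the INSERT values. A small sketch, assuming node_id/key/version/label are the only required fields; the service key, version and label values are illustrative.

    from uuid import uuid4

    from simcore_postgres_database.utils_projects_nodes import ProjectNodeCreate

    node = ProjectNodeCreate(
        node_id=uuid4(),
        key="simcore/services/comp/itis/sleeper",
        version="2.2.1",
        label="sleeper",
    )
    values = node.model_dump(mode="json")
    # defaults are included, so the DB row gets explicit values instead of
    # relying on column defaults
    assert values["required_resources"] == {}
    assert values["inputs_required"] is None
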
diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_users.py b/packages/postgres-database/src/simcore_postgres_database/utils_users.py
index 587f90ee504b..baf0b0e16cb2 100644
--- a/packages/postgres-database/src/simcore_postgres_database/utils_users.py
+++ b/packages/postgres-database/src/simcore_postgres_database/utils_users.py
@@ -5,17 +5,21 @@
import re
import secrets
import string
+from dataclasses import dataclass, fields
from datetime import datetime
from typing import Any, Final
import sqlalchemy as sa
-from common_library.async_tools import maybe_await
from sqlalchemy import Column
+from sqlalchemy.engine.result import Row
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.ext.asyncio.engine import AsyncConnection, AsyncEngine
+from sqlalchemy.sql import Select
-from ._protocols import DBConnection
-from .aiopg_errors import UniqueViolation
from .models.users import UserRole, UserStatus, users
from .models.users_details import users_pre_registration_details
+from .models.users_secrets import users_secrets
+from .utils_repos import pass_or_acquire_connection, transaction_context
class BaseUserRepoError(Exception):
@@ -52,74 +56,126 @@ def generate_alternative_username(username: str) -> str:
return f"{username}_{_generate_random_chars()}"
+@dataclass(frozen=True)
+class UserRow:
+ id: int
+ name: str
+ email: str
+ role: UserRole
+ status: UserStatus
+ first_name: str | None = None
+ last_name: str | None = None
+ phone: str | None = None
+
+ @classmethod
+ def from_row(cls, row: Row) -> "UserRow":
+ return cls(**{f.name: getattr(row, f.name) for f in fields(cls)})
+
+
class UsersRepo:
- @staticmethod
+ _user_columns = (
+ users.c.id,
+ users.c.name,
+ users.c.email,
+ users.c.role,
+ users.c.status,
+ users.c.first_name,
+ users.c.last_name,
+ users.c.phone,
+ )
+
+ def __init__(self, engine: AsyncEngine):
+ self._engine = engine
+
+ async def _get_scalar_or_raise(
+ self,
+ query: Select,
+ connection: AsyncConnection | None = None,
+ ) -> Any:
+ """Execute a scalar query and raise UserNotFoundInRepoError if no value found."""
+ async with pass_or_acquire_connection(self._engine, connection) as conn:
+ value = await conn.scalar(query)
+ if value is not None:
+ return value
+ raise UserNotFoundInRepoError
+
async def new_user(
- conn: DBConnection,
+ self,
+ connection: AsyncConnection | None = None,
+ *,
email: str,
password_hash: str,
status: UserStatus,
expires_at: datetime | None,
- ) -> Any:
- data: dict[str, Any] = {
+ role: UserRole = UserRole.USER,
+ ) -> UserRow:
+ user_data: dict[str, Any] = {
"name": _generate_username_from_email(email),
"email": email,
- "password_hash": password_hash,
"status": status,
- "role": UserRole.USER,
+ "role": role,
"expires_at": expires_at,
}
user_id = None
while user_id is None:
try:
- user_id = await conn.scalar(
- users.insert().values(**data).returning(users.c.id)
- )
- except UniqueViolation:
- data["name"] = generate_alternative_username(data["name"])
-
- result = await conn.execute(
- sa.select(
- users.c.id,
- users.c.name,
- users.c.email,
- users.c.role,
- users.c.status,
- ).where(users.c.id == user_id)
- )
- return await maybe_await(result.first())
+ async with transaction_context(self._engine, connection) as conn:
+ # Insert user record
+ user_id = await conn.scalar(
+ users.insert().values(**user_data).returning(users.c.id)
+ )
+
+ # Insert password hash into users_secrets table
+ await conn.execute(
+ users_secrets.insert().values(
+ user_id=user_id,
+ password_hash=password_hash,
+ )
+ )
+ except IntegrityError:
+ user_data["name"] = generate_alternative_username(user_data["name"])
+ user_id = None # Reset to retry with new username
+
+ async with pass_or_acquire_connection(self._engine, connection) as conn:
+ result = await conn.execute(
+ sa.select(*self._user_columns).where(users.c.id == user_id)
+ )
+ return UserRow.from_row(result.one())
- @staticmethod
async def link_and_update_user_from_pre_registration(
- conn: DBConnection,
+ self,
+ connection: AsyncConnection | None = None,
*,
new_user_id: int,
new_user_email: str,
- update_user: bool = True,
) -> None:
"""After a user is created, it can be associated with information provided during invitation
- WARNING: Use ONLY upon new user creation. It might override user_details.user_id, users.first_name, users.last_name etc if already applied
- or changes happen in users table
+ Links ALL pre-registrations for the given email to the user, regardless of product_name.
+
+ WARNING: Use ONLY upon new user creation. It might override user_details.user_id,
+ users.first_name, users.last_name, etc. if already applied, or if changes happen in the users table
"""
assert new_user_email # nosec
assert new_user_id > 0 # nosec
- # link both tables first
- result = await conn.execute(
- users_pre_registration_details.update()
- .where(users_pre_registration_details.c.pre_email == new_user_email)
- .values(user_id=new_user_id)
- )
+ async with transaction_context(self._engine, connection) as conn:
+ # Link ALL pre-registrations for this email to the user
+ result = await conn.execute(
+ users_pre_registration_details.update()
+ .where(users_pre_registration_details.c.pre_email == new_user_email)
+ .values(user_id=new_user_id)
+ )
- if update_user:
# COPIES some pre-registration details to the users table
pre_columns = (
users_pre_registration_details.c.pre_first_name,
users_pre_registration_details.c.pre_last_name,
- # NOTE: pre_phone is not copied since it has to be validated. Otherwise, if
- # phone is wrong, currently user won't be able to login!
+ # NOTE: pre_phone is NOT copied since it has to be validated.
+ # It remains in the pre-registration details for information purposes and
+ # might be surfaced as a hint to the front end in the future.
+ # Otherwise, if the phone is wrong, the user currently cannot log in!
)
assert {c.name for c in pre_columns} == { # nosec
@@ -133,103 +189,177 @@ async def link_and_update_user_from_pre_registration(
and c.name.startswith("pre_")
}, "Different pre-cols detected. This code might need an update update"
+ # Get the most recent pre-registration data to copy to users table
result = await conn.execute(
- sa.select(*pre_columns).where(
- users_pre_registration_details.c.pre_email == new_user_email
- )
+ sa.select(*pre_columns)
+ .where(users_pre_registration_details.c.pre_email == new_user_email)
+ .order_by(users_pre_registration_details.c.created.desc())
+ .limit(1)
)
- if pre_registration_details_data := result.first():
- # NOTE: could have many products! which to use?
+ if pre_registration_details_data := result.one_or_none():
await conn.execute(
users.update()
.where(users.c.id == new_user_id)
.values(
- first_name=pre_registration_details_data.pre_first_name, # type: ignore[union-attr]
- last_name=pre_registration_details_data.pre_last_name, # type: ignore[union-attr]
+ first_name=pre_registration_details_data.pre_first_name,
+ last_name=pre_registration_details_data.pre_last_name,
)
)
- @staticmethod
- def get_billing_details_query(user_id: int):
- return (
- sa.select(
- users.c.first_name,
- users.c.last_name,
- users_pre_registration_details.c.institution,
- users_pre_registration_details.c.address,
- users_pre_registration_details.c.city,
- users_pre_registration_details.c.state,
- users_pre_registration_details.c.country,
- users_pre_registration_details.c.postal_code,
- users.c.phone,
- )
- .select_from(
- users.join(
- users_pre_registration_details,
- users.c.id == users_pre_registration_details.c.user_id,
- )
- )
- .where(users.c.id == user_id)
+ async def get_role(
+ self, connection: AsyncConnection | None = None, *, user_id: int
+ ) -> UserRole:
+ value = await self._get_scalar_or_raise(
+ sa.select(users.c.role).where(users.c.id == user_id),
+ connection=connection,
)
-
- @staticmethod
- async def get_billing_details(conn: DBConnection, user_id: int) -> Any | None:
- result = await conn.execute(
- UsersRepo.get_billing_details_query(user_id=user_id)
+ assert isinstance(value, UserRole) # nosec
+ return UserRole(value)
+
+ async def get_email(
+ self, connection: AsyncConnection | None = None, *, user_id: int
+ ) -> str:
+ value = await self._get_scalar_or_raise(
+ sa.select(users.c.email).where(users.c.id == user_id),
+ connection=connection,
)
- return await maybe_await(result.fetchone())
+ assert isinstance(value, str) # nosec
+ return value
- @staticmethod
- async def get_role(conn: DBConnection, user_id: int) -> UserRole:
- value: UserRole | None = await conn.scalar(
- sa.select(users.c.role).where(users.c.id == user_id)
+ async def get_active_user_email(
+ self, connection: AsyncConnection | None = None, *, user_id: int
+ ) -> str:
+ value = await self._get_scalar_or_raise(
+ sa.select(users.c.email).where(
+ (users.c.status == UserStatus.ACTIVE) & (users.c.id == user_id)
+ ),
+ connection=connection,
)
- if value:
- assert isinstance(value, UserRole) # nosec
- return UserRole(value)
+ assert isinstance(value, str) # nosec
+ return value
+
+ async def get_password_hash(
+ self, connection: AsyncConnection | None = None, *, user_id: int
+ ) -> str:
+ value = await self._get_scalar_or_raise(
+ sa.select(users_secrets.c.password_hash).where(
+ users_secrets.c.user_id == user_id
+ ),
+ connection=connection,
+ )
+ assert isinstance(value, str) # nosec
+ return value
- raise UserNotFoundInRepoError
+ async def get_user_by_email_or_none(
+ self, connection: AsyncConnection | None = None, *, email: str
+ ) -> UserRow | None:
+ async with pass_or_acquire_connection(self._engine, connection) as conn:
+ result = await conn.execute(
+ sa.select(*self._user_columns).where(users.c.email == email.lower())
+ )
+ row = result.one_or_none()
+ return UserRow.from_row(row) if row else None
- @staticmethod
- async def get_email(conn: DBConnection, user_id: int) -> str:
- value: str | None = await conn.scalar(
- sa.select(users.c.email).where(users.c.id == user_id)
- )
- if value:
- assert isinstance(value, str) # nosec
- return value
+ async def get_user_by_id_or_none(
+ self, connection: AsyncConnection | None = None, *, user_id: int
+ ) -> UserRow | None:
+ async with pass_or_acquire_connection(self._engine, connection) as conn:
+ result = await conn.execute(
+ sa.select(*self._user_columns).where(users.c.id == user_id)
+ )
+ row = result.one_or_none()
+ return UserRow.from_row(row) if row else None
- raise UserNotFoundInRepoError
+ async def update_user_phone(
+ self, connection: AsyncConnection | None = None, *, user_id: int, phone: str
+ ) -> None:
+ async with transaction_context(self._engine, connection) as conn:
+ await conn.execute(
+ users.update().where(users.c.id == user_id).values(phone=phone)
+ )
- @staticmethod
- async def get_active_user_email(conn: DBConnection, user_id: int) -> str:
- value: str | None = await conn.scalar(
- sa.select(users.c.email).where(
- (users.c.status == UserStatus.ACTIVE) & (users.c.id == user_id)
+ async def update_user_password_hash(
+ self,
+ connection: AsyncConnection | None = None,
+ *,
+ user_id: int,
+ password_hash: str,
+ ) -> None:
+ async with transaction_context(self._engine, connection) as conn:
+ await self.get_password_hash(
+ connection=conn, user_id=user_id
+ ) # ensure user exists
+ await conn.execute(
+ users_secrets.update()
+ .where(users_secrets.c.user_id == user_id)
+ .values(password_hash=password_hash)
)
- )
- if value is not None:
- assert isinstance(value, str) # nosec
- return value
- raise UserNotFoundInRepoError
+ async def is_email_used(
+ self, connection: AsyncConnection | None = None, *, email: str
+ ) -> bool:
- @staticmethod
- async def is_email_used(conn: DBConnection, email: str) -> bool:
- email = email.lower()
+ async with pass_or_acquire_connection(self._engine, connection) as conn:
- registered = await conn.scalar(
- sa.select(users.c.id).where(users.c.email == email)
- )
- if registered:
- return True
+ email = email.lower()
- pre_registered = await conn.scalar(
- sa.select(users_pre_registration_details.c.user_id).where(
- users_pre_registration_details.c.pre_email == email
+ registered = await conn.scalar(
+ sa.select(users.c.id).where(users.c.email == email)
)
- )
- return bool(pre_registered)
+ if registered:
+ return True
+
+ # Check if email exists in pre-registration, regardless of user_id status
+ pre_registered = await conn.scalar(
+ sa.select(users_pre_registration_details.c.id).where(
+ users_pre_registration_details.c.pre_email == email
+ )
+ )
+ return bool(pre_registered)
+
+ async def get_billing_details(
+ self,
+ connection: AsyncConnection | None = None,
+ *,
+ product_name: str,
+ user_id: int,
+ ) -> Any | None:
+ """Returns billing details for the specified user and product.
+
+ - If the user's pre-registration has no product associated, returns details from that registration.
+ - Returns None if no billing details are found.
+ """
+ async with pass_or_acquire_connection(self._engine, connection) as conn:
+ result = await conn.execute(
+ sa.select(
+ users.c.first_name,
+ users.c.last_name,
+ users_pre_registration_details.c.institution,
+ users_pre_registration_details.c.address,
+ users_pre_registration_details.c.city,
+ users_pre_registration_details.c.state,
+ users_pre_registration_details.c.country,
+ users_pre_registration_details.c.postal_code,
+ users.c.phone,
+ )
+ .select_from(
+ users.join(
+ users_pre_registration_details,
+ users.c.id == users_pre_registration_details.c.user_id,
+ )
+ )
+ .where(
+ (users.c.id == user_id)
+ & (
+ (users_pre_registration_details.c.product_name == product_name)
+ | (users_pre_registration_details.c.product_name.is_(None))
+ )
+ )
+ .order_by(users_pre_registration_details.c.created.desc())
+ .limit(1)
+ # NOTE: might want to copy billing details to users table??
+ )
+ return result.one_or_none()
#
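
A usage sketch of the engine-bound UsersRepo API introduced above; the wrapper function is hypothetical and only illustrates the new call signatures.

    from sqlalchemy.ext.asyncio import AsyncEngine

    from simcore_postgres_database.models.users import UserStatus
    from simcore_postgres_database.utils_users import UserRow, UsersRepo

    async def register_user(
        engine: AsyncEngine, *, email: str, password_hash: str
    ) -> UserRow:
        repo = UsersRepo(engine)
        # creates the user row and stores the hash in users_secrets
        user = await repo.new_user(
            email=email,
            password_hash=password_hash,
            status=UserStatus.CONFIRMATION_PENDING,
            expires_at=None,
        )
        # links ALL pre-registrations for this email to the new user
        await repo.link_and_update_user_from_pre_registration(
            new_user_id=user.id, new_user_email=user.email
        )
        return user
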
diff --git a/packages/postgres-database/tests/conftest.py b/packages/postgres-database/tests/conftest.py
index fdac39729b6b..9016cff0d322 100644
--- a/packages/postgres-database/tests/conftest.py
+++ b/packages/postgres-database/tests/conftest.py
@@ -18,11 +18,10 @@
from aiopg.sa.engine import Engine
from aiopg.sa.result import ResultProxy, RowProxy
from faker import Faker
-from pytest_simcore.helpers import postgres_tools
+from pytest_simcore.helpers import postgres_tools, postgres_users
from pytest_simcore.helpers.faker_factories import (
random_group,
random_project,
- random_user,
)
from simcore_postgres_database.models.products import products
from simcore_postgres_database.models.projects import projects
@@ -82,7 +81,7 @@ def sync_engine(postgres_service: str) -> Iterable[sqlalchemy.engine.Engine]:
def _make_asyncpg_engine(postgres_service: str) -> Callable[[bool], AsyncEngine]:
# NOTE: the caller is responsible for `await engine.dispose()`
dsn = postgres_service.replace("postgresql://", "postgresql+asyncpg://")
- minsize = 1
+ minsize = 2
maxsize = 50
def _(echo: bool):
@@ -268,10 +267,11 @@ def create_fake_user(sync_engine: sqlalchemy.engine.Engine) -> Iterator[Callable
async def _creator(
conn: SAConnection, group: RowProxy | None = None, **overrides
) -> RowProxy:
- user_id = await conn.scalar(
- users.insert().values(**random_user(**overrides)).returning(users.c.id)
+
+ user_id = await postgres_users.insert_user_and_secrets(
+ conn,
+ **overrides,
)
- assert user_id is not None
# This is done in two executions instead of one (e.g. returning(literal_column("*")) )
# to allow triggering function in db that
diff --git a/packages/postgres-database/tests/test_models_api_keys.py b/packages/postgres-database/tests/test_models_api_keys.py
index d8863f9ac748..d4852d199d6c 100644
--- a/packages/postgres-database/tests/test_models_api_keys.py
+++ b/packages/postgres-database/tests/test_models_api_keys.py
@@ -9,10 +9,10 @@
import sqlalchemy as sa
from aiopg.sa.connection import SAConnection
from aiopg.sa.result import RowProxy
+from pytest_simcore.helpers import postgres_users
from pytest_simcore.helpers.faker_factories import (
random_api_auth,
random_product,
- random_user,
)
from simcore_postgres_database.models.api_keys import api_keys
from simcore_postgres_database.models.products import products
@@ -21,13 +21,12 @@
@pytest.fixture
async def user_id(connection: SAConnection) -> AsyncIterable[int]:
- uid = await connection.scalar(
- users.insert().values(random_user()).returning(users.c.id)
- )
- assert uid
- yield uid
+ user_id = await postgres_users.insert_user_and_secrets(connection)
+
+ assert user_id
+ yield user_id
- await connection.execute(users.delete().where(users.c.id == uid))
+ await connection.execute(users.delete().where(users.c.id == user_id))
@pytest.fixture
@@ -84,7 +83,10 @@ async def test_get_session_identity_for_api_server(
# authorize a session
#
result = await connection.execute(
- sa.select(api_keys.c.user_id, api_keys.c.product_name,).where(
+ sa.select(
+ api_keys.c.user_id,
+ api_keys.c.product_name,
+ ).where(
(api_keys.c.api_key == session_auth.api_key)
& (api_keys.c.api_secret == session_auth.api_secret),
)
diff --git a/packages/postgres-database/tests/test_models_groups.py b/packages/postgres-database/tests/test_models_groups.py
index 6ce8a77c4cc3..a3c5ad154a30 100644
--- a/packages/postgres-database/tests/test_models_groups.py
+++ b/packages/postgres-database/tests/test_models_groups.py
@@ -10,7 +10,7 @@
from aiopg.sa.connection import SAConnection
from aiopg.sa.result import ResultProxy, RowProxy
from psycopg2.errors import ForeignKeyViolation, RaiseException, UniqueViolation
-from pytest_simcore.helpers.faker_factories import random_user
+from pytest_simcore.helpers import postgres_users
from simcore_postgres_database.webserver_models import (
GroupType,
groups,
@@ -64,9 +64,8 @@ async def test_all_group(
await connection.execute(groups.delete().where(groups.c.gid == all_group_gid))
# check adding a user is automatically added to the all group
- result = await connection.execute(
- users.insert().values(**random_user()).returning(literal_column("*"))
- )
+ user_id = await postgres_users.insert_user_and_secrets(connection)
+ result = await connection.execute(users.select().where(users.c.id == user_id))
user: RowProxy = await result.fetchone()
result = await connection.execute(
@@ -98,14 +97,10 @@ async def test_all_group(
async def test_own_group(
connection: SAConnection,
):
- result = await connection.execute(
- users.insert().values(**random_user()).returning(literal_column("*"))
- )
- user: RowProxy = await result.fetchone()
- assert not user.primary_gid
+ user_id = await postgres_users.insert_user_and_secrets(connection)
# now fetch the same user that shall have a primary group set by the db
- result = await connection.execute(users.select().where(users.c.id == user.id))
+ result = await connection.execute(users.select().where(users.c.id == user_id))
user: RowProxy = await result.fetchone()
assert user.primary_gid
diff --git a/packages/postgres-database/tests/test_models_projects_to_jobs.py b/packages/postgres-database/tests/test_models_projects_to_jobs.py
index d6f2879694d4..5b6436023d28 100644
--- a/packages/postgres-database/tests/test_models_projects_to_jobs.py
+++ b/packages/postgres-database/tests/test_models_projects_to_jobs.py
@@ -3,6 +3,7 @@
# pylint: disable=unused-variable
# pylint: disable=too-many-arguments
+import json
from collections.abc import Iterator
import pytest
@@ -10,12 +11,12 @@
import sqlalchemy as sa
import sqlalchemy.engine
import sqlalchemy.exc
+from common_library.users_enums import UserRole
from faker import Faker
from pytest_simcore.helpers import postgres_tools
from pytest_simcore.helpers.faker_factories import random_project, random_user
-from simcore_postgres_database.models.projects import projects
+from simcore_postgres_database.models.projects import ProjectType, projects
from simcore_postgres_database.models.projects_to_jobs import projects_to_jobs
-from simcore_postgres_database.models.users import users
@pytest.fixture
@@ -66,9 +67,24 @@ def test_populate_projects_to_jobs_during_migration(
# INSERT data (emulates data in-place)
user_data = random_user(
- faker, name="test_populate_projects_to_jobs_during_migration"
+ faker,
+ name="test_populate_projects_to_jobs_during_migration",
+ role=UserRole.USER.value,
)
- stmt = users.insert().values(**user_data).returning(users.c.id)
+ user_data["password_hash"] = (
+ "password_hash_was_still_here_at_this_migration_commit" # noqa: S105
+ )
+
+ columns = list(user_data.keys())
+ values_clause = ", ".join(f":{col}" for col in columns)
+ columns_clause = ", ".join(columns)
+ stmt = sa.text(
+ f"""
+ INSERT INTO users ({columns_clause})
+ VALUES ({values_clause})
+ RETURNING id
+ """ # noqa: S608
+ ).bindparams(**user_data)
result = conn.execute(stmt)
user_id = result.scalar()
@@ -82,7 +98,7 @@ def test_populate_projects_to_jobs_during_migration(
"Study associated to solver job:"
"""{
"id": "cd03450c-4c17-4c2c-85fd-0d951d7dcd5a",
- "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.2.1/jobs/cd03450c-4c2c-85fd-0d951d7dcd5a",
+ "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.2.1/jobs/cd03450c-4c17-4c2c-85fd-0d951d7dcd5a",
"inputs_checksum": "015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a",
"created_at": "2025-01-27T13:12:58.676564Z"
}
@@ -105,8 +121,37 @@ def test_populate_projects_to_jobs_during_migration(
prj_owner=user_id,
),
]
+
+ client_default_column_values = {
+ # NOTE: columns with `server_default values` must not be added here
+ "type": ProjectType.STANDARD.value,
+ "workbench": {},
+ "access_rights": {},
+ "published": False,
+ "hidden": False,
+ "workspace_id": None,
+ }
+
+ # NOTE: cannot use `projects` table directly here because it changes
+ # throughout time
for prj in projects_data:
- conn.execute(sa.insert(projects).values(prj))
+ for key, value in client_default_column_values.items():
+ prj.setdefault(key, value)
+
+ for key, value in prj.items():
+ if isinstance(value, dict):
+ prj[key] = json.dumps(value)
+
+ columns = list(prj.keys())
+ values_clause = ", ".join(f":{col}" for col in columns)
+ columns_clause = ", ".join(columns)
+ stmt = sa.text(
+ f"""
+ INSERT INTO projects ({columns_clause})
+ VALUES ({values_clause})
+ """ # noqa: S608
+ ).bindparams(**prj)
+ conn.execute(stmt)
# MIGRATE UPGRADE: this should populate
simcore_postgres_database.cli.upgrade.callback("head")
diff --git a/packages/postgres-database/tests/test_users.py b/packages/postgres-database/tests/test_users.py
index 8bfe2814ada1..038f9a53fa64 100644
--- a/packages/postgres-database/tests/test_users.py
+++ b/packages/postgres-database/tests/test_users.py
@@ -3,35 +3,41 @@
# pylint: disable=unused-argument
# pylint: disable=unused-variable
+from collections.abc import Iterator
from datetime import datetime, timedelta
import pytest
+import simcore_postgres_database.cli
import sqlalchemy as sa
-from aiopg.sa.connection import SAConnection
-from aiopg.sa.result import ResultProxy, RowProxy
+import sqlalchemy.engine
+import sqlalchemy.exc
from faker import Faker
+from pytest_simcore.helpers import postgres_tools
from pytest_simcore.helpers.faker_factories import random_user
-from simcore_postgres_database.aiopg_errors import (
- InvalidTextRepresentation,
- UniqueViolation,
-)
from simcore_postgres_database.models.users import UserRole, UserStatus, users
+from simcore_postgres_database.utils_repos import (
+ pass_or_acquire_connection,
+ transaction_context,
+)
from simcore_postgres_database.utils_users import (
UsersRepo,
_generate_username_from_email,
generate_alternative_username,
)
+from sqlalchemy.exc import DBAPIError, IntegrityError
+from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.sql import func
@pytest.fixture
-async def clean_users_db_table(connection: SAConnection):
+async def clean_users_db_table(asyncpg_engine: AsyncEngine):
yield
- await connection.execute(users.delete())
+ async with transaction_context(asyncpg_engine) as connection:
+ await connection.execute(users.delete())
async def test_user_status_as_pending(
- connection: SAConnection, faker: Faker, clean_users_db_table: None
+ asyncpg_engine: AsyncEngine, faker: Faker, clean_users_db_table: None
):
"""Checks a bug where the expression
@@ -51,10 +57,13 @@ async def test_user_status_as_pending(
# tests that the database never stores the word "PENDING"
data = random_user(faker, status="PENDING")
assert data["status"] == "PENDING"
- with pytest.raises(InvalidTextRepresentation) as err_info:
- await connection.execute(users.insert().values(data))
+ async with transaction_context(asyncpg_engine) as connection:
+ with pytest.raises(DBAPIError) as err_info:
+ await connection.execute(users.insert().values(data))
- assert 'invalid input value for enum userstatus: "PENDING"' in f"{err_info.value}"
+ assert (
+ 'invalid input value for enum userstatus: "PENDING"' in f"{err_info.value}"
+ )
@pytest.mark.parametrize(
@@ -66,27 +75,30 @@ async def test_user_status_as_pending(
)
async def test_user_status_inserted_as_enum_or_int(
status_value: UserStatus | str,
- connection: SAConnection,
+ asyncpg_engine: AsyncEngine,
faker: Faker,
clean_users_db_table: None,
):
# insert as `status_value`
data = random_user(faker, status=status_value)
assert data["status"] == status_value
- user_id = await connection.scalar(users.insert().values(data).returning(users.c.id))
- # get as UserStatus.CONFIRMATION_PENDING
- user = await (
- await connection.execute(users.select().where(users.c.id == user_id))
- ).first()
- assert user
+ async with transaction_context(asyncpg_engine) as connection:
+ user_id = await connection.scalar(
+ users.insert().values(data).returning(users.c.id)
+ )
- assert UserStatus(user.status) == UserStatus.CONFIRMATION_PENDING
- assert user.status == UserStatus.CONFIRMATION_PENDING
+ # get as UserStatus.CONFIRMATION_PENDING
+ result = await connection.execute(users.select().where(users.c.id == user_id))
+ user = result.one_or_none()
+ assert user
+
+ assert UserStatus(user.status) == UserStatus.CONFIRMATION_PENDING
+ assert user.status == UserStatus.CONFIRMATION_PENDING
async def test_unique_username(
- connection: SAConnection, faker: Faker, clean_users_db_table: None
+ asyncpg_engine: AsyncEngine, faker: Faker, clean_users_db_table: None
):
data = random_user(
faker,
@@ -96,33 +108,39 @@ async def test_unique_username(
first_name="Pedro",
last_name="Crespo Valero",
)
- user_id = await connection.scalar(users.insert().values(data).returning(users.c.id))
- user = await (
- await connection.execute(users.select().where(users.c.id == user_id))
- ).first()
- assert user
-
- assert user.id == user_id
- assert user.name == "pcrespov"
-
- # same name fails
- data["email"] = faker.email()
- with pytest.raises(UniqueViolation):
+ async with transaction_context(asyncpg_engine) as connection:
+ user_id = await connection.scalar(
+ users.insert().values(data).returning(users.c.id)
+ )
+ result = await connection.execute(users.select().where(users.c.id == user_id))
+ user = result.one_or_none()
+ assert user
+
+ assert user.id == user_id
+ assert user.name == "pcrespov"
+
+ async with transaction_context(asyncpg_engine) as connection:
+ # same name fails
+ data["email"] = faker.email()
+ with pytest.raises(IntegrityError):
+ await connection.scalar(users.insert().values(data).returning(users.c.id))
+
+ async with transaction_context(asyncpg_engine) as connection:
+ # generate new name
+ data["name"] = _generate_username_from_email(user.email)
+ data["email"] = faker.email()
await connection.scalar(users.insert().values(data).returning(users.c.id))
- # generate new name
- data["name"] = _generate_username_from_email(user.email)
- data["email"] = faker.email()
- await connection.scalar(users.insert().values(data).returning(users.c.id))
+ async with transaction_context(asyncpg_engine) as connection:
- # and another one
- data["name"] = generate_alternative_username(data["name"])
- data["email"] = faker.email()
- await connection.scalar(users.insert().values(data).returning(users.c.id))
+ # and another one
+ data["name"] = generate_alternative_username(data["name"])
+ data["email"] = faker.email()
+ await connection.scalar(users.insert().values(data).returning(users.c.id))
async def test_new_user(
- connection: SAConnection, faker: Faker, clean_users_db_table: None
+ asyncpg_engine: AsyncEngine, faker: Faker, clean_users_db_table: None
):
data = {
"email": faker.email(),
@@ -130,7 +148,8 @@ async def test_new_user(
"status": UserStatus.ACTIVE,
"expires_at": datetime.utcnow(),
}
- new_user = await UsersRepo.new_user(connection, **data)
+ repo = UsersRepo(asyncpg_engine)
+ new_user = await repo.new_user(**data)
assert new_user.email == data["email"]
assert new_user.status == data["status"]
@@ -140,51 +159,205 @@ async def test_new_user(
assert _generate_username_from_email(other_email) == new_user.name
other_data = {**data, "email": other_email}
- other_user = await UsersRepo.new_user(connection, **other_data)
+ other_user = await repo.new_user(**other_data)
assert other_user.email != new_user.email
assert other_user.name != new_user.name
- assert await UsersRepo.get_email(connection, other_user.id) == other_user.email
- assert await UsersRepo.get_role(connection, other_user.id) == other_user.role
- assert (
- await UsersRepo.get_active_user_email(connection, other_user.id)
- == other_user.email
- )
+ async with pass_or_acquire_connection(asyncpg_engine) as connection:
+ assert (
+ await repo.get_email(connection, user_id=other_user.id) == other_user.email
+ )
+ assert await repo.get_role(connection, user_id=other_user.id) == other_user.role
+ assert (
+ await repo.get_active_user_email(connection, user_id=other_user.id)
+ == other_user.email
+ )
-async def test_trial_accounts(connection: SAConnection, clean_users_db_table: None):
+async def test_trial_accounts(asyncpg_engine: AsyncEngine, clean_users_db_table: None):
EXPIRATION_INTERVAL = timedelta(minutes=5)
# creates trial user
client_now = datetime.utcnow()
- user_id: int | None = await connection.scalar(
- users.insert()
- .values(
- **random_user(
- status=UserStatus.ACTIVE,
- # Using some magic from sqlachemy ...
- expires_at=func.now() + EXPIRATION_INTERVAL,
+ async with transaction_context(asyncpg_engine) as connection:
+ user_id: int | None = await connection.scalar(
+ users.insert()
+ .values(
+ **random_user(
+ status=UserStatus.ACTIVE,
+ # Using some magic from sqlalchemy ...
+ expires_at=func.now() + EXPIRATION_INTERVAL,
+ )
)
+ .returning(users.c.id)
)
- .returning(users.c.id)
- )
- assert user_id
+ assert user_id
- # check expiration date
- result: ResultProxy = await connection.execute(
- sa.select(users.c.status, users.c.created_at, users.c.expires_at).where(
- users.c.id == user_id
+ # check expiration date
+ result = await connection.execute(
+ sa.select(users.c.status, users.c.created_at, users.c.expires_at).where(
+ users.c.id == user_id
+ )
)
+ row = result.one_or_none()
+ assert row
+ assert row.created_at - client_now < timedelta(
+ minutes=1
+ ), "Difference between server and client now should not differ much"
+ assert row.expires_at - row.created_at == EXPIRATION_INTERVAL
+ assert row.status == UserStatus.ACTIVE
+
+ # sets user as expired
+ await connection.execute(
+ users.update()
+ .values(status=UserStatus.EXPIRED)
+ .where(users.c.id == user_id)
+ )
+
+
+@pytest.fixture
+def sync_engine_with_migration(
+ sync_engine: sqlalchemy.engine.Engine, db_metadata: sa.MetaData
+) -> Iterator[sqlalchemy.engine.Engine]:
+ # EXTENDS sync_engine fixture to include cleanup and prepare migration
+
+ # cleanup tables
+ db_metadata.drop_all(sync_engine)
+
+ # prepare migration upgrade
+ assert simcore_postgres_database.cli.discover.callback
+ assert simcore_postgres_database.cli.upgrade.callback
+
+ dsn = sync_engine.url
+ simcore_postgres_database.cli.discover.callback(
+ user=dsn.username,
+ password=dsn.password,
+ host=dsn.host,
+ database=dsn.database,
+ port=dsn.port,
)
- row: RowProxy | None = await result.first()
- assert row
- assert row.created_at - client_now < timedelta(
- minutes=1
- ), "Difference between server and client now should not differ much"
- assert row.expires_at - row.created_at == EXPIRATION_INTERVAL
- assert row.status == UserStatus.ACTIVE
-
- # sets user as expired
- await connection.execute(
- users.update().values(status=UserStatus.EXPIRED).where(users.c.id == user_id)
- )
+
+ yield sync_engine
+
+ # cleanup tables
+ postgres_tools.force_drop_all_tables(sync_engine)
+
+
+def test_users_secrets_migration_upgrade_downgrade(
+ sync_engine_with_migration: sqlalchemy.engine.Engine, faker: Faker
+):
+ """Tests the migration script that moves password_hash from users to users_secrets table.
+
+ Tests the migration file:
+ packages/postgres-database/src/simcore_postgres_database/migration/versions/5679165336c8_new_users_secrets.py
+
+ revision = "5679165336c8"
+ down_revision = "61b98a60e934"
+
+
+ NOTE: all statements passed to conn.execute(...) must use sa.text(...) since, at this migration point, the
+ database schema might not match the current code models.
+ """
+ assert simcore_postgres_database.cli.discover.callback
+ assert simcore_postgres_database.cli.upgrade.callback
+ assert simcore_postgres_database.cli.downgrade.callback
+
+ # UPGRADE just one before 5679165336c8_new_users_secrets.py
+ simcore_postgres_database.cli.upgrade.callback("61b98a60e934")
+
+ with sync_engine_with_migration.connect() as conn:
+ # Ensure the users_secrets table does NOT exist yet
+ with pytest.raises(sqlalchemy.exc.ProgrammingError) as exc_info:
+ conn.execute(
+ sa.select(sa.func.count()).select_from(sa.table("users_secrets"))
+ ).scalar()
+ assert "psycopg2.errors.UndefinedTable" in f"{exc_info.value}"
+
+ # INSERT users with password hashes (emulates data in-place before migration)
+ users_data_with_hashed_password = [
+ {
+ **random_user(
+ faker,
+ name="user_with_password_1",
+ email="user1@example.com",
+ role=UserRole.USER.value,
+ status=UserStatus.ACTIVE,
+ ),
+ "password_hash": "hashed_password_1", # noqa: S106
+ },
+ {
+ **random_user(
+ faker,
+ name="user_with_password_2",
+ email="user2@example.com",
+ role=UserRole.USER.value,
+ status=UserStatus.ACTIVE,
+ ),
+ "password_hash": "hashed_password_2", # noqa: S106
+ },
+ ]
+
+ inserted_user_ids = []
+ for user_data in users_data_with_hashed_password:
+ columns = ", ".join(user_data.keys())
+ values_placeholders = ", ".join(f":{key}" for key in user_data)
+ user_id = conn.execute(
+ sa.text(
+ f"INSERT INTO users ({columns}) VALUES ({values_placeholders}) RETURNING id" # noqa: S608
+ ),
+ user_data,
+ ).scalar()
+ inserted_user_ids.append(user_id)
+
+ # Verify password hashes are in users table
+ result = conn.execute(
+ sa.text("SELECT id, password_hash FROM users WHERE id = ANY(:user_ids)"),
+ {"user_ids": inserted_user_ids},
+ ).fetchall()
+
+ password_hashes_before = {row.id: row.password_hash for row in result}
+ assert len(password_hashes_before) == 2
+ assert password_hashes_before[inserted_user_ids[0]] == "hashed_password_1"
+ assert password_hashes_before[inserted_user_ids[1]] == "hashed_password_2"
+
+ # MIGRATE UPGRADE: this should move password hashes to users_secrets
+ # packages/postgres-database/src/simcore_postgres_database/migration/versions/5679165336c8_new_users_secrets.py
+ simcore_postgres_database.cli.upgrade.callback("5679165336c8")
+
+ with sync_engine_with_migration.connect() as conn:
+ # Verify users_secrets table exists and contains the password hashes
+ result = conn.execute(
+ sa.text("SELECT user_id, password_hash FROM users_secrets ORDER BY user_id")
+ ).fetchall()
+
+ # Only users with non-null password hashes should be in users_secrets
+ assert len(result) == 2
+ secrets_data = {row.user_id: row.password_hash for row in result}
+ assert secrets_data[inserted_user_ids[0]] == "hashed_password_1"
+ assert secrets_data[inserted_user_ids[1]] == "hashed_password_2"
+
+ # Verify password_hash column is removed from users table
+ with pytest.raises(sqlalchemy.exc.ProgrammingError) as exc_info:
+ conn.execute(sa.text("SELECT password_hash FROM users"))
+ assert "psycopg2.errors.UndefinedColumn" in f"{exc_info.value}"
+
+ # MIGRATE DOWNGRADE: this should move password hashes back to users
+ simcore_postgres_database.cli.downgrade.callback("61b98a60e934")
+
+ with sync_engine_with_migration.connect() as conn:
+ # Verify users_secrets table no longer exists
+ with pytest.raises(sqlalchemy.exc.ProgrammingError) as exc_info:
+ conn.execute(sa.text("SELECT COUNT(*) FROM users_secrets")).scalar()
+ assert "psycopg2.errors.UndefinedTable" in f"{exc_info.value}"
+
+ # Verify password hashes are back in users table
+ result = conn.execute(
+ sa.text("SELECT id, password_hash FROM users WHERE id = ANY(:user_ids)"),
+ {"user_ids": inserted_user_ids},
+ ).fetchall()
+
+ password_hashes_after = {row.id: row.password_hash for row in result}
+ assert len(password_hashes_after) == 2
+ assert password_hashes_after[inserted_user_ids[0]] == "hashed_password_1"
+ assert password_hashes_after[inserted_user_ids[1]] == "hashed_password_2"
diff --git a/packages/postgres-database/tests/test_users_details.py b/packages/postgres-database/tests/test_users_details.py
index e4b6bfeb70fc..8c0f84d1b33e 100644
--- a/packages/postgres-database/tests/test_users_details.py
+++ b/packages/postgres-database/tests/test_users_details.py
@@ -257,15 +257,18 @@ async def test_create_and_link_user_from_pre_registration(
# Invitation link is clicked and the user is created and linked to the pre-registration
async with transaction_context(asyncpg_engine) as connection:
# user gets created
- new_user = await UsersRepo.new_user(
+ repo = UsersRepo(asyncpg_engine)
+ new_user = await repo.new_user(
connection,
email=pre_email,
password_hash="123456", # noqa: S106
status=UserStatus.ACTIVE,
expires_at=None,
)
- await UsersRepo.link_and_update_user_from_pre_registration(
- connection, new_user_id=new_user.id, new_user_email=new_user.email
+ await repo.link_and_update_user_from_pre_registration(
+ connection,
+ new_user_id=new_user.id,
+ new_user_email=new_user.email,
)
# Verify the user was created and linked
@@ -285,29 +288,32 @@ async def test_create_and_link_user_from_pre_registration(
async def test_get_billing_details_from_pre_registration(
asyncpg_engine: AsyncEngine,
pre_registered_user: tuple[str, dict[str, Any]],
+ product: dict[str, Any],
):
"""Test that billing details can be retrieved from pre-registration data."""
pre_email, fake_pre_registration_data = pre_registered_user
# Create the user
async with transaction_context(asyncpg_engine) as connection:
- new_user = await UsersRepo.new_user(
+ repo = UsersRepo(asyncpg_engine)
+ new_user = await repo.new_user(
connection,
email=pre_email,
password_hash="123456", # noqa: S106
status=UserStatus.ACTIVE,
expires_at=None,
)
- await UsersRepo.link_and_update_user_from_pre_registration(
- connection, new_user_id=new_user.id, new_user_email=new_user.email
+ await repo.link_and_update_user_from_pre_registration(
+ connection,
+ new_user_id=new_user.id,
+ new_user_email=new_user.email,
)
# Get billing details
- async with pass_or_acquire_connection(asyncpg_engine) as connection:
- invoice_data = await UsersRepo.get_billing_details(
- connection, user_id=new_user.id
- )
- assert invoice_data is not None
+ invoice_data = await repo.get_billing_details(
+ user_id=new_user.id, product_name=product["name"]
+ )
+ assert invoice_data is not None
# Test UserAddress model conversion
user_address = UserAddress.create_from_db(invoice_data)
@@ -331,15 +337,18 @@ async def test_update_user_from_pre_registration(
# Create the user and link to pre-registration
async with transaction_context(asyncpg_engine) as connection:
- new_user = await UsersRepo.new_user(
+ repo = UsersRepo(asyncpg_engine)
+ new_user = await repo.new_user(
connection,
email=pre_email,
password_hash="123456", # noqa: S106
status=UserStatus.ACTIVE,
expires_at=None,
)
- await UsersRepo.link_and_update_user_from_pre_registration(
- connection, new_user_id=new_user.id, new_user_email=new_user.email
+ await repo.link_and_update_user_from_pre_registration(
+ connection,
+ new_user_id=new_user.id,
+ new_user_email=new_user.email,
)
# Update the user manually
@@ -358,8 +367,11 @@ async def test_update_user_from_pre_registration(
# Re-link the user to pre-registration, which should override manual updates
async with transaction_context(asyncpg_engine) as connection:
- await UsersRepo.link_and_update_user_from_pre_registration(
- connection, new_user_id=new_user.id, new_user_email=new_user.email
+ repo = UsersRepo(asyncpg_engine)
+ await repo.link_and_update_user_from_pre_registration(
+ connection,
+ new_user_id=new_user.id,
+ new_user_email=new_user.email,
)
result = await connection.execute(
@@ -487,20 +499,24 @@ async def test_user_preregisters_for_multiple_products_with_different_outcomes(
assert registrations[1].account_request_reviewed_by == product_owner_user["id"]
assert registrations[1].account_request_reviewed_at is not None
- # 3.Now create a user account with the approved pre-registration
+ # 3. Now create a user account and link ALL pre-registrations for this email
async with transaction_context(asyncpg_engine) as connection:
- new_user = await UsersRepo.new_user(
+ repo = UsersRepo(asyncpg_engine)
+ new_user = await repo.new_user(
connection,
email=user_email,
password_hash="123456", # noqa: S106
status=UserStatus.ACTIVE,
expires_at=None,
)
- await UsersRepo.link_and_update_user_from_pre_registration(
- connection, new_user_id=new_user.id, new_user_email=new_user.email
+ # Link all pre-registrations for this email, regardless of approval status or product
+ await repo.link_and_update_user_from_pre_registration(
+ connection,
+ new_user_id=new_user.id,
+ new_user_email=new_user.email,
)
- # Verify both pre-registrations are linked to the new user
+ # Verify ALL pre-registrations for this email are linked to the user
async with pass_or_acquire_connection(asyncpg_engine) as connection:
result = await connection.execute(
sa.select(
@@ -515,5 +531,17 @@ async def test_user_preregisters_for_multiple_products_with_different_outcomes(
registrations = result.fetchall()
assert len(registrations) == 2
- # Both registrations should be linked to the same user, regardless of approval status
- assert all(reg.user_id == new_user.id for reg in registrations)
+ # Both pre-registrations should be linked to the user, regardless of approval status
+ product1_reg = next(
+ reg for reg in registrations if reg.product_name == product1["name"]
+ )
+ product2_reg = next(
+ reg for reg in registrations if reg.product_name == product2["name"]
+ )
+
+ assert product1_reg.user_id == new_user.id # Linked
+ assert product2_reg.user_id == new_user.id # Linked
+
+ # Verify approval status is preserved independently of linking
+ assert product1_reg.account_request_status == AccountRequestStatus.APPROVED
+ assert product2_reg.account_request_status == AccountRequestStatus.REJECTED
diff --git a/packages/postgres-database/tests/test_utils_comp_runs.py b/packages/postgres-database/tests/test_utils_comp_runs.py
index dc18abd8395e..97f33bbdc18d 100644
--- a/packages/postgres-database/tests/test_utils_comp_runs.py
+++ b/packages/postgres-database/tests/test_utils_comp_runs.py
@@ -4,13 +4,14 @@
import pytest
import sqlalchemy as sa
+from faker import Faker
from simcore_postgres_database.models.comp_runs import comp_runs
from simcore_postgres_database.utils_comp_runs import get_latest_run_id_for_project
from sqlalchemy.ext.asyncio import AsyncEngine
@pytest.fixture
-async def sample_comp_runs(asyncpg_engine: AsyncEngine):
+async def sample_comp_runs(asyncpg_engine: AsyncEngine, faker: Faker):
async with asyncpg_engine.begin() as conn:
await conn.execute(sa.text("SET session_replication_role = replica;"))
await conn.execute(sa.delete(comp_runs))
@@ -37,6 +38,7 @@ async def sample_comp_runs(asyncpg_engine: AsyncEngine):
"metadata": None,
"use_on_demand_clusters": False,
"dag_adjacency_list": {},
+ "collection_run_id": faker.uuid4(),
},
{
"run_id": 2,
@@ -58,6 +60,7 @@ async def sample_comp_runs(asyncpg_engine: AsyncEngine):
"metadata": None,
"use_on_demand_clusters": False,
"dag_adjacency_list": {},
+ "collection_run_id": faker.uuid4(),
},
{
"run_id": 3,
@@ -79,6 +82,7 @@ async def sample_comp_runs(asyncpg_engine: AsyncEngine):
"metadata": None,
"use_on_demand_clusters": False,
"dag_adjacency_list": {},
+ "collection_run_id": faker.uuid4(),
},
{
"run_id": 4,
@@ -100,6 +104,7 @@ async def sample_comp_runs(asyncpg_engine: AsyncEngine):
"metadata": None,
"use_on_demand_clusters": False,
"dag_adjacency_list": {},
+ "collection_run_id": faker.uuid4(),
},
],
)
diff --git a/packages/postgres-database/tests/test_utils_users.py b/packages/postgres-database/tests/test_utils_users.py
index d4a7039f1f3e..0f61ba27ed9d 100644
--- a/packages/postgres-database/tests/test_utils_users.py
+++ b/packages/postgres-database/tests/test_utils_users.py
@@ -8,14 +8,13 @@
from typing import Any
import pytest
+import sqlalchemy as sa
from faker import Faker
-from pytest_simcore.helpers.faker_factories import (
- random_user,
+from pytest_simcore.helpers.postgres_users import (
+ insert_and_get_user_and_secrets_lifespan,
)
-from pytest_simcore.helpers.postgres_tools import (
- insert_and_get_row_lifespan,
-)
-from simcore_postgres_database.models.users import UserRole, users
+from simcore_postgres_database.models.users import UserRole
+from simcore_postgres_database.models.users_secrets import users_secrets
from simcore_postgres_database.utils_repos import (
pass_or_acquire_connection,
)
@@ -28,24 +27,71 @@ async def user(
faker: Faker,
asyncpg_engine: AsyncEngine,
) -> AsyncIterable[dict[str, Any]]:
- async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
+ async with insert_and_get_user_and_secrets_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
asyncpg_engine,
- table=users,
- values=random_user(
- faker,
- role=faker.random_element(elements=UserRole),
- ),
- pk_col=users.c.id,
- ) as row:
- yield row
+ role=faker.random_element(elements=UserRole),
+ ) as user_and_secrets_row:
+ yield user_and_secrets_row
async def test_users_repo_get(asyncpg_engine: AsyncEngine, user: dict[str, Any]):
- repo = UsersRepo()
+ repo = UsersRepo(asyncpg_engine)
async with pass_or_acquire_connection(asyncpg_engine) as connection:
assert await repo.get_email(connection, user_id=user["id"]) == user["email"]
assert await repo.get_role(connection, user_id=user["id"]) == user["role"]
+ assert (
+ await repo.get_password_hash(connection, user_id=user["id"])
+ == user["password_hash"]
+ )
+ assert (
+ await repo.get_active_user_email(connection, user_id=user["id"])
+ == user["email"]
+ )
with pytest.raises(UserNotFoundInRepoError):
await repo.get_role(connection, user_id=55)
+ with pytest.raises(UserNotFoundInRepoError):
+ await repo.get_email(connection, user_id=55)
+ with pytest.raises(UserNotFoundInRepoError):
+ await repo.get_password_hash(connection, user_id=55)
+ with pytest.raises(UserNotFoundInRepoError):
+ await repo.get_active_user_email(connection, user_id=55)
+
+
+async def test_update_user_password_hash_updates_modified_column(
+ asyncpg_engine: AsyncEngine, user: dict[str, Any], faker: Faker
+):
+ repo = UsersRepo(asyncpg_engine)
+
+ async with pass_or_acquire_connection(asyncpg_engine) as connection:
+ # Get initial modified timestamp
+ result = await connection.execute(
+ sa.select(users_secrets.c.modified).where(
+ users_secrets.c.user_id == user["id"]
+ )
+ )
+ initial_modified = result.scalar_one()
+
+ # Update password hash
+ new_password_hash = faker.password()
+ await repo.update_user_password_hash(
+ connection, user_id=user["id"], password_hash=new_password_hash
+ )
+
+ # Get updated modified timestamp
+ result = await connection.execute(
+ sa.select(users_secrets.c.modified).where(
+ users_secrets.c.user_id == user["id"]
+ )
+ )
+ updated_modified = result.scalar_one()
+
+ # Verify modified timestamp changed
+ assert updated_modified > initial_modified
+
+ # Verify password hash was actually updated
+ assert (
+ await repo.get_password_hash(connection, user_id=user["id"])
+ == new_password_hash
+ )
diff --git a/packages/pytest-simcore/pyproject.toml b/packages/pytest-simcore/pyproject.toml
deleted file mode 100644
index 4bffe2cb6a8f..000000000000
--- a/packages/pytest-simcore/pyproject.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-[project]
-name = "pytest-simcore"
-version = "0.1.0"
-requires-python = ">=3.11"
-dependencies = [
- "fastapi[standard]>=0.115.12",
- "python-socketio>=5.12.1",
- "uvicorn>=0.34.0",
-]
diff --git a/packages/pytest-simcore/src/pytest_simcore/__init__.py b/packages/pytest-simcore/src/pytest_simcore/__init__.py
index 8716d997ef21..7a7da935aadc 100644
--- a/packages/pytest-simcore/src/pytest_simcore/__init__.py
+++ b/packages/pytest-simcore/src/pytest_simcore/__init__.py
@@ -25,7 +25,7 @@ def keep_docker_up(request: pytest.FixtureRequest) -> bool:
return flag
-@pytest.fixture
+@pytest.fixture(scope="session")
def is_pdb_enabled(request: pytest.FixtureRequest):
"""Returns true if tests are set to use interactive debugger, i.e. --pdb"""
options = request.config.option
diff --git a/packages/pytest-simcore/src/pytest_simcore/asyncio_event_loops.py b/packages/pytest-simcore/src/pytest_simcore/asyncio_event_loops.py
new file mode 100644
index 000000000000..a08a48f0b106
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/asyncio_event_loops.py
@@ -0,0 +1,63 @@
+"""
+Our choice of plugin to test asyncio functionality is pytest-asyncio
+
+Some other pytest plugins, e.g. pytest-aiohttp, define their own event loop
+policies and event loops, which can conflict with pytest-asyncio.
+
+This file unifies the event loop policy and event loop used by pytest-asyncio throughout
+all the tests in this repository.
+
+"""
+
+import asyncio
+
+import pytest
+import uvloop
+
+
+@pytest.fixture(scope="session")
+def event_loop_policy():
+ """Override the event loop policy to use uvloop which is the one we use in production
+
+ SEE https://pytest-asyncio.readthedocs.io/en/stable/how-to-guides/uvloop.html
+ """
+ return uvloop.EventLoopPolicy()
+
+
+async def test_using_uvloop_event_loop():
+ """Tests that `pytest_simcore.asyncio_event_loops` plugin is used and has an effect
+
+    Manually import and add it to your test-suite to run this test.
+ """
+ assert isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy)
+
+
+@pytest.fixture
+async def loop() -> asyncio.AbstractEventLoop:
+ """Override the event loop inside `aiohttp.pytest_plugin` with the one from `pytest-asyncio`.
+
+ This provides the necessary fixtures to use pytest-asyncio with aiohttp!!!
+
+ USAGE:
+
+ pytest_plugins = [
+ "aiohttp.pytest_plugin", # No need to install pytest-aiohttp separately
+ ]
+
+
+ ERRORS:
+ Otherwise error like this will be raised:
+
+ > if connector._loop is not loop:
+ > raise RuntimeError("Session and connector has to use same event loop")
+ E RuntimeError: Session and connector has to use same event loop
+
+ .venv/lib/python3.11/site-packages/aiohttp/client.py:375: RuntimeError
+ """
+ return asyncio.get_running_loop()
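
A minimal sketch of how a consuming test-suite opts into this plugin, following the USAGE note in the `loop` fixture docstring (the conftest.py placement is an assumption):

    # conftest.py of a consuming test-suite (sketch)
    pytest_plugins = [
        "pytest_simcore.asyncio_event_loops",  # uvloop policy + aiohttp-compatible `loop` fixture
        "aiohttp.pytest_plugin",  # no need to install pytest-aiohttp separately
    ]

    # optionally re-export the provided sanity check to assert the policy took effect
    from pytest_simcore.asyncio_event_loops import (  # noqa: F401
        test_using_uvloop_event_loop,
    )
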
diff --git a/packages/pytest-simcore/src/pytest_simcore/aws_ec2_service.py b/packages/pytest-simcore/src/pytest_simcore/aws_ec2_service.py
index f971ef9b8f7d..efca123aaa30 100644
--- a/packages/pytest-simcore/src/pytest_simcore/aws_ec2_service.py
+++ b/packages/pytest-simcore/src/pytest_simcore/aws_ec2_service.py
@@ -5,7 +5,7 @@
import contextlib
import datetime
import random
-from collections.abc import AsyncIterator, Callable
+from collections.abc import AsyncIterator, Awaitable, Callable
from typing import cast
import aioboto3
@@ -60,45 +60,79 @@ async def aws_vpc_id(
print(f"<-- Deleted Vpc in AWS with {vpc_id=}")
-@pytest.fixture(scope="session")
-def subnet_cidr_block() -> str:
- return "10.0.1.0/24"
+@pytest.fixture
+def create_subnet_cidr_block(faker: Faker) -> Callable[[], str]:
+ # Keep track of used subnet numbers to avoid overlaps
+ used_subnets: set[int] = set()
+
+ def _() -> str:
+ # Generate subnet CIDR blocks within the VPC range 10.0.0.0/16
+ # Using /24 subnets (10.0.X.0/24) where X is between 1-255
+ while True:
+ subnet_number = faker.random_int(min=1, max=255)
+ if subnet_number not in used_subnets:
+ used_subnets.add(subnet_number)
+ return f"10.0.{subnet_number}.0/24"
+
+ return _
@pytest.fixture
-async def aws_subnet_id(
+def subnet_cidr_block(create_subnet_cidr_block: Callable[[], str]) -> str:
+ return create_subnet_cidr_block()
+
+
+@pytest.fixture
+async def create_aws_subnet_id(
aws_vpc_id: str,
ec2_client: EC2Client,
- subnet_cidr_block: str,
-) -> AsyncIterator[str]:
- subnet = await ec2_client.create_subnet(
- CidrBlock=subnet_cidr_block, VpcId=aws_vpc_id
- )
- assert "Subnet" in subnet
- assert "SubnetId" in subnet["Subnet"]
- subnet_id = subnet["Subnet"]["SubnetId"]
- print(f"--> Created Subnet in AWS with {subnet_id=}")
+ create_subnet_cidr_block: Callable[[], str],
+) -> AsyncIterator[Callable[..., Awaitable[str]]]:
+ created_subnet_ids: set[str] = set()
- yield subnet_id
+ async def _(cidr_override: str | None = None) -> str:
+ subnet = await ec2_client.create_subnet(
+ CidrBlock=cidr_override or create_subnet_cidr_block(), VpcId=aws_vpc_id
+ )
+ assert "Subnet" in subnet
+ assert "SubnetId" in subnet["Subnet"]
+ subnet_id = subnet["Subnet"]["SubnetId"]
+ print(f"--> Created Subnet in AWS with {subnet_id=}")
+ created_subnet_ids.add(subnet_id)
+ return subnet_id
+ yield _
+
+ # cleanup
# all the instances in the subnet must be terminated before that works
- instances_in_subnet = await ec2_client.describe_instances(
- Filters=[{"Name": "subnet-id", "Values": [subnet_id]}]
- )
- if instances_in_subnet["Reservations"]:
- print(f"--> terminating {len(instances_in_subnet)} instances in subnet")
- await ec2_client.terminate_instances(
- InstanceIds=[
- instance["Instances"][0]["InstanceId"] # type: ignore
- for instance in instances_in_subnet["Reservations"]
- ]
+ for subnet_id in created_subnet_ids:
+ instances_in_subnet = await ec2_client.describe_instances(
+ Filters=[{"Name": "subnet-id", "Values": [subnet_id]}]
)
- print(f"<-- terminated {len(instances_in_subnet)} instances in subnet")
+ if instances_in_subnet["Reservations"]:
+ print(f"--> terminating {len(instances_in_subnet)} instances in subnet")
+ await ec2_client.terminate_instances(
+ InstanceIds=[
+ instance["Instances"][0]["InstanceId"] # type: ignore
+ for instance in instances_in_subnet["Reservations"]
+ ]
+ )
+ print(f"<-- terminated {len(instances_in_subnet)} instances in subnet")
- await ec2_client.delete_subnet(SubnetId=subnet_id)
- subnets = await ec2_client.describe_subnets()
- print(f"<-- Deleted Subnet in AWS with {subnet_id=}")
- print(f"current {subnets=}")
+ await ec2_client.delete_subnet(SubnetId=subnet_id)
+ subnets = await ec2_client.describe_subnets()
+ print(f"<-- Deleted Subnet in AWS with {subnet_id=}")
+ print(f"current {subnets=}")
+
+
+@pytest.fixture
+async def aws_subnet_id(
+ aws_vpc_id: str,
+ ec2_client: EC2Client,
+ subnet_cidr_block: str,
+ create_aws_subnet_id: Callable[[], Awaitable[str]],
+) -> str:
+ return await create_aws_subnet_id()
@pytest.fixture
@@ -133,7 +167,7 @@ def _creator(**overrides) -> EC2InstanceData:
return EC2InstanceData(
**(
{
- "launch_time": faker.date_time(tzinfo=datetime.timezone.utc),
+ "launch_time": faker.date_time(tzinfo=datetime.UTC),
"id": faker.uuid4(),
"aws_private_dns": f"ip-{faker.ipv4().replace('.', '-')}.ec2.internal",
"aws_public_ip": faker.ipv4(),
diff --git a/packages/pytest-simcore/src/pytest_simcore/celery_library_mocks.py b/packages/pytest-simcore/src/pytest_simcore/celery_library_mocks.py
new file mode 100644
index 000000000000..c027bc0cbd46
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/celery_library_mocks.py
@@ -0,0 +1,96 @@
+# pylint: disable=redefined-outer-name
+
+from collections.abc import Callable
+
+import pytest
+from faker import Faker
+from pytest_mock import MockerFixture, MockType
+from servicelib.celery.models import TaskStatus, TaskUUID
+from servicelib.celery.task_manager import Task, TaskManager
+
+_faker = Faker()
+
+
+@pytest.fixture
+def submit_task_return_value() -> TaskUUID:
+ return TaskUUID(_faker.uuid4())
+
+
+@pytest.fixture
+def cancel_task_return_value() -> None:
+ return None
+
+
+@pytest.fixture
+def get_task_result_return_value() -> dict:
+ return {"result": "example"}
+
+
+@pytest.fixture
+def get_task_status_return_value() -> TaskStatus:
+ example = TaskStatus.model_json_schema()["examples"][0]
+ return TaskStatus.model_validate(example)
+
+
+@pytest.fixture
+def list_tasks_return_value() -> list[Task]:
+ examples = Task.model_json_schema()["examples"]
+ assert len(examples) > 0
+ return [Task.model_validate(example) for example in examples]
+
+
+@pytest.fixture
+def set_task_progress_return_value() -> None:
+ return None
+
+
+@pytest.fixture
+def mock_task_manager_object(
+ mocker: MockerFixture,
+ submit_task_return_value: TaskUUID,
+ cancel_task_return_value: None,
+ get_task_result_return_value: dict,
+ get_task_status_return_value: TaskStatus,
+ list_tasks_return_value: list[Task],
+ set_task_progress_return_value: None,
+) -> MockType:
+ """
+ Returns a TaskManager mock with overridable return values for each method.
+ If a return value is an Exception, the method will raise it.
+ """
+ mock = mocker.Mock(spec=TaskManager)
+
+ def _set_return_or_raise(method, value):
+ if isinstance(value, Exception):
+            method.side_effect = value  # Mock/AsyncMock raises the exception when the method is called
+ else:
+ method.return_value = value
+
+ _set_return_or_raise(mock.submit_task, submit_task_return_value)
+ _set_return_or_raise(mock.cancel_task, cancel_task_return_value)
+ _set_return_or_raise(mock.get_task_result, get_task_result_return_value)
+ _set_return_or_raise(mock.get_task_status, get_task_status_return_value)
+ _set_return_or_raise(mock.list_tasks, list_tasks_return_value)
+ _set_return_or_raise(mock.set_task_progress, set_task_progress_return_value)
+ return mock
+
+
+@pytest.fixture
+def mock_task_manager_object_raising_factory(
+ mocker: MockerFixture,
+) -> Callable[[Exception], MockType]:
+ def _factory(task_manager_exception: Exception) -> MockType:
+ mock = mocker.Mock(spec=TaskManager)
+
+ def _raise_exc(*args, **kwargs):
+ raise task_manager_exception
+
+ mock.submit_task.side_effect = _raise_exc
+ mock.cancel_task.side_effect = _raise_exc
+ mock.get_task_result.side_effect = _raise_exc
+ mock.get_task_status.side_effect = _raise_exc
+ mock.list_tasks.side_effect = _raise_exc
+ mock.set_task_progress.side_effect = _raise_exc
+ return mock
+
+ return _factory
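
A sketch of how these fixtures combine in a test module, overriding one of the return-value fixtures as the `mock_task_manager_object` docstring suggests (the payload and the error in the second test are arbitrary):

    import pytest
    from pytest_mock import MockType


    @pytest.fixture
    def get_task_result_return_value() -> dict:
        # override the default {"result": "example"} payload for this module
        return {"result": 42}


    def test_happy_path(mock_task_manager_object: MockType):
        # pass `mock_task_manager_object` to the code under test in place of a real TaskManager
        assert mock_task_manager_object.get_task_result.return_value == {"result": 42}


    def test_error_path(mock_task_manager_object_raising_factory):
        failing = mock_task_manager_object_raising_factory(RuntimeError("boom"))
        # every TaskManager method on `failing` now raises RuntimeError("boom")
        assert failing.submit_task.side_effect is not None
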
diff --git a/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py b/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py
index 15f28daf3162..1b1ec0ff762e 100644
--- a/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py
+++ b/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py
@@ -3,6 +3,7 @@
# pylint:disable=redefined-outer-name
# pylint:disable=no-value-for-parameter
+import contextlib
from collections.abc import AsyncIterator, Awaitable, Callable, Iterator
from typing import Any
from uuid import uuid4
@@ -10,65 +11,77 @@
import pytest
import sqlalchemy as sa
from faker import Faker
+from models_library.products import ProductName
from models_library.projects import ProjectAtDB, ProjectID
from models_library.projects_nodes_io import NodeID
from simcore_postgres_database.models.comp_pipeline import StateType, comp_pipeline
from simcore_postgres_database.models.comp_tasks import comp_tasks
+from simcore_postgres_database.models.products import products
from simcore_postgres_database.models.projects import ProjectType, projects
-from simcore_postgres_database.models.users import UserRole, UserStatus, users
+from simcore_postgres_database.models.projects_to_products import projects_to_products
+from simcore_postgres_database.models.services import services_access_rights
+from simcore_postgres_database.models.users import UserRole, UserStatus
from simcore_postgres_database.utils_projects_nodes import (
ProjectNodeCreate,
ProjectNodesRepo,
)
from sqlalchemy.ext.asyncio import AsyncEngine
+from .helpers.postgres_tools import insert_and_get_row_lifespan
+from .helpers.postgres_users import sync_insert_and_get_user_and_secrets_lifespan
+
@pytest.fixture()
def create_registered_user(
- postgres_db: sa.engine.Engine, faker: Faker
+ postgres_db: sa.engine.Engine,
) -> Iterator[Callable[..., dict]]:
+ """Fixture to create a registered user with secrets in the database."""
created_user_ids = []
- def creator(**user_kwargs) -> dict[str, Any]:
- with postgres_db.connect() as con:
- # removes all users before continuing
- user_config = {
- "id": len(created_user_ids) + 1,
- "name": faker.name(),
- "email": faker.email(),
- "password_hash": faker.password(),
- "status": UserStatus.ACTIVE,
- "role": UserRole.USER,
- }
- user_config.update(user_kwargs)
+ with contextlib.ExitStack() as stack:
- con.execute(
- users.insert().values(user_config).returning(sa.literal_column("*"))
+ def _(**user_kwargs) -> dict[str, Any]:
+ user_id = len(created_user_ids) + 1
+ user = stack.enter_context(
+ sync_insert_and_get_user_and_secrets_lifespan(
+ postgres_db,
+ status=UserStatus.ACTIVE,
+ role=UserRole.USER,
+ id=user_id,
+ **user_kwargs,
+ )
)
- # this is needed to get the primary_gid correctly
- result = con.execute(
- sa.select(users).where(users.c.id == user_config["id"])
- )
- user = result.first()
- assert user
+
print(f"--> created {user=}")
+ assert user["id"] == user_id
created_user_ids.append(user["id"])
- return dict(user._asdict())
+ return user
- yield creator
+ yield _
- with postgres_db.connect() as con:
- con.execute(users.delete().where(users.c.id.in_(created_user_ids)))
print(f"<-- deleted users {created_user_ids=}")
@pytest.fixture
-async def project(
- sqlalchemy_async_engine: AsyncEngine, faker: Faker
+async def with_product(
+ sqlalchemy_async_engine: AsyncEngine, product: dict[str, Any]
+) -> AsyncIterator[dict[str, Any]]:
+ async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
+ sqlalchemy_async_engine,
+ table=products,
+ values=product,
+ pk_col=products.c.name,
+ ) as created_product:
+ yield created_product
+
+
+@pytest.fixture
+async def create_project(
+ sqlalchemy_async_engine: AsyncEngine, faker: Faker, product_name: ProductName
) -> AsyncIterator[Callable[..., Awaitable[ProjectAtDB]]]:
created_project_ids: list[str] = []
- async def creator(
+ async def _(
user: dict[str, Any],
*,
project_nodes_overrides: dict[str, Any] | None = None,
@@ -108,15 +121,24 @@ async def creator(
await project_nodes_repo.add(
con,
nodes=[
- ProjectNodeCreate(node_id=NodeID(node_id), **default_node_config)
- for node_id in inserted_project.workbench
+ ProjectNodeCreate(
+ node_id=NodeID(node_id),
+ **(default_node_config | node_data.model_dump(mode="json")),
+ )
+ for node_id, node_data in inserted_project.workbench.items()
],
)
+ await con.execute(
+ projects_to_products.insert().values(
+ project_uuid=f"{inserted_project.uuid}",
+ product_name=product_name,
+ )
+ )
print(f"--> created {inserted_project=}")
created_project_ids.append(f"{inserted_project.uuid}")
return inserted_project
- yield creator
+ yield _
# cleanup
async with sqlalchemy_async_engine.begin() as con:
@@ -127,18 +149,20 @@ async def creator(
@pytest.fixture
-def pipeline(postgres_db: sa.engine.Engine) -> Iterator[Callable[..., dict[str, Any]]]:
+async def create_pipeline(
+ sqlalchemy_async_engine: AsyncEngine,
+) -> AsyncIterator[Callable[..., Awaitable[dict[str, Any]]]]:
created_pipeline_ids: list[str] = []
- def creator(**pipeline_kwargs) -> dict[str, Any]:
+ async def _(**pipeline_kwargs) -> dict[str, Any]:
pipeline_config = {
"project_id": f"{uuid4()}",
"dag_adjacency_list": {},
"state": StateType.NOT_STARTED,
}
pipeline_config.update(**pipeline_kwargs)
- with postgres_db.connect() as conn:
- result = conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ result = await conn.execute(
comp_pipeline.insert()
.values(**pipeline_config)
.returning(sa.literal_column("*"))
@@ -148,11 +172,11 @@ def creator(**pipeline_kwargs) -> dict[str, Any]:
created_pipeline_ids.append(new_pipeline["project_id"])
return new_pipeline
- yield creator
+ yield _
# cleanup
- with postgres_db.connect() as conn:
- conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ await conn.execute(
comp_pipeline.delete().where(
comp_pipeline.c.project_id.in_(created_pipeline_ids)
)
@@ -160,13 +184,15 @@ def creator(**pipeline_kwargs) -> dict[str, Any]:
@pytest.fixture
-def comp_task(postgres_db: sa.engine.Engine) -> Iterator[Callable[..., dict[str, Any]]]:
+async def create_comp_task(
+ sqlalchemy_async_engine: AsyncEngine,
+) -> AsyncIterator[Callable[..., Awaitable[dict[str, Any]]]]:
created_task_ids: list[int] = []
- def creator(project_id: ProjectID, **task_kwargs) -> dict[str, Any]:
+ async def _(project_id: ProjectID, **task_kwargs) -> dict[str, Any]:
task_config = {"project_id": f"{project_id}"} | task_kwargs
- with postgres_db.connect() as conn:
- result = conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ result = await conn.execute(
comp_tasks.insert()
.values(**task_config)
.returning(sa.literal_column("*"))
@@ -176,10 +202,81 @@ def creator(project_id: ProjectID, **task_kwargs) -> dict[str, Any]:
created_task_ids.append(new_task["task_id"])
return new_task
- yield creator
+ yield _
# cleanup
- with postgres_db.connect() as conn:
- conn.execute(
+ async with sqlalchemy_async_engine.begin() as conn:
+ await conn.execute(
comp_tasks.delete().where(comp_tasks.c.task_id.in_(created_task_ids))
)
+
+
+@pytest.fixture
+def grant_service_access_rights(
+ postgres_db: sa.engine.Engine,
+) -> Iterator[Callable[..., dict[str, Any]]]:
+ """Fixture to grant access rights on a service for a given group.
+
+ Creates a row in the services_access_rights table with the provided parameters and cleans up after the test.
+ """
+ created_entries: list[tuple[str, str, int, str]] = []
+
+ def _(
+ *,
+ service_key: str,
+ service_version: str,
+ group_id: int = 1,
+ product_name: str = "osparc",
+ execute_access: bool = True,
+ write_access: bool = False,
+ ) -> dict[str, Any]:
+ values = {
+ "key": service_key,
+ "version": service_version,
+ "gid": group_id,
+ "product_name": product_name,
+ "execute_access": execute_access,
+ "write_access": write_access,
+ }
+
+ # Directly use SQLAlchemy to insert and retrieve the row
+ with postgres_db.begin() as conn:
+ # Insert the row
+ conn.execute(services_access_rights.insert().values(**values))
+
+ # Retrieve the inserted row
+ result = conn.execute(
+ sa.select(services_access_rights).where(
+ sa.and_(
+ services_access_rights.c.key == service_key,
+ services_access_rights.c.version == service_version,
+ services_access_rights.c.gid == group_id,
+ services_access_rights.c.product_name == product_name,
+ )
+ )
+ )
+ row = result.one()
+
+ # Track the entry for cleanup
+ created_entries.append(
+ (service_key, service_version, group_id, product_name)
+ )
+
+ # Convert row to dict
+ return dict(row._asdict())
+
+ yield _
+
+ # Cleanup all created entries
+ with postgres_db.begin() as conn:
+ for key, version, gid, product in created_entries:
+ conn.execute(
+ services_access_rights.delete().where(
+ sa.and_(
+ services_access_rights.c.key == key,
+ services_access_rights.c.version == version,
+ services_access_rights.c.gid == gid,
+ services_access_rights.c.product_name == product,
+ )
+ )
+ )
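
A combined usage sketch for the reworked factories in this module; `with_product` is included because `create_project` now links the project to `product_name`, and the assertion is illustrative:

    from collections.abc import Awaitable, Callable
    from typing import Any

    from models_library.projects import ProjectAtDB


    async def test_user_project_and_pipeline(
        with_product: dict[str, Any],
        create_registered_user: Callable[..., dict[str, Any]],
        create_project: Callable[..., Awaitable[ProjectAtDB]],
        create_pipeline: Callable[..., Awaitable[dict[str, Any]]],
    ):
        user = create_registered_user(email="tester@example.com")
        project = await create_project(user)
        pipeline = await create_pipeline(project_id=f"{project.uuid}")
        assert pipeline["project_id"] == f"{project.uuid}"
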
diff --git a/packages/pytest-simcore/src/pytest_simcore/docker_registry.py b/packages/pytest-simcore/src/pytest_simcore/docker_registry.py
index 84b4d1e4b24a..a5b411947f12 100644
--- a/packages/pytest-simcore/src/pytest_simcore/docker_registry.py
+++ b/packages/pytest-simcore/src/pytest_simcore/docker_registry.py
@@ -15,6 +15,7 @@
import docker
import jsonschema
import pytest
+import pytest_asyncio
import tenacity
from pytest_simcore.helpers.logging_tools import log_context
from pytest_simcore.helpers.typing_env import EnvVarsDict
@@ -22,35 +23,45 @@
from .helpers.host import get_localhost_ip
-log = logging.getLogger(__name__)
+_logger = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def docker_registry(keep_docker_up: bool) -> Iterator[str]:
+ """sets up and runs a docker registry container locally and returns its URL"""
+ yield from _docker_registry_impl(keep_docker_up, registry_version="3")
+
+
+@pytest.fixture(scope="session")
+def docker_registry_v2() -> Iterator[str]:
+ """sets up and runs a docker registry v2 container locally and returns its URL"""
+ yield from _docker_registry_impl(keep_docker_up=False, registry_version="2")
+
+
+def _docker_registry_impl(keep_docker_up: bool, registry_version: str) -> Iterator[str]:
"""sets up and runs a docker registry container locally and returns its URL"""
# run the registry outside of the stack
docker_client = docker.from_env()
# try to login to private registry
host = "127.0.0.1"
- port = 5000
+ port = 5000 if registry_version == "3" else 5001
url = f"{host}:{port}"
+ container_name = f"pytest_registry_v{registry_version}"
+ volume_name = f"pytest_registry_v{registry_version}_data"
+
container = None
try:
docker_client.login(registry=url, username="simcore")
- container = docker_client.containers.list(filters={"name": "pytest_registry"})[
- 0
- ]
+ container = docker_client.containers.list(filters={"name": container_name})[0]
print("Warning: docker registry is already up!")
except Exception: # pylint: disable=broad-except
container = docker_client.containers.run(
- "registry:2",
- ports={"5000": "5000"},
- name="pytest_registry",
+ f"registry:{registry_version}",
+ ports={"5000": port},
+ name=container_name,
environment=["REGISTRY_STORAGE_DELETE_ENABLED=true"],
restart_policy={"Name": "always"},
- volumes={
- "pytest_registry_data": {"bind": "/var/lib/registry", "mode": "rw"}
- },
+ volumes={volume_name: {"bind": "/var/lib/registry", "mode": "rw"}},
detach=True,
)
@@ -63,7 +74,7 @@ def docker_registry(keep_docker_up: bool) -> Iterator[str]:
docker_client.login(registry=url, username="simcore")
# tag the image
repo = url + "/hello-world:dev"
- assert hello_world_image.tag(repo) == True
+ assert hello_world_image.tag(repo)
# push the image to the private registry
docker_client.images.push(repo)
# wipe the images
@@ -79,9 +90,9 @@ def docker_registry(keep_docker_up: bool) -> Iterator[str]:
os.environ["REGISTRY_SSL"] = "False"
os.environ["REGISTRY_AUTH"] = "False"
# the registry URL is how to access from the container (e.g. for accessing the API)
- os.environ["REGISTRY_URL"] = f"{get_localhost_ip()}:5000"
+ os.environ["REGISTRY_URL"] = f"{get_localhost_ip()}:{port}"
# the registry PATH is how the docker engine shall access the images (usually same as REGISTRY_URL but for testing)
- os.environ["REGISTRY_PATH"] = "127.0.0.1:5000"
+ os.environ["REGISTRY_PATH"] = f"127.0.0.1:{port}"
os.environ["REGISTRY_USER"] = "simcore"
os.environ["REGISTRY_PW"] = ""
@@ -124,7 +135,7 @@ def registry_settings(
@tenacity.retry(
wait=tenacity.wait_fixed(2),
stop=tenacity.stop_after_delay(20),
- before_sleep=tenacity.before_sleep_log(log, logging.INFO),
+ before_sleep=tenacity.before_sleep_log(_logger, logging.INFO),
reraise=True,
)
def wait_till_registry_is_responsive(url: str) -> bool:
@@ -136,7 +147,7 @@ def wait_till_registry_is_responsive(url: str) -> bool:
# ********************************************************* Services ***************************************
-def _pull_push_service(
+async def _pull_push_service(
pull_key: str,
tag: str,
new_registry: str,
@@ -145,40 +156,45 @@ def _pull_push_service(
) -> dict[str, Any]:
client = docker.from_env()
# pull image from original location
- print(f"Pulling {pull_key}:{tag} ...")
- image = client.images.pull(pull_key, tag=tag)
- assert image, f"image {pull_key}:{tag} could NOT be pulled!"
+ with log_context(logging.INFO, msg=f"Pulling {pull_key}:{tag} ..."):
+ image = client.images.pull(pull_key, tag=tag)
+ assert image, f"image {pull_key}:{tag} could NOT be pulled!"
# get io.simcore.* labels
image_labels: dict = dict(image.labels)
if owner_email:
- print(f"Overriding labels to take ownership as {owner_email} ...")
- # By overriding these labels, user owner_email gets ownership of the service
- # and the catalog service automatically gives full access rights for testing it
- # otherwise it does not even get read rights
-
- image_labels.update({"io.simcore.contact": f'{{"contact": "{owner_email}"}}'})
- image_labels.update(
- {
- "io.simcore.authors": f'{{"authors": [{{"name": "Tester", "email": "{owner_email}", "affiliation": "IT\'IS Foundation"}}] }}'
- }
- )
- image_labels.update({"maintainer": f"{owner_email}"})
-
- df_path = Path("Dockerfile").resolve()
- df_path.write_text(f"FROM {pull_key}:{tag}")
+ with log_context(
+ logging.INFO,
+ msg=f"Overriding labels to take ownership as {owner_email} ...",
+ ):
+ # By overriding these labels, user owner_email gets ownership of the service
+ # and the catalog service automatically gives full access rights for testing it
+ # otherwise it does not even get read rights
- try:
- # Rebuild to override image labels AND re-tag
- image2, _ = client.images.build(
- path=str(df_path.parent), labels=image_labels, tag=f"{tag}-owned"
+ image_labels.update(
+ {"io.simcore.contact": f'{{"contact": "{owner_email}"}}'}
+ )
+ image_labels.update(
+ {
+ "io.simcore.authors": f'{{"authors": [{{"name": "Tester", "email": "{owner_email}", "affiliation": "IT\'IS Foundation"}}] }}'
+ }
)
- print(json.dumps(image2.labels, indent=2))
- image = image2
+ image_labels.update({"maintainer": f"{owner_email}"})
- finally:
- df_path.unlink()
+ df_path = Path("Dockerfile").resolve()
+ df_path.write_text(f"FROM {pull_key}:{tag}")
+
+ try:
+ # Rebuild to override image labels AND re-tag
+ image2, _ = client.images.build(
+ path=str(df_path.parent), labels=image_labels, tag=f"{tag}-owned"
+ )
+ print(json.dumps(image2.labels, indent=2))
+ image = image2
+
+ finally:
+ df_path.unlink()
assert image_labels
io_simcore_labels = {
@@ -195,11 +211,16 @@ def _pull_push_service(
new_image_tag = (
f"{new_registry}/{io_simcore_labels['key']}:{io_simcore_labels['version']}"
)
- assert image.tag(new_image_tag) == True
+ assert image.tag(new_image_tag)
# push the image to the new location
- print(f"Pushing {pull_key}:{tag} -> {new_image_tag}...")
- client.images.push(new_image_tag)
+ async with aiodocker.Docker() as client:
+ await client.images.push(new_image_tag)
# return image io.simcore.* labels
image_labels = dict(image.labels)
@@ -212,10 +233,10 @@ def _pull_push_service(
}
-@pytest.fixture(scope="session")
+@pytest_asyncio.fixture(scope="session", loop_scope="session")
def docker_registry_image_injector(
docker_registry: str, node_meta_schema: dict
-) -> Callable[..., dict[str, Any]]:
+) -> Callable[[str, str, str | None], Awaitable[dict[str, Any]]]:
def inject_image(
source_image_repo: str, source_image_tag: str, owner_email: str | None = None
):
@@ -231,29 +252,33 @@ def inject_image(
@pytest.fixture
-def osparc_service(
+async def osparc_service(
docker_registry: str, node_meta_schema: dict, service_repo: str, service_tag: str
) -> dict[str, Any]:
"""pulls the service from service_repo:service_tag and pushes to docker_registry using the oSparc node meta schema
NOTE: 'service_repo' and 'service_tag' defined as parametrization
"""
- return _pull_push_service(
+ return await _pull_push_service(
service_repo, service_tag, docker_registry, node_meta_schema
)
-@pytest.fixture(scope="session")
-def sleeper_service(docker_registry: str, node_meta_schema: dict) -> dict[str, Any]:
+@pytest_asyncio.fixture(scope="session", loop_scope="session")
+async def sleeper_service(
+ docker_registry: str, node_meta_schema: dict
+) -> dict[str, Any]:
"""Adds a itisfoundation/sleeper in docker registry"""
- return _pull_push_service(
+ return await _pull_push_service(
"itisfoundation/sleeper", "1.0.0", docker_registry, node_meta_schema
)
-@pytest.fixture(scope="session")
-def jupyter_service(docker_registry: str, node_meta_schema: dict) -> dict[str, Any]:
+@pytest_asyncio.fixture(scope="session", loop_scope="session")
+async def jupyter_service(
+ docker_registry: str, node_meta_schema: dict
+) -> dict[str, Any]:
"""Adds a itisfoundation/jupyter-base-notebook in docker registry"""
- return _pull_push_service(
+ return await _pull_push_service(
"itisfoundation/jupyter-base-notebook",
"2.13.0",
docker_registry,
@@ -261,20 +286,20 @@ def jupyter_service(docker_registry: str, node_meta_schema: dict) -> dict[str, A
)
-@pytest.fixture(scope="session", params=["2.0.7"])
+@pytest_asyncio.fixture(scope="session", loop_scope="session", params=["2.0.7"])
def dy_static_file_server_version(request: pytest.FixtureRequest):
return request.param
-@pytest.fixture(scope="session")
-def dy_static_file_server_service(
+@pytest_asyncio.fixture(scope="session", loop_scope="session")
+async def dy_static_file_server_service(
docker_registry: str, node_meta_schema: dict, dy_static_file_server_version: str
) -> dict[str, Any]:
"""
Adds the below service in docker registry
itisfoundation/dy-static-file-server
"""
- return _pull_push_service(
+ return await _pull_push_service(
"itisfoundation/dy-static-file-server",
dy_static_file_server_version,
docker_registry,
@@ -282,15 +307,15 @@ def dy_static_file_server_service(
)
-@pytest.fixture(scope="session")
-def dy_static_file_server_dynamic_sidecar_service(
+@pytest_asyncio.fixture(scope="session", loop_scope="session")
+async def dy_static_file_server_dynamic_sidecar_service(
docker_registry: str, node_meta_schema: dict, dy_static_file_server_version: str
) -> dict[str, Any]:
"""
Adds the below service in docker registry
itisfoundation/dy-static-file-server-dynamic-sidecar
"""
- return _pull_push_service(
+ return await _pull_push_service(
"itisfoundation/dy-static-file-server-dynamic-sidecar",
dy_static_file_server_version,
docker_registry,
@@ -298,15 +323,15 @@ def dy_static_file_server_dynamic_sidecar_service(
)
-@pytest.fixture(scope="session")
-def dy_static_file_server_dynamic_sidecar_compose_spec_service(
+@pytest_asyncio.fixture(scope="session", loop_scope="session")
+async def dy_static_file_server_dynamic_sidecar_compose_spec_service(
docker_registry: str, node_meta_schema: dict, dy_static_file_server_version: str
) -> dict[str, Any]:
"""
Adds the below service in docker registry
itisfoundation/dy-static-file-server-dynamic-sidecar-compose-spec
"""
- return _pull_push_service(
+ return await _pull_push_service(
"itisfoundation/dy-static-file-server-dynamic-sidecar-compose-spec",
dy_static_file_server_version,
docker_registry,
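
A small usage sketch for the now-async, session-scoped service fixtures; the loop-scope mark mirrors the `loop_scope="session"` used in their definitions and the assertions are illustrative:

    import pytest


    @pytest.mark.asyncio(loop_scope="session")
    async def test_sleeper_is_in_local_registry(
        docker_registry: str, sleeper_service: dict
    ):
        assert docker_registry.endswith(":5000")  # registry v3 runs on port 5000
        assert sleeper_service  # io.simcore.* metadata of the pushed sleeper image
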
diff --git a/packages/pytest-simcore/src/pytest_simcore/environment_configs.py b/packages/pytest-simcore/src/pytest_simcore/environment_configs.py
index 6495f1f7cc1a..6a87e3536e95 100644
--- a/packages/pytest-simcore/src/pytest_simcore/environment_configs.py
+++ b/packages/pytest-simcore/src/pytest_simcore/environment_configs.py
@@ -3,15 +3,19 @@
# pylint: disable=unused-variable
+import logging
import re
from pathlib import Path
from typing import Any
import pytest
+from faker import Faker
from .helpers.monkeypatch_envs import load_dotenv, setenvs_from_dict
from .helpers.typing_env import EnvVarsDict
+_logger = logging.getLogger(__name__)
+
def pytest_addoption(parser: pytest.Parser):
simcore_group = parser.getgroup("simcore")
@@ -20,12 +24,17 @@ def pytest_addoption(parser: pytest.Parser):
action="store",
type=Path,
default=None,
- help="Path to an env file. Consider passing a link to repo configs, i.e. `ln -s /path/to/osparc-ops-config/repo.config`",
+ help="Path to an env file. Replaces .env-devel in the tests by an external envfile."
+ "e.g. consider "
+ " `ln -s /path/to/osparc-ops-config/repo.config .secrets` and then "
+ " `pytest --external-envfile=.secrets --pdb tests/unit/test_core_settings.py`",
)
@pytest.fixture(scope="session")
-def external_envfile_dict(request: pytest.FixtureRequest) -> EnvVarsDict:
+def external_envfile_dict(
+ request: pytest.FixtureRequest, osparc_simcore_root_dir: Path
+) -> EnvVarsDict:
"""
If a file under test folder prefixed with `.env-secret` is present,
then this fixture captures it.
@@ -35,19 +44,43 @@ def external_envfile_dict(request: pytest.FixtureRequest) -> EnvVarsDict:
"""
envs = {}
if envfile := request.config.getoption("--external-envfile"):
- print("🚨 EXTERNAL `envfile` option detected. Loading", envfile, "...")
+ _logger.warning(
+ "🚨 EXTERNAL `envfile` option detected. Loading '%s' ...", envfile
+ )
assert isinstance(envfile, Path)
assert envfile.exists()
assert envfile.is_file()
+ envfile = envfile.resolve()
+ osparc_simcore_root_dir = osparc_simcore_root_dir.resolve()
+
+ if osparc_simcore_root_dir in envfile.parents and not any(
+ term in envfile.name.lower() for term in ("ignore", "secret")
+ ):
+ _logger.warning(
+ "🚨 CAUTION: The external envfile '%s' may contain sensitive data and could be accidentally versioned. "
+ "To prevent this, include the words 'secret' or 'ignore' in the filename.",
+ envfile.name,
+ )
+
envs = load_dotenv(envfile)
+ if envs:
+ response = input(
+ f"🚨 CAUTION: You are about to run tests using environment variables loaded from '{envfile}'.\n"
+ "This may cause tests to interact with or modify real external systems (e.g., production or staging environments).\n"
+ "Proceeding could result in data loss or unintended side effects.\n"
+ "Are you sure you want to continue? [y/N]: "
+ )
+ if response.strip().lower() not in ("y", "yes"):
+ pytest.exit("Aborted by user due to external envfile usage.")
+
return envs
@pytest.fixture(scope="session")
-def skip_if_external_envfile_dict(external_envfile_dict: EnvVarsDict) -> None:
+def skip_if_no_external_envfile(external_envfile_dict: EnvVarsDict) -> None:
if not external_envfile_dict:
pytest.skip(reason="Skipping test since external-envfile is not set")
@@ -80,7 +113,7 @@ def service_name(project_slug_dir: Path) -> str:
@pytest.fixture(scope="session")
-def services_docker_compose_dict(services_docker_compose_file: Path) -> EnvVarsDict:
+def docker_compose_services_dict(services_docker_compose_file: Path) -> EnvVarsDict:
# NOTE: By keeping import here, this library is ONLY required when the fixture is used
import yaml
@@ -89,11 +122,30 @@ def services_docker_compose_dict(services_docker_compose_file: Path) -> EnvVarsD
return content
+@pytest.fixture
+def docker_compose_service_hostname(
+ faker: Faker, service_name: str, docker_compose_services_dict: dict[str, Any]
+) -> str:
+ """Evaluates `hostname` from docker-compose service"""
+ hostname_template = docker_compose_services_dict["services"][service_name][
+ "hostname"
+ ]
+
+ # Generate fake values to replace Docker Swarm template variables
+ node_hostname = faker.hostname(levels=1)
+ task_slot = faker.random_int(min=0, max=10)
+
+ # Replace the Docker Swarm template variables with faker values
+ return hostname_template.replace("{{.Node.Hostname}}", node_hostname).replace(
+ "{{.Task.Slot}}", str(task_slot)
+ )
+
+
@pytest.fixture
def docker_compose_service_environment_dict(
- services_docker_compose_dict: dict[str, Any],
- env_devel_dict: EnvVarsDict,
+ docker_compose_services_dict: dict[str, Any],
service_name: str,
+ env_devel_dict: EnvVarsDict,
env_devel_file: Path,
) -> EnvVarsDict:
"""Returns env vars dict from the docker-compose `environment` section
@@ -101,10 +153,10 @@ def docker_compose_service_environment_dict(
- env_devel_dict in environment_configs plugin
- service_name needs to be defined
"""
- service = services_docker_compose_dict["services"][service_name]
+ service = docker_compose_services_dict["services"][service_name]
def _substitute(key, value) -> tuple[str, str]:
- if m := re.match(r"\${([^{}:-]\w+)", value):
+ if m := re.match(r"\${([^{}:-]\w+)", f"{value}"):
expected_env_var = m.group(1)
try:
# NOTE: if this raises, then the RHS env-vars in the docker-compose are
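
A sketch of the test pattern these fixtures enable; `MySettings` is a placeholder for the service settings class under test:

    import pytest
    from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
    from pytest_simcore.helpers.typing_env import EnvVarsDict


    def test_settings_against_external_config(
        skip_if_no_external_envfile: None,
        external_envfile_dict: EnvVarsDict,
        monkeypatch: pytest.MonkeyPatch,
    ):
        # skipped unless `pytest --external-envfile=.secrets ...` was passed
        setenvs_from_dict(monkeypatch, external_envfile_dict)
        # settings = MySettings.create_from_envs()  # placeholder for the real check
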
diff --git a/packages/pytest-simcore/src/pytest_simcore/faker_products_data.py b/packages/pytest-simcore/src/pytest_simcore/faker_products_data.py
index e55c1e489f09..e91f21ca7e6b 100644
--- a/packages/pytest-simcore/src/pytest_simcore/faker_products_data.py
+++ b/packages/pytest-simcore/src/pytest_simcore/faker_products_data.py
@@ -4,9 +4,9 @@
# pylint: disable=unused-argument
# pylint: disable=unused-variable
"""
- Fixtures to produce fake data for a product:
- - it is self-consistent
- - granular customization by overriding fixtures
+Fixtures to produce fake data for a product:
+ - it is self-consistent
+ - granular customization by overriding fixtures
"""
from typing import Any
@@ -65,11 +65,25 @@ def bcc_email(request: pytest.FixtureRequest, product_name: ProductName) -> Emai
)
+@pytest.fixture
+def support_standard_group_id(faker: Faker) -> int | None:
+ # NOTE: override to change
+ return None
+
+
@pytest.fixture
def product(
- faker: Faker, product_name: ProductName, support_email: EmailStr
+ faker: Faker,
+ product_name: ProductName,
+ support_email: EmailStr,
+ support_standard_group_id: int | None,
) -> dict[str, Any]:
- return random_product(name=product_name, support_email=support_email, fake=faker)
+ return random_product(
+ name=product_name,
+ support_email=support_email,
+ support_standard_group_id=support_standard_group_id,
+ fake=faker,
+ )
@pytest.fixture
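
A sketch of the granular-override pattern described in the module docstring, applied to the new `support_standard_group_id` fixture (the group id value is arbitrary):

    import pytest


    @pytest.fixture
    def support_standard_group_id() -> int | None:
        return 12345  # arbitrary group id for this test module


    def test_product_with_support_group(product: dict):
        assert product["support_standard_group_id"] == 12345
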
diff --git a/packages/pytest-simcore/src/pytest_simcore/faker_users_data.py b/packages/pytest-simcore/src/pytest_simcore/faker_users_data.py
index 4e59b6db93a4..070087982e7d 100644
--- a/packages/pytest-simcore/src/pytest_simcore/faker_users_data.py
+++ b/packages/pytest-simcore/src/pytest_simcore/faker_users_data.py
@@ -3,9 +3,9 @@
# pylint: disable=unused-variable
# pylint: disable=too-many-arguments
"""
- Fixtures to produce fake data for a user:
- - it is self-consistent
- - granular customization by overriding fixtures
+Fixtures to produce fake data for a user:
+ - it is self-consistent
+ - granular customization by overriding fixtures
"""
from typing import Any
@@ -16,7 +16,11 @@
from models_library.users import UserID
from pydantic import EmailStr, TypeAdapter
-from .helpers.faker_factories import DEFAULT_TEST_PASSWORD, random_user
+from .helpers.faker_factories import (
+ DEFAULT_TEST_PASSWORD,
+ random_user,
+ random_user_secrets,
+)
_MESSAGE = (
"If set, it overrides the fake value of `{}` fixture."
@@ -125,12 +129,17 @@ def user(
user_name: IDStr,
user_password: str,
) -> dict[str, Any]:
- return random_user(
- id=user_id,
- email=user_email,
- name=user_name,
- first_name=user_first_name,
- last_name=user_last_name,
- password=user_password,
- fake=faker,
- )
+ """NOTE: it returns user data including poassword and password_hash"""
+ secrets = random_user_secrets(fake=faker, user_id=user_id, password=user_password)
+ assert secrets["user_id"] == user_id
+ return {
+ **random_user(
+ id=user_id,
+ email=user_email,
+ name=user_name,
+ first_name=user_first_name,
+ last_name=user_last_name,
+ fake=faker,
+ ),
+ "password_hash": secrets["password_hash"],
+ }
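
A quick sketch of what the reworked `user` fixture now exposes; the plain password stays in the separate `user_password` fixture:

    from typing import Any


    def test_user_fixture_exposes_password_hash(
        user: dict[str, Any], user_password: str
    ):
        assert user["password_hash"]
        assert user_password  # plain password provided separately
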
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/assert_checks.py b/packages/pytest-simcore/src/pytest_simcore/helpers/assert_checks.py
index fc931cbebd59..afc6cdf15a3f 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/assert_checks.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/assert_checks.py
@@ -97,3 +97,17 @@ def _do_assert_error(
assert expected_error_code in codes
return data, error
+
+
+def assert_equal_ignoring_none(expected: dict, actual: dict):
+ for key, exp_value in expected.items():
+ if exp_value is None:
+ continue
+ assert key in actual, f"Missing key {key}"
+ act_value = actual[key]
+ if isinstance(exp_value, dict) and isinstance(act_value, dict):
+ assert_equal_ignoring_none(exp_value, act_value)
+ else:
+ assert (
+ act_value == exp_value
+ ), f"Mismatch in {key}: {act_value} != {exp_value}"
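
A minimal example of the comparison rules implemented above: expected values of None are skipped, nested dicts are compared recursively, and extra keys in `actual` are ignored (values are illustrative):

    expected = {"name": "foo", "phone": None, "address": {"city": "Zurich", "zip": None}}
    actual = {
        "name": "foo",
        "phone": "+41763456789",
        "address": {"city": "Zurich", "zip": "8000"},
        "extra": 1,
    }
    assert_equal_ignoring_none(expected, actual)  # passes
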
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/async_jobs_server.py b/packages/pytest-simcore/src/pytest_simcore/helpers/async_jobs_server.py
new file mode 100644
index 000000000000..eba9867b529d
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/async_jobs_server.py
@@ -0,0 +1,88 @@
+# pylint: disable=unused-argument
+
+from dataclasses import dataclass
+
+from models_library.api_schemas_rpc_async_jobs.async_jobs import (
+ AsyncJobGet,
+ AsyncJobId,
+ AsyncJobResult,
+ AsyncJobStatus,
+)
+from models_library.api_schemas_rpc_async_jobs.exceptions import BaseAsyncjobRpcError
+from models_library.progress_bar import ProgressReport
+from models_library.rabbitmq_basic_types import RPCNamespace
+from pydantic import validate_call
+from pytest_mock import MockType
+from servicelib.celery.models import OwnerMetadata
+from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient
+
+
+@dataclass
+class AsyncJobSideEffects:
+ exception: BaseAsyncjobRpcError | None = None
+
+ @validate_call(config={"arbitrary_types_allowed": True})
+ async def cancel(
+ self,
+ rabbitmq_rpc_client: RabbitMQRPCClient | MockType,
+ *,
+ rpc_namespace: RPCNamespace,
+ job_id: AsyncJobId,
+ owner_metadata: OwnerMetadata,
+ ) -> None:
+ if self.exception is not None:
+ raise self.exception
+ return None
+
+ @validate_call(config={"arbitrary_types_allowed": True})
+ async def status(
+ self,
+ rabbitmq_rpc_client: RabbitMQRPCClient | MockType,
+ *,
+ rpc_namespace: RPCNamespace,
+ job_id: AsyncJobId,
+ owner_metadata: OwnerMetadata,
+ ) -> AsyncJobStatus:
+ if self.exception is not None:
+ raise self.exception
+
+ return AsyncJobStatus(
+ job_id=job_id,
+ progress=ProgressReport(
+ actual_value=50.0,
+ total=100.0,
+ attempt=1,
+ ),
+ done=False,
+ )
+
+ @validate_call(config={"arbitrary_types_allowed": True})
+ async def result(
+ self,
+ rabbitmq_rpc_client: RabbitMQRPCClient | MockType,
+ *,
+ rpc_namespace: RPCNamespace,
+ job_id: AsyncJobId,
+ owner_metadata: OwnerMetadata,
+ ) -> AsyncJobResult:
+ if self.exception is not None:
+ raise self.exception
+ return AsyncJobResult(result="Success")
+
+ @validate_call(config={"arbitrary_types_allowed": True})
+ async def list_jobs(
+ self,
+ rabbitmq_rpc_client: RabbitMQRPCClient | MockType,
+ *,
+ rpc_namespace: RPCNamespace,
+ owner_metadata: OwnerMetadata,
+ filter_: str = "",
+ ) -> list[AsyncJobGet]:
+ if self.exception is not None:
+ raise self.exception
+ return [
+ AsyncJobGet(
+ job_id=AsyncJobId("123e4567-e89b-12d3-a456-426614174000"),
+ job_name="Example Job",
+ )
+ ]
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/autoscaling.py b/packages/pytest-simcore/src/pytest_simcore/helpers/autoscaling.py
index 2d6c278d92c9..3648284faa75 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/autoscaling.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/autoscaling.py
@@ -39,7 +39,7 @@ def create_fake_association(
):
fake_node_to_instance_map = {}
- async def _fake_node_creator(
+ def _fake_node_creator(
_nodes: list[Node], ec2_instances: list[EC2InstanceData]
) -> tuple[list[AssociatedInstance], list[EC2InstanceData]]:
def _create_fake_node_with_labels(instance: EC2InstanceData) -> Node:
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/director_v2_rpc_server.py b/packages/pytest-simcore/src/pytest_simcore/helpers/director_v2_rpc_server.py
new file mode 100644
index 000000000000..8b8d9b6cc4b6
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/director_v2_rpc_server.py
@@ -0,0 +1,30 @@
+# pylint: disable=no-self-use
+# pylint: disable=not-context-manager
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+
+from models_library.api_schemas_directorv2.computations import TaskLogFileIdGet
+from models_library.projects import ProjectID
+from pydantic import TypeAdapter, validate_call
+from pytest_mock import MockType
+from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient
+
+
+class DirectorV2SideEffects:
+ # pylint: disable=no-self-use
+ @validate_call(config={"arbitrary_types_allowed": True})
+ async def get_computation_task_log_file_ids(
+ self,
+ rpc_client: RabbitMQRPCClient | MockType,
+ *,
+ project_id: ProjectID,
+ ) -> list[TaskLogFileIdGet]:
+ assert rpc_client
+ assert project_id
+
+ return TypeAdapter(list[TaskLogFileIdGet]).validate_python(
+ TaskLogFileIdGet.model_json_schema()["examples"],
+ )
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/faker_factories.py b/packages/pytest-simcore/src/pytest_simcore/helpers/faker_factories.py
index 5aeb6c3be667..b03d29c67005 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/faker_factories.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/faker_factories.py
@@ -20,10 +20,18 @@
from uuid import uuid4
import arrow
-import faker
from faker import Faker
-DEFAULT_FAKER: Final = faker.Faker()
+DEFAULT_FAKER: Final = Faker()
+
+
+def random_service_key(fake: Faker = DEFAULT_FAKER, *, name: str | None = None) -> str:
+ suffix = fake.unique.pystr(min_chars=2) if name is None else name
+ return f"simcore/services/{fake.random_element(['dynamic', 'comp', 'frontend'])}/{suffix.lower()}"
+
+
+def random_service_version(fake: Faker = DEFAULT_FAKER) -> str:
+ return ".".join([str(fake.pyint(0, 100)) for _ in range(3)])
def random_icon_url(fake: Faker):
@@ -34,6 +42,15 @@ def random_thumbnail_url(fake: Faker):
return fake.image_url(width=32, height=32)
+def random_phone_number(fake: Faker = DEFAULT_FAKER) -> str:
+ # NOTE: faker.phone_number() does not validate with `phonenumbers` library.
+ phone = fake.random_element(
+ ["+41763456789", "+19104630364", "+13013044567", "+34 950 453 837"]
+ )
+ tail = f"{fake.pyint(100, 999)}"
+ return phone[: -len(tail)] + tail # ensure phone keeps its length
+
+
def _compute_hash(password: str) -> str:
try:
# 'passlib' will be used only if already installed.
@@ -55,9 +72,7 @@ def _compute_hash(password: str) -> str:
_DEFAULT_HASH = _compute_hash(DEFAULT_TEST_PASSWORD)
-def random_user(
- fake: Faker = DEFAULT_FAKER, password: str | None = None, **overrides
-) -> dict[str, Any]:
+def random_user(fake: Faker = DEFAULT_FAKER, **overrides) -> dict[str, Any]:
from simcore_postgres_database.models.users import users
from simcore_postgres_database.webserver_models import UserStatus
@@ -67,12 +82,35 @@ def random_user(
# NOTE: ensures user name is unique to avoid flaky tests
"name": f"{fake.user_name()}_{fake.uuid4()}",
"email": f"{fake.uuid4()}_{fake.email().lower()}",
- "password_hash": _DEFAULT_HASH,
"status": UserStatus.ACTIVE,
}
+ data.update(overrides)
assert set(data.keys()).issubset({c.name for c in users.columns})
+ return data
+
+
+def random_user_secrets(
+ fake: Faker = DEFAULT_FAKER,
+ *,
+ # foreign keys
+ user_id: int,
+ password: str | None = None,
+ **overrides,
+) -> dict[str, Any]:
+ from simcore_postgres_database.models.users_secrets import users_secrets
+
+ assert fake # nosec
+
+ assert set(overrides.keys()).issubset({c.name for c in users_secrets.columns})
+
+ data = {
+ "user_id": user_id,
+ "password_hash": _DEFAULT_HASH,
+ }
+ assert set(data.keys()).issubset({c.name for c in users_secrets.columns})
+
# transform password in hash
if password:
assert len(password) >= 12
@@ -105,7 +143,7 @@ def random_pre_registration_details(
"pre_first_name": fake.first_name(),
"pre_last_name": fake.last_name(),
"pre_email": fake.email(),
- "pre_phone": fake.phone_number(),
+ "pre_phone": random_phone_number(fake),
"institution": fake.company(),
"address": fake.address().replace("\n", ", "),
"city": fake.city(),
@@ -158,6 +196,26 @@ def random_project(fake: Faker = DEFAULT_FAKER, **overrides) -> dict[str, Any]:
return data
+def random_project_node(fake: Faker = DEFAULT_FAKER, **overrides) -> dict[str, Any]:
+ """Generates random fake data project nodes DATABASE table"""
+ from simcore_postgres_database.models.projects_nodes import projects_nodes
+
+ fake_name = fake.name()
+
+ data = {
+ "node_id": fake.uuid4(),
+ "project_uuid": fake.uuid4(),
+ "key": random_service_key(fake, name=fake_name),
+ "version": random_service_version(fake),
+ "label": fake_name,
+ }
+
+ assert set(data.keys()).issubset({c.name for c in projects_nodes.columns})
+
+ data.update(overrides)
+ return data
+
+
def random_group(fake: Faker = DEFAULT_FAKER, **overrides) -> dict[str, Any]:
from simcore_postgres_database.models.groups import groups
from simcore_postgres_database.webserver_models import GroupType
@@ -227,6 +285,7 @@ def fake_task(**overrides) -> dict[str, Any]:
def random_product(
*,
group_id: int | None = None,
+ support_standard_group_id: int | None = None,
registration_email_template: str | None = None,
fake: Faker = DEFAULT_FAKER,
**overrides,
@@ -265,7 +324,6 @@ def random_product(
ui=VendorUI(
logo_url="https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/static-webserver/client/source/resource/osparc/osparc-black.svg",
strong_color=fake.color(),
- project_alias=fake.random_element(elements=["project", "study"]),
),
),
"registration_email_template": registration_email_template,
@@ -274,6 +332,7 @@ def random_product(
"priority": fake.pyint(0, 10),
"max_open_studies_per_user": fake.pyint(1, 10),
"group_id": group_id,
+ "support_standard_group_id": support_standard_group_id,
}
if ui := fake.random_element(
@@ -453,7 +512,7 @@ def random_service_meta_data(
) -> dict[str, Any]:
from simcore_postgres_database.models.services import services_meta_data
- _version = ".".join([str(fake.pyint()) for _ in range(3)])
+ _version = random_service_version(fake)
_name = fake.name()
data: dict[str, Any] = {
@@ -541,3 +600,32 @@ def random_itis_vip_available_download_item(
data.update(**overrides)
return data
+
+
+def random_service_consume_filetype(
+ *,
+ service_key: str,
+ service_version: str,
+ fake: Faker = DEFAULT_FAKER,
+ **overrides,
+) -> dict[str, Any]:
+ from simcore_postgres_database.models.services_consume_filetypes import (
+ services_consume_filetypes,
+ )
+
+ data = {
+ "service_key": service_key,
+ "service_version": service_version,
+ "service_display_name": fake.company(),
+ "service_input_port": fake.word(),
+ "filetype": fake.random_element(["CSV", "VTK", "H5", "JSON", "TXT"]),
+ "preference_order": fake.pyint(min_value=0, max_value=10),
+ "is_guest_allowed": fake.pybool(),
+ }
+
+ assert set(data.keys()).issubset( # nosec
+ {c.name for c in services_consume_filetypes.columns}
+ )
+
+ data.update(overrides)
+ return data
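
A hedged sanity-check sketch for `random_phone_number`, using the `phonenumbers` library mentioned in the NOTE above (assumes that library is available in the test environment):

    import phonenumbers

    from pytest_simcore.helpers.faker_factories import random_phone_number


    def test_random_phone_number_is_parsable():
        number = random_phone_number()
        parsed = phonenumbers.parse(number, None)  # numbers carry an international prefix
        assert phonenumbers.is_valid_number(parsed)
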
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/logging_tools.py b/packages/pytest-simcore/src/pytest_simcore/helpers/logging_tools.py
index c12649c1c3d4..32960d9bc7a2 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/logging_tools.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/logging_tools.py
@@ -1,10 +1,11 @@
import datetime
import logging
-from collections.abc import Iterator
+import warnings
+from collections.abc import Callable, Iterator
from contextlib import contextmanager
from dataclasses import dataclass, field
from types import SimpleNamespace
-from typing import TypeAlias
+from typing import Final, TypeAlias
def _timedelta_as_minute_second_ms(delta: datetime.timedelta) -> str:
@@ -29,6 +30,19 @@ def _timedelta_as_minute_second_ms(delta: datetime.timedelta) -> str:
return f"{sign}{result.strip()}"
+def _resolve(val: str | Callable[[], str], prefix: str, suffix: str) -> str:
+ try:
+ return f"{prefix}{val if isinstance(val, str) else val()}{suffix}"
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ warnings.warn(
+ f"Failed to generate {val} message: {exc!r}. "
+ f"Fix the callable to return a string without raising exceptions.",
+ UserWarning,
+ stacklevel=3,
+ )
+ return f"❌❌❌ [{val} message generation failed TIP: Check how the {val} message is generated!] ❌❌❌"
+
+
class DynamicIndentFormatter(logging.Formatter):
indent_char: str = " "
_cls_indent_level: int = 0
@@ -74,15 +88,26 @@ def setup(cls, logger: logging.Logger) -> None:
DynamicIndentFormatter.setup(test_logger)
+# Message formatting constants
+_STARTING_PREFIX: Final[str] = "--> "
+_STARTING_SUFFIX: Final[str] = " ⏳"
+_DONE_PREFIX: Final[str] = "<-- "
+_DONE_SUFFIX: Final[str] = " ✅"
+_RAISED_PREFIX: Final[str] = "❌❌❌ Error: "
+_RAISED_SUFFIX: Final[str] = " ❌❌❌"
+
+
@dataclass
class ContextMessages:
- starting: str
- done: str
- raised: str = field(default="")
+ starting: str | Callable[[], str]
+ done: str | Callable[[], str]
+ raised: str | Callable[[], str] = field(default="")
def __post_init__(self):
if not self.raised:
- self.raised = f"{self.done} [with error]"
+ self.raised = (
+ lambda: f"{self.done if isinstance(self.done, str) else self.done()} [with raised error]"
+ )
LogLevelInt: TypeAlias = int
@@ -127,9 +152,9 @@ def log_context(
if isinstance(msg, str):
ctx_msg = ContextMessages(
- starting=f"-> {msg} starting ...",
- done=f"<- {msg} done",
- raised=f"! {msg} raised",
+ starting=f"{msg}",
+ done=f"{msg}",
+ raised=f"{msg}",
)
elif isinstance(msg, tuple):
ctx_msg = ContextMessages(*msg)
@@ -140,13 +165,16 @@ def log_context(
try:
DynamicIndentFormatter.cls_increase_indent()
- logger.log(level, ctx_msg.starting, *args, **kwargs)
+ logger.log(
+ level,
+ _resolve(ctx_msg.starting, _STARTING_PREFIX, _STARTING_SUFFIX),
+ *args,
+ **kwargs,
+ )
with _increased_logger_indent(logger):
yield SimpleNamespace(logger=logger, messages=ctx_msg)
elapsed_time = datetime.datetime.now(tz=datetime.UTC) - started_time
- done_message = (
- f"{ctx_msg.done} ({_timedelta_as_minute_second_ms(elapsed_time)})"
- )
+ done_message = f"{_resolve(ctx_msg.done, _DONE_PREFIX, _DONE_SUFFIX)} ({_timedelta_as_minute_second_ms(elapsed_time)})"
logger.log(
level,
done_message,
@@ -156,9 +184,7 @@ def log_context(
except:
elapsed_time = datetime.datetime.now(tz=datetime.UTC) - started_time
- error_message = (
- f"{ctx_msg.raised} ({_timedelta_as_minute_second_ms(elapsed_time)})"
- )
+ error_message = f"{_resolve(ctx_msg.raised, _RAISED_PREFIX, _RAISED_SUFFIX)} ({_timedelta_as_minute_second_ms(elapsed_time)})"
logger.exception(
error_message,
*args,
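
A sketch of the lazy-message capability added above; the tuple form maps to `ContextMessages(*msg)` and callables are only evaluated when the message is emitted (the processed-items counter is illustrative):

    import logging

    from pytest_simcore.helpers.logging_tools import log_context

    processed: list[str] = []
    with log_context(
        logging.INFO,
        msg=(
            "processing batch",  # starting message (plain string)
            lambda: f"processed {len(processed)} items",  # done message, evaluated at exit
        ),
    ):
        processed.append("one")
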
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/long_running_tasks.py b/packages/pytest-simcore/src/pytest_simcore/helpers/long_running_tasks.py
new file mode 100644
index 000000000000..ad85744951f1
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/long_running_tasks.py
@@ -0,0 +1,39 @@
+# pylint: disable=protected-access
+
+import pytest
+from fastapi import FastAPI
+from servicelib.long_running_tasks.errors import TaskNotFoundError
+from servicelib.long_running_tasks.manager import (
+ LongRunningManager,
+)
+from servicelib.long_running_tasks.models import TaskContext
+from servicelib.long_running_tasks.task import TaskId
+from tenacity import (
+ AsyncRetrying,
+ retry_if_not_exception_type,
+ stop_after_delay,
+ wait_fixed,
+)
+
+
+def get_fastapi_long_running_manager(app: FastAPI) -> LongRunningManager:
+ manager = app.state.long_running_manager
+ assert isinstance(manager, LongRunningManager)
+ return manager
+
+
+async def assert_task_is_no_longer_present(
+ manager: LongRunningManager, task_id: TaskId, task_context: TaskContext
+) -> None:
+ async for attempt in AsyncRetrying(
+ reraise=True,
+ wait=wait_fixed(0.1),
+ stop=stop_after_delay(60),
+ retry=retry_if_not_exception_type(TaskNotFoundError),
+ ):
+ with attempt: # noqa: SIM117
+ with pytest.raises(TaskNotFoundError):
+                # use internals to determine when the task is no longer tracked
+ await manager._tasks_manager._get_tracked_task( # noqa: SLF001
+ task_id, task_context
+ )
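
A usage sketch under stated assumptions (an `app` fixture whose state carries the long-running manager, and a `task_id` obtained from a previously started task); the empty task_context is for illustration only.

from fastapi import FastAPI
from servicelib.long_running_tasks.task import TaskId

from pytest_simcore.helpers.long_running_tasks import (
    assert_task_is_no_longer_present,
    get_fastapi_long_running_manager,
)


async def test_task_is_eventually_removed(app: FastAPI, task_id: TaskId) -> None:
    manager = get_fastapi_long_running_manager(app)
    await assert_task_is_no_longer_present(manager, task_id, task_context={})
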
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/monkeypatch_envs.py b/packages/pytest-simcore/src/pytest_simcore/helpers/monkeypatch_envs.py
index d81356144304..13ca659d2a4a 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/monkeypatch_envs.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/monkeypatch_envs.py
@@ -3,6 +3,7 @@
"""
import os
+from collections.abc import Mapping
from io import StringIO
from pathlib import Path
@@ -17,7 +18,7 @@
def setenvs_from_dict(
- monkeypatch: pytest.MonkeyPatch, envs: dict[str, str | bool]
+ monkeypatch: pytest.MonkeyPatch, envs: Mapping[str, str | bool]
) -> EnvVarsDict:
env_vars = {}
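
Widening `envs` from dict to Mapping lets callers pass read-only mappings such as module-level constants; a small hypothetical example:

from types import MappingProxyType

import pytest
from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict

_FROZEN_ENVS = MappingProxyType({"MY_FEATURE_ENABLED": True, "MY_URL": "http://localhost"})


def test_with_frozen_envs(monkeypatch: pytest.MonkeyPatch) -> None:
    envs = setenvs_from_dict(monkeypatch, _FROZEN_ENVS)
    assert "MY_FEATURE_ENABLED" in envs
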
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/playwright.py b/packages/pytest-simcore/src/pytest_simcore/helpers/playwright.py
index 64b86cba7bb1..8e99c459b4ef 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/playwright.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/playwright.py
@@ -43,7 +43,7 @@
wait_fixed,
)
-from .logging_tools import log_context
+from .logging_tools import ContextMessages, log_context
_logger = logging.getLogger(__name__)
@@ -274,8 +274,8 @@ def __call__(self, message: str) -> bool:
decoded_message = decode_socketio_42_message(message)
if (
(decoded_message.name == _OSparcMessages.PROJECT_STATE_UPDATED.value)
- and (decoded_message.obj["data"]["locked"]["status"] == "CLOSED")
- and (decoded_message.obj["data"]["locked"]["value"] is False)
+ and (decoded_message.obj["data"]["shareState"]["status"] == "CLOSED")
+ and (decoded_message.obj["data"]["shareState"]["locked"] is False)
):
self.logger.info("project successfully closed")
return True
@@ -302,6 +302,29 @@ def __call__(self, message: str) -> bool:
return False
+@dataclass
+class SocketIOWaitNodeForOutputs:
+ logger: logging.Logger
+ expected_number_of_outputs: int
+ node_id: str
+
+ def __call__(self, message: str) -> bool:
+ if message.startswith(SOCKETIO_MESSAGE_PREFIX):
+ decoded_message = decode_socketio_42_message(message)
+ if decoded_message.name == _OSparcMessages.NODE_UPDATED:
+ assert "data" in decoded_message.obj
+ assert "node_id" in decoded_message.obj
+ if decoded_message.obj["node_id"] == self.node_id:
+ assert "outputs" in decoded_message.obj["data"]
+
+ return (
+ len(decoded_message.obj["data"]["outputs"])
+ == self.expected_number_of_outputs
+ )
+
+ return False
+
+
@dataclass
class SocketIOOsparcMessagePrinter:
include_logger_messages: bool = False
@@ -532,9 +555,10 @@ def wait_for_pipeline_state(
if current_state in if_in_states:
with log_context(
logging.INFO,
- msg=(
- f"pipeline is in {current_state=}, waiting for one of {expected_states=}",
- f"pipeline is now in {current_state=}",
+ msg=ContextMessages(
+ starting=f"wait for one of {expected_states=}",
+ done=lambda: f"wait for one of {expected_states=}, pipeline reached {current_state=}",
+ raised=lambda: f"pipeline failed or timed out with {current_state}. Expected one of {expected_states=}",
),
):
waiter = SocketIOProjectStateUpdatedWaiter(
@@ -551,7 +575,7 @@ def wait_for_pipeline_state(
and current_state not in expected_states
):
pytest.fail(
- f"❌ Pipeline failed with state {current_state}. Expected one of {expected_states} ❌"
+ f"❌ Pipeline failed fast with state {current_state}. Expected one of {expected_states} ❌"
)
return current_state
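
A sketch of how the new SocketIOWaitNodeForOutputs waiter might be wired into a test; `web_socket` (a Playwright WebSocket) and `node_id` are assumed fixtures/values of this example, while the remaining names come from this module.

waiter = SocketIOWaitNodeForOutputs(
    logger=_logger,
    expected_number_of_outputs=1,
    node_id=node_id,
)
with log_context(logging.INFO, "wait for node outputs"), web_socket.expect_event(
    "framereceived", waiter, timeout=60_000  # milliseconds
):
    ...  # trigger the action that produces the node outputs
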
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tools.py b/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tools.py
index 1e854e8b6874..1086a61fc7ae 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tools.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tools.py
@@ -88,20 +88,54 @@ async def _async_insert_and_get_row(
conn: AsyncConnection,
table: sa.Table,
values: dict[str, Any],
- pk_col: sa.Column,
+ pk_col: sa.Column | None = None,
pk_value: Any | None = None,
+ pk_cols: list[sa.Column] | None = None,
+ pk_values: list[Any] | None = None,
) -> sa.engine.Row:
- result = await conn.execute(table.insert().values(**values).returning(pk_col))
+ # Validate parameters
+ single_pk_provided = pk_col is not None
+ composite_pk_provided = pk_cols is not None
+
+ if single_pk_provided == composite_pk_provided:
+ msg = "Must provide either pk_col or pk_cols, but not both"
+ raise ValueError(msg)
+
+ if composite_pk_provided:
+ if pk_values is not None and len(pk_cols) != len(pk_values):
+ msg = "pk_cols and pk_values must have the same length"
+ raise ValueError(msg)
+ returning_cols = pk_cols
+ else:
+ returning_cols = [pk_col]
+
+ result = await conn.execute(
+ table.insert().values(**values).returning(*returning_cols)
+ )
row = result.one()
- # Get the pk_value from the row if not provided
- if pk_value is None:
- pk_value = getattr(row, pk_col.name)
+ if composite_pk_provided:
+ # Handle composite primary keys
+ if pk_values is None:
+ pk_values = [getattr(row, col.name) for col in pk_cols]
+ else:
+ for col, expected_value in zip(pk_cols, pk_values, strict=True):
+ assert getattr(row, col.name) == expected_value
+
+ # Build WHERE clause for composite key
+ where_clause = sa.and_(
+ *[col == val for col, val in zip(pk_cols, pk_values, strict=True)]
+ )
else:
- # NOTE: DO NO USE row[pk_col] since you will get a deprecation error (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9)
- assert getattr(row, pk_col.name) == pk_value
+ # Handle single primary key (existing logic)
+ if pk_value is None:
+ pk_value = getattr(row, pk_col.name)
+ else:
+ assert getattr(row, pk_col.name) == pk_value
+
+ where_clause = pk_col == pk_value
- result = await conn.execute(sa.select(table).where(pk_col == pk_value))
+ result = await conn.execute(sa.select(table).where(where_clause))
return result.one()
@@ -109,20 +143,52 @@ def _sync_insert_and_get_row(
conn: sa.engine.Connection,
table: sa.Table,
values: dict[str, Any],
- pk_col: sa.Column,
+ pk_col: sa.Column | None = None,
pk_value: Any | None = None,
+ pk_cols: list[sa.Column] | None = None,
+ pk_values: list[Any] | None = None,
) -> sa.engine.Row:
- result = conn.execute(table.insert().values(**values).returning(pk_col))
+ # Validate parameters
+ single_pk_provided = pk_col is not None
+ composite_pk_provided = pk_cols is not None
+
+ if single_pk_provided == composite_pk_provided:
+ msg = "Must provide either pk_col or pk_cols, but not both"
+ raise ValueError(msg)
+
+ if composite_pk_provided:
+ if pk_values is not None and len(pk_cols) != len(pk_values):
+ msg = "pk_cols and pk_values must have the same length"
+ raise ValueError(msg)
+ returning_cols = pk_cols
+ else:
+ returning_cols = [pk_col]
+
+ result = conn.execute(table.insert().values(**values).returning(*returning_cols))
row = result.one()
- # Get the pk_value from the row if not provided
- if pk_value is None:
- pk_value = getattr(row, pk_col.name)
+ if composite_pk_provided:
+ # Handle composite primary keys
+ if pk_values is None:
+ pk_values = [getattr(row, col.name) for col in pk_cols]
+ else:
+ for col, expected_value in zip(pk_cols, pk_values, strict=True):
+ assert getattr(row, col.name) == expected_value
+
+ # Build WHERE clause for composite key
+ where_clause = sa.and_(
+ *[col == val for col, val in zip(pk_cols, pk_values, strict=True)]
+ )
else:
- # NOTE: DO NO USE row[pk_col] since you will get a deprecation error (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9)
- assert getattr(row, pk_col.name) == pk_value
+ # Handle single primary key (existing logic)
+ if pk_value is None:
+ pk_value = getattr(row, pk_col.name)
+ else:
+ assert getattr(row, pk_col.name) == pk_value
+
+ where_clause = pk_col == pk_value
- result = conn.execute(sa.select(table).where(pk_col == pk_value))
+ result = conn.execute(sa.select(table).where(where_clause))
return result.one()
@@ -132,17 +198,125 @@ async def insert_and_get_row_lifespan(
*,
table: sa.Table,
values: dict[str, Any],
- pk_col: sa.Column,
+ pk_col: sa.Column | None = None,
pk_value: Any | None = None,
+ pk_cols: list[sa.Column] | None = None,
+ pk_values: list[Any] | None = None,
) -> AsyncIterator[dict[str, Any]]:
+ """
+ Context manager that inserts a row into a table and automatically deletes it on exit.
+
+ Args:
+ sqlalchemy_async_engine: Async SQLAlchemy engine
+ table: The table to insert into
+ values: Dictionary of column values to insert
+ pk_col: Primary key column for deletion (for single-column primary keys)
+ pk_value: Optional primary key value (if None, will be taken from inserted row)
+ pk_cols: List of primary key columns (for composite primary keys)
+ pk_values: Optional list of primary key values (if None, will be taken from inserted row)
+
+ Yields:
+ dict: The inserted row as a dictionary
+
+ Examples:
+ ## Single primary key usage:
+
+ @pytest.fixture
+ async def user_in_db(asyncpg_engine: AsyncEngine) -> AsyncIterator[dict]:
+ user_data = random_user(name="test_user", email="test@example.com")
+ async with insert_and_get_row_lifespan(
+ asyncpg_engine,
+ table=users,
+ values=user_data,
+ pk_col=users.c.id,
+ ) as row:
+ yield row
+
+    ## Composite primary key usage:
+
+ @pytest.fixture
+ async def service_in_db(asyncpg_engine: AsyncEngine) -> AsyncIterator[dict]:
+ service_data = {"key": "simcore/services/comp/test", "version": "1.0.0", "name": "Test Service"}
+ async with insert_and_get_row_lifespan(
+ asyncpg_engine,
+ table=services,
+ values=service_data,
+ pk_cols=[services.c.key, services.c.version],
+ ) as row:
+ yield row
+
+    ## Multiple rows with single primary keys using AsyncExitStack:
+
+ @pytest.fixture
+ async def users_in_db(asyncpg_engine: AsyncEngine) -> AsyncIterator[list[dict]]:
+ users_data = [
+ random_user(name="user1", email="user1@example.com"),
+ random_user(name="user2", email="user2@example.com"),
+ ]
+
+ async with AsyncExitStack() as stack:
+ created_users = []
+ for user_data in users_data:
+ row = await stack.enter_async_context(
+ insert_and_get_row_lifespan(
+ asyncpg_engine,
+ table=users,
+ values=user_data,
+ pk_col=users.c.id,
+ )
+ )
+ created_users.append(row)
+
+ yield created_users
+
+ ## Multiple rows with composite primary keys using AsyncExitStack:
+
+ @pytest.fixture
+ async def services_in_db(asyncpg_engine: AsyncEngine) -> AsyncIterator[list[dict]]:
+ services_data = [
+ {"key": "simcore/services/comp/service1", "version": "1.0.0", "name": "Service 1"},
+ {"key": "simcore/services/comp/service2", "version": "2.0.0", "name": "Service 2"},
+ {"key": "simcore/services/comp/service1", "version": "2.0.0", "name": "Service 1 v2"},
+ ]
+
+ async with AsyncExitStack() as stack:
+ created_services = []
+ for service_data in services_data:
+ row = await stack.enter_async_context(
+ insert_and_get_row_lifespan(
+ asyncpg_engine,
+ table=services,
+ values=service_data,
+ pk_cols=[services.c.key, services.c.version],
+ )
+ )
+ created_services.append(row)
+
+ yield created_services
+ """
# SETUP: insert & get
async with sqlalchemy_async_engine.begin() as conn:
row = await _async_insert_and_get_row(
- conn, table=table, values=values, pk_col=pk_col, pk_value=pk_value
+ conn,
+ table=table,
+ values=values,
+ pk_col=pk_col,
+ pk_value=pk_value,
+ pk_cols=pk_cols,
+ pk_values=pk_values,
)
- # If pk_value was None, get it from the row for deletion later
- if pk_value is None:
- pk_value = getattr(row, pk_col.name)
+
+ # Get pk values for deletion
+ if pk_cols is not None:
+ if pk_values is None:
+ pk_values = [getattr(row, col.name) for col in pk_cols]
+ where_clause = sa.and_(
+ *[col == val for col, val in zip(pk_cols, pk_values, strict=True)]
+ )
+ else:
+ if pk_value is None:
+ pk_value = getattr(row, pk_col.name)
+ where_clause = pk_col == pk_value
assert row
@@ -150,9 +324,9 @@ async def insert_and_get_row_lifespan(
# pylint: disable=protected-access
yield row._asdict()
- # TEAD-DOWN: delete row
+ # TEARDOWN: delete row
async with sqlalchemy_async_engine.begin() as conn:
- await conn.execute(table.delete().where(pk_col == pk_value))
+ await conn.execute(table.delete().where(where_clause))
@contextmanager
@@ -161,23 +335,43 @@ def sync_insert_and_get_row_lifespan(
*,
table: sa.Table,
values: dict[str, Any],
- pk_col: sa.Column,
+ pk_col: sa.Column | None = None,
pk_value: Any | None = None,
+ pk_cols: list[sa.Column] | None = None,
+ pk_values: list[Any] | None = None,
) -> Iterator[dict[str, Any]]:
"""sync version of insert_and_get_row_lifespan.
TIP: more convenient for **module-scope fixtures** that setup the
database tables before the app starts since it does not require an `event_loop`
- fixture (which is funcition-scoped )
+ fixture (which is function-scoped)
+
+ Supports both single and composite primary keys using the same parameter patterns
+ as the async version.
"""
# SETUP: insert & get
with sqlalchemy_sync_engine.begin() as conn:
row = _sync_insert_and_get_row(
- conn, table=table, values=values, pk_col=pk_col, pk_value=pk_value
+ conn,
+ table=table,
+ values=values,
+ pk_col=pk_col,
+ pk_value=pk_value,
+ pk_cols=pk_cols,
+ pk_values=pk_values,
)
- # If pk_value was None, get it from the row for deletion later
- if pk_value is None:
- pk_value = getattr(row, pk_col.name)
+
+ # Get pk values for deletion
+ if pk_cols is not None:
+ if pk_values is None:
+ pk_values = [getattr(row, col.name) for col in pk_cols]
+ where_clause = sa.and_(
+ *[col == val for col, val in zip(pk_cols, pk_values, strict=True)]
+ )
+ else:
+ if pk_value is None:
+ pk_value = getattr(row, pk_col.name)
+ where_clause = pk_col == pk_value
assert row
@@ -187,4 +381,4 @@ def sync_insert_and_get_row_lifespan(
# TEARDOWN: delete row
with sqlalchemy_sync_engine.begin() as conn:
- conn.execute(table.delete().where(pk_col == pk_value))
+ conn.execute(table.delete().where(where_clause))
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_users.py b/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_users.py
new file mode 100644
index 000000000000..dd4039619cd3
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_users.py
@@ -0,0 +1,106 @@
+import contextlib
+
+import sqlalchemy as sa
+from simcore_postgres_database.models.users import users
+from simcore_postgres_database.models.users_secrets import users_secrets
+from sqlalchemy.ext.asyncio import AsyncEngine
+
+from .faker_factories import random_user, random_user_secrets
+from .postgres_tools import (
+ insert_and_get_row_lifespan,
+ sync_insert_and_get_row_lifespan,
+)
+
+
+def _get_kwargs_from_overrides(overrides: dict) -> tuple[dict, dict]:
+ user_kwargs = overrides.copy()
+ secrets_kwargs = {"password": user_kwargs.pop("password", None)}
+ if "password_hash" in user_kwargs:
+ secrets_kwargs["password_hash"] = user_kwargs.pop("password_hash")
+ return user_kwargs, secrets_kwargs
+
+
+@contextlib.asynccontextmanager
+async def insert_and_get_user_and_secrets_lifespan(
+ sqlalchemy_async_engine: AsyncEngine, **overrides
+):
+ user_kwargs, secrets_kwargs = _get_kwargs_from_overrides(overrides)
+
+ async with contextlib.AsyncExitStack() as stack:
+ # users
+ user = await stack.enter_async_context(
+ insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
+ sqlalchemy_async_engine,
+ table=users,
+ values=random_user(**user_kwargs),
+ pk_col=users.c.id,
+ )
+ )
+
+ # users_secrets
+ secrets = await stack.enter_async_context(
+ insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
+ sqlalchemy_async_engine,
+ table=users_secrets,
+ values=random_user_secrets(user_id=user["id"], **secrets_kwargs),
+ pk_col=users_secrets.c.user_id,
+ )
+ )
+
+ assert secrets.pop("user_id", None) == user["id"]
+
+ yield {**user, **secrets}
+
+
+@contextlib.contextmanager
+def sync_insert_and_get_user_and_secrets_lifespan(
+ sqlalchemy_sync_engine: sa.engine.Engine, **overrides
+):
+ user_kwargs, secrets_kwargs = _get_kwargs_from_overrides(overrides)
+
+ with contextlib.ExitStack() as stack:
+ # users
+ user = stack.enter_context(
+ sync_insert_and_get_row_lifespan(
+ sqlalchemy_sync_engine,
+ table=users,
+ values=random_user(**user_kwargs),
+ pk_col=users.c.id,
+ )
+ )
+
+ # users_secrets
+ secrets = stack.enter_context(
+ sync_insert_and_get_row_lifespan(
+ sqlalchemy_sync_engine,
+ table=users_secrets,
+ values=random_user_secrets(user_id=user["id"], **secrets_kwargs),
+ pk_col=users_secrets.c.user_id,
+ )
+ )
+
+ assert secrets.pop("user_id", None) == user["id"]
+
+ yield {**user, **secrets}
+
+
+async def insert_user_and_secrets(conn, **overrides) -> int:
+    # NOTE: DEPRECATED: legacy adapter. Use insert_and_get_user_and_secrets_lifespan instead.
+    # Temporarily used where `conn` is produced by an aiopg engine.
+
+ user_kwargs, secrets_kwargs = _get_kwargs_from_overrides(overrides)
+
+ # user data
+ user_id = await conn.scalar(
+ users.insert().values(**random_user(**user_kwargs)).returning(users.c.id)
+ )
+ assert user_id is not None
+
+ # secrets
+ await conn.execute(
+ users_secrets.insert().values(
+ **random_user_secrets(user_id=user_id, **secrets_kwargs)
+ )
+ )
+
+ return user_id
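
A hypothetical fixture built on the new helper; the `asyncpg_engine` fixture name and the `email` override are assumptions of this sketch.

from collections.abc import AsyncIterator
from typing import Any

import pytest
from sqlalchemy.ext.asyncio import AsyncEngine

from pytest_simcore.helpers.postgres_users import insert_and_get_user_and_secrets_lifespan


@pytest.fixture
async def user_with_secrets(asyncpg_engine: AsyncEngine) -> AsyncIterator[dict[str, Any]]:
    async with insert_and_get_user_and_secrets_lifespan(
        asyncpg_engine, email="user@example.com"
    ) as user:
        # merges the users row with its users_secrets row (user_id popped)
        yield user
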
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/storage_rpc_server.py b/packages/pytest-simcore/src/pytest_simcore/helpers/storage_rpc_server.py
new file mode 100644
index 000000000000..72dc62ca438f
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/storage_rpc_server.py
@@ -0,0 +1,43 @@
+# pylint: disable=no-self-use
+# pylint: disable=not-context-manager
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+
+from typing import Literal
+
+from models_library.api_schemas_rpc_async_jobs.async_jobs import (
+ AsyncJobGet,
+)
+from models_library.api_schemas_webserver.storage import PathToExport
+from models_library.users import UserID
+from pydantic import TypeAdapter, validate_call
+from pytest_mock import MockType
+from servicelib.celery.models import OwnerMetadata
+from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient
+
+
+class StorageSideEffects:
+ # pylint: disable=no-self-use
+ @validate_call(config={"arbitrary_types_allowed": True})
+ async def start_export_data(
+ self,
+ rabbitmq_rpc_client: RabbitMQRPCClient | MockType,
+ *,
+ paths_to_export: list[PathToExport],
+ export_as: Literal["path", "download_link"],
+ owner_metadata: OwnerMetadata,
+ user_id: UserID
+ ) -> tuple[AsyncJobGet, OwnerMetadata]:
+ assert rabbitmq_rpc_client
+ assert owner_metadata
+ assert paths_to_export
+ assert export_as
+
+ async_job_get = TypeAdapter(AsyncJobGet).validate_python(
+ AsyncJobGet.model_json_schema()["examples"][0],
+ )
+
+ return async_job_get, owner_metadata
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_services_data.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_services_data.py
index 32d91d46c783..f3bb3c003f0b 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_services_data.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_services_data.py
@@ -1,6 +1,7 @@
"""
- NOTE: avoid creating dependencies
+NOTE: avoid creating dependencies
"""
+
from typing import Any
FAKE_FILE_CONSUMER_SERVICES = [
@@ -55,7 +56,7 @@ def list_fake_file_consumers() -> list[dict[str, Any]]:
consumers = []
for service in FAKE_FILE_CONSUMER_SERVICES:
for consumable in service["consumes"]:
- filetype, port, *_ = consumable.split(":") + ["input_1"]
+ filetype, port, *_ = [*consumable.split(":"), "input_1"]
consumer = {
"key": service["key"],
"version": service["version"],
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_login.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_login.py
index d055e3a110c0..6d843ceacdf3 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_login.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_login.py
@@ -1,48 +1,21 @@
import contextlib
import re
from collections.abc import AsyncIterator
-from datetime import datetime
-from typing import Any, TypedDict
+from typing import Any
-from aiohttp import web
from aiohttp.test_utils import TestClient
-from models_library.users import UserID
from servicelib.aiohttp import status
-from simcore_service_webserver.db.models import UserRole, UserStatus
-from simcore_service_webserver.groups.api import auto_add_user_to_product_group
-from simcore_service_webserver.login._constants import MSG_LOGGED_IN
from simcore_service_webserver.login._invitations_service import create_invitation_token
from simcore_service_webserver.login._login_repository_legacy import (
- AsyncpgStorage,
get_plugin_storage,
)
-from simcore_service_webserver.products.products_service import list_products
-from simcore_service_webserver.security.api import clean_auth_policy_cache
+from simcore_service_webserver.login.constants import MSG_LOGGED_IN
+from simcore_service_webserver.security import security_service
from yarl import URL
from .assert_checks import assert_status
-from .faker_factories import DEFAULT_FAKER, DEFAULT_TEST_PASSWORD, random_user
-
-
-# WARNING: DO NOT use UserDict is already in https://docs.python.org/3/library/collections.html#collections.UserDictclass UserRowDict(TypedDict):
-# NOTE: this is modified dict version of packages/postgres-database/src/simcore_postgres_database/models/users.py for testing purposes
-class _UserInfoDictRequired(TypedDict, total=True):
- id: int
- name: str
- email: str
- primary_gid: str
- raw_password: str
- status: UserStatus
- role: UserRole
-
-
-class UserInfoDict(_UserInfoDictRequired, total=False):
- created_at: datetime
- password_hash: str
- first_name: str
- last_name: str
- phone: str
-
+from .faker_factories import DEFAULT_FAKER
+from .webserver_users import NewUser, UserInfoDict, _create_account_in_db
TEST_MARKS = re.compile(r"TEST (\w+):(.*)")
@@ -65,76 +38,21 @@ def parse_link(text):
return URL(link).path
-async def _create_user(app: web.Application, data=None) -> UserInfoDict:
- db: AsyncpgStorage = get_plugin_storage(app)
-
- # create
- data = data or {}
- data.setdefault("status", UserStatus.ACTIVE.name)
- data.setdefault("role", UserRole.USER.name)
- data.setdefault("password", DEFAULT_TEST_PASSWORD)
- user = await db.create_user(random_user(**data))
-
- # get
- user = await db.get_user({"id": user["id"]})
- assert "first_name" in user
- assert "last_name" in user
-
- # adds extras
- extras = {"raw_password": data["password"]}
-
- return UserInfoDict(
- **{
- key: user[key]
- for key in [
- "id",
- "name",
- "email",
- "primary_gid",
- "status",
- "role",
- "created_at",
- "password_hash",
- "first_name",
- "last_name",
- "phone",
- ]
- },
- **extras,
- )
-
-
-async def _register_user_in_default_product(app: web.Application, user_id: UserID):
- products = list_products(app)
- assert products
- product_name = products[0].name
-
- return await auto_add_user_to_product_group(app, user_id, product_name=product_name)
-
-
-async def _create_account(
- app: web.Application,
- user_data: dict[str, Any] | None = None,
-) -> UserInfoDict:
- # users, groups in db
- user = await _create_user(app, user_data)
- # user has default product
- await _register_user_in_default_product(app, user_id=user["id"])
- return user
-
-
async def log_client_in(
client: TestClient,
user_data: dict[str, Any] | None = None,
*,
- enable_check=True,
+ exit_stack: contextlib.AsyncExitStack,
+ enable_check: bool = True,
) -> UserInfoDict:
assert client.app
# create account
- user = await _create_account(client.app, user_data=user_data)
+ user = await _create_account_in_db(
+ client.app, exit_stack=exit_stack, user_data=user_data
+ )
- # login
+    # login (requires the account created above)
url = client.app.router["auth_login"].url_for()
reponse = await client.post(
str(url),
@@ -150,26 +68,6 @@ async def log_client_in(
return user
-class NewUser:
- def __init__(
- self,
- user_data: dict[str, Any] | None = None,
- app: web.Application | None = None,
- ):
- self.user_data = user_data
- self.user = None
- assert app
- self.db = get_plugin_storage(app)
- self.app = app
-
- async def __aenter__(self) -> UserInfoDict:
- self.user = await _create_account(self.app, self.user_data)
- return self.user
-
- async def __aexit__(self, *args):
- await self.db.delete_user(self.user)
-
-
class LoggedUser(NewUser):
def __init__(self, client: TestClient, user_data=None, *, check_if_succeeds=True):
super().__init__(user_data, client.app)
@@ -179,7 +77,10 @@ def __init__(self, client: TestClient, user_data=None, *, check_if_succeeds=True
async def __aenter__(self) -> UserInfoDict:
self.user = await log_client_in(
- self.client, self.user_data, enable_check=self.enable_check
+ self.client,
+ self.user_data,
+ exit_stack=self.exit_stack,
+ enable_check=self.enable_check,
)
return self.user
@@ -187,7 +88,7 @@ async def __aexit__(self, *args):
assert self.client.app
# NOTE: cache key is based on an email. If the email is
# reused during the test, then it creates quite some noise
- await clean_auth_policy_cache(self.client.app)
+ await security_service.clean_auth_policy_cache(self.client.app)
return await super().__aexit__(*args)
@@ -231,11 +132,12 @@ def __init__(
self.confirmation = None
self.trial_days = trial_days
self.extra_credits_in_usd = extra_credits_in_usd
+ self.db = get_plugin_storage(self.app)
async def __aenter__(self) -> "NewInvitation":
# creates host user
assert self.client.app
- self.user = await _create_user(self.client.app, self.user_data)
+ self.user = await super().__aenter__()
self.confirmation = await create_invitation_token(
self.db,
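
LoggedUser keeps its usual shape after this refactoring; a minimal sketch, assuming an aiohttp TestClient `client` against a fully set-up webserver app (the "/v0/me" route used here is an assumption of this example).

from aiohttp.test_utils import TestClient
from servicelib.aiohttp import status

from pytest_simcore.helpers.assert_checks import assert_status
from pytest_simcore.helpers.webserver_login import LoggedUser


async def test_logged_user_lifecycle(client: TestClient) -> None:
    async with LoggedUser(client) as user:
        response = await client.get("/v0/me")
        await assert_status(response, status.HTTP_200_OK)
    assert user["email"]
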
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py
index 99ee393f3949..917d70d24cca 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py
@@ -4,7 +4,6 @@
import json
import uuid as uuidlib
-from http import HTTPStatus
from pathlib import Path
from typing import Any
@@ -187,7 +186,7 @@ async def __aexit__(self, *args):
async def assert_get_same_project(
client: TestClient,
project: ProjectDict,
- expected: HTTPStatus,
+ expected: int,
api_vtag="/v0",
) -> dict:
# GET /v0/projects/{project_id}
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_rpc_server.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_rpc_server.py
index 17d8051d096e..3cedcff8aba2 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_rpc_server.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_rpc_server.py
@@ -26,6 +26,14 @@
class WebserverRpcSideEffects:
# pylint: disable=no-self-use
+ def __init__(
+ self,
+ project_job_rpc_get: ProjectJobRpcGet = ProjectJobRpcGet.model_validate(
+ ProjectJobRpcGet.model_json_schema()["examples"][0]
+ ),
+ ):
+ self.project_job_rpc_get = project_job_rpc_get
+
@validate_call(config={"arbitrary_types_allowed": True})
async def mark_project_as_job(
self,
@@ -35,12 +43,14 @@ async def mark_project_as_job(
user_id: UserID,
project_uuid: ProjectID,
job_parent_resource_name: str,
+ storage_assets_deleted: bool,
) -> None:
assert rpc_client
assert not job_parent_resource_name.startswith("/") # nosec
assert "/" in job_parent_resource_name # nosec
assert not job_parent_resource_name.endswith("/") # nosec
+ assert isinstance(storage_assets_deleted, bool)
assert product_name
assert user_id
@@ -84,3 +94,25 @@ async def list_projects_marked_as_jobs(
limit=limit,
offset=offset,
)
+
+ @validate_call(config={"arbitrary_types_allowed": True})
+ async def get_project_marked_as_job(
+ self,
+ rpc_client: RabbitMQRPCClient | MockType,
+ *,
+ product_name: ProductName,
+ user_id: UserID,
+ project_uuid: ProjectID,
+ job_parent_resource_name: str,
+ ) -> ProjectJobRpcGet:
+ assert rpc_client
+ assert product_name
+ assert user_id
+ assert project_uuid
+ assert job_parent_resource_name
+
+        # Return the configured example, overriding the identifiers from this call
+ _data = self.project_job_rpc_get.model_dump()
+ _data["uuid"] = str(project_uuid)
+ _data["job_parent_resource_name"] = job_parent_resource_name
+ return ProjectJobRpcGet.model_validate(_data)
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_users.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_users.py
new file mode 100644
index 000000000000..edb3399a14fa
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_users.py
@@ -0,0 +1,139 @@
+import contextlib
+from datetime import datetime
+from typing import Any, TypedDict
+
+from aiohttp import web
+from common_library.users_enums import UserRole, UserStatus
+from models_library.users import UserID
+from simcore_service_webserver.db.plugin import get_asyncpg_engine
+from simcore_service_webserver.groups import api as groups_service
+from simcore_service_webserver.products.products_service import list_products
+from sqlalchemy.ext.asyncio import AsyncEngine
+
+from .faker_factories import DEFAULT_TEST_PASSWORD
+from .postgres_users import insert_and_get_user_and_secrets_lifespan
+
+
+# WARNING: DO NOT name this UserDict: that name already exists in https://docs.python.org/3/library/collections.html#collections.UserDict
+# NOTE: this is a modified dict version of packages/postgres-database/src/simcore_postgres_database/models/users.py for testing purposes
+class _UserInfoDictRequired(TypedDict, total=True):
+ id: int
+ name: str
+ email: str
+ primary_gid: str
+ raw_password: str
+ status: UserStatus
+ role: UserRole
+
+
+class UserInfoDict(_UserInfoDictRequired, total=False):
+ created_at: datetime
+ password_hash: str
+ first_name: str
+ last_name: str
+ phone: str
+
+
+async def _create_user_in_db(
+ sqlalchemy_async_engine: AsyncEngine,
+ exit_stack: contextlib.AsyncExitStack,
+ data: dict | None = None,
+) -> UserInfoDict:
+
+ # create fake
+ data = data or {}
+ data.setdefault("status", UserStatus.ACTIVE.name)
+ data.setdefault("role", UserRole.USER.name)
+ data.setdefault("password", DEFAULT_TEST_PASSWORD)
+
+ raw_password = data["password"]
+
+ # inject in db
+ user = await exit_stack.enter_async_context(
+ insert_and_get_user_and_secrets_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
+ sqlalchemy_async_engine, **data
+ )
+ )
+ assert "first_name" in user
+ assert "last_name" in user
+
+ return UserInfoDict(
+ # required
+ # - in db
+ id=user["id"],
+ name=user["name"],
+ email=user["email"],
+ primary_gid=user["primary_gid"],
+ status=(
+ UserStatus(user["status"])
+ if not isinstance(user["status"], UserStatus)
+ else user["status"]
+ ),
+ role=(
+ UserRole(user["role"])
+ if not isinstance(user["role"], UserRole)
+ else user["role"]
+ ),
+ # optional
+ # - in db
+ created_at=(
+ user["created_at"]
+ if isinstance(user["created_at"], datetime)
+ else datetime.fromisoformat(user["created_at"])
+ ),
+ password_hash=user["password_hash"],
+ first_name=user["first_name"],
+ last_name=user["last_name"],
+ phone=user["phone"],
+ # extras
+ raw_password=raw_password,
+ )
+
+
+async def _register_user_in_default_product(app: web.Application, user_id: UserID):
+ products = list_products(app)
+ assert products
+ product_name = products[0].name
+
+ return await groups_service.auto_add_user_to_product_group(
+ app, user_id, product_name=product_name
+ )
+
+
+async def _create_account_in_db(
+ app: web.Application,
+ exit_stack: contextlib.AsyncExitStack,
+ user_data: dict[str, Any] | None = None,
+) -> UserInfoDict:
+ # users, groups in db
+ user = await _create_user_in_db(
+ get_asyncpg_engine(app), exit_stack=exit_stack, data=user_data
+ )
+
+ # user has default product
+ await _register_user_in_default_product(app, user_id=user["id"])
+ return user
+
+
+class NewUser:
+ def __init__(
+ self,
+ user_data: dict[str, Any] | None = None,
+ app: web.Application | None = None,
+ ):
+ self.user_data = user_data
+ self.user = None
+
+ assert app
+ self.app = app
+
+ self.exit_stack = contextlib.AsyncExitStack()
+
+ async def __aenter__(self) -> UserInfoDict:
+ self.user = await _create_account_in_db(
+ self.app, self.exit_stack, self.user_data
+ )
+ return self.user
+
+ async def __aexit__(self, *args):
+ await self.exit_stack.aclose()
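
A brief usage sketch for NewUser, assuming a webserver `app` fixture wired with an asyncpg engine and at least one product registered.

from aiohttp import web
from common_library.users_enums import UserRole

from pytest_simcore.helpers.webserver_users import NewUser


async def test_new_user_roundtrip(app: web.Application) -> None:
    async with NewUser({"role": UserRole.TESTER.name}, app=app) as user:
        assert user["role"] == UserRole.TESTER
        assert user["raw_password"]
    # on __aexit__, the AsyncExitStack removes the users and users_secrets rows
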
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_workspaces.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_workspaces.py
index 1dbe5ebeb42a..adf767b49455 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_workspaces.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_workspaces.py
@@ -5,7 +5,7 @@
from simcore_postgres_database.models.workspaces_access_rights import (
workspaces_access_rights,
)
-from simcore_service_webserver.db.plugin import get_database_engine
+from simcore_service_webserver.db.plugin import get_database_engine_legacy
from sqlalchemy.dialects.postgresql import insert as pg_insert
@@ -18,7 +18,7 @@ async def update_or_insert_workspace_group(
write: bool,
delete: bool,
) -> None:
- async with get_database_engine(app).acquire() as conn:
+ async with get_database_engine_legacy(app).acquire() as conn:
insert_stmt = pg_insert(workspaces_access_rights).values(
workspace_id=workspace_id,
gid=group_id,
diff --git a/packages/pytest-simcore/src/pytest_simcore/logging.py b/packages/pytest-simcore/src/pytest_simcore/logging.py
new file mode 100644
index 000000000000..2db9ae93c5d3
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/logging.py
@@ -0,0 +1,42 @@
+# In conftest.py or test_logging_utils.py
+import contextlib
+import logging
+from collections.abc import Iterator
+from contextlib import contextmanager
+
+import pytest
+from pytest_mock import MockerFixture
+from servicelib.logging_utils import async_loggers
+
+
+@pytest.fixture(autouse=True)
+def preserve_caplog_for_async_logging(mocker: MockerFixture) -> None:
+    # Patch async_loggers so that caplog's handlers (and pytest's log capture in
+    # general) are preserved: pytest captures logs in a special way that is not
+    # compatible with the queue handler used in async logging.
+ original_setup = async_loggers
+
+ @contextmanager
+ def patched_async_loggers(**kwargs) -> Iterator[None]:
+ # Find caplog's handler in root logger
+ root_logger = logging.getLogger()
+ caplog_handlers = [
+ h for h in root_logger.handlers if "LogCaptureHandler" in f"{type(h)}"
+ ]
+
+ with original_setup(**kwargs):
+ # After setup, restore caplog handlers alongside queue handler
+ for handler in caplog_handlers:
+ if handler not in root_logger.handlers:
+ root_logger.addHandler(handler)
+ yield
+
+ methods_to_patch = [
+ "servicelib.logging_utils.async_loggers",
+ "servicelib.fastapi.logging_lifespan.async_loggers",
+ "tests.test_logging_utils.async_loggers",
+ ]
+ for method in methods_to_patch:
+ with contextlib.suppress(AttributeError, ModuleNotFoundError):
+ # Patch the method to use our patched version
+ mocker.patch(method, patched_async_loggers)
diff --git a/packages/pytest-simcore/src/pytest_simcore/long_running_tasks.py b/packages/pytest-simcore/src/pytest_simcore/long_running_tasks.py
new file mode 100644
index 000000000000..e3911dc62f5a
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/long_running_tasks.py
@@ -0,0 +1,14 @@
+from datetime import timedelta
+
+import pytest
+from pytest_mock import MockerFixture
+
+
+@pytest.fixture
+async def fast_long_running_tasks_cancellation(
+ mocker: MockerFixture,
+) -> None:
+ mocker.patch(
+ "servicelib.long_running_tasks.task._CANCEL_TASKS_CHECK_INTERVAL",
+ new=timedelta(seconds=1),
+ )
diff --git a/packages/pytest-simcore/src/pytest_simcore/postgres_service.py b/packages/pytest-simcore/src/pytest_simcore/postgres_service.py
index 19d9247e8eab..7814d413c07c 100644
--- a/packages/pytest-simcore/src/pytest_simcore/postgres_service.py
+++ b/packages/pytest-simcore/src/pytest_simcore/postgres_service.py
@@ -250,7 +250,6 @@ def postgres_env_vars_dict(postgres_dsn: PostgresTestConfig) -> EnvVarsDict:
"POSTGRES_DB": postgres_dsn["database"],
"POSTGRES_HOST": postgres_dsn["host"],
"POSTGRES_PORT": f"{postgres_dsn['port']}",
- "POSTGRES_ENDPOINT": f"{postgres_dsn['host']}:{postgres_dsn['port']}",
}
diff --git a/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py b/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py
index e8691a10724c..8266de0947bb 100644
--- a/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py
+++ b/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py
@@ -97,7 +97,6 @@ def _is_model_cls(obj) -> bool:
assert inspect.ismodule(module)
for model_name, model_cls in inspect.getmembers(module, _is_model_cls):
-
yield from iter_model_examples_in_class(model_cls, model_name)
@@ -172,7 +171,7 @@ def model_cls_examples(model_cls: type[BaseModel]) -> dict[str, dict[str, Any]]:
"""
warnings.warn(
"The 'model_cls_examples' fixture is deprecated and will be removed in a future version. "
- "Please use 'iter_model_example_in_class' or 'iter_model_examples_in_module' as an alternative.",
+ "Please use 'iter_model_examples_in_class' or 'iter_model_examples_in_module' as an alternative.",
DeprecationWarning,
stacklevel=2,
)
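
The deprecation message points to the iterator helpers; a sketch of the typical replacement pattern, assuming the iterator yields (model_cls, example_name, example_data) triples and `MyModel` is a hypothetical pydantic model. The explicit second argument mirrors the internal call shown above.

@pytest.mark.parametrize(
    "model_cls, example_name, example_data",
    iter_model_examples_in_class(MyModel, "MyModel"),
)
def test_my_model_examples(model_cls, example_name, example_data):
    assert model_cls.model_validate(example_data), f"{example_name} should validate"
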
diff --git a/packages/pytest-simcore/src/pytest_simcore/redis_service.py b/packages/pytest-simcore/src/pytest_simcore/redis_service.py
index 05aec86a2340..04177c7f9e28 100644
--- a/packages/pytest-simcore/src/pytest_simcore/redis_service.py
+++ b/packages/pytest-simcore/src/pytest_simcore/redis_service.py
@@ -4,10 +4,10 @@
import logging
from collections.abc import AsyncIterator
-from datetime import timedelta
import pytest
import tenacity
+from fakeredis import FakeAsyncRedis
from pytest_mock import MockerFixture
from redis.asyncio import Redis, from_url
from settings_library.basic_types import PortInt
@@ -116,8 +116,6 @@ async def wait_till_redis_responsive(redis_url: URL | str) -> None:
@pytest.fixture
-def mock_redis_socket_timeout(mocker: MockerFixture) -> None:
- # lowered to allow CI to properly shutdown RedisClientSDK instances
- mocker.patch(
- "servicelib.redis._client.DEFAULT_SOCKET_TIMEOUT", timedelta(seconds=0.25)
- )
+async def use_in_memory_redis(mocker: MockerFixture) -> RedisSettings:
+ mocker.patch("redis.asyncio.from_url", FakeAsyncRedis)
+ return RedisSettings()
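
A usage sketch for the new fixture: anything that builds its client through redis.asyncio.from_url now ends up on fakeredis, so no real Redis service is needed (the test body is illustrative only).

import redis.asyncio
from settings_library.redis import RedisSettings


async def test_against_fake_redis(use_in_memory_redis: RedisSettings) -> None:
    client = redis.asyncio.from_url("redis://unused:6379/0")  # patched -> FakeAsyncRedis
    await client.set("key", "value")
    assert await client.get("key") == b"value"
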
diff --git a/packages/pytest-simcore/src/pytest_simcore/repository_paths.py b/packages/pytest-simcore/src/pytest_simcore/repository_paths.py
index 6112cef627bd..0f52de8f8444 100644
--- a/packages/pytest-simcore/src/pytest_simcore/repository_paths.py
+++ b/packages/pytest-simcore/src/pytest_simcore/repository_paths.py
@@ -85,6 +85,14 @@ def services_docker_compose_file(services_dir: Path) -> Path:
return dcpath
+@pytest.fixture(scope="session")
+def services_docker_compose_dev_vendors_file(osparc_simcore_services_dir: Path) -> Path:
+ """Path to osparc-simcore/services/docker-compose-dev-vendors.yml file"""
+ dcpath = osparc_simcore_services_dir / "docker-compose-dev-vendors.yml"
+ assert dcpath.exists()
+ return dcpath
+
+
@pytest.fixture(scope="session")
def pylintrc(osparc_simcore_root_dir: Path) -> Path:
pylintrc = osparc_simcore_root_dir / ".pylintrc"
diff --git a/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py b/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py
index a001bb1d5d81..c3036d7c9de7 100644
--- a/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py
+++ b/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py
@@ -8,7 +8,7 @@
from urllib.parse import urlparse, urlunparse
import pytest
-from aioresponses import aioresponses as AioResponsesMock
+from aioresponses import aioresponses as AioResponsesMock # noqa: N812
from aioresponses.core import CallbackResult
from faker import Faker
from models_library.api_schemas_directorv2.computations import (
diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_services.py b/packages/pytest-simcore/src/pytest_simcore/simcore_services.py
index 2a4f6d2ff4dc..274a8edb44a7 100644
--- a/packages/pytest-simcore/src/pytest_simcore/simcore_services.py
+++ b/packages/pytest-simcore/src/pytest_simcore/simcore_services.py
@@ -29,6 +29,7 @@
_SERVICES_TO_SKIP: Final[set[str]] = {
+ "api-worker",
"agent", # global mode deploy (NO exposed ports, has http API)
"dask-sidecar", # global mode deploy (NO exposed ports, **NO** http API)
"migration",
@@ -40,6 +41,7 @@
"whoami",
"sto-worker",
"sto-worker-cpu-bound",
+ "traefik-config-placeholder",
}
# TODO: unify healthcheck policies see https://github.com/ITISFoundation/osparc-simcore/pull/2281
DEFAULT_SERVICE_HEALTHCHECK_ENTRYPOINT: Final[str] = "/v0/"
diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_storage_data_models.py b/packages/pytest-simcore/src/pytest_simcore/simcore_storage_data_models.py
index e897b9ced75e..a41d4876612d 100644
--- a/packages/pytest-simcore/src/pytest_simcore/simcore_storage_data_models.py
+++ b/packages/pytest-simcore/src/pytest_simcore/simcore_storage_data_models.py
@@ -18,7 +18,8 @@
from sqlalchemy.dialects.postgresql import insert as pg_insert
from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine
-from .helpers.faker_factories import DEFAULT_FAKER, random_project, random_user
+from .helpers.faker_factories import DEFAULT_FAKER, random_project
+from .helpers.postgres_users import insert_and_get_user_and_secrets_lifespan
@asynccontextmanager
@@ -30,19 +31,10 @@ async def _user_context(
# NOTE: Ideally this (and next fixture) should be done via webserver API but at this point
# in time, the webserver service would bring more dependencies to other services
# which would turn this test too complex.
-
- # pylint: disable=no-value-for-parameter
- stmt = users.insert().values(**random_user(name=name)).returning(users.c.id)
- async with sqlalchemy_async_engine.begin() as conn:
- result = await conn.execute(stmt)
- row = result.one()
- assert isinstance(row.id, int)
-
- try:
- yield TypeAdapter(UserID).validate_python(row.id)
- finally:
- async with sqlalchemy_async_engine.begin() as conn:
- await conn.execute(users.delete().where(users.c.id == row.id))
+ async with insert_and_get_user_and_secrets_lifespan(
+ sqlalchemy_async_engine, name=name
+ ) as user:
+ yield TypeAdapter(UserID).validate_python(user["id"])
@pytest.fixture
diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_groups_fixtures.py b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_groups_fixtures.py
index cc31177abcec..1cc8ca080fdc 100644
--- a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_groups_fixtures.py
+++ b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_groups_fixtures.py
@@ -3,9 +3,9 @@
# pylint: disable=unused-variable
"""
- Fixtures for groups
+Fixtures for groups
- NOTE: These fixtures are used in integration and unit tests
+NOTE: These fixtures are used in integration and unit tests
"""
@@ -18,7 +18,7 @@
from models_library.api_schemas_webserver.groups import GroupGet
from models_library.groups import GroupsByTypeTuple, StandardGroupCreate
from models_library.users import UserID
-from pytest_simcore.helpers.webserver_login import NewUser, UserInfoDict
+from pytest_simcore.helpers.webserver_users import NewUser, UserInfoDict
from simcore_service_webserver.groups._groups_service import (
add_user_in_group,
create_standard_group,
diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_projects_rest_api.py b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_projects_rest_api.py
index 2533cad65dda..aa88a5b5d82f 100644
--- a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_projects_rest_api.py
+++ b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_projects_rest_api.py
@@ -70,7 +70,11 @@ def request_desc(self) -> str:
"prjOwner": "foo@bar.com",
"tags": [],
"state": {
- "locked": {"value": False, "status": "CLOSED"},
+ "shareState": {
+ "status": "CLOSED",
+ "locked": False,
+ "currentUserGroupids": [],
+ },
"state": {"value": "NOT_STARTED"},
},
"dev": None,
@@ -114,7 +118,11 @@ def request_desc(self) -> str:
"quality": {},
"tags": [],
"state": {
- "locked": {"value": False, "status": "CLOSED"},
+ "shareState": {
+ "status": "CLOSED",
+ "locked": False,
+ "currentUserGroupids": [],
+ },
"state": {"value": "NOT_STARTED"},
},
"workspace_id": None,
@@ -149,14 +157,10 @@ def request_desc(self) -> str:
"quality": {},
"tags": [],
"state": {
- "locked": {
- "value": True,
- "owner": {
- "user_id": 1,
- "first_name": "crespo",
- "last_name": "",
- },
+ "shareState": {
"status": "OPENED",
+ "locked": True,
+ "currentUserGroupids": [1],
},
"state": {"value": "NOT_STARTED"},
},
@@ -284,14 +288,10 @@ def request_desc(self) -> str:
},
"tags": [],
"state": {
- "locked": {
- "value": True,
- "owner": {
- "user_id": 1,
- "first_name": "crespo",
- "last_name": "",
- },
+ "shareState": {
"status": "OPENED",
+ "locked": True,
+ "currentUserGroupids": [1],
},
"state": {"value": "NOT_STARTED"},
},
@@ -547,14 +547,10 @@ def request_desc(self) -> str:
},
"tags": [],
"state": {
- "locked": {
- "value": True,
- "owner": {
- "user_id": 1,
- "first_name": "crespo",
- "last_name": "",
- },
+ "shareState": {
"status": "OPENED",
+ "locked": True,
+ "currentUserGroupids": [1],
},
"state": {"value": "NOT_STARTED"},
},
@@ -734,7 +730,11 @@ def request_desc(self) -> str:
},
"tags": [],
"state": {
- "locked": {"value": False, "status": "CLOSED"},
+ "shareState": {
+ "status": "CLOSED",
+ "locked": False,
+ "currentUserGroupids": [],
+ },
"state": {"value": "NOT_STARTED"},
},
}
@@ -988,7 +988,11 @@ def request_desc(self) -> str:
"prjOwner": "user@company.com",
"tags": [22],
"state": {
- "locked": {"value": False, "status": "CLOSED"},
+ "shareState": {
+ "status": "CLOSED",
+ "locked": False,
+ "currentUserGroupids": [],
+ },
"state": {"value": "NOT_STARTED"},
},
}
diff --git a/packages/pytest-simcore/src/pytest_simcore/socketio.py b/packages/pytest-simcore/src/pytest_simcore/socketio.py
index fd1f21c24a8e..586e9d67e74c 100644
--- a/packages/pytest-simcore/src/pytest_simcore/socketio.py
+++ b/packages/pytest-simcore/src/pytest_simcore/socketio.py
@@ -70,7 +70,7 @@ async def web_server(
@pytest.fixture
async def server_url(web_server: URL) -> str:
- return f'{web_server.with_path("/")}'
+ return f"{web_server.with_path('/')}"
@pytest.fixture
diff --git a/packages/pytest-simcore/src/pytest_simcore/socketio_client.py b/packages/pytest-simcore/src/pytest_simcore/socketio_client.py
index 23b9ee0b190c..99b769112056 100644
--- a/packages/pytest-simcore/src/pytest_simcore/socketio_client.py
+++ b/packages/pytest-simcore/src/pytest_simcore/socketio_client.py
@@ -9,10 +9,12 @@
import pytest
import socketio
from aiohttp.test_utils import TestClient
-from pytest_simcore.helpers.assert_checks import assert_status
+from pytest_simcore.helpers.logging_tools import log_context
from servicelib.aiohttp import status
from yarl import URL
+from .helpers.assert_checks import assert_status
+
logger = logging.getLogger(__name__)
@@ -44,33 +46,30 @@ async def _create(client_override: TestClient | None = None) -> str:
assert data
assert not error
- return (
- resp.request_info.headers["Cookie"]
- if "Cookie" in resp.request_info.headers
- else ""
- )
+ return resp.request_info.headers.get("Cookie", "")
return _create
@pytest.fixture
-async def socketio_client_factory(
- socketio_url_factory: Callable,
- security_cookie_factory: Callable,
- client_session_id_factory: Callable,
+async def create_socketio_connection(
+ socketio_url_factory: Callable[[TestClient | None], str],
+ security_cookie_factory: Callable[[TestClient | None], Awaitable[str]],
+ client_session_id_factory: Callable[[], str],
) -> AsyncIterable[
- Callable[[str | None, TestClient | None], Awaitable[socketio.AsyncClient]]
+ Callable[
+ [str | None, TestClient | None], Awaitable[tuple[socketio.AsyncClient, str]]
+ ]
]:
clients: list[socketio.AsyncClient] = []
async def _connect(
client_session_id: str | None = None, client: TestClient | None = None
- ) -> socketio.AsyncClient:
+ ) -> tuple[socketio.AsyncClient, str]:
if client_session_id is None:
client_session_id = client_session_id_factory()
sio = socketio.AsyncClient(ssl_verify=False)
- # enginio 3.10.0 introduced ssl verification
assert client_session_id
url = str(
URL(socketio_url_factory(client)).with_query(
@@ -83,21 +82,27 @@ async def _connect(
# WARNING: engineio fails with empty cookies. Expects "key=value"
headers.update({"Cookie": cookie})
- print(f"--> Connecting socketio client to {url} ...")
- await sio.connect(url, headers=headers, wait_timeout=10)
- assert sio.sid
- print("... connection done")
+ with log_context(logging.INFO, f"socketio_client: connecting to {url}"):
+ print(f"--> Connecting socketio client to {url} ...")
+ sio.on(
+ "connect",
+ handler=lambda: logger.info("Connected successfully with %s", sio.sid),
+ )
+ sio.on(
+ "disconnect",
+ handler=lambda: logger.info("Disconnected from %s", sio.sid),
+ )
+ await sio.connect(url, headers=headers, wait_timeout=10)
+ assert sio.sid
clients.append(sio)
- return sio
+ return sio, client_session_id
yield _connect
# cleans up clients produce by _connect(*) calls
for sio in clients:
if sio.connected:
- print(f"<--Disconnecting socketio client {sio}")
- await sio.disconnect()
- await sio.wait()
- print(f"... disconnection from {sio} done.")
- assert not sio.connected
- assert not sio.sid
+ with log_context(logging.INFO, f"socketio_client: disconnecting {sio}"):
+ await sio.disconnect()
+ await sio.wait()
+ assert not sio.connected
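
A sketch for the renamed factory: it now returns both the connected client and the session id it used (a logged-in TestClient fixture is assumed).

from aiohttp.test_utils import TestClient


async def test_socketio_roundtrip(
    create_socketio_connection, client: TestClient
) -> None:
    sio, client_session_id = await create_socketio_connection(None, client)
    assert sio.connected
    assert client_session_id  # handy to correlate with resource-manager entries
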
diff --git a/packages/pytest-simcore/src/pytest_simcore/tracing.py b/packages/pytest-simcore/src/pytest_simcore/tracing.py
new file mode 100644
index 000000000000..e9bed749c2c8
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/tracing.py
@@ -0,0 +1,38 @@
+import pytest
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+from pytest_mock import MockerFixture
+
+
+@pytest.fixture
+async def setup_tracing_fastapi(
+ mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch
+) -> InMemorySpanExporter:
+ memory_exporter = InMemorySpanExporter()
+ span_processor = SimpleSpanProcessor(memory_exporter)
+ mocker.patch(
+ "servicelib.fastapi.tracing._create_span_processor", return_value=span_processor
+ )
+
+ monkeypatch.setenv(
+ "TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT", "http://opentelemetry-collector"
+ )
+ monkeypatch.setenv("TRACING_OPENTELEMETRY_COLLECTOR_PORT", "4318")
+ return memory_exporter
+
+
+@pytest.fixture
+async def setup_tracing_aiohttp(
+ mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch
+) -> InMemorySpanExporter:
+ memory_exporter = InMemorySpanExporter()
+ span_processor = SimpleSpanProcessor(memory_exporter)
+ mocker.patch(
+ "servicelib.aiohttp.tracing._create_span_processor", return_value=span_processor
+ )
+
+ monkeypatch.setenv(
+ "TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT", "http://opentelemetry-collector"
+ )
+ monkeypatch.setenv("TRACING_OPENTELEMETRY_COLLECTOR_PORT", "4318")
+ return memory_exporter
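
A sketch of how a test could consume the exporter returned by these fixtures, assuming the application under test initializes tracing from the patched environment variables and a `client` fixture issues requests against it.

from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter


async def test_requests_produce_spans(
    setup_tracing_fastapi: InMemorySpanExporter,
    client,  # hypothetical httpx.AsyncClient against the instrumented app
) -> None:
    await client.get("/")
    spans = setup_tracing_fastapi.get_finished_spans()
    assert spans, "expected at least one exported span"
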
diff --git a/packages/pytest-simcore/tests/test_helpers_asserts_checks.py b/packages/pytest-simcore/tests/test_helpers_asserts_checks.py
new file mode 100644
index 000000000000..189d84f9f81e
--- /dev/null
+++ b/packages/pytest-simcore/tests/test_helpers_asserts_checks.py
@@ -0,0 +1,35 @@
+import pytest
+from pytest_simcore.helpers.assert_checks import assert_equal_ignoring_none
+
+
+@pytest.mark.parametrize(
+ "expected, actual",
+ [
+ ({"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}),
+ ({"a": 1, "b": None}, {"a": 1, "b": 42}),
+ ({"a": {"x": 10, "y": None}}, {"a": {"x": 10, "y": 99}}),
+ ({"a": {"x": 10, "y": 20}}, {"a": {"x": 10, "y": 20, "z": 30}}),
+ ({}, {"foo": "bar"}),
+ ],
+)
+def test_assert_equal_ignoring_none_passes(expected, actual):
+ assert_equal_ignoring_none(expected, actual)
+
+
+@pytest.mark.parametrize(
+ "expected, actual, error_msg",
+ [
+ ({"a": 1, "b": 2}, {"a": 1}, "Missing key b"),
+ ({"a": 1, "b": 2}, {"a": 1, "b": 3}, "Mismatch in b: 3 != 2"),
+ (
+ {"a": {"x": 10, "y": 20}},
+ {"a": {"x": 10, "y": 99}},
+ "Mismatch in y: 99 != 20",
+ ),
+ ({"a": {"x": 10}}, {"a": {}}, "Missing key x"),
+ ],
+)
+def test_assert_equal_ignoring_none_fails(expected, actual, error_msg):
+ with pytest.raises(AssertionError) as exc_info:
+ assert_equal_ignoring_none(expected, actual)
+ assert error_msg in str(exc_info.value)
diff --git a/packages/pytest-simcore/uv.lock b/packages/pytest-simcore/uv.lock
deleted file mode 100644
index 57c794b678f1..000000000000
--- a/packages/pytest-simcore/uv.lock
+++ /dev/null
@@ -1,728 +0,0 @@
-version = 1
-revision = 1
-requires-python = ">=3.11"
-
-[[package]]
-name = "annotated-types"
-version = "0.7.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
-]
-
-[[package]]
-name = "anyio"
-version = "4.9.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "idna" },
- { name = "sniffio" },
- { name = "typing-extensions", marker = "python_full_version < '3.13'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916 },
-]
-
-[[package]]
-name = "bidict"
-version = "0.23.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/9a/6e/026678aa5a830e07cd9498a05d3e7e650a4f56a42f267a53d22bcda1bdc9/bidict-0.23.1.tar.gz", hash = "sha256:03069d763bc387bbd20e7d49914e75fc4132a41937fa3405417e1a5a2d006d71", size = 29093 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764 },
-]
-
-[[package]]
-name = "certifi"
-version = "2025.1.31"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 },
-]
-
-[[package]]
-name = "click"
-version = "8.1.8"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "colorama", marker = "sys_platform == 'win32'" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 },
-]
-
-[[package]]
-name = "colorama"
-version = "0.4.6"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
-]
-
-[[package]]
-name = "dnspython"
-version = "2.7.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632 },
-]
-
-[[package]]
-name = "email-validator"
-version = "2.2.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "dnspython" },
- { name = "idna" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/48/ce/13508a1ec3f8bb981ae4ca79ea40384becc868bfae97fd1c942bb3a001b1/email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7", size = 48967 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d7/ee/bf0adb559ad3c786f12bcbc9296b3f5675f529199bef03e2df281fa1fadb/email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631", size = 33521 },
-]
-
-[[package]]
-name = "fastapi"
-version = "0.115.12"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pydantic" },
- { name = "starlette" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/f4/55/ae499352d82338331ca1e28c7f4a63bfd09479b16395dce38cf50a39e2c2/fastapi-0.115.12.tar.gz", hash = "sha256:1e2c2a2646905f9e83d32f04a3f86aff4a286669c6c950ca95b5fd68c2602681", size = 295236 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/50/b3/b51f09c2ba432a576fe63758bddc81f78f0c6309d9e5c10d194313bf021e/fastapi-0.115.12-py3-none-any.whl", hash = "sha256:e94613d6c05e27be7ffebdd6ea5f388112e5e430c8f7d6494a9d1d88d43e814d", size = 95164 },
-]
-
-[package.optional-dependencies]
-standard = [
- { name = "email-validator" },
- { name = "fastapi-cli", extra = ["standard"] },
- { name = "httpx" },
- { name = "jinja2" },
- { name = "python-multipart" },
- { name = "uvicorn", extra = ["standard"] },
-]
-
-[[package]]
-name = "fastapi-cli"
-version = "0.0.7"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "rich-toolkit" },
- { name = "typer" },
- { name = "uvicorn", extra = ["standard"] },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/fe/73/82a5831fbbf8ed75905bacf5b2d9d3dfd6f04d6968b29fe6f72a5ae9ceb1/fastapi_cli-0.0.7.tar.gz", hash = "sha256:02b3b65956f526412515907a0793c9094abd4bfb5457b389f645b0ea6ba3605e", size = 16753 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a1/e6/5daefc851b514ce2287d8f5d358ae4341089185f78f3217a69d0ce3a390c/fastapi_cli-0.0.7-py3-none-any.whl", hash = "sha256:d549368ff584b2804336c61f192d86ddea080c11255f375959627911944804f4", size = 10705 },
-]
-
-[package.optional-dependencies]
-standard = [
- { name = "uvicorn", extra = ["standard"] },
-]
-
-[[package]]
-name = "h11"
-version = "0.14.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 },
-]
-
-[[package]]
-name = "httpcore"
-version = "1.0.7"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "certifi" },
- { name = "h11" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 },
-]
-
-[[package]]
-name = "httptools"
-version = "0.6.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a7/9a/ce5e1f7e131522e6d3426e8e7a490b3a01f39a6696602e1c4f33f9e94277/httptools-0.6.4.tar.gz", hash = "sha256:4e93eee4add6493b59a5c514da98c939b244fce4a0d8879cd3f466562f4b7d5c", size = 240639 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7b/26/bb526d4d14c2774fe07113ca1db7255737ffbb119315839af2065abfdac3/httptools-0.6.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f47f8ed67cc0ff862b84a1189831d1d33c963fb3ce1ee0c65d3b0cbe7b711069", size = 199029 },
- { url = "https://files.pythonhosted.org/packages/a6/17/3e0d3e9b901c732987a45f4f94d4e2c62b89a041d93db89eafb262afd8d5/httptools-0.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0614154d5454c21b6410fdf5262b4a3ddb0f53f1e1721cfd59d55f32138c578a", size = 103492 },
- { url = "https://files.pythonhosted.org/packages/b7/24/0fe235d7b69c42423c7698d086d4db96475f9b50b6ad26a718ef27a0bce6/httptools-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8787367fbdfccae38e35abf7641dafc5310310a5987b689f4c32cc8cc3ee975", size = 462891 },
- { url = "https://files.pythonhosted.org/packages/b1/2f/205d1f2a190b72da6ffb5f41a3736c26d6fa7871101212b15e9b5cd8f61d/httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b0f7fe4fd38e6a507bdb751db0379df1e99120c65fbdc8ee6c1d044897a636", size = 459788 },
- { url = "https://files.pythonhosted.org/packages/6e/4c/d09ce0eff09057a206a74575ae8f1e1e2f0364d20e2442224f9e6612c8b9/httptools-0.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40a5ec98d3f49904b9fe36827dcf1aadfef3b89e2bd05b0e35e94f97c2b14721", size = 433214 },
- { url = "https://files.pythonhosted.org/packages/3e/d2/84c9e23edbccc4a4c6f96a1b8d99dfd2350289e94f00e9ccc7aadde26fb5/httptools-0.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dacdd3d10ea1b4ca9df97a0a303cbacafc04b5cd375fa98732678151643d4988", size = 434120 },
- { url = "https://files.pythonhosted.org/packages/d0/46/4d8e7ba9581416de1c425b8264e2cadd201eb709ec1584c381f3e98f51c1/httptools-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:288cd628406cc53f9a541cfaf06041b4c71d751856bab45e3702191f931ccd17", size = 88565 },
- { url = "https://files.pythonhosted.org/packages/bb/0e/d0b71465c66b9185f90a091ab36389a7352985fe857e352801c39d6127c8/httptools-0.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:df017d6c780287d5c80601dafa31f17bddb170232d85c066604d8558683711a2", size = 200683 },
- { url = "https://files.pythonhosted.org/packages/e2/b8/412a9bb28d0a8988de3296e01efa0bd62068b33856cdda47fe1b5e890954/httptools-0.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:85071a1e8c2d051b507161f6c3e26155b5c790e4e28d7f236422dbacc2a9cc44", size = 104337 },
- { url = "https://files.pythonhosted.org/packages/9b/01/6fb20be3196ffdc8eeec4e653bc2a275eca7f36634c86302242c4fbb2760/httptools-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69422b7f458c5af875922cdb5bd586cc1f1033295aa9ff63ee196a87519ac8e1", size = 508796 },
- { url = "https://files.pythonhosted.org/packages/f7/d8/b644c44acc1368938317d76ac991c9bba1166311880bcc0ac297cb9d6bd7/httptools-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e603a3bff50db08cd578d54f07032ca1631450ceb972c2f834c2b860c28ea2", size = 510837 },
- { url = "https://files.pythonhosted.org/packages/52/d8/254d16a31d543073a0e57f1c329ca7378d8924e7e292eda72d0064987486/httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81", size = 485289 },
- { url = "https://files.pythonhosted.org/packages/5f/3c/4aee161b4b7a971660b8be71a92c24d6c64372c1ab3ae7f366b3680df20f/httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f", size = 489779 },
- { url = "https://files.pythonhosted.org/packages/12/b7/5cae71a8868e555f3f67a50ee7f673ce36eac970f029c0c5e9d584352961/httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970", size = 88634 },
- { url = "https://files.pythonhosted.org/packages/94/a3/9fe9ad23fd35f7de6b91eeb60848986058bd8b5a5c1e256f5860a160cc3e/httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660", size = 197214 },
- { url = "https://files.pythonhosted.org/packages/ea/d9/82d5e68bab783b632023f2fa31db20bebb4e89dfc4d2293945fd68484ee4/httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083", size = 102431 },
- { url = "https://files.pythonhosted.org/packages/96/c1/cb499655cbdbfb57b577734fde02f6fa0bbc3fe9fb4d87b742b512908dff/httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3", size = 473121 },
- { url = "https://files.pythonhosted.org/packages/af/71/ee32fd358f8a3bb199b03261f10921716990808a675d8160b5383487a317/httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071", size = 473805 },
- { url = "https://files.pythonhosted.org/packages/8a/0a/0d4df132bfca1507114198b766f1737d57580c9ad1cf93c1ff673e3387be/httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5", size = 448858 },
- { url = "https://files.pythonhosted.org/packages/1e/6a/787004fdef2cabea27bad1073bf6a33f2437b4dbd3b6fb4a9d71172b1c7c/httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0", size = 452042 },
- { url = "https://files.pythonhosted.org/packages/4d/dc/7decab5c404d1d2cdc1bb330b1bf70e83d6af0396fd4fc76fc60c0d522bf/httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8", size = 87682 },
-]
-
-[[package]]
-name = "httpx"
-version = "0.28.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "anyio" },
- { name = "certifi" },
- { name = "httpcore" },
- { name = "idna" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 },
-]
-
-[[package]]
-name = "idna"
-version = "3.10"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
-]
-
-[[package]]
-name = "jinja2"
-version = "3.1.6"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "markupsafe" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 },
-]
-
-[[package]]
-name = "markdown-it-py"
-version = "3.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "mdurl" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 },
-]
-
-[[package]]
-name = "markupsafe"
-version = "3.0.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353 },
- { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392 },
- { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984 },
- { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120 },
- { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032 },
- { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057 },
- { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359 },
- { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306 },
- { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094 },
- { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521 },
- { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 },
- { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 },
- { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 },
- { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 },
- { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 },
- { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 },
- { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 },
- { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 },
- { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 },
- { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 },
- { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 },
- { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 },
- { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 },
- { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 },
- { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 },
- { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 },
- { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 },
- { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 },
- { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 },
- { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 },
- { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 },
- { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 },
- { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 },
- { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 },
- { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 },
- { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 },
- { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 },
- { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 },
- { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 },
- { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 },
-]
-
-[[package]]
-name = "mdurl"
-version = "0.1.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 },
-]
-
-[[package]]
-name = "pydantic"
-version = "2.11.3"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "annotated-types" },
- { name = "pydantic-core" },
- { name = "typing-extensions" },
- { name = "typing-inspection" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/10/2e/ca897f093ee6c5f3b0bee123ee4465c50e75431c3d5b6a3b44a47134e891/pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3", size = 785513 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/b0/1d/407b29780a289868ed696d1616f4aad49d6388e5a77f567dcd2629dcd7b8/pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f", size = 443591 },
-]
-
-[[package]]
-name = "pydantic-core"
-version = "2.33.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/17/19/ed6a078a5287aea7922de6841ef4c06157931622c89c2a47940837b5eecd/pydantic_core-2.33.1.tar.gz", hash = "sha256:bcc9c6fdb0ced789245b02b7d6603e17d1563064ddcfc36f046b61c0c05dd9df", size = 434395 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d6/7f/c6298830cb780c46b4f46bb24298d01019ffa4d21769f39b908cd14bbd50/pydantic_core-2.33.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e966fc3caaf9f1d96b349b0341c70c8d6573bf1bac7261f7b0ba88f96c56c24", size = 2044224 },
- { url = "https://files.pythonhosted.org/packages/a8/65/6ab3a536776cad5343f625245bd38165d6663256ad43f3a200e5936afd6c/pydantic_core-2.33.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bfd0adeee563d59c598ceabddf2c92eec77abcb3f4a391b19aa7366170bd9e30", size = 1858845 },
- { url = "https://files.pythonhosted.org/packages/e9/15/9a22fd26ba5ee8c669d4b8c9c244238e940cd5d818649603ca81d1c69861/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91815221101ad3c6b507804178a7bb5cb7b2ead9ecd600041669c8d805ebd595", size = 1910029 },
- { url = "https://files.pythonhosted.org/packages/d5/33/8cb1a62818974045086f55f604044bf35b9342900318f9a2a029a1bec460/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9fea9c1869bb4742d174a57b4700c6dadea951df8b06de40c2fedb4f02931c2e", size = 1997784 },
- { url = "https://files.pythonhosted.org/packages/c0/ca/49958e4df7715c71773e1ea5be1c74544923d10319173264e6db122543f9/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d20eb4861329bb2484c021b9d9a977566ab16d84000a57e28061151c62b349a", size = 2141075 },
- { url = "https://files.pythonhosted.org/packages/7b/a6/0b3a167a9773c79ba834b959b4e18c3ae9216b8319bd8422792abc8a41b1/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb935c5591573ae3201640579f30128ccc10739b45663f93c06796854405505", size = 2745849 },
- { url = "https://files.pythonhosted.org/packages/0b/60/516484135173aa9e5861d7a0663dce82e4746d2e7f803627d8c25dfa5578/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c964fd24e6166420d18fb53996d8c9fd6eac9bf5ae3ec3d03015be4414ce497f", size = 2005794 },
- { url = "https://files.pythonhosted.org/packages/86/70/05b1eb77459ad47de00cf78ee003016da0cedf8b9170260488d7c21e9181/pydantic_core-2.33.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:681d65e9011f7392db5aa002b7423cc442d6a673c635668c227c6c8d0e5a4f77", size = 2123237 },
- { url = "https://files.pythonhosted.org/packages/c7/57/12667a1409c04ae7dc95d3b43158948eb0368e9c790be8b095cb60611459/pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e100c52f7355a48413e2999bfb4e139d2977a904495441b374f3d4fb4a170961", size = 2086351 },
- { url = "https://files.pythonhosted.org/packages/57/61/cc6d1d1c1664b58fdd6ecc64c84366c34ec9b606aeb66cafab6f4088974c/pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:048831bd363490be79acdd3232f74a0e9951b11b2b4cc058aeb72b22fdc3abe1", size = 2258914 },
- { url = "https://files.pythonhosted.org/packages/d1/0a/edb137176a1f5419b2ddee8bde6a0a548cfa3c74f657f63e56232df8de88/pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bdc84017d28459c00db6f918a7272a5190bec3090058334e43a76afb279eac7c", size = 2257385 },
- { url = "https://files.pythonhosted.org/packages/26/3c/48ca982d50e4b0e1d9954919c887bdc1c2b462801bf408613ccc641b3daa/pydantic_core-2.33.1-cp311-cp311-win32.whl", hash = "sha256:32cd11c5914d1179df70406427097c7dcde19fddf1418c787540f4b730289896", size = 1923765 },
- { url = "https://files.pythonhosted.org/packages/33/cd/7ab70b99e5e21559f5de38a0928ea84e6f23fdef2b0d16a6feaf942b003c/pydantic_core-2.33.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ea62419ba8c397e7da28a9170a16219d310d2cf4970dbc65c32faf20d828c83", size = 1950688 },
- { url = "https://files.pythonhosted.org/packages/4b/ae/db1fc237b82e2cacd379f63e3335748ab88b5adde98bf7544a1b1bd10a84/pydantic_core-2.33.1-cp311-cp311-win_arm64.whl", hash = "sha256:fc903512177361e868bc1f5b80ac8c8a6e05fcdd574a5fb5ffeac5a9982b9e89", size = 1908185 },
- { url = "https://files.pythonhosted.org/packages/c8/ce/3cb22b07c29938f97ff5f5bb27521f95e2ebec399b882392deb68d6c440e/pydantic_core-2.33.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1293d7febb995e9d3ec3ea09caf1a26214eec45b0f29f6074abb004723fc1de8", size = 2026640 },
- { url = "https://files.pythonhosted.org/packages/19/78/f381d643b12378fee782a72126ec5d793081ef03791c28a0fd542a5bee64/pydantic_core-2.33.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99b56acd433386c8f20be5c4000786d1e7ca0523c8eefc995d14d79c7a081498", size = 1852649 },
- { url = "https://files.pythonhosted.org/packages/9d/2b/98a37b80b15aac9eb2c6cfc6dbd35e5058a352891c5cce3a8472d77665a6/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35a5ec3fa8c2fe6c53e1b2ccc2454398f95d5393ab398478f53e1afbbeb4d939", size = 1892472 },
- { url = "https://files.pythonhosted.org/packages/4e/d4/3c59514e0f55a161004792b9ff3039da52448f43f5834f905abef9db6e4a/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b172f7b9d2f3abc0efd12e3386f7e48b576ef309544ac3a63e5e9cdd2e24585d", size = 1977509 },
- { url = "https://files.pythonhosted.org/packages/a9/b6/c2c7946ef70576f79a25db59a576bce088bdc5952d1b93c9789b091df716/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9097b9f17f91eea659b9ec58148c0747ec354a42f7389b9d50701610d86f812e", size = 2128702 },
- { url = "https://files.pythonhosted.org/packages/88/fe/65a880f81e3f2a974312b61f82a03d85528f89a010ce21ad92f109d94deb/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc77ec5b7e2118b152b0d886c7514a4653bcb58c6b1d760134a9fab915f777b3", size = 2679428 },
- { url = "https://files.pythonhosted.org/packages/6f/ff/4459e4146afd0462fb483bb98aa2436d69c484737feaceba1341615fb0ac/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3d15245b08fa4a84cefc6c9222e6f37c98111c8679fbd94aa145f9a0ae23d", size = 2008753 },
- { url = "https://files.pythonhosted.org/packages/7c/76/1c42e384e8d78452ededac8b583fe2550c84abfef83a0552e0e7478ccbc3/pydantic_core-2.33.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef99779001d7ac2e2461d8ab55d3373fe7315caefdbecd8ced75304ae5a6fc6b", size = 2114849 },
- { url = "https://files.pythonhosted.org/packages/00/72/7d0cf05095c15f7ffe0eb78914b166d591c0eed72f294da68378da205101/pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fc6bf8869e193855e8d91d91f6bf59699a5cdfaa47a404e278e776dd7f168b39", size = 2069541 },
- { url = "https://files.pythonhosted.org/packages/b3/69/94a514066bb7d8be499aa764926937409d2389c09be0b5107a970286ef81/pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:b1caa0bc2741b043db7823843e1bde8aaa58a55a58fda06083b0569f8b45693a", size = 2239225 },
- { url = "https://files.pythonhosted.org/packages/84/b0/e390071eadb44b41f4f54c3cef64d8bf5f9612c92686c9299eaa09e267e2/pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ec259f62538e8bf364903a7d0d0239447059f9434b284f5536e8402b7dd198db", size = 2248373 },
- { url = "https://files.pythonhosted.org/packages/d6/b2/288b3579ffc07e92af66e2f1a11be3b056fe1214aab314748461f21a31c3/pydantic_core-2.33.1-cp312-cp312-win32.whl", hash = "sha256:e14f369c98a7c15772b9da98987f58e2b509a93235582838bd0d1d8c08b68fda", size = 1907034 },
- { url = "https://files.pythonhosted.org/packages/02/28/58442ad1c22b5b6742b992ba9518420235adced665513868f99a1c2638a5/pydantic_core-2.33.1-cp312-cp312-win_amd64.whl", hash = "sha256:1c607801d85e2e123357b3893f82c97a42856192997b95b4d8325deb1cd0c5f4", size = 1956848 },
- { url = "https://files.pythonhosted.org/packages/a1/eb/f54809b51c7e2a1d9f439f158b8dd94359321abcc98767e16fc48ae5a77e/pydantic_core-2.33.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d13f0276806ee722e70a1c93da19748594f19ac4299c7e41237fc791d1861ea", size = 1903986 },
- { url = "https://files.pythonhosted.org/packages/7a/24/eed3466a4308d79155f1cdd5c7432c80ddcc4530ba8623b79d5ced021641/pydantic_core-2.33.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:70af6a21237b53d1fe7b9325b20e65cbf2f0a848cf77bed492b029139701e66a", size = 2033551 },
- { url = "https://files.pythonhosted.org/packages/ab/14/df54b1a0bc9b6ded9b758b73139d2c11b4e8eb43e8ab9c5847c0a2913ada/pydantic_core-2.33.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:282b3fe1bbbe5ae35224a0dbd05aed9ccabccd241e8e6b60370484234b456266", size = 1852785 },
- { url = "https://files.pythonhosted.org/packages/fa/96/e275f15ff3d34bb04b0125d9bc8848bf69f25d784d92a63676112451bfb9/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b315e596282bbb5822d0c7ee9d255595bd7506d1cb20c2911a4da0b970187d3", size = 1897758 },
- { url = "https://files.pythonhosted.org/packages/b7/d8/96bc536e975b69e3a924b507d2a19aedbf50b24e08c80fb00e35f9baaed8/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dfae24cf9921875ca0ca6a8ecb4bb2f13c855794ed0d468d6abbec6e6dcd44a", size = 1986109 },
- { url = "https://files.pythonhosted.org/packages/90/72/ab58e43ce7e900b88cb571ed057b2fcd0e95b708a2e0bed475b10130393e/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6dd8ecfde08d8bfadaea669e83c63939af76f4cf5538a72597016edfa3fad516", size = 2129159 },
- { url = "https://files.pythonhosted.org/packages/dc/3f/52d85781406886c6870ac995ec0ba7ccc028b530b0798c9080531b409fdb/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f593494876eae852dc98c43c6f260f45abdbfeec9e4324e31a481d948214764", size = 2680222 },
- { url = "https://files.pythonhosted.org/packages/f4/56/6e2ef42f363a0eec0fd92f74a91e0ac48cd2e49b695aac1509ad81eee86a/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b73114f47fd7016088e5186d13faf5e1b2fe83f5e320e371f035557fd264d", size = 2006980 },
- { url = "https://files.pythonhosted.org/packages/4c/c0/604536c4379cc78359f9ee0aa319f4aedf6b652ec2854953f5a14fc38c5a/pydantic_core-2.33.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11f3864eb516af21b01e25fac915a82e9ddad3bb0fb9e95a246067398b435a4", size = 2120840 },
- { url = "https://files.pythonhosted.org/packages/1f/46/9eb764814f508f0edfb291a0f75d10854d78113fa13900ce13729aaec3ae/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:549150be302428b56fdad0c23c2741dcdb5572413776826c965619a25d9c6bde", size = 2072518 },
- { url = "https://files.pythonhosted.org/packages/42/e3/fb6b2a732b82d1666fa6bf53e3627867ea3131c5f39f98ce92141e3e3dc1/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:495bc156026efafd9ef2d82372bd38afce78ddd82bf28ef5276c469e57c0c83e", size = 2248025 },
- { url = "https://files.pythonhosted.org/packages/5c/9d/fbe8fe9d1aa4dac88723f10a921bc7418bd3378a567cb5e21193a3c48b43/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ec79de2a8680b1a67a07490bddf9636d5c2fab609ba8c57597e855fa5fa4dacd", size = 2254991 },
- { url = "https://files.pythonhosted.org/packages/aa/99/07e2237b8a66438d9b26482332cda99a9acccb58d284af7bc7c946a42fd3/pydantic_core-2.33.1-cp313-cp313-win32.whl", hash = "sha256:ee12a7be1742f81b8a65b36c6921022301d466b82d80315d215c4c691724986f", size = 1915262 },
- { url = "https://files.pythonhosted.org/packages/8a/f4/e457a7849beeed1e5defbcf5051c6f7b3c91a0624dd31543a64fc9adcf52/pydantic_core-2.33.1-cp313-cp313-win_amd64.whl", hash = "sha256:ede9b407e39949d2afc46385ce6bd6e11588660c26f80576c11c958e6647bc40", size = 1956626 },
- { url = "https://files.pythonhosted.org/packages/20/d0/e8d567a7cff7b04e017ae164d98011f1e1894269fe8e90ea187a3cbfb562/pydantic_core-2.33.1-cp313-cp313-win_arm64.whl", hash = "sha256:aa687a23d4b7871a00e03ca96a09cad0f28f443690d300500603bd0adba4b523", size = 1909590 },
- { url = "https://files.pythonhosted.org/packages/ef/fd/24ea4302d7a527d672c5be06e17df16aabfb4e9fdc6e0b345c21580f3d2a/pydantic_core-2.33.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:401d7b76e1000d0dd5538e6381d28febdcacb097c8d340dde7d7fc6e13e9f95d", size = 1812963 },
- { url = "https://files.pythonhosted.org/packages/5f/95/4fbc2ecdeb5c1c53f1175a32d870250194eb2fdf6291b795ab08c8646d5d/pydantic_core-2.33.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aeb055a42d734c0255c9e489ac67e75397d59c6fbe60d155851e9782f276a9c", size = 1986896 },
- { url = "https://files.pythonhosted.org/packages/71/ae/fe31e7f4a62431222d8f65a3bd02e3fa7e6026d154a00818e6d30520ea77/pydantic_core-2.33.1-cp313-cp313t-win_amd64.whl", hash = "sha256:338ea9b73e6e109f15ab439e62cb3b78aa752c7fd9536794112e14bee02c8d18", size = 1931810 },
- { url = "https://files.pythonhosted.org/packages/0b/76/1794e440c1801ed35415238d2c728f26cd12695df9057154ad768b7b991c/pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a371dc00282c4b84246509a5ddc808e61b9864aa1eae9ecc92bb1268b82db4a", size = 2042858 },
- { url = "https://files.pythonhosted.org/packages/73/b4/9cd7b081fb0b1b4f8150507cd59d27b275c3e22ad60b35cb19ea0977d9b9/pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f59295ecc75a1788af8ba92f2e8c6eeaa5a94c22fc4d151e8d9638814f85c8fc", size = 1873745 },
- { url = "https://files.pythonhosted.org/packages/e1/d7/9ddb7575d4321e40d0363903c2576c8c0c3280ebea137777e5ab58d723e3/pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08530b8ac922003033f399128505f513e30ca770527cc8bbacf75a84fcc2c74b", size = 1904188 },
- { url = "https://files.pythonhosted.org/packages/d1/a8/3194ccfe461bb08da19377ebec8cb4f13c9bd82e13baebc53c5c7c39a029/pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae370459da6a5466978c0eacf90690cb57ec9d533f8e63e564ef3822bfa04fe", size = 2083479 },
- { url = "https://files.pythonhosted.org/packages/42/c7/84cb569555d7179ca0b3f838cef08f66f7089b54432f5b8599aac6e9533e/pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e3de2777e3b9f4d603112f78006f4ae0acb936e95f06da6cb1a45fbad6bdb4b5", size = 2118415 },
- { url = "https://files.pythonhosted.org/packages/3b/67/72abb8c73e0837716afbb58a59cc9e3ae43d1aa8677f3b4bc72c16142716/pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a64e81e8cba118e108d7126362ea30e021291b7805d47e4896e52c791be2761", size = 2079623 },
- { url = "https://files.pythonhosted.org/packages/0b/cd/c59707e35a47ba4cbbf153c3f7c56420c58653b5801b055dc52cccc8e2dc/pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:52928d8c1b6bda03cc6d811e8923dffc87a2d3c8b3bfd2ce16471c7147a24850", size = 2250175 },
- { url = "https://files.pythonhosted.org/packages/84/32/e4325a6676b0bed32d5b084566ec86ed7fd1e9bcbfc49c578b1755bde920/pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1b30d92c9412beb5ac6b10a3eb7ef92ccb14e3f2a8d7732e2d739f58b3aa7544", size = 2254674 },
- { url = "https://files.pythonhosted.org/packages/12/6f/5596dc418f2e292ffc661d21931ab34591952e2843e7168ea5a52591f6ff/pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5", size = 2080951 },
-]
-
-[[package]]
-name = "pygments"
-version = "2.19.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 },
-]
-
-[[package]]
-name = "pytest-simcore"
-version = "0.1.0"
-source = { virtual = "." }
-dependencies = [
- { name = "fastapi", extra = ["standard"] },
- { name = "python-socketio" },
- { name = "uvicorn" },
-]
-
-[package.metadata]
-requires-dist = [
- { name = "fastapi", extras = ["standard"], specifier = ">=0.115.12" },
- { name = "python-socketio", specifier = ">=5.12.1" },
- { name = "uvicorn", specifier = ">=0.34.0" },
-]
-
-[[package]]
-name = "python-dotenv"
-version = "1.1.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256 },
-]
-
-[[package]]
-name = "python-engineio"
-version = "4.11.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "simple-websocket" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/52/e0/a9e0fe427ce7f1b7dbf9531fa00ffe4b557c4a7bc8e71891c115af123170/python_engineio-4.11.2.tar.gz", hash = "sha256:145bb0daceb904b4bb2d3eb2d93f7dbb7bb87a6a0c4f20a94cc8654dec977129", size = 91381 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/07/8f/978a0b913e3f8ad33a9a2fe204d32efe3d1ee34ecb1f2829c1cfbdd92082/python_engineio-4.11.2-py3-none-any.whl", hash = "sha256:f0971ac4c65accc489154fe12efd88f53ca8caf04754c46a66e85f5102ef22ad", size = 59239 },
-]
-
-[[package]]
-name = "python-multipart"
-version = "0.0.20"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546 },
-]
-
-[[package]]
-name = "python-socketio"
-version = "5.12.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "bidict" },
- { name = "python-engineio" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/ce/d0/40ed38076e8aee94785d546d3e3a1cae393da5806a8530be877187e2875f/python_socketio-5.12.1.tar.gz", hash = "sha256:0299ff1f470b676c09c1bfab1dead25405077d227b2c13cf217a34dadc68ba9c", size = 119991 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8a/a3/c69806f30dd81df5a99d592e7db4c930c3a9b098555aa97b0eb866b20b11/python_socketio-5.12.1-py3-none-any.whl", hash = "sha256:24a0ea7cfff0e021eb28c68edbf7914ee4111bdf030b95e4d250c4dc9af7a386", size = 76947 },
-]
-
-[[package]]
-name = "pyyaml"
-version = "6.0.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 },
- { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 },
- { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 },
- { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 },
- { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 },
- { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 },
- { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 },
- { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 },
- { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 },
- { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 },
- { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 },
- { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 },
- { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 },
- { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 },
- { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 },
- { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 },
- { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 },
- { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 },
- { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 },
- { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 },
- { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 },
- { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 },
- { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 },
- { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 },
- { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 },
- { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 },
- { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 },
-]
-
-[[package]]
-name = "rich"
-version = "14.0.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "markdown-it-py" },
- { name = "pygments" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a1/53/830aa4c3066a8ab0ae9a9955976fb770fe9c6102117c8ec4ab3ea62d89e8/rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725", size = 224078 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229 },
-]
-
-[[package]]
-name = "rich-toolkit"
-version = "0.14.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "click" },
- { name = "rich" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/2e/ea/13945d58d556a28dfb0f774ad5c8af759527390e59505a40d164bf8ce1ce/rich_toolkit-0.14.1.tar.gz", hash = "sha256:9248e2d087bfc01f3e4c5c8987e05f7fa744d00dd22fa2be3aa6e50255790b3f", size = 104416 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/66/e8/61c5b12d1567fdba41a6775db12a090d88b8305424ee7c47259c70d33cb4/rich_toolkit-0.14.1-py3-none-any.whl", hash = "sha256:dc92c0117d752446d04fdc828dbca5873bcded213a091a5d3742a2beec2e6559", size = 24177 },
-]
-
-[[package]]
-name = "shellingham"
-version = "1.5.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755 },
-]
-
-[[package]]
-name = "simple-websocket"
-version = "1.1.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "wsproto" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/b0/d4/bfa032f961103eba93de583b161f0e6a5b63cebb8f2c7d0c6e6efe1e3d2e/simple_websocket-1.1.0.tar.gz", hash = "sha256:7939234e7aa067c534abdab3a9ed933ec9ce4691b0713c78acb195560aa52ae4", size = 17300 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/52/59/0782e51887ac6b07ffd1570e0364cf901ebc36345fea669969d2084baebb/simple_websocket-1.1.0-py3-none-any.whl", hash = "sha256:4af6069630a38ed6c561010f0e11a5bc0d4ca569b36306eb257cd9a192497c8c", size = 13842 },
-]
-
-[[package]]
-name = "sniffio"
-version = "1.3.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
-]
-
-[[package]]
-name = "starlette"
-version = "0.46.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "anyio" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/04/1b/52b27f2e13ceedc79a908e29eac426a63465a1a01248e5f24aa36a62aeb3/starlette-0.46.1.tar.gz", hash = "sha256:3c88d58ee4bd1bb807c0d1acb381838afc7752f9ddaec81bbe4383611d833230", size = 2580102 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a0/4b/528ccf7a982216885a1ff4908e886b8fb5f19862d1962f56a3fce2435a70/starlette-0.46.1-py3-none-any.whl", hash = "sha256:77c74ed9d2720138b25875133f3a2dae6d854af2ec37dceb56aef370c1d8a227", size = 71995 },
-]
-
-[[package]]
-name = "typer"
-version = "0.15.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "click" },
- { name = "rich" },
- { name = "shellingham" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/8b/6f/3991f0f1c7fcb2df31aef28e0594d8d54b05393a0e4e34c65e475c2a5d41/typer-0.15.2.tar.gz", hash = "sha256:ab2fab47533a813c49fe1f16b1a370fd5819099c00b119e0633df65f22144ba5", size = 100711 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/7f/fc/5b29fea8cee020515ca82cc68e3b8e1e34bb19a3535ad854cac9257b414c/typer-0.15.2-py3-none-any.whl", hash = "sha256:46a499c6107d645a9c13f7ee46c5d5096cae6f5fc57dd11eccbbb9ae3e44ddfc", size = 45061 },
-]
-
-[[package]]
-name = "typing-extensions"
-version = "4.13.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806 },
-]
-
-[[package]]
-name = "typing-inspection"
-version = "0.4.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125 },
-]
-
-[[package]]
-name = "uvicorn"
-version = "0.34.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "click" },
- { name = "h11" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 },
-]
-
-[package.optional-dependencies]
-standard = [
- { name = "colorama", marker = "sys_platform == 'win32'" },
- { name = "httptools" },
- { name = "python-dotenv" },
- { name = "pyyaml" },
- { name = "uvloop", marker = "platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" },
- { name = "watchfiles" },
- { name = "websockets" },
-]
-
-[[package]]
-name = "uvloop"
-version = "0.21.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/af/c0/854216d09d33c543f12a44b393c402e89a920b1a0a7dc634c42de91b9cf6/uvloop-0.21.0.tar.gz", hash = "sha256:3bf12b0fda68447806a7ad847bfa591613177275d35b6724b1ee573faa3704e3", size = 2492741 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/57/a7/4cf0334105c1160dd6819f3297f8700fda7fc30ab4f61fbf3e725acbc7cc/uvloop-0.21.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c0f3fa6200b3108919f8bdabb9a7f87f20e7097ea3c543754cabc7d717d95cf8", size = 1447410 },
- { url = "https://files.pythonhosted.org/packages/8c/7c/1517b0bbc2dbe784b563d6ab54f2ef88c890fdad77232c98ed490aa07132/uvloop-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0878c2640cf341b269b7e128b1a5fed890adc4455513ca710d77d5e93aa6d6a0", size = 805476 },
- { url = "https://files.pythonhosted.org/packages/ee/ea/0bfae1aceb82a503f358d8d2fa126ca9dbdb2ba9c7866974faec1cb5875c/uvloop-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9fb766bb57b7388745d8bcc53a359b116b8a04c83a2288069809d2b3466c37e", size = 3960855 },
- { url = "https://files.pythonhosted.org/packages/8a/ca/0864176a649838b838f36d44bf31c451597ab363b60dc9e09c9630619d41/uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a375441696e2eda1c43c44ccb66e04d61ceeffcd76e4929e527b7fa401b90fb", size = 3973185 },
- { url = "https://files.pythonhosted.org/packages/30/bf/08ad29979a936d63787ba47a540de2132169f140d54aa25bc8c3df3e67f4/uvloop-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:baa0e6291d91649c6ba4ed4b2f982f9fa165b5bbd50a9e203c416a2797bab3c6", size = 3820256 },
- { url = "https://files.pythonhosted.org/packages/da/e2/5cf6ef37e3daf2f06e651aae5ea108ad30df3cb269102678b61ebf1fdf42/uvloop-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4509360fcc4c3bd2c70d87573ad472de40c13387f5fda8cb58350a1d7475e58d", size = 3937323 },
- { url = "https://files.pythonhosted.org/packages/8c/4c/03f93178830dc7ce8b4cdee1d36770d2f5ebb6f3d37d354e061eefc73545/uvloop-0.21.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:359ec2c888397b9e592a889c4d72ba3d6befba8b2bb01743f72fffbde663b59c", size = 1471284 },
- { url = "https://files.pythonhosted.org/packages/43/3e/92c03f4d05e50f09251bd8b2b2b584a2a7f8fe600008bcc4523337abe676/uvloop-0.21.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7089d2dc73179ce5ac255bdf37c236a9f914b264825fdaacaded6990a7fb4c2", size = 821349 },
- { url = "https://files.pythonhosted.org/packages/a6/ef/a02ec5da49909dbbfb1fd205a9a1ac4e88ea92dcae885e7c961847cd51e2/uvloop-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa4dcdbd9ae0a372f2167a207cd98c9f9a1ea1188a8a526431eef2f8116cc8d", size = 4580089 },
- { url = "https://files.pythonhosted.org/packages/06/a7/b4e6a19925c900be9f98bec0a75e6e8f79bb53bdeb891916609ab3958967/uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc", size = 4693770 },
- { url = "https://files.pythonhosted.org/packages/ce/0c/f07435a18a4b94ce6bd0677d8319cd3de61f3a9eeb1e5f8ab4e8b5edfcb3/uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb", size = 4451321 },
- { url = "https://files.pythonhosted.org/packages/8f/eb/f7032be105877bcf924709c97b1bf3b90255b4ec251f9340cef912559f28/uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f", size = 4659022 },
- { url = "https://files.pythonhosted.org/packages/3f/8d/2cbef610ca21539f0f36e2b34da49302029e7c9f09acef0b1c3b5839412b/uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281", size = 1468123 },
- { url = "https://files.pythonhosted.org/packages/93/0d/b0038d5a469f94ed8f2b2fce2434a18396d8fbfb5da85a0a9781ebbdec14/uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af", size = 819325 },
- { url = "https://files.pythonhosted.org/packages/50/94/0a687f39e78c4c1e02e3272c6b2ccdb4e0085fda3b8352fecd0410ccf915/uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6", size = 4582806 },
- { url = "https://files.pythonhosted.org/packages/d2/19/f5b78616566ea68edd42aacaf645adbf71fbd83fc52281fba555dc27e3f1/uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816", size = 4701068 },
- { url = "https://files.pythonhosted.org/packages/47/57/66f061ee118f413cd22a656de622925097170b9380b30091b78ea0c6ea75/uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc", size = 4454428 },
- { url = "https://files.pythonhosted.org/packages/63/9a/0962b05b308494e3202d3f794a6e85abe471fe3cafdbcf95c2e8c713aabd/uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553", size = 4660018 },
-]
-
-[[package]]
-name = "watchfiles"
-version = "1.0.5"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "anyio" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/03/e2/8ed598c42057de7aa5d97c472254af4906ff0a59a66699d426fc9ef795d7/watchfiles-1.0.5.tar.gz", hash = "sha256:b7529b5dcc114679d43827d8c35a07c493ad6f083633d573d81c660abc5979e9", size = 94537 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/39/f4/41b591f59021786ef517e1cdc3b510383551846703e03f204827854a96f8/watchfiles-1.0.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:237f9be419e977a0f8f6b2e7b0475ababe78ff1ab06822df95d914a945eac827", size = 405336 },
- { url = "https://files.pythonhosted.org/packages/ae/06/93789c135be4d6d0e4f63e96eea56dc54050b243eacc28439a26482b5235/watchfiles-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0da39ff917af8b27a4bdc5a97ac577552a38aac0d260a859c1517ea3dc1a7c4", size = 395977 },
- { url = "https://files.pythonhosted.org/packages/d2/db/1cd89bd83728ca37054512d4d35ab69b5f12b8aa2ac9be3b0276b3bf06cc/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cfcb3952350e95603f232a7a15f6c5f86c5375e46f0bd4ae70d43e3e063c13d", size = 455232 },
- { url = "https://files.pythonhosted.org/packages/40/90/d8a4d44ffe960517e487c9c04f77b06b8abf05eb680bed71c82b5f2cad62/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b2dddba7a4e6151384e252a5632efcaa9bc5d1c4b567f3cb621306b2ca9f63", size = 459151 },
- { url = "https://files.pythonhosted.org/packages/6c/da/267a1546f26465dead1719caaba3ce660657f83c9d9c052ba98fb8856e13/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95cf944fcfc394c5f9de794ce581914900f82ff1f855326f25ebcf24d5397418", size = 489054 },
- { url = "https://files.pythonhosted.org/packages/b1/31/33850dfd5c6efb6f27d2465cc4c6b27c5a6f5ed53c6fa63b7263cf5f60f6/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf6cd9f83d7c023b1aba15d13f705ca7b7d38675c121f3cc4a6e25bd0857ee9", size = 523955 },
- { url = "https://files.pythonhosted.org/packages/09/84/b7d7b67856efb183a421f1416b44ca975cb2ea6c4544827955dfb01f7dc2/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:852de68acd6212cd6d33edf21e6f9e56e5d98c6add46f48244bd479d97c967c6", size = 502234 },
- { url = "https://files.pythonhosted.org/packages/71/87/6dc5ec6882a2254cfdd8b0718b684504e737273903b65d7338efaba08b52/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5730f3aa35e646103b53389d5bc77edfbf578ab6dab2e005142b5b80a35ef25", size = 454750 },
- { url = "https://files.pythonhosted.org/packages/3d/6c/3786c50213451a0ad15170d091570d4a6554976cf0df19878002fc96075a/watchfiles-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:18b3bd29954bc4abeeb4e9d9cf0b30227f0f206c86657674f544cb032296acd5", size = 631591 },
- { url = "https://files.pythonhosted.org/packages/1b/b3/1427425ade4e359a0deacce01a47a26024b2ccdb53098f9d64d497f6684c/watchfiles-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ba5552a1b07c8edbf197055bc9d518b8f0d98a1c6a73a293bc0726dce068ed01", size = 625370 },
- { url = "https://files.pythonhosted.org/packages/15/ba/f60e053b0b5b8145d682672024aa91370a29c5c921a88977eb565de34086/watchfiles-1.0.5-cp311-cp311-win32.whl", hash = "sha256:2f1fefb2e90e89959447bc0420fddd1e76f625784340d64a2f7d5983ef9ad246", size = 277791 },
- { url = "https://files.pythonhosted.org/packages/50/ed/7603c4e164225c12c0d4e8700b64bb00e01a6c4eeea372292a3856be33a4/watchfiles-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:b6e76ceb1dd18c8e29c73f47d41866972e891fc4cc7ba014f487def72c1cf096", size = 291622 },
- { url = "https://files.pythonhosted.org/packages/a2/c2/99bb7c96b4450e36877fde33690ded286ff555b5a5c1d925855d556968a1/watchfiles-1.0.5-cp311-cp311-win_arm64.whl", hash = "sha256:266710eb6fddc1f5e51843c70e3bebfb0f5e77cf4f27129278c70554104d19ed", size = 283699 },
- { url = "https://files.pythonhosted.org/packages/2a/8c/4f0b9bdb75a1bfbd9c78fad7d8854369283f74fe7cf03eb16be77054536d/watchfiles-1.0.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b5eb568c2aa6018e26da9e6c86f3ec3fd958cee7f0311b35c2630fa4217d17f2", size = 401511 },
- { url = "https://files.pythonhosted.org/packages/dc/4e/7e15825def77f8bd359b6d3f379f0c9dac4eb09dd4ddd58fd7d14127179c/watchfiles-1.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0a04059f4923ce4e856b4b4e5e783a70f49d9663d22a4c3b3298165996d1377f", size = 392715 },
- { url = "https://files.pythonhosted.org/packages/58/65/b72fb817518728e08de5840d5d38571466c1b4a3f724d190cec909ee6f3f/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e380c89983ce6e6fe2dd1e1921b9952fb4e6da882931abd1824c092ed495dec", size = 454138 },
- { url = "https://files.pythonhosted.org/packages/3e/a4/86833fd2ea2e50ae28989f5950b5c3f91022d67092bfec08f8300d8b347b/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fe43139b2c0fdc4a14d4f8d5b5d967f7a2777fd3d38ecf5b1ec669b0d7e43c21", size = 458592 },
- { url = "https://files.pythonhosted.org/packages/38/7e/42cb8df8be9a37e50dd3a818816501cf7a20d635d76d6bd65aae3dbbff68/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee0822ce1b8a14fe5a066f93edd20aada932acfe348bede8aa2149f1a4489512", size = 487532 },
- { url = "https://files.pythonhosted.org/packages/fc/fd/13d26721c85d7f3df6169d8b495fcac8ab0dc8f0945ebea8845de4681dab/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a0dbcb1c2d8f2ab6e0a81c6699b236932bd264d4cef1ac475858d16c403de74d", size = 522865 },
- { url = "https://files.pythonhosted.org/packages/a1/0d/7f9ae243c04e96c5455d111e21b09087d0eeaf9a1369e13a01c7d3d82478/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2014a2b18ad3ca53b1f6c23f8cd94a18ce930c1837bd891262c182640eb40a6", size = 499887 },
- { url = "https://files.pythonhosted.org/packages/8e/0f/a257766998e26aca4b3acf2ae97dff04b57071e991a510857d3799247c67/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f6ae86d5cb647bf58f9f655fcf577f713915a5d69057a0371bc257e2553234", size = 454498 },
- { url = "https://files.pythonhosted.org/packages/81/79/8bf142575a03e0af9c3d5f8bcae911ee6683ae93a625d349d4ecf4c8f7df/watchfiles-1.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1a7bac2bde1d661fb31f4d4e8e539e178774b76db3c2c17c4bb3e960a5de07a2", size = 630663 },
- { url = "https://files.pythonhosted.org/packages/f1/80/abe2e79f610e45c63a70d271caea90c49bbf93eb00fa947fa9b803a1d51f/watchfiles-1.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ab626da2fc1ac277bbf752446470b367f84b50295264d2d313e28dc4405d663", size = 625410 },
- { url = "https://files.pythonhosted.org/packages/91/6f/bc7fbecb84a41a9069c2c6eb6319f7f7df113adf113e358c57fc1aff7ff5/watchfiles-1.0.5-cp312-cp312-win32.whl", hash = "sha256:9f4571a783914feda92018ef3901dab8caf5b029325b5fe4558c074582815249", size = 277965 },
- { url = "https://files.pythonhosted.org/packages/99/a5/bf1c297ea6649ec59e935ab311f63d8af5faa8f0b86993e3282b984263e3/watchfiles-1.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:360a398c3a19672cf93527f7e8d8b60d8275119c5d900f2e184d32483117a705", size = 291693 },
- { url = "https://files.pythonhosted.org/packages/7f/7b/fd01087cc21db5c47e5beae507b87965db341cce8a86f9eb12bf5219d4e0/watchfiles-1.0.5-cp312-cp312-win_arm64.whl", hash = "sha256:1a2902ede862969077b97523987c38db28abbe09fb19866e711485d9fbf0d417", size = 283287 },
- { url = "https://files.pythonhosted.org/packages/c7/62/435766874b704f39b2fecd8395a29042db2b5ec4005bd34523415e9bd2e0/watchfiles-1.0.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0b289572c33a0deae62daa57e44a25b99b783e5f7aed81b314232b3d3c81a11d", size = 401531 },
- { url = "https://files.pythonhosted.org/packages/6e/a6/e52a02c05411b9cb02823e6797ef9bbba0bfaf1bb627da1634d44d8af833/watchfiles-1.0.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a056c2f692d65bf1e99c41045e3bdcaea3cb9e6b5a53dcaf60a5f3bd95fc9763", size = 392417 },
- { url = "https://files.pythonhosted.org/packages/3f/53/c4af6819770455932144e0109d4854437769672d7ad897e76e8e1673435d/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9dca99744991fc9850d18015c4f0438865414e50069670f5f7eee08340d8b40", size = 453423 },
- { url = "https://files.pythonhosted.org/packages/cb/d1/8e88df58bbbf819b8bc5cfbacd3c79e01b40261cad0fc84d1e1ebd778a07/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:894342d61d355446d02cd3988a7326af344143eb33a2fd5d38482a92072d9563", size = 458185 },
- { url = "https://files.pythonhosted.org/packages/ff/70/fffaa11962dd5429e47e478a18736d4e42bec42404f5ee3b92ef1b87ad60/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab44e1580924d1ffd7b3938e02716d5ad190441965138b4aa1d1f31ea0877f04", size = 486696 },
- { url = "https://files.pythonhosted.org/packages/39/db/723c0328e8b3692d53eb273797d9a08be6ffb1d16f1c0ba2bdbdc2a3852c/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6f9367b132078b2ceb8d066ff6c93a970a18c3029cea37bfd7b2d3dd2e5db8f", size = 522327 },
- { url = "https://files.pythonhosted.org/packages/cd/05/9fccc43c50c39a76b68343484b9da7b12d42d0859c37c61aec018c967a32/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2e55a9b162e06e3f862fb61e399fe9f05d908d019d87bf5b496a04ef18a970a", size = 499741 },
- { url = "https://files.pythonhosted.org/packages/23/14/499e90c37fa518976782b10a18b18db9f55ea73ca14641615056f8194bb3/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0125f91f70e0732a9f8ee01e49515c35d38ba48db507a50c5bdcad9503af5827", size = 453995 },
- { url = "https://files.pythonhosted.org/packages/61/d9/f75d6840059320df5adecd2c687fbc18960a7f97b55c300d20f207d48aef/watchfiles-1.0.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:13bb21f8ba3248386337c9fa51c528868e6c34a707f729ab041c846d52a0c69a", size = 629693 },
- { url = "https://files.pythonhosted.org/packages/fc/17/180ca383f5061b61406477218c55d66ec118e6c0c51f02d8142895fcf0a9/watchfiles-1.0.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:839ebd0df4a18c5b3c1b890145b5a3f5f64063c2a0d02b13c76d78fe5de34936", size = 624677 },
- { url = "https://files.pythonhosted.org/packages/bf/15/714d6ef307f803f236d69ee9d421763707899d6298d9f3183e55e366d9af/watchfiles-1.0.5-cp313-cp313-win32.whl", hash = "sha256:4a8ec1e4e16e2d5bafc9ba82f7aaecfeec990ca7cd27e84fb6f191804ed2fcfc", size = 277804 },
- { url = "https://files.pythonhosted.org/packages/a8/b4/c57b99518fadf431f3ef47a610839e46e5f8abf9814f969859d1c65c02c7/watchfiles-1.0.5-cp313-cp313-win_amd64.whl", hash = "sha256:f436601594f15bf406518af922a89dcaab416568edb6f65c4e5bbbad1ea45c11", size = 291087 },
-]
-
-[[package]]
-name = "websockets"
-version = "15.0.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423 },
- { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082 },
- { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330 },
- { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878 },
- { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883 },
- { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252 },
- { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521 },
- { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958 },
- { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918 },
- { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388 },
- { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828 },
- { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437 },
- { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096 },
- { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332 },
- { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152 },
- { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096 },
- { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523 },
- { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790 },
- { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165 },
- { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160 },
- { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395 },
- { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841 },
- { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440 },
- { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098 },
- { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329 },
- { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111 },
- { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054 },
- { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496 },
- { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829 },
- { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217 },
- { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195 },
- { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393 },
- { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837 },
- { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743 },
-]
-
-[[package]]
-name = "wsproto"
-version = "1.2.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "h11" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c9/4a/44d3c295350d776427904d73c189e10aeae66d7f555bb2feee16d1e4ba5a/wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065", size = 53425 }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/78/58/e860788190eba3bcce367f74d29c4675466ce8dddfba85f7827588416f01/wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736", size = 24226 },
-]
diff --git a/packages/service-integration/Dockerfile b/packages/service-integration/Dockerfile
index 8a6d6a854c83..39e6841602c2 100644
--- a/packages/service-integration/Dockerfile
+++ b/packages/service-integration/Dockerfile
@@ -2,7 +2,7 @@
# Define arguments in the global scope
ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64
@@ -50,7 +50,8 @@ ENV LANG=C.UTF-8
ENV PYTHONDONTWRITEBYTECODE=1 \
VIRTUAL_ENV=/home/scu/.venv
# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
-ENV UV_COMPILE_BYTECODE=1
+ENV UV_COMPILE_BYTECODE=1 \
+ UV_LINK_MODE=copy
# Ensures that the python and pip executables used
# in the image will be those from our virtualenv.
@@ -74,13 +75,6 @@ COPY --from=uv_build /uv /uvx /bin/
RUN uv venv "${VIRTUAL_ENV}"
-
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- pip~=24.0 \
- wheel \
- setuptools
-
WORKDIR /build/packages/service-integration
RUN \
diff --git a/packages/service-integration/requirements/_base.in b/packages/service-integration/requirements/_base.in
index 213a27f4c131..899bf3ec23bf 100644
--- a/packages/service-integration/requirements/_base.in
+++ b/packages/service-integration/requirements/_base.in
@@ -13,5 +13,5 @@ jinja2_time
jsonschema # pytest-plugin
pytest # pytest-plugin
pyyaml
-typer[all]
+typer
yarl
diff --git a/packages/service-integration/requirements/_base.txt b/packages/service-integration/requirements/_base.txt
index 332dcc970010..e76fb651293c 100644
--- a/packages/service-integration/requirements/_base.txt
+++ b/packages/service-integration/requirements/_base.txt
@@ -22,7 +22,7 @@ chardet==5.2.0
# via binaryornot
charset-normalizer==3.4.1
# via requests
-click==8.1.8
+click==8.2.1
# via
# -r requirements/_base.in
# cookiecutter
@@ -42,7 +42,7 @@ idna==3.10
# yarl
iniconfig==2.0.0
# via pytest
-jinja2==3.1.5
+jinja2==3.1.6
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -52,6 +52,8 @@ jinja2==3.1.5
# jinja2-time
jinja2-time==0.2.0
# via -r requirements/_base.in
+jsonref==1.1.0
+ # via -r requirements/../../../packages/models-library/requirements/_base.in
jsonschema==4.23.0
# via
# -r requirements/../../../packages/models-library/requirements/_base.in
@@ -81,7 +83,7 @@ pluggy==1.5.0
# via pytest
propcache==0.3.0
# via yarl
-pydantic==2.10.6
+pydantic==2.11.7
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -92,9 +94,9 @@ pydantic==2.10.6
# -r requirements/../../../packages/models-library/requirements/_base.in
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
-pydantic-extra-types==2.10.2
+pydantic-extra-types==2.10.5
# via
# -r requirements/../../../packages/common-library/requirements/_base.in
# -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in
@@ -107,8 +109,10 @@ pydantic-settings==2.7.0
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/models-library/requirements/_base.in
pygments==2.19.1
- # via rich
-pytest==8.3.5
+ # via
+ # pytest
+ # rich
+pytest==8.4.1
# via -r requirements/_base.in
python-dateutil==2.9.0.post0
# via arrow
@@ -132,11 +136,11 @@ referencing==0.35.1
# -c requirements/../../../requirements/constraints.txt
# jsonschema
# jsonschema-specifications
-requests==2.32.3
+requests==2.32.4
# via
# cookiecutter
# docker
-rich==13.9.4
+rich==14.1.0
# via
# cookiecutter
# typer
@@ -150,17 +154,20 @@ six==1.17.0
# via python-dateutil
text-unidecode==1.3
# via python-slugify
-typer==0.15.2
+typer==0.16.1
# via -r requirements/_base.in
types-python-dateutil==2.9.0.20241206
# via arrow
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# pydantic
# pydantic-core
# pydantic-extra-types
# typer
-urllib3==2.3.0
+ # typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
+urllib3==2.5.0
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
diff --git a/packages/service-integration/requirements/_test.txt b/packages/service-integration/requirements/_test.txt
index 19f48613efac..0074581d905b 100644
--- a/packages/service-integration/requirements/_test.txt
+++ b/packages/service-integration/requirements/_test.txt
@@ -19,14 +19,19 @@ pluggy==1.5.0
# via
# -c requirements/_base.txt
# pytest
-pytest==8.3.5
+ # pytest-cov
+pygments==2.19.1
+ # via
+ # -c requirements/_base.txt
+ # pytest
+pytest==8.4.1
# via
# -c requirements/_base.txt
# -r requirements/_test.in
# pytest-cov
# pytest-instafail
# pytest-sugar
-pytest-cov==6.0.0
+pytest-cov==6.2.1
# via -r requirements/_test.in
pytest-instafail==0.5.0
# via -r requirements/_test.in
@@ -53,7 +58,7 @@ types-pyyaml==6.0.12.20241230
# via -r requirements/_test.in
types-requests==2.32.0.20250301
# via types-docker
-urllib3==2.3.0
+urllib3==2.5.0
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
diff --git a/packages/service-integration/requirements/_tools.txt b/packages/service-integration/requirements/_tools.txt
index 3b1673839385..5e646f82bd2b 100644
--- a/packages/service-integration/requirements/_tools.txt
+++ b/packages/service-integration/requirements/_tools.txt
@@ -8,7 +8,7 @@ bump2version==1.0.1
# via -r requirements/../../../requirements/devenv.txt
cfgv==3.4.0
# via pre-commit
-click==8.1.8
+click==8.2.1
# via
# -c requirements/_base.txt
# black
@@ -27,9 +27,9 @@ isort==6.0.1
# pylint
mccabe==0.7.0
# via pylint
-mypy==1.15.0
+mypy==1.16.1
# via -r requirements/../../../requirements/devenv.txt
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via
# black
# mypy
@@ -42,7 +42,9 @@ packaging==24.2
# black
# build
pathspec==0.12.1
- # via black
+ # via
+ # black
+ # mypy
pip==25.0.1
# via pip-tools
pip-tools==7.4.1
@@ -67,11 +69,11 @@ pyyaml==6.0.2
# pre-commit
ruff==0.9.9
# via -r requirements/../../../requirements/devenv.txt
-setuptools==75.8.2
+setuptools==80.9.0
# via pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# mypy
diff --git a/packages/service-integration/src/service_integration/__init__.py b/packages/service-integration/src/service_integration/__init__.py
index 4f56c57d703b..78352fb23e83 100644
--- a/packages/service-integration/src/service_integration/__init__.py
+++ b/packages/service-integration/src/service_integration/__init__.py
@@ -1,5 +1,3 @@
-""" Library to facilitate the integration of user services running in osparc-simcore
-
-"""
+"""Library to facilitate the integration of user services running in osparc-simcore"""
from ._meta import __version__
diff --git a/packages/service-integration/src/service_integration/cli/__init__.py b/packages/service-integration/src/service_integration/cli/__init__.py
index a146de5735dd..6e8b5c6343e5 100644
--- a/packages/service-integration/src/service_integration/cli/__init__.py
+++ b/packages/service-integration/src/service_integration/cli/__init__.py
@@ -7,7 +7,7 @@
from .._meta import __version__
from ..settings import AppSettings
-from . import _compose_spec, _metadata, _run_creator, _test
+from . import _compose_spec, _escaping, _metadata, _run_creator, _test
from ._config import config_app
app = typer.Typer()
@@ -72,6 +72,7 @@ def main(
app.command("compose")(_compose_spec.create_compose)
app.add_typer(config_app, name="config", help="Manage osparc config files")
app.command("test")(_test.run_tests)
+app.command("legacy-escape")(_escaping.legacy_escape)
# legacy
app.command("bump-version")(_metadata.bump_version)
app.command("get-version")(_metadata.get_version)
diff --git a/packages/service-integration/src/service_integration/cli/_escaping.py b/packages/service-integration/src/service_integration/cli/_escaping.py
new file mode 100644
index 000000000000..2953911554cc
--- /dev/null
+++ b/packages/service-integration/src/service_integration/cli/_escaping.py
@@ -0,0 +1,44 @@
+import re
+from pathlib import Path
+from typing import Annotated
+
+import typer
+
+from ..osparc_config import OSPARC_CONFIG_DIRNAME
+
+
+def escape_dollar_brace(text: str) -> str:
+ # the pattern finds '$${' that is not preceded by another '$'.
+ pattern = r"(? str:
+ assert result.exception
+ tb_message = "\n".join(traceback.format_tb(result.exception.__traceback__))
+ return f"Below exception was raised by the cli:\n{tb_message}"
+
+
def test_cli_help(run_program_with_args: Callable):
result = run_program_with_args(
"--help",
)
- assert result.exit_code == 0
+ assert result.exit_code == os.EX_OK, _format_cli_error(result)
def test_cli_version(run_program_with_args: Callable):
result = run_program_with_args(
"--version",
)
- assert result.exit_code == 0
+ assert result.exit_code == os.EX_OK, _format_cli_error(result)
assert __version__ == result.output.strip()
+
+
+@pytest.fixture
+def copy_tests_data_dir(tests_data_dir: Path, tmp_path: Path) -> Path:
+ new_dir_path = tmp_path / "copy_tests_data_dir"
+ new_dir_path.mkdir(exist_ok=True, parents=True)
+
+ for item in tests_data_dir.glob("*"):
+ print(f"Copying {item} to {new_dir_path / item.name}")
+ shutil.copy2(item, new_dir_path / item.name)
+
+ return new_dir_path
+
+
+def test_cli_legacy_escape(copy_tests_data_dir: Path, run_program_with_args: Callable):
+ result = run_program_with_args(
+ "legacy-escape", "--osparc-config-dirname", copy_tests_data_dir
+ )
+ assert result.exit_code == os.EX_OK, _format_cli_error(result)
+ # NOTE: only 1 file will have a sequence that will be escaped
+ assert (
+ f"Escaped sequence in {copy_tests_data_dir}/docker-compose-meta.yml"
+ in result.output.strip()
+ )
diff --git a/packages/service-integration/tests/test_cli__escaping.py b/packages/service-integration/tests/test_cli__escaping.py
new file mode 100644
index 000000000000..e33463b4dcd0
--- /dev/null
+++ b/packages/service-integration/tests/test_cli__escaping.py
@@ -0,0 +1,20 @@
+import pytest
+from service_integration.cli._escaping import escape_dollar_brace
+
+
+@pytest.mark.parametrize(
+ "to_escape, escaped",
+ [
+ ("some text", "some text"),
+ ("$${escapes}", "$$$${escapes}"),
+ ("$$${preserves}", "$$${preserves}"),
+ ("$$$${preserves}", "$$$${preserves}"),
+ ("$$$$${preserves}", "$$$$${preserves}"),
+ (
+ "$${escapes} & $$${preserves},$$$${preserves}, $$$$${preserves}",
+ "$$$${escapes} & $$${preserves},$$$${preserves}, $$$$${preserves}",
+ ),
+ ],
+)
+def test_escape_dollar_brace(to_escape: str, escaped: str):
+ assert escape_dollar_brace(to_escape) == escaped
diff --git a/packages/service-integration/tests/test_osparc_image_specs.py b/packages/service-integration/tests/test_osparc_image_specs.py
index 6bec87425ad2..0dd6b96232a4 100644
--- a/packages/service-integration/tests/test_osparc_image_specs.py
+++ b/packages/service-integration/tests/test_osparc_image_specs.py
@@ -8,6 +8,7 @@
import pytest
import yaml
from pydantic import BaseModel
+from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
from service_integration.compose_spec_model import BuildItem, Service
from service_integration.osparc_config import (
DockerComposeOverwriteConfig,
@@ -19,7 +20,13 @@
@pytest.fixture
-def settings() -> AppSettings:
+def settings(monkeypatch: pytest.MonkeyPatch) -> AppSettings:
+ setenvs_from_dict(
+ monkeypatch,
+ {
+ "ENABLE_OOIL_OSPARC_VARIABLE_IDENTIFIER": "true",
+ },
+ )
return AppSettings()
diff --git a/packages/service-library/requirements/_aiohttp.txt b/packages/service-library/requirements/_aiohttp.txt
index fabfd7475b79..dd705a2037c1 100644
--- a/packages/service-library/requirements/_aiohttp.txt
+++ b/packages/service-library/requirements/_aiohttp.txt
@@ -1,6 +1,6 @@
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.11.18
+aiohttp==3.12.12
# via -r requirements/_aiohttp.in
aiopg==1.4.0
# via -r requirements/_aiohttp.in
@@ -14,10 +14,6 @@ attrs==25.1.0
# aiohttp
# jsonschema
# referencing
-deprecated==1.2.18
- # via
- # opentelemetry-api
- # opentelemetry-semantic-conventions
frozenlist==1.5.0
# via
# aiohttp
@@ -38,7 +34,7 @@ multidict==6.1.0
# via
# aiohttp
# yarl
-opentelemetry-api==1.30.0
+opentelemetry-api==1.34.1
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-aiohttp-client
@@ -46,27 +42,27 @@ opentelemetry-api==1.30.0
# opentelemetry-instrumentation-aiopg
# opentelemetry-instrumentation-dbapi
# opentelemetry-semantic-conventions
-opentelemetry-instrumentation==0.51b0
+opentelemetry-instrumentation==0.55b1
# via
# opentelemetry-instrumentation-aiohttp-client
# opentelemetry-instrumentation-aiohttp-server
# opentelemetry-instrumentation-aiopg
# opentelemetry-instrumentation-dbapi
-opentelemetry-instrumentation-aiohttp-client==0.51b0
+opentelemetry-instrumentation-aiohttp-client==0.55b1
# via -r requirements/_aiohttp.in
-opentelemetry-instrumentation-aiohttp-server==0.51b0
+opentelemetry-instrumentation-aiohttp-server==0.55b1
# via -r requirements/_aiohttp.in
-opentelemetry-instrumentation-aiopg==0.51b0
+opentelemetry-instrumentation-aiopg==0.55b1
# via -r requirements/_aiohttp.in
-opentelemetry-instrumentation-dbapi==0.51b0
+opentelemetry-instrumentation-dbapi==0.55b1
# via opentelemetry-instrumentation-aiopg
-opentelemetry-semantic-conventions==0.51b0
+opentelemetry-semantic-conventions==0.55b1
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-aiohttp-client
# opentelemetry-instrumentation-aiohttp-server
# opentelemetry-instrumentation-dbapi
-opentelemetry-util-http==0.51b0
+opentelemetry-util-http==0.55b1
# via
# opentelemetry-instrumentation-aiohttp-client
# opentelemetry-instrumentation-aiohttp-server
@@ -92,11 +88,14 @@ rpds-py==0.23.1
# referencing
sqlalchemy==1.4.54
# via aiopg
+typing-extensions==4.14.1
+ # via
+ # opentelemetry-api
+ # opentelemetry-semantic-conventions
werkzeug==3.1.3
# via -r requirements/_aiohttp.in
wrapt==1.17.2
# via
- # deprecated
# opentelemetry-instrumentation
# opentelemetry-instrumentation-aiohttp-client
# opentelemetry-instrumentation-aiohttp-server
diff --git a/packages/service-library/requirements/_base.in b/packages/service-library/requirements/_base.in
index 24222e414b81..d094798f8a49 100644
--- a/packages/service-library/requirements/_base.in
+++ b/packages/service-library/requirements/_base.in
@@ -19,6 +19,7 @@ faststream
opentelemetry-api
opentelemetry-exporter-otlp
opentelemetry-instrumentation-aio-pika
+opentelemetry-instrumentation-asyncpg
opentelemetry-instrumentation-logging
opentelemetry-instrumentation-redis
opentelemetry-instrumentation-requests
diff --git a/packages/service-library/requirements/_base.txt b/packages/service-library/requirements/_base.txt
index 862ed2b212bf..2ef0a5469a1d 100644
--- a/packages/service-library/requirements/_base.txt
+++ b/packages/service-library/requirements/_base.txt
@@ -10,7 +10,7 @@ aiofiles==24.1.0
# via -r requirements/_base.in
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.11.18
+aiohttp==3.12.12
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -49,14 +49,8 @@ certifi==2025.1.31
# requests
charset-normalizer==3.4.1
# via requests
-click==8.1.8
+click==8.2.1
# via typer
-deprecated==1.2.18
- # via
- # opentelemetry-api
- # opentelemetry-exporter-otlp-proto-grpc
- # opentelemetry-exporter-otlp-proto-http
- # opentelemetry-semantic-conventions
dnspython==2.7.0
# via email-validator
email-validator==2.2.0
@@ -71,7 +65,7 @@ frozenlist==1.5.0
# via
# aiohttp
# aiosignal
-googleapis-common-protos==1.68.0
+googleapis-common-protos==1.70.0
# via
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
@@ -85,6 +79,8 @@ idna==3.10
# yarl
importlib-metadata==8.5.0
# via opentelemetry-api
+jsonref==1.1.0
+ # via -r requirements/../../../packages/models-library/requirements/_base.in
jsonschema==4.23.0
# via -r requirements/../../../packages/models-library/requirements/_base.in
jsonschema-specifications==2024.10.1
@@ -97,59 +93,64 @@ multidict==6.1.0
# via
# aiohttp
# yarl
-opentelemetry-api==1.30.0
+opentelemetry-api==1.34.1
# via
# -r requirements/_base.in
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-instrumentation
# opentelemetry-instrumentation-aio-pika
+ # opentelemetry-instrumentation-asyncpg
# opentelemetry-instrumentation-logging
# opentelemetry-instrumentation-redis
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
-opentelemetry-exporter-otlp==1.30.0
+opentelemetry-exporter-otlp==1.34.1
# via -r requirements/_base.in
-opentelemetry-exporter-otlp-proto-common==1.30.0
+opentelemetry-exporter-otlp-proto-common==1.34.1
# via
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-exporter-otlp-proto-grpc==1.30.0
+opentelemetry-exporter-otlp-proto-grpc==1.34.1
# via opentelemetry-exporter-otlp
-opentelemetry-exporter-otlp-proto-http==1.30.0
+opentelemetry-exporter-otlp-proto-http==1.34.1
# via opentelemetry-exporter-otlp
-opentelemetry-instrumentation==0.51b0
+opentelemetry-instrumentation==0.55b1
# via
# opentelemetry-instrumentation-aio-pika
+ # opentelemetry-instrumentation-asyncpg
# opentelemetry-instrumentation-logging
# opentelemetry-instrumentation-redis
# opentelemetry-instrumentation-requests
-opentelemetry-instrumentation-aio-pika==0.51b0
+opentelemetry-instrumentation-aio-pika==0.55b1
# via -r requirements/_base.in
-opentelemetry-instrumentation-logging==0.51b0
+opentelemetry-instrumentation-asyncpg==0.55b1
# via -r requirements/_base.in
-opentelemetry-instrumentation-redis==0.51b0
+opentelemetry-instrumentation-logging==0.55b1
# via -r requirements/_base.in
-opentelemetry-instrumentation-requests==0.51b0
+opentelemetry-instrumentation-redis==0.55b1
# via -r requirements/_base.in
-opentelemetry-proto==1.30.0
+opentelemetry-instrumentation-requests==0.55b1
+ # via -r requirements/_base.in
+opentelemetry-proto==1.34.1
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-sdk==1.30.0
+opentelemetry-sdk==1.34.1
# via
# -r requirements/_base.in
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-semantic-conventions==0.51b0
+opentelemetry-semantic-conventions==0.55b1
# via
# opentelemetry-instrumentation
+ # opentelemetry-instrumentation-asyncpg
# opentelemetry-instrumentation-redis
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
-opentelemetry-util-http==0.51b0
+opentelemetry-util-http==0.55b1
# via opentelemetry-instrumentation-requests
orjson==3.10.15
# via
@@ -171,7 +172,7 @@ propcache==0.3.0
# via
# aiohttp
# yarl
-protobuf==5.29.3
+protobuf==5.29.5
# via
# googleapis-common-protos
# opentelemetry-proto
@@ -179,7 +180,7 @@ psutil==7.0.0
# via -r requirements/_base.in
pycryptodome==3.21.0
# via stream-zip
-pydantic==2.10.6
+pydantic==2.11.7
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -196,9 +197,9 @@ pydantic==2.10.6
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
-pydantic-extra-types==2.10.2
+pydantic-extra-types==2.10.5
# via
# -r requirements/../../../packages/common-library/requirements/_base.in
# -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in
@@ -250,9 +251,9 @@ referencing==0.35.1
# -c requirements/../../../requirements/constraints.txt
# jsonschema
# jsonschema-specifications
-requests==2.32.3
+requests==2.32.4
# via opentelemetry-exporter-otlp-proto-http
-rich==14.0.0
+rich==14.1.0
# via
# -r requirements/../../../packages/settings-library/requirements/_base.in
# typer
@@ -274,21 +275,28 @@ toolz==1.0.0
# via -r requirements/_base.in
tqdm==4.67.1
# via -r requirements/_base.in
-typer==0.16.0
+typer==0.16.1
# via -r requirements/../../../packages/settings-library/requirements/_base.in
types-python-dateutil==2.9.0.20241206
# via arrow
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# aiodebug
# anyio
# faststream
+ # opentelemetry-api
+ # opentelemetry-exporter-otlp-proto-grpc
+ # opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
+ # opentelemetry-semantic-conventions
# pydantic
# pydantic-core
# pydantic-extra-types
# typer
-urllib3==2.3.0
+ # typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
+urllib3==2.5.0
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -299,7 +307,6 @@ urllib3==2.3.0
# requests
wrapt==1.17.2
# via
- # deprecated
# opentelemetry-instrumentation
# opentelemetry-instrumentation-aio-pika
# opentelemetry-instrumentation-redis
diff --git a/packages/service-library/requirements/_fastapi.in b/packages/service-library/requirements/_fastapi.in
index 3303e6043afa..940a289b3c55 100644
--- a/packages/service-library/requirements/_fastapi.in
+++ b/packages/service-library/requirements/_fastapi.in
@@ -3,7 +3,7 @@
#
#
-
+asgi-lifespan
fastapi[standard]
fastapi-lifespan-manager
httpx[http2]
diff --git a/packages/service-library/requirements/_fastapi.txt b/packages/service-library/requirements/_fastapi.txt
index c6e5a29f597a..4b894d9984b6 100644
--- a/packages/service-library/requirements/_fastapi.txt
+++ b/packages/service-library/requirements/_fastapi.txt
@@ -5,34 +5,37 @@ anyio==4.8.0
# httpx
# starlette
# watchfiles
+asgi-lifespan==2.1.0
+ # via -r requirements/_fastapi.in
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
certifi==2025.1.31
# via
# httpcore
# httpx
-click==8.1.8
+ # sentry-sdk
+click==8.2.1
# via
# rich-toolkit
# typer
# uvicorn
-deprecated==1.2.18
- # via
- # opentelemetry-api
- # opentelemetry-semantic-conventions
dnspython==2.7.0
# via email-validator
email-validator==2.2.0
- # via fastapi
-fastapi==0.115.12
+ # via
+ # fastapi
+ # pydantic
+fastapi==0.116.1
# via
# -r requirements/_fastapi.in
# fastapi-lifespan-manager
-fastapi-cli==0.0.7
+fastapi-cli==0.0.8
# via fastapi
+fastapi-cloud-cli==0.1.5
+ # via fastapi-cli
fastapi-lifespan-manager==0.1.4
# via -r requirements/_fastapi.in
-h11==0.14.0
+h11==0.16.0
# via
# httpcore
# uvicorn
@@ -40,7 +43,7 @@ h2==4.2.0
# via httpx
hpack==4.1.0
# via h2
-httpcore==1.0.7
+httpcore==1.0.9
# via httpx
httptools==0.6.4
# via uvicorn
@@ -48,6 +51,7 @@ httpx==0.28.1
# via
# -r requirements/_fastapi.in
# fastapi
+ # fastapi-cloud-cli
hyperframe==6.1.0
# via h2
idna==3.10
@@ -65,31 +69,31 @@ markupsafe==3.0.2
# via jinja2
mdurl==0.1.2
# via markdown-it-py
-opentelemetry-api==1.30.0
+opentelemetry-api==1.34.1
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-asgi
# opentelemetry-instrumentation-fastapi
# opentelemetry-instrumentation-httpx
# opentelemetry-semantic-conventions
-opentelemetry-instrumentation==0.51b0
+opentelemetry-instrumentation==0.55b1
# via
# opentelemetry-instrumentation-asgi
# opentelemetry-instrumentation-fastapi
# opentelemetry-instrumentation-httpx
-opentelemetry-instrumentation-asgi==0.51b0
+opentelemetry-instrumentation-asgi==0.55b1
# via opentelemetry-instrumentation-fastapi
-opentelemetry-instrumentation-fastapi==0.51b0
+opentelemetry-instrumentation-fastapi==0.55b1
# via -r requirements/_fastapi.in
-opentelemetry-instrumentation-httpx==0.51b0
+opentelemetry-instrumentation-httpx==0.55b1
# via -r requirements/_fastapi.in
-opentelemetry-semantic-conventions==0.51b0
+opentelemetry-semantic-conventions==0.55b1
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-asgi
# opentelemetry-instrumentation-fastapi
# opentelemetry-instrumentation-httpx
-opentelemetry-util-http==0.51b0
+opentelemetry-util-http==0.55b1
# via
# opentelemetry-instrumentation-asgi
# opentelemetry-instrumentation-fastapi
@@ -98,9 +102,11 @@ packaging==24.2
# via opentelemetry-instrumentation
prometheus-client==0.21.1
# via -r requirements/_fastapi.in
-pydantic==2.10.6
- # via fastapi
-pydantic-core==2.27.2
+pydantic==2.11.7
+ # via
+ # fastapi
+ # fastapi-cloud-cli
+pydantic-core==2.33.2
# via pydantic
pygments==2.19.1
# via rich
@@ -110,32 +116,51 @@ python-multipart==0.0.20
# via fastapi
pyyaml==6.0.2
# via uvicorn
-rich==14.0.0
+rich==14.1.0
# via
# rich-toolkit
# typer
-rich-toolkit==0.14.7
- # via fastapi-cli
+rich-toolkit==0.15.0
+ # via
+ # fastapi-cli
+ # fastapi-cloud-cli
+rignore==0.6.4
+ # via fastapi-cloud-cli
+sentry-sdk==2.35.0
+ # via fastapi-cloud-cli
shellingham==1.5.4
# via typer
sniffio==1.3.1
- # via anyio
-starlette==0.46.0
+ # via
+ # anyio
+ # asgi-lifespan
+starlette==0.47.2
# via fastapi
-typer==0.16.0
- # via fastapi-cli
-typing-extensions==4.12.2
+typer==0.16.1
+ # via
+ # fastapi-cli
+ # fastapi-cloud-cli
+typing-extensions==4.14.1
# via
# anyio
# fastapi
+ # opentelemetry-api
+ # opentelemetry-semantic-conventions
# pydantic
# pydantic-core
# rich-toolkit
+ # starlette
# typer
+ # typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
+urllib3==2.5.0
+ # via sentry-sdk
uvicorn==0.34.2
# via
# fastapi
# fastapi-cli
+ # fastapi-cloud-cli
uvloop==0.21.0
# via uvicorn
watchfiles==1.0.5
@@ -144,7 +169,6 @@ websockets==15.0.1
# via uvicorn
wrapt==1.17.2
# via
- # deprecated
# opentelemetry-instrumentation
# opentelemetry-instrumentation-httpx
zipp==3.21.0
diff --git a/packages/service-library/requirements/_test.in b/packages/service-library/requirements/_test.in
index 239a389cbc06..5fd26efe1e97 100644
--- a/packages/service-library/requirements/_test.in
+++ b/packages/service-library/requirements/_test.in
@@ -17,6 +17,7 @@ botocore
coverage
docker
faker
+fakeredis[lua]
flaky
numpy
openapi-spec-validator
@@ -41,3 +42,4 @@ types_aiofiles
types_tqdm
types-psutil
types-psycopg2
+uvloop
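
Note: `fakeredis[lua]` provides an in-memory Redis replacement for tests; the `lua` extra pulls in `lupa` so EVAL-based scripts also work. A hedged sketch of a pytest fixture built on it, assuming pytest-asyncio is configured for async fixtures and tests accept a `redis.asyncio.Redis`-compatible client:

    import fakeredis.aioredis
    import pytest


    @pytest.fixture
    async def redis_client():
        # in-memory server; Lua scripting is available via the [lua] extra
        client = fakeredis.aioredis.FakeRedis(decode_responses=True)
        yield client
        await client.flushall()
        await client.aclose()
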
diff --git a/packages/service-library/requirements/_test.txt b/packages/service-library/requirements/_test.txt
index 0714e6eb3e01..af2d7db162e7 100644
--- a/packages/service-library/requirements/_test.txt
+++ b/packages/service-library/requirements/_test.txt
@@ -3,7 +3,7 @@ aiohappyeyeballs==2.6.1
# -c requirements/_aiohttp.txt
# -c requirements/_base.txt
# aiohttp
-aiohttp==3.11.18
+aiohttp==3.12.12
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_aiohttp.txt
@@ -20,7 +20,9 @@ anyio==4.8.0
# -c requirements/_fastapi.txt
# httpx
asgi-lifespan==2.1.0
- # via -r requirements/_test.in
+ # via
+ # -c requirements/_fastapi.txt
+ # -r requirements/_test.in
attrs==25.1.0
# via
# -c requirements/_aiohttp.txt
@@ -53,6 +55,8 @@ execnet==2.1.1
# via pytest-xdist
faker==36.1.1
# via -r requirements/_test.in
+fakeredis==2.30.3
+ # via -r requirements/_test.in
flaky==3.8.1
# via -r requirements/_test.in
frozenlist==1.5.0
@@ -65,11 +69,11 @@ greenlet==3.1.1
# via
# -c requirements/_aiohttp.txt
# sqlalchemy
-h11==0.14.0
+h11==0.16.0
# via
# -c requirements/_fastapi.txt
# httpcore
-httpcore==1.0.7
+httpcore==1.0.9
# via
# -c requirements/_fastapi.txt
# httpx
@@ -109,15 +113,17 @@ jsonschema-specifications==2024.10.1
# openapi-schema-validator
lazy-object-proxy==1.10.0
# via openapi-spec-validator
+lupa==2.5
+ # via fakeredis
multidict==6.1.0
# via
# -c requirements/_aiohttp.txt
# -c requirements/_base.txt
# aiohttp
# yarl
-mypy==1.15.0
+mypy==1.16.1
# via sqlalchemy
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via mypy
numpy==2.2.3
# via -r requirements/_test.in
@@ -134,12 +140,16 @@ packaging==24.2
# pytest-sugar
pathable==0.4.4
# via jsonschema-path
+pathspec==0.12.1
+ # via mypy
pillow==11.1.0
# via -r requirements/_test.in
pip==25.0.1
# via -r requirements/_test.in
pluggy==1.5.0
- # via pytest
+ # via
+ # pytest
+ # pytest-cov
pprintpp==0.4.0
# via pytest-icdiff
propcache==0.3.0
@@ -150,7 +160,12 @@ propcache==0.3.0
# yarl
py-cpuinfo==9.0.0
# via pytest-benchmark
-pytest==8.3.5
+pygments==2.19.1
+ # via
+ # -c requirements/_base.txt
+ # -c requirements/_fastapi.txt
+ # pytest
+pytest==8.4.1
# via
# -r requirements/_test.in
# pytest-aiohttp
@@ -165,27 +180,27 @@ pytest==8.3.5
# pytest-xdist
pytest-aiohttp==1.1.0
# via -r requirements/_test.in
-pytest-asyncio==0.26.0
+pytest-asyncio==1.0.0
# via
# -r requirements/_test.in
# pytest-aiohttp
pytest-benchmark==5.1.0
# via -r requirements/_test.in
-pytest-cov==6.0.0
+pytest-cov==6.2.1
# via -r requirements/_test.in
-pytest-docker==3.2.0
+pytest-docker==3.2.3
# via -r requirements/_test.in
pytest-icdiff==0.9
# via -r requirements/_test.in
pytest-instafail==0.5.0
# via -r requirements/_test.in
-pytest-mock==3.14.0
+pytest-mock==3.14.1
# via -r requirements/_test.in
pytest-runner==6.0.1
# via -r requirements/_test.in
pytest-sugar==1.0.0
# via -r requirements/_test.in
-pytest-xdist==3.6.1
+pytest-xdist==3.8.0
# via -r requirements/_test.in
python-dateutil==2.9.0.post0
# via
@@ -202,6 +217,11 @@ pyyaml==6.0.2
# -c requirements/_base.txt
# -c requirements/_fastapi.txt
# jsonschema-path
+redis==5.2.1
+ # via
+ # -c requirements/../../../requirements/constraints.txt
+ # -c requirements/_base.txt
+ # fakeredis
referencing==0.35.1
# via
# -c requirements/../../../requirements/constraints.txt
@@ -210,7 +230,7 @@ referencing==0.35.1
# jsonschema
# jsonschema-path
# jsonschema-specifications
-requests==2.32.3
+requests==2.32.4
# via
# -c requirements/_base.txt
# docker
@@ -236,6 +256,8 @@ sniffio==1.3.1
# -c requirements/_fastapi.txt
# anyio
# asgi-lifespan
+sortedcontainers==2.4.0
+ # via fakeredis
sqlalchemy==1.4.54
# via
# -c requirements/../../../requirements/constraints.txt
@@ -255,8 +277,9 @@ types-requests==2.32.0.20250301
# via types-tqdm
types-tqdm==4.67.0.20250301
# via -r requirements/_test.in
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
+ # -c requirements/_aiohttp.txt
# -c requirements/_base.txt
# -c requirements/_fastapi.txt
# anyio
@@ -264,14 +287,19 @@ typing-extensions==4.12.2
# sqlalchemy2-stubs
tzdata==2025.1
# via faker
-urllib3==2.3.0
+urllib3==2.5.0
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
+ # -c requirements/_fastapi.txt
# botocore
# docker
# requests
# types-requests
+uvloop==0.21.0
+ # via
+ # -c requirements/_fastapi.txt
+ # -r requirements/_test.in
yarl==1.18.3
# via
# -c requirements/_aiohttp.txt
diff --git a/packages/service-library/requirements/_tools.txt b/packages/service-library/requirements/_tools.txt
index 985c2c3bc856..35fcb09f3491 100644
--- a/packages/service-library/requirements/_tools.txt
+++ b/packages/service-library/requirements/_tools.txt
@@ -8,7 +8,7 @@ bump2version==1.0.1
# via -r requirements/../../../requirements/devenv.txt
cfgv==3.4.0
# via pre-commit
-click==8.1.8
+click==8.2.1
# via
# -c requirements/_base.txt
# black
@@ -27,11 +27,11 @@ isort==6.0.1
# pylint
mccabe==0.7.0
# via pylint
-mypy==1.15.0
+mypy==1.16.1
# via
# -c requirements/_test.txt
# -r requirements/../../../requirements/devenv.txt
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via
# -c requirements/_test.txt
# black
@@ -45,7 +45,10 @@ packaging==24.2
# black
# build
pathspec==0.12.1
- # via black
+ # via
+ # -c requirements/_test.txt
+ # black
+ # mypy
pip==25.0.1
# via
# -c requirements/_test.txt
@@ -73,11 +76,11 @@ pyyaml==6.0.2
# pre-commit
ruff==0.9.9
# via -r requirements/../../../requirements/devenv.txt
-setuptools==75.8.2
+setuptools==80.9.0
# via pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/packages/service-library/setup.cfg b/packages/service-library/setup.cfg
index 874495da36bd..714b873009e3 100644
--- a/packages/service-library/setup.cfg
+++ b/packages/service-library/setup.cfg
@@ -21,6 +21,7 @@ markers =
testit: "marks test to run during development"
performance_test: "performance test"
no_cleanup_check_rabbitmq_server_has_no_errors: "no check in rabbitmq logs"
+ heavy_load: "marks test as heavy load"
[mypy]
plugins =
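
Note: the new `heavy_load` marker lets resource-intensive tests be tagged and filtered from quick CI runs; a small usage sketch (the test itself is hypothetical):

    import pytest


    @pytest.mark.heavy_load
    def test_many_parallel_writes():
        # selected with:    pytest -m heavy_load
        # excluded with:    pytest -m "not heavy_load"
        assert True  # placeholder body
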
diff --git a/packages/service-library/setup.py b/packages/service-library/setup.py
index 521b491b918e..2ddd96c9ece1 100644
--- a/packages/service-library/setup.py
+++ b/packages/service-library/setup.py
@@ -38,7 +38,7 @@ def read_reqs(reqs_path: Path) -> set[str]:
"python_requires": "~=3.11",
"install_requires": tuple(PROD_REQUIREMENTS),
"packages": find_packages(where="src"),
- "package_data": {"": ["py.typed"]},
+ "package_data": {"": ["py.typed", "redis/lua/*.lua"]},
"package_dir": {"": "src"},
"test_suite": "tests",
"tests_require": tuple(TEST_REQUIREMENTS),
diff --git a/packages/service-library/src/servicelib/aiohttp/aiopg_utils.py b/packages/service-library/src/servicelib/aiohttp/aiopg_utils.py
index e7b98347c318..23fcbb41f0a3 100644
--- a/packages/service-library/src/servicelib/aiohttp/aiopg_utils.py
+++ b/packages/service-library/src/servicelib/aiohttp/aiopg_utils.py
@@ -1,20 +1,21 @@
-""" Holderplace for random helpers using aiopg
+"""Holderplace for random helpers using aiopg
- - Drop here functions/constants that at that time does
- not fit in any of the setups. Then, they can be moved and
- refactor when new abstractions are used in place.
+- Drop here functions/constants that currently do not fit
+in any of the setups. They can be moved and refactored
+once new abstractions are in place.
- - aiopg is used as a client sdk to interact asynchronously with postgres service
+- aiopg is used as a client sdk to interact asynchronously with postgres service
- SEE for aiopg: https://aiopg.readthedocs.io/en/stable/sa.html
- SEE for underlying psycopg: http://initd.org/psycopg/docs/module.html
- SEE for extra keywords: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+SEE for aiopg: https://aiopg.readthedocs.io/en/stable/sa.html
+SEE for underlying psycopg: http://initd.org/psycopg/docs/module.html
+SEE for extra keywords: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS
"""
# TODO: Towards implementing https://github.com/ITISFoundation/osparc-simcore/issues/1195
# TODO: deprecate this module. Move utils into retry_policies, simcore_postgres_database.utils_aiopg
import logging
+from typing import Final
import sqlalchemy as sa
from aiohttp import web
@@ -31,6 +32,8 @@
log = logging.getLogger(__name__)
+APP_AIOPG_ENGINE_KEY: Final = web.AppKey("APP_AIOPG_ENGINE_KEY", Engine)
+
async def raise_if_not_responsive(engine: Engine):
async with engine.acquire() as conn:
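
Note: `web.AppKey` gives typed application-state keys, so `app[APP_AIOPG_ENGINE_KEY]` is statically known to be an `Engine` instead of an untyped string lookup. A minimal sketch of the pattern under that assumption (the DSN is hypothetical):

    from aiohttp import web
    from aiopg.sa import Engine, create_engine

    APP_AIOPG_ENGINE_KEY = web.AppKey("APP_AIOPG_ENGINE_KEY", Engine)


    async def pg_engine_ctx(app: web.Application):
        # cleanup-context: store the typed engine on startup, dispose on shutdown
        # register with: app.cleanup_ctx.append(pg_engine_ctx)
        engine = await create_engine(dsn="postgresql://user:secret@localhost/db")  # hypothetical DSN
        app[APP_AIOPG_ENGINE_KEY] = engine
        yield
        engine.close()
        await engine.wait_closed()
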
diff --git a/packages/service-library/src/servicelib/aiohttp/application_keys.py b/packages/service-library/src/servicelib/aiohttp/application_keys.py
index 3958c860cb00..1a85b2a00796 100644
--- a/packages/service-library/src/servicelib/aiohttp/application_keys.py
+++ b/packages/service-library/src/servicelib/aiohttp/application_keys.py
@@ -1,4 +1,4 @@
-""" Namespace to keep all application storage keys
+"""Namespace to keep all application storage keys
Unique keys to identify stored data
Naming convention accounts for the storage scope: application, request, response, configuration and/or resources
@@ -8,22 +8,25 @@
See https://aiohttp.readthedocs.io/en/stable/web_advanced.html#data-sharing-aka-no-singletons-please
"""
+
from typing import Final
-# REQUIREMENTS:
-# - guarantees all keys are unique
-# - one place for all common keys
-# - hierarchical classification
+from aiohttp import ClientSession, web
+
+# APPLICATION's CONTEXT KEYS
+
+# NOTE: use these keys to store/retrieve data from aiohttp.web.Application
+# SEE https://docs.aiohttp.org/en/stable/web_quickstart.html#aiohttp-web-app-key
#
# web.Application keys, i.e. app[APP_*_KEY]
#
-APP_CONFIG_KEY: Final[str] = f"{__name__ }.config"
-APP_SETTINGS_KEY: Final[str] = f"{__name__ }.settings"
+APP_CONFIG_KEY = web.AppKey("APP_CONFIG_KEY", dict[str, object])
APP_AIOPG_ENGINE_KEY: Final[str] = f"{__name__ }.aiopg_engine"
-APP_CLIENT_SESSION_KEY: Final[str] = f"{__name__ }.session"
+APP_CLIENT_SESSION_KEY: web.AppKey[ClientSession] = web.AppKey("APP_CLIENT_SESSION_KEY")
+
APP_FIRE_AND_FORGET_TASKS_KEY: Final[str] = f"{__name__}.tasks"
diff --git a/packages/service-library/src/servicelib/aiohttp/application_setup.py b/packages/service-library/src/servicelib/aiohttp/application_setup.py
index 0d52603f9651..3375c5444f9d 100644
--- a/packages/service-library/src/servicelib/aiohttp/application_setup.py
+++ b/packages/service-library/src/servicelib/aiohttp/application_setup.py
@@ -2,9 +2,12 @@
import inspect
import logging
from collections.abc import Callable
+from contextlib import ContextDecorator
from copy import deepcopy
+from datetime import datetime
from enum import Enum
-from typing import Any, Protocol
+from types import TracebackType
+from typing import Any, Final, Protocol
import arrow
from aiohttp import web
@@ -13,23 +16,21 @@
TypedDict,
)
-from .application_keys import APP_CONFIG_KEY, APP_SETTINGS_KEY
+from .application_keys import APP_CONFIG_KEY
-log = logging.getLogger(__name__)
+_logger = logging.getLogger(__name__)
-APP_SETUP_COMPLETED_KEY = f"{__name__ }.setup"
+APP_SETUP_COMPLETED_KEY: Final[web.AppKey] = web.AppKey("setup_completed", list[str])
class _SetupFunc(Protocol):
__name__: str
- def __call__(self, app: web.Application, *args: Any, **kwds: Any) -> bool:
- ...
+ def __call__(self, app: web.Application, *args: Any, **kwds: Any) -> bool: ...
class _ApplicationSettings(Protocol):
- def is_enabled(self, field_name: str) -> bool:
- ...
+ def is_enabled(self, field_name: str) -> bool: ...
class ModuleCategory(Enum):
@@ -46,12 +47,10 @@ def __init__(self, *, reason) -> None:
super().__init__(reason)
-class ApplicationSetupError(Exception):
- ...
+class ApplicationSetupError(Exception): ...
-class DependencyError(ApplicationSetupError):
- ...
+class DependencyError(ApplicationSetupError): ...
class SetupMetadataDict(TypedDict):
@@ -116,13 +115,14 @@ def _get_app_settings_and_field_name(
arg_settings_name: str | None,
setup_func_name: str,
logger: logging.Logger,
+ app_settings_key: web.AppKey,
) -> tuple[_ApplicationSettings | None, str | None]:
- app_settings: _ApplicationSettings | None = app.get(APP_SETTINGS_KEY)
+ app_settings: _ApplicationSettings | None = app.get(app_settings_key)
settings_field_name = arg_settings_name
if app_settings:
if not settings_field_name:
- # FIXME: hard-coded WEBSERVER_ temporary
+ # NOTE: hard-coded WEBSERVER_ temporary
settings_field_name = f"WEBSERVER_{arg_module_name.split('.')[-1].upper()}"
logger.debug("Checking addon's %s ", f"{settings_field_name=}")
@@ -134,6 +134,53 @@ def _get_app_settings_and_field_name(
return app_settings, settings_field_name
+class _SetupTimingContext(ContextDecorator):
+ """Context manager/decorator for timing and logging module setup operations."""
+
+ def __init__(
+ self,
+ module_name: str,
+ *,
+ logger: logging.Logger,
+ category: ModuleCategory | None = None,
+ depends: list[str] | None = None,
+ ) -> None:
+ """Initialize timing context.
+
+ :param module_name: Name of the module being set up
+ :param category: Optional module category for detailed logging
+ :param depends: Optional dependencies for detailed logging
+ """
+ self.module_name = module_name
+ self.category = category
+ self.depends = depends
+ self.started: datetime | None = None
+ self.head_msg = f"Setup of {module_name}"
+ self.logger = logger
+
+ def __enter__(self) -> None:
+ self.started = arrow.utcnow().datetime
+ if self.category is not None:
+ self.logger.info(
+ "%s (%s, %s) started ... ",
+ self.head_msg,
+ f"{self.category.name=}",
+ f"{self.depends}",
+ )
+ else:
+ self.logger.info("%s started ...", self.head_msg)
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ if self.started:
+ elapsed = (arrow.utcnow() - self.started).total_seconds()
+ _logger.info("%s completed [Elapsed: %3.1f secs]", self.head_msg, elapsed)
+
+
# PUBLIC API ------------------------------------------------------------------
@@ -141,13 +188,69 @@ def is_setup_completed(module_name: str, app: web.Application) -> bool:
return module_name in app[APP_SETUP_COMPLETED_KEY]
+def ensure_single_setup(
+ module_name: str,
+ *,
+ logger: logging.Logger,
+) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
+ """Ensures a setup function is executed only once per application and handles completion.
+
+ :param module_name: Name of the module being set up
+ """
+
+ def _log_skip(reason: str) -> bool:
+ logger.info("Skipping '%s' setup: %s", module_name, reason)
+ return False
+
+ def decorator(setup_func: _SetupFunc) -> _SetupFunc:
+
+ @functools.wraps(setup_func)
+ def _wrapper(app: web.Application, *args: Any, **kwargs: Any) -> bool:
+
+ # pre-setup init
+ if APP_SETUP_COMPLETED_KEY not in app:
+ app[APP_SETUP_COMPLETED_KEY] = []
+
+ # check
+ if is_setup_completed(module_name, app):
+ _log_skip(
+ f"'{module_name}' was already initialized in {app}."
+ " Setup can only be executed once per app."
+ )
+ return False
+
+ try:
+ completed = setup_func(app, *args, **kwargs)
+
+ # post-setup handling
+ if completed is None:
+ completed = True
+
+ if completed: # registers completed setup
+ app[APP_SETUP_COMPLETED_KEY].append(module_name)
+ return completed
+
+ assert not completed # nosec
+ _log_skip("Undefined (setup function returned false)")
+ return False
+
+ except SkipModuleSetupError as err:
+ _log_skip(err.reason)
+ return False
+
+ return _wrapper
+
+ return decorator
+
+
def app_module_setup(
module_name: str,
category: ModuleCategory,
*,
+ app_settings_key: web.AppKey,
settings_name: str | None = None,
depends: list[str] | None = None,
- logger: logging.Logger = log,
+ logger: logging.Logger = _logger,
# TODO: SEE https://github.com/ITISFoundation/osparc-simcore/issues/2008
# TODO: - settings_name becomes module_name!!
# TODO: - plugin base should be aware of setup and settings -> model instead of function?
@@ -190,35 +293,27 @@ def setup(app: web.Application):
module_name, depends, config_section, config_enabled
)
- def _decorate(setup_func: _SetupFunc):
- if "setup" not in setup_func.__name__:
- logger.warning("Rename '%s' to contain 'setup'", setup_func.__name__)
-
- # metadata info
- def setup_metadata() -> SetupMetadataDict:
- return SetupMetadataDict(
- module_name=module_name,
- dependencies=depends,
- config_section=section,
- config_enabled=config_enabled,
- )
+ # metadata info
+ def _setup_metadata() -> SetupMetadataDict:
+ return SetupMetadataDict(
+ module_name=module_name,
+ dependencies=depends,
+ config_section=section,
+ config_enabled=config_enabled,
+ )
- # wrapper
- @functools.wraps(setup_func)
- def _wrapper(app: web.Application, *args, **kargs) -> bool:
- # pre-setup
- head_msg = f"Setup of {module_name}"
- started = arrow.utcnow()
- logger.info(
- "%s (%s, %s) started ... ",
- head_msg,
- f"{category.name=}",
- f"{depends}",
- )
+ def decorator(setup_func: _SetupFunc) -> _SetupFunc:
- if APP_SETUP_COMPLETED_KEY not in app:
- app[APP_SETUP_COMPLETED_KEY] = []
+ assert ( # nosec
+ "setup_" in setup_func.__name__
+ ), f"Rename '{setup_func.__name__}' like 'setup_$(plugin-name)'"
+ @functools.wraps(setup_func)
+ @ensure_single_setup(module_name, logger=logger)
+ @_SetupTimingContext(
+ module_name, category=category, depends=depends, logger=logger
+ )
+ def _wrapper(app: web.Application, *args, **kargs) -> bool:
if category == ModuleCategory.ADDON:
# ONLY addons can be enabled/disabled
@@ -243,6 +338,7 @@ def _wrapper(app: web.Application, *args, **kargs) -> bool:
settings_name,
setup_func.__name__,
logger,
+ app_settings_key,
)
if (
@@ -258,7 +354,6 @@ def _wrapper(app: web.Application, *args, **kargs) -> bool:
return False
if depends:
- # TODO: no need to enforce. Use to deduce order instead.
uninitialized = [
dep for dep in depends if not is_setup_completed(dep, app)
]
@@ -266,52 +361,24 @@ def _wrapper(app: web.Application, *args, **kargs) -> bool:
msg = f"Cannot setup app module '{module_name}' because the following dependencies are still uninitialized: {uninitialized}"
raise DependencyError(msg)
- # execution of setup
- try:
- if is_setup_completed(module_name, app):
- raise SkipModuleSetupError( # noqa: TRY301
- reason=f"'{module_name}' was already initialized in {app}."
- " Setup can only be executed once per app."
- )
-
- completed = setup_func(app, *args, **kargs)
-
- # post-setup
- if completed is None:
- completed = True
-
- if completed: # registers completed setup
- app[APP_SETUP_COMPLETED_KEY].append(module_name)
- else:
- raise SkipModuleSetupError( # noqa: TRY301
- reason="Undefined (setup function returned false)"
- )
+ # execution of setup with module name
+ completed: bool = setup_func(app, *args, **kargs)
- except SkipModuleSetupError as exc:
- logger.info("Skipping '%s' setup: %s", module_name, exc.reason)
- completed = False
-
- elapsed = arrow.utcnow() - started
- logger.info(
- "%s %s [Elapsed: %3.1f secs]",
- head_msg,
- "completed" if completed else "skipped",
- elapsed.total_seconds(),
- )
return completed
- _wrapper.metadata = setup_metadata # type: ignore[attr-defined]
- _wrapper.mark_as_simcore_servicelib_setup_func = True # type: ignore[attr-defined]
- # NOTE: this is added by functools.wraps decorated
- assert _wrapper.__wrapped__ == setup_func # nosec
+ assert (
+ _wrapper.__wrapped__ == setup_func
+ ), "this is added by functools.wraps decorator" # nosec
+
+ setattr(_wrapper, "metadata", _setup_metadata) # noqa: B010
+ setattr(_wrapper, "mark_as_simcore_servicelib_setup_func", True) # noqa: B010
return _wrapper
- return _decorate
+ return decorator
def is_setup_function(fun: Callable) -> bool:
- # TODO: use _SetupFunc protocol to check in runtime
return (
inspect.isfunction(fun)
and hasattr(fun, "mark_as_simcore_servicelib_setup_func")
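
Note: the new `ensure_single_setup` decorator factors the "run once per app" guard out of `app_module_setup`; a hedged sketch of how a plugin setup function could use it directly (plugin name and route are hypothetical):

    import logging

    from aiohttp import web
    from servicelib.aiohttp.application_setup import ensure_single_setup

    _logger = logging.getLogger(__name__)


    async def _ping(request: web.Request) -> web.Response:
        return web.json_response({"ok": True})


    @ensure_single_setup("my_plugin", logger=_logger)  # hypothetical plugin name
    def setup_my_plugin(app: web.Application) -> bool:
        app.router.add_get("/ping", _ping)
        return True  # recorded in app[APP_SETUP_COMPLETED_KEY]; a second call is skipped
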
diff --git a/packages/service-library/src/servicelib/aiohttp/client_session.py b/packages/service-library/src/servicelib/aiohttp/client_session.py
index 40e49c76a4bc..010c2ec345bf 100644
--- a/packages/service-library/src/servicelib/aiohttp/client_session.py
+++ b/packages/service-library/src/servicelib/aiohttp/client_session.py
@@ -1,5 +1,4 @@
from collections.abc import AsyncGenerator
-from typing import cast
from aiohttp import ClientSession, ClientTimeout, web
from common_library.json_serialization import json_dumps
@@ -41,10 +40,11 @@ async def persistent_client_session(app: web.Application) -> AsyncGenerator[None
def get_client_session(app: web.Application) -> ClientSession:
"""Refers to the one-and-only client in the app"""
assert APP_CLIENT_SESSION_KEY in app # nosec
- return cast(ClientSession, app[APP_CLIENT_SESSION_KEY])
+ return app[APP_CLIENT_SESSION_KEY]
__all__: tuple[str, ...] = (
+ "APP_CLIENT_SESSION_KEY",
"get_client_session",
"persistent_client_session",
)
diff --git a/packages/service-library/src/servicelib/aiohttp/db_asyncpg_engine.py b/packages/service-library/src/servicelib/aiohttp/db_asyncpg_engine.py
index 88b0338dadfb..9e5056f67bc5 100644
--- a/packages/service-library/src/servicelib/aiohttp/db_asyncpg_engine.py
+++ b/packages/service-library/src/servicelib/aiohttp/db_asyncpg_engine.py
@@ -8,7 +8,6 @@
from typing import Final
from aiohttp import web
-from servicelib.logging_utils import log_context
from settings_library.postgres import PostgresSettings
from simcore_postgres_database.utils_aiosqlalchemy import ( # type: ignore[import-not-found] # this on is unclear
get_pg_engine_stateinfo,
@@ -39,23 +38,22 @@ def get_async_engine(app: web.Application) -> AsyncEngine:
return engine
-async def connect_to_db(app: web.Application, settings: PostgresSettings) -> None:
+async def connect_to_db(
+ app: web.Application, settings: PostgresSettings, application_name: str
+) -> None:
"""
- db services up, data migrated and ready to use
- sets an engine in app state (use `get_async_engine(app)` to retrieve)
"""
- if settings.POSTGRES_CLIENT_NAME:
- settings = settings.model_copy(
- update={"POSTGRES_CLIENT_NAME": settings.POSTGRES_CLIENT_NAME + "-asyncpg"}
- )
-
with log_context(
_logger,
logging.INFO,
"Connecting app[APP_DB_ASYNC_ENGINE_KEY] to postgres with %s",
f"{settings=}",
):
- engine = await create_async_engine_and_database_ready(settings)
+ engine = await create_async_engine_and_database_ready(
+ settings, application_name
+ )
_set_async_engine_to_app_state(app, engine)
_logger.info(
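
Note: `connect_to_db` now takes an explicit `application_name` instead of deriving it from `POSTGRES_CLIENT_NAME`. A sketch of a startup hook under that new signature; settings are assumed to come from the environment and the service name is hypothetical:

    from aiohttp import web
    from servicelib.aiohttp.db_asyncpg_engine import connect_to_db
    from settings_library.postgres import PostgresSettings


    async def postgres_cleanup_ctx(app: web.Application):
        settings = PostgresSettings.create_from_envs()  # assumes env-based configuration
        await connect_to_db(app, settings, application_name="my-service")  # hypothetical name
        yield
        # engine disposal on shutdown omitted for brevity
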
diff --git a/packages/service-library/src/servicelib/aiohttp/docker_utils.py b/packages/service-library/src/servicelib/aiohttp/docker_utils.py
index 8e9393e1e69c..3468b789a8a5 100644
--- a/packages/service-library/src/servicelib/aiohttp/docker_utils.py
+++ b/packages/service-library/src/servicelib/aiohttp/docker_utils.py
@@ -1,6 +1,9 @@
import logging
+from typing import Final
+import aiodocker
import aiohttp
+from aiohttp import web
from models_library.docker import DockerGenericTag
from pydantic import TypeAdapter, ValidationError
from settings_library.docker_registry import RegistrySettings
@@ -18,6 +21,8 @@
_logger = logging.getLogger(__name__)
+APP_DOCKER_ENGINE_KEY: Final = web.AppKey("APP_DOCKER_ENGINE_KEY", aiodocker.Docker)
+
async def retrieve_image_layer_information(
image: DockerGenericTag, registry_settings: RegistrySettings
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_constants.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_constants.py
index 79594cb18b89..fe38782e9ffe 100644
--- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_constants.py
+++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_constants.py
@@ -3,9 +3,9 @@
from pydantic import PositiveFloat
MINUTE: Final[PositiveFloat] = 60
-APP_LONG_RUNNING_TASKS_MANAGER_KEY: Final[
- str
-] = f"{__name__ }.long_running_tasks.tasks_manager"
-RQT_LONG_RUNNING_TASKS_CONTEXT_KEY: Final[
- str
-] = f"{__name__}.long_running_tasks.context"
+APP_LONG_RUNNING_MANAGER_KEY: Final[str] = (
+ f"{__name__ }.long_running_tasks.tasks_manager"
+)
+RQT_LONG_RUNNING_TASKS_CONTEXT_KEY: Final[str] = (
+ f"{__name__}.long_running_tasks.context"
+)
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_dependencies.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_dependencies.py
deleted file mode 100644
index b38004b32009..000000000000
--- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_dependencies.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from typing import Any
-
-from aiohttp import web
-
-from ...long_running_tasks._task import TasksManager
-from ._constants import (
- APP_LONG_RUNNING_TASKS_MANAGER_KEY,
- RQT_LONG_RUNNING_TASKS_CONTEXT_KEY,
-)
-
-
-def get_tasks_manager(app: web.Application) -> TasksManager:
- output: TasksManager = app[APP_LONG_RUNNING_TASKS_MANAGER_KEY]
- return output
-
-
-def get_task_context(request: web.Request) -> dict[str, Any]:
- output: dict[str, Any] = request[RQT_LONG_RUNNING_TASKS_CONTEXT_KEY]
- return output
-
-
-def create_task_name_from_request(request: web.Request) -> str:
- return f"{request.method} {request.rel_url}"
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_error_handlers.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_error_handlers.py
index 4534d7c951cb..8a679c70b7d8 100644
--- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_error_handlers.py
+++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_error_handlers.py
@@ -3,7 +3,7 @@
from aiohttp import web
from common_library.json_serialization import json_dumps
-from ...long_running_tasks._errors import (
+from ...long_running_tasks.errors import (
TaskCancelledError,
TaskNotCompletedError,
TaskNotFoundError,
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_manager.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_manager.py
new file mode 100644
index 000000000000..e77e8959ccfd
--- /dev/null
+++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_manager.py
@@ -0,0 +1,18 @@
+from aiohttp import web
+
+from ...long_running_tasks.manager import LongRunningManager
+from ...long_running_tasks.models import TaskContext
+from ._constants import APP_LONG_RUNNING_MANAGER_KEY
+from ._request import get_task_context
+
+
+class AiohttpLongRunningManager(LongRunningManager):
+
+ @staticmethod
+ def get_task_context(request: web.Request) -> TaskContext:
+ return get_task_context(request)
+
+
+def get_long_running_manager(app: web.Application) -> AiohttpLongRunningManager:
+ output: AiohttpLongRunningManager = app[APP_LONG_RUNNING_MANAGER_KEY]
+ return output
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_request.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_request.py
new file mode 100644
index 000000000000..0ccfd3c6a40a
--- /dev/null
+++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_request.py
@@ -0,0 +1,10 @@
+from typing import Any
+
+from aiohttp import web
+
+from ._constants import RQT_LONG_RUNNING_TASKS_CONTEXT_KEY
+
+
+def get_task_context(request: web.Request) -> dict[str, Any]:
+ output: dict[str, Any] = request[RQT_LONG_RUNNING_TASKS_CONTEXT_KEY]
+ return output
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_routes.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_routes.py
index 1906c0bc93f1..55879e34ef13 100644
--- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_routes.py
+++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_routes.py
@@ -1,18 +1,17 @@
-import logging
from typing import Any
from aiohttp import web
-from common_library.json_serialization import json_dumps
from pydantic import BaseModel
-from servicelib.aiohttp import status
-from ...long_running_tasks._errors import TaskNotCompletedError, TaskNotFoundError
-from ...long_running_tasks._models import TaskGet, TaskId, TaskStatus
-from ...long_running_tasks._task import TrackedTask
-from ..requests_validation import parse_request_path_parameters_as
-from ._dependencies import get_task_context, get_tasks_manager
+from ...aiohttp import status
+from ...long_running_tasks import lrt_api
+from ...long_running_tasks.models import TaskGet, TaskId
+from ..requests_validation import (
+ parse_request_path_parameters_as,
+)
+from ..rest_responses import create_data_response
+from ._manager import get_long_running_manager
-_logger = logging.getLogger(__name__)
routes = web.RouteTableDef()
@@ -22,79 +21,61 @@ class _PathParam(BaseModel):
@routes.get("", name="list_tasks")
async def list_tasks(request: web.Request) -> web.Response:
- tasks_manager = get_tasks_manager(request.app)
- task_context = get_task_context(request)
- tracked_tasks: list[TrackedTask] = tasks_manager.list_tasks(
- with_task_context=task_context
- )
-
- return web.json_response(
- {
- "data": [
- TaskGet(
- task_id=t.task_id,
- task_name=t.task_name,
- status_href=f"{request.app.router['get_task_status'].url_for(task_id=t.task_id)}",
- result_href=f"{request.app.router['get_task_result'].url_for(task_id=t.task_id)}",
- abort_href=f"{request.app.router['cancel_and_delete_task'].url_for(task_id=t.task_id)}",
- )
- for t in tracked_tasks
- ]
- },
- dumps=json_dumps,
+ long_running_manager = get_long_running_manager(request.app)
+ return create_data_response(
+ [
+ TaskGet(
+ task_id=t.task_id,
+ status_href=f"{request.app.router['get_task_status'].url_for(task_id=t.task_id)}",
+ result_href=f"{request.app.router['get_task_result'].url_for(task_id=t.task_id)}",
+ abort_href=f"{request.app.router['remove_task'].url_for(task_id=t.task_id)}",
+ )
+ for t in await lrt_api.list_tasks(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ long_running_manager.get_task_context(request),
+ )
+ ]
)
@routes.get("/{task_id}", name="get_task_status")
async def get_task_status(request: web.Request) -> web.Response:
path_params = parse_request_path_parameters_as(_PathParam, request)
- tasks_manager = get_tasks_manager(request.app)
- task_context = get_task_context(request)
+ long_running_manager = get_long_running_manager(request.app)
- task_status: TaskStatus = tasks_manager.get_task_status(
- task_id=path_params.task_id, with_task_context=task_context
+ task_status = await lrt_api.get_task_status(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ long_running_manager.get_task_context(request),
+ path_params.task_id,
)
- return web.json_response({"data": task_status}, dumps=json_dumps)
+ return create_data_response(task_status)
@routes.get("/{task_id}/result", name="get_task_result")
async def get_task_result(request: web.Request) -> web.Response | Any:
path_params = parse_request_path_parameters_as(_PathParam, request)
- tasks_manager = get_tasks_manager(request.app)
- task_context = get_task_context(request)
+ long_running_manager = get_long_running_manager(request.app)
    # NOTE: this might raise an exception that will be caught by the _error_handlers
- try:
- task_result = tasks_manager.get_task_result(
- task_id=path_params.task_id, with_task_context=task_context
- )
- # NOTE: this will fail if the task failed for some reason....
- await tasks_manager.remove_task(
- path_params.task_id, with_task_context=task_context, reraise_errors=False
- )
- return task_result
- except (TaskNotFoundError, TaskNotCompletedError):
- raise
- except Exception:
- # the task shall be removed in this case
- await tasks_manager.remove_task(
- path_params.task_id, with_task_context=task_context, reraise_errors=False
- )
- raise
-
-
-@routes.delete("/{task_id}", name="cancel_and_delete_task")
-async def cancel_and_delete_task(request: web.Request) -> web.Response:
- path_params = parse_request_path_parameters_as(_PathParam, request)
- tasks_manager = get_tasks_manager(request.app)
- task_context = get_task_context(request)
- await tasks_manager.remove_task(path_params.task_id, with_task_context=task_context)
- return web.json_response(status=status.HTTP_204_NO_CONTENT)
+ return await lrt_api.get_task_result(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ long_running_manager.get_task_context(request),
+ path_params.task_id,
+ )
-__all__: tuple[str, ...] = (
- "get_tasks_manager",
- "TaskId",
- "TaskGet",
- "TaskStatus",
-)
+@routes.delete("/{task_id}", name="remove_task")
+async def remove_task(request: web.Request) -> web.Response:
+ path_params = parse_request_path_parameters_as(_PathParam, request)
+ long_running_manager = get_long_running_manager(request.app)
+
+ await lrt_api.remove_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ long_running_manager.get_task_context(request),
+ path_params.task_id,
+ )
+ return web.json_response(status=status.HTTP_204_NO_CONTENT)
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_server.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_server.py
index d0c96699462d..09c50be9685e 100644
--- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_server.py
+++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_server.py
@@ -1,39 +1,47 @@
import asyncio
-import logging
+import datetime
from collections.abc import AsyncGenerator, Callable
from functools import wraps
from typing import Any
from aiohttp import web
+from aiohttp.web import HTTPException
from common_library.json_serialization import json_dumps
-from pydantic import AnyHttpUrl, PositiveFloat, TypeAdapter
+from pydantic import AnyHttpUrl, TypeAdapter
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
from ...aiohttp import status
-from ...long_running_tasks._models import TaskGet
-from ...long_running_tasks._task import (
+from ...long_running_tasks import lrt_api
+from ...long_running_tasks._serialization import (
+ BaseObjectSerializer,
+ register_custom_serialization,
+)
+from ...long_running_tasks.constants import (
+ DEFAULT_STALE_TASK_CHECK_INTERVAL,
+ DEFAULT_STALE_TASK_DETECT_TIMEOUT,
+)
+from ...long_running_tasks.models import (
+ LRTNamespace,
+ RegisteredTaskName,
TaskContext,
- TaskProtocol,
- TasksManager,
- start_task,
+ TaskGet,
)
from ..typing_extension import Handler
from . import _routes
from ._constants import (
- APP_LONG_RUNNING_TASKS_MANAGER_KEY,
- MINUTE,
+ APP_LONG_RUNNING_MANAGER_KEY,
RQT_LONG_RUNNING_TASKS_CONTEXT_KEY,
)
-from ._dependencies import create_task_name_from_request, get_tasks_manager
from ._error_handlers import base_long_running_error_handler
+from ._manager import AiohttpLongRunningManager, get_long_running_manager
-_logger = logging.getLogger(__name__)
-
-def no_ops_decorator(handler: Handler):
+def _no_ops_decorator(handler: Handler):
return handler
-def no_task_context_decorator(handler: Handler):
+def _no_task_context_decorator(handler: Handler):
@wraps(handler)
async def _wrap(request: web.Request):
request[RQT_LONG_RUNNING_TASKS_CONTEXT_KEY] = {}
@@ -42,22 +50,27 @@ async def _wrap(request: web.Request):
return _wrap
+def _create_task_name_from_request(request: web.Request) -> str:
+ return f"{request.method} {request.rel_url}"
+
+
async def start_long_running_task(
# NOTE: positional argument are suffixed with "_" to avoid name conflicts with "task_kwargs" keys
request_: web.Request,
- task_: TaskProtocol,
+ registerd_task_name: RegisteredTaskName,
*,
fire_and_forget: bool = False,
task_context: TaskContext,
**task_kwargs: Any,
) -> web.Response:
- task_manager = get_tasks_manager(request_.app)
- task_name = create_task_name_from_request(request_)
+ long_running_manager = get_long_running_manager(request_.app)
+ task_name = _create_task_name_from_request(request_)
task_id = None
try:
- task_id = start_task(
- task_manager,
- task_,
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ registerd_task_name,
fire_and_forget=fire_and_forget,
task_context=task_context,
task_name=task_name,
@@ -74,11 +87,10 @@ async def start_long_running_task(
f"http://{ip_addr}:{port}{request_.app.router['get_task_result'].url_for(task_id=task_id)}" # NOSONAR
)
abort_url = TypeAdapter(AnyHttpUrl).validate_python(
- f"http://{ip_addr}:{port}{request_.app.router['cancel_and_delete_task'].url_for(task_id=task_id)}" # NOSONAR
+ f"http://{ip_addr}:{port}{request_.app.router['remove_task'].url_for(task_id=task_id)}" # NOSONAR
)
task_get = TaskGet(
task_id=task_id,
- task_name=task_name,
status_href=f"{status_url}",
result_href=f"{result_url}",
abort_href=f"{abort_url}",
@@ -89,10 +101,14 @@ async def start_long_running_task(
dumps=json_dumps,
)
except asyncio.CancelledError:
- # cancel the task, the client has disconnected
+ # remove the task, the client was disconnected
if task_id:
- task_manager = get_tasks_manager(request_.app)
- await task_manager.cancel_task(task_id, with_task_context=None)
+ await lrt_api.remove_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ task_context,
+ task_id,
+ )
raise
@@ -115,41 +131,69 @@ def _wrap_and_add_routes(
)
+class AiohttpHTTPExceptionSerializer(BaseObjectSerializer[HTTPException]):
+ @classmethod
+ def get_init_kwargs_from_object(cls, obj: HTTPException) -> dict:
+ return {
+ "status_code": obj.status_code,
+ "reason": obj.reason,
+ "text": obj.text,
+ "headers": dict(obj.headers) if obj.headers else None,
+ }
+
+ @classmethod
+ def prepare_object_init_kwargs(cls, data: dict) -> dict:
+ data.pop("status_code")
+ return data
+
+
def setup(
app: web.Application,
*,
router_prefix: str,
- handler_check_decorator: Callable = no_ops_decorator,
- task_request_context_decorator: Callable = no_task_context_decorator,
- stale_task_check_interval_s: PositiveFloat = 1 * MINUTE,
- stale_task_detect_timeout_s: PositiveFloat = 5 * MINUTE,
+ redis_settings: RedisSettings,
+ rabbit_settings: RabbitSettings,
+ lrt_namespace: LRTNamespace,
+ stale_task_check_interval: datetime.timedelta = DEFAULT_STALE_TASK_CHECK_INTERVAL,
+ stale_task_detect_timeout: datetime.timedelta = DEFAULT_STALE_TASK_DETECT_TIMEOUT,
+ handler_check_decorator: Callable = _no_ops_decorator,
+ task_request_context_decorator: Callable = _no_task_context_decorator,
) -> None:
"""
- `router_prefix` APIs are mounted on `/...`, this
will change them to be mounted as `{router_prefix}/...`
- - `stale_task_check_interval_s` interval at which the
+ - `redis_settings` settings for Redis connection
+ - `rabbit_settings` settings for RabbitMQ connection
+ - `lrt_namespace` namespace for the long-running tasks
+ - `stale_task_check_interval` interval at which the
TaskManager checks for tasks which are no longer being
actively monitored by a client
- - `stale_task_detect_timeout_s` interval after which a
- task is considered stale
+    - `stale_task_detect_timeout` interval after which a task is considered stale
"""
async def on_cleanup_ctx(app: web.Application) -> AsyncGenerator[None, None]:
- # add components to state
- app[
- APP_LONG_RUNNING_TASKS_MANAGER_KEY
- ] = long_running_task_manager = TasksManager(
- stale_task_check_interval_s=stale_task_check_interval_s,
- stale_task_detect_timeout_s=stale_task_detect_timeout_s,
- )
+ register_custom_serialization(HTTPException, AiohttpHTTPExceptionSerializer)
# add error handlers
app.middlewares.append(base_long_running_error_handler)
+ # add components to state
+ app[APP_LONG_RUNNING_MANAGER_KEY] = long_running_manager = (
+ AiohttpLongRunningManager(
+ stale_task_check_interval=stale_task_check_interval,
+ stale_task_detect_timeout=stale_task_detect_timeout,
+ redis_settings=redis_settings,
+ rabbit_settings=rabbit_settings,
+ lrt_namespace=lrt_namespace,
+ )
+ )
+
+ await long_running_manager.setup()
+
yield
# cleanup
- await long_running_task_manager.close()
+ await long_running_manager.teardown()
# add routing (done at setup-time)
_wrap_and_add_routes(
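
Note: with this change the aiohttp long-running-tasks plugin needs Redis and RabbitMQ settings plus a namespace at setup time. A hedged sketch of the new call, with the prefix and namespace hypothetical and settings assumed to come from the environment:

    from aiohttp import web
    from servicelib.aiohttp.long_running_tasks.server import setup as setup_long_running_tasks
    from settings_library.rabbit import RabbitSettings
    from settings_library.redis import RedisSettings

    app = web.Application()
    setup_long_running_tasks(
        app,
        router_prefix="/v0/tasks",  # hypothetical prefix
        redis_settings=RedisSettings.create_from_envs(),
        rabbit_settings=RabbitSettings.create_from_envs(),
        lrt_namespace="my-service",  # hypothetical namespace
    )
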
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/client.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/client.py
index e29fabc87fea..ed5675a457d4 100644
--- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/client.py
+++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/client.py
@@ -1,9 +1,11 @@
import asyncio
import logging
from collections.abc import AsyncGenerator
-from typing import Any
+from datetime import timedelta
+from typing import Any, Final
from aiohttp import ClientConnectionError, ClientSession
+from pydantic import PositiveFloat
from tenacity import TryAgain, retry
from tenacity.asyncio import AsyncRetrying
from tenacity.before_sleep import before_sleep_log
@@ -12,14 +14,21 @@
from tenacity.wait import wait_random_exponential
from yarl import URL
-from ...long_running_tasks._constants import DEFAULT_POLL_INTERVAL_S, HOUR
-from ...long_running_tasks._models import LRTask, RequestBody
+from ...long_running_tasks.constants import DEFAULT_POLL_INTERVAL_S
+from ...long_running_tasks.models import (
+ LRTask,
+ RequestBody,
+ TaskGet,
+ TaskId,
+ TaskProgress,
+ TaskStatus,
+)
from ...rest_responses import unwrap_envelope_if_required
from .. import status
-from .server import TaskGet, TaskId, TaskProgress, TaskStatus
_logger = logging.getLogger(__name__)
+_DEFAULT_CLIENT_TIMEOUT_S: Final[PositiveFloat] = timedelta(hours=1).total_seconds()
_DEFAULT_AIOHTTP_RETRY_POLICY: dict[str, Any] = {
"retry": retry_if_exception_type(ClientConnectionError),
@@ -43,7 +52,7 @@ async def _wait_for_completion(
session: ClientSession,
task_id: TaskId,
status_url: URL,
- client_timeout: int,
+ client_timeout: PositiveFloat,
) -> AsyncGenerator[TaskProgress, None]:
try:
async for attempt in AsyncRetrying(
@@ -92,7 +101,7 @@ async def long_running_task_request(
session: ClientSession,
url: URL,
json: RequestBody | None = None,
- client_timeout: int = 1 * HOUR,
+ client_timeout: PositiveFloat = _DEFAULT_CLIENT_TIMEOUT_S,
) -> AsyncGenerator[LRTask, None]:
"""Will use the passed `ClientSession` to call an oSparc long
running task `url` passing `json` as request body.
@@ -123,6 +132,3 @@ async def long_running_task_request(
if task:
await _abort_task(session, URL(task.abort_href))
raise
-
-
-__all__: tuple[str, ...] = ("LRTask",)
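
Note: `client_timeout` is now a float number of seconds (defaulting to one hour) rather than an `int`. A sketch of consuming the generator, assuming the usual `LRTask.done()`/`result()` helpers; the endpoint and payload are hypothetical:

    from aiohttp import ClientSession
    from yarl import URL

    from servicelib.aiohttp.long_running_tasks.client import long_running_task_request


    async def run_export(session: ClientSession) -> object:
        result = None
        async for lr_task in long_running_task_request(
            session,
            URL("http://catalog:8000/v0/exports"),  # hypothetical endpoint
            json={"project_id": "123"},             # hypothetical payload
            client_timeout=30 * 60,                 # seconds
        ):
            if lr_task.done():
                result = await lr_task.result()
        return result
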
diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/server.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/server.py
index 55d1295c1977..2c51e7cc43dc 100644
--- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/server.py
+++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/server.py
@@ -6,39 +6,21 @@
running task.
"""
-from ...long_running_tasks._errors import TaskAlreadyRunningError, TaskCancelledError
-from ...long_running_tasks._models import ProgressMessage, ProgressPercent
-from ...long_running_tasks._task import (
- TaskId,
- TaskProgress,
- TaskProtocol,
- TasksManager,
- TaskStatus,
-)
-from ._dependencies import (
- create_task_name_from_request,
- get_task_context,
- get_tasks_manager,
-)
-from ._routes import TaskGet
+from typing import Final
+
+from aiohttp import web
+
+from ._manager import get_long_running_manager
from ._server import setup, start_long_running_task
+APP_LONG_RUNNING_TASKS_KEY: Final = web.AppKey(
+ "APP_LONG_RUNNING_TASKS_KEY", dict[str, object]
+)
+
__all__: tuple[str, ...] = (
- "create_task_name_from_request",
- "get_task_context",
- "get_tasks_manager",
- "ProgressMessage",
- "ProgressPercent",
+ "get_long_running_manager",
"setup",
"start_long_running_task",
- "TaskAlreadyRunningError",
- "TaskCancelledError",
- "TaskId",
- "TaskGet",
- "TasksManager",
- "TaskProgress",
- "TaskProtocol",
- "TaskStatus",
)
# nopycln: file
diff --git a/packages/service-library/src/servicelib/aiohttp/monitor_slow_callbacks.py b/packages/service-library/src/servicelib/aiohttp/monitor_slow_callbacks.py
index 24d7328d351d..0723755c4ba6 100644
--- a/packages/service-library/src/servicelib/aiohttp/monitor_slow_callbacks.py
+++ b/packages/service-library/src/servicelib/aiohttp/monitor_slow_callbacks.py
@@ -1,11 +1,17 @@
import asyncio.events
import sys
import time
+from typing import Final
+from aiohttp import web
from pyinstrument import Profiler
from .incidents import LimitedOrderedStack, SlowCallback
+APP_SLOW_CALLBACKS_MONITOR_KEY: Final = web.AppKey(
+ "APP_SLOW_CALLBACKS_MONITOR_KEY", LimitedOrderedStack[SlowCallback]
+)
+
def enable(
slow_duration_secs: float, incidents: LimitedOrderedStack[SlowCallback]
diff --git a/packages/service-library/src/servicelib/aiohttp/monitoring.py b/packages/service-library/src/servicelib/aiohttp/monitoring.py
index 84472c7e2f34..b929860f2af1 100644
--- a/packages/service-library/src/servicelib/aiohttp/monitoring.py
+++ b/packages/service-library/src/servicelib/aiohttp/monitoring.py
@@ -1,6 +1,5 @@
"""Enables monitoring of some quantities needed for diagnostics"""
-import asyncio
import logging
from collections.abc import Awaitable, Callable
from time import perf_counter
@@ -26,9 +25,10 @@
)
from .typing_extension import Handler
-log = logging.getLogger(__name__)
+_logger = logging.getLogger(__name__)
_PROMETHEUS_METRICS: Final[str] = f"{__name__}.prometheus_metrics" # noqa: N816
+APP_MONITORING_NAMESPACE_KEY: Final = web.AppKey("APP_MONITORING_NAMESPACE_KEY", str)
def get_collector_registry(app: web.Application) -> CollectorRegistry:
@@ -60,17 +60,15 @@ def middleware_factory(
async def middleware_handler(request: web.Request, handler: Handler):
# See https://prometheus.io/docs/concepts/metric_types
- log_exception: BaseException | None = None
- resp: web.StreamResponse = web.HTTPInternalServerError(
- reason="Unexpected exception"
- )
+ response: web.StreamResponse = web.HTTPInternalServerError()
+
canonical_endpoint = request.path
if request.match_info.route.resource:
canonical_endpoint = request.match_info.route.resource.canonical
start_time = perf_counter()
try:
if enter_middleware_cb:
- with log_catch(logger=log, reraise=False):
+ with log_catch(logger=_logger, reraise=False):
await enter_middleware_cb(request)
metrics = request.app[_PROMETHEUS_METRICS]
@@ -86,29 +84,19 @@ async def middleware_handler(request: web.Request, handler: Handler):
endpoint=canonical_endpoint,
user_agent=user_agent,
):
- resp = await handler(request)
+ response = await handler(request)
assert isinstance( # nosec
- resp, web.StreamResponse
+ response, web.StreamResponse
), "Forgot envelope middleware?"
except web.HTTPServerError as exc:
- resp = exc
- log_exception = exc
- raise resp from exc
+ response = exc
+ raise
+
except web.HTTPException as exc:
- resp = exc
- log_exception = None
- raise resp from exc
- except asyncio.CancelledError as exc:
- resp = web.HTTPInternalServerError(text=f"{exc}")
- log_exception = exc
- raise resp from exc
- except Exception as exc: # pylint: disable=broad-except
- resp = web.HTTPInternalServerError(text=f"{exc}")
- resp.__cause__ = exc
- log_exception = exc
- raise resp from exc
+ response = exc
+ raise
finally:
response_latency_seconds = perf_counter() - start_time
@@ -118,29 +106,15 @@ async def middleware_handler(request: web.Request, handler: Handler):
method=request.method,
endpoint=canonical_endpoint,
user_agent=user_agent,
- http_status=resp.status,
+ http_status=response.status,
response_latency_seconds=response_latency_seconds,
)
if exit_middleware_cb:
- with log_catch(logger=log, reraise=False):
- await exit_middleware_cb(request, resp)
-
- if log_exception:
- log.error(
- 'Unexpected server error "%s" from access: %s "%s %s" done '
- "in %3.2f secs. Responding with status %s",
- type(log_exception),
- request.remote,
- request.method,
- request.path,
- response_latency_seconds,
- resp.status,
- exc_info=log_exception,
- stack_info=True,
- )
-
- return resp
+ with log_catch(logger=_logger, reraise=False):
+ await exit_middleware_cb(request, response)
+
+ return response
setattr( # noqa: B010
middleware_handler, "__middleware_name__", f"{__name__}.monitor_{app_name}"
diff --git a/packages/service-library/src/servicelib/aiohttp/observer.py b/packages/service-library/src/servicelib/aiohttp/observer.py
index e0dfd6a579e1..7ff3ca4826e4 100644
--- a/packages/service-library/src/servicelib/aiohttp/observer.py
+++ b/packages/service-library/src/servicelib/aiohttp/observer.py
@@ -6,21 +6,26 @@
import logging
from collections import defaultdict
from collections.abc import Callable
+from typing import Final
from aiohttp import web
+from ..aiohttp.application_setup import ensure_single_setup
from ..utils import logged_gather
log = logging.getLogger(__name__)
_APP_OBSERVER_EVENTS_REGISTRY_KEY = "{__name__}.event_registry"
+APP_FIRE_AND_FORGET_TASKS_KEY: Final = web.AppKey(
+ "APP_FIRE_AND_FORGET_TASKS_KEY", set[object]
+)
-class ObserverRegistryNotFoundError(RuntimeError):
- ...
+class ObserverRegistryNotFoundError(RuntimeError): ...
+@ensure_single_setup(__name__, logger=log)
def setup_observer_registry(app: web.Application):
# only once
app.setdefault(_APP_OBSERVER_EVENTS_REGISTRY_KEY, defaultdict(list))
diff --git a/packages/service-library/src/servicelib/aiohttp/profiler_middleware.py b/packages/service-library/src/servicelib/aiohttp/profiler_middleware.py
index eab7d1fc5980..07d3c7127297 100644
--- a/packages/service-library/src/servicelib/aiohttp/profiler_middleware.py
+++ b/packages/service-library/src/servicelib/aiohttp/profiler_middleware.py
@@ -1,9 +1,6 @@
from aiohttp.web import HTTPInternalServerError, Request, StreamResponse, middleware
-from servicelib.mimetype_constants import (
- MIMETYPE_APPLICATION_JSON,
- MIMETYPE_APPLICATION_ND_JSON,
-)
+from ..mimetype_constants import MIMETYPE_APPLICATION_JSON, MIMETYPE_APPLICATION_ND_JSON
from ..utils_profiling_middleware import _is_profiling, _profiler, append_profile
@@ -13,7 +10,7 @@ async def profiling_middleware(request: Request, handler):
try:
if _profiler.is_running or (_profiler.last_session is not None):
raise HTTPInternalServerError(
- reason="Profiler is already running. Only a single request can be profiled at any given time.",
+ text="Profiler is already running. Only a single request can be profiled at any given time.",
headers={},
)
_profiler.reset()
@@ -24,7 +21,7 @@ async def profiling_middleware(request: Request, handler):
if response.content_type != MIMETYPE_APPLICATION_JSON:
raise HTTPInternalServerError(
- reason=f"Profiling middleware is not compatible with {response.content_type=}",
+ text=f"Profiling middleware is not compatible with {response.content_type=}",
headers={},
)
diff --git a/packages/service-library/src/servicelib/request_keys.py b/packages/service-library/src/servicelib/aiohttp/request_keys.py
similarity index 62%
rename from packages/service-library/src/servicelib/request_keys.py
rename to packages/service-library/src/servicelib/aiohttp/request_keys.py
index 8322e812557f..3d288b6960ec 100644
--- a/packages/service-library/src/servicelib/request_keys.py
+++ b/packages/service-library/src/servicelib/aiohttp/request_keys.py
@@ -1,7 +1,5 @@
-""" Storage keys in requests
+"""Storage keys in requests"""
-"""
from typing import Final
-# RQT=request
RQT_USERID_KEY: Final[str] = f"{__name__}.userid"
diff --git a/packages/service-library/src/servicelib/aiohttp/requests_validation.py b/packages/service-library/src/servicelib/aiohttp/requests_validation.py
index d555e535fe74..d5717fbdd4d6 100644
--- a/packages/service-library/src/servicelib/aiohttp/requests_validation.py
+++ b/packages/service-library/src/servicelib/aiohttp/requests_validation.py
@@ -10,10 +10,11 @@
import json.decoder
from collections.abc import Iterator
from contextlib import contextmanager
-from typing import TypeVar
+from typing import Final, TypeVar
from aiohttp import web
-from common_library.json_serialization import json_dumps
+from common_library.user_messages import user_message
+from models_library.rest_error import EnvelopedError
from pydantic import BaseModel, TypeAdapter, ValidationError
from ..mimetype_constants import MIMETYPE_APPLICATION_JSON
@@ -22,17 +23,20 @@
ModelClass = TypeVar("ModelClass", bound=BaseModel)
ModelOrListOrDictType = TypeVar("ModelOrListOrDictType", bound=BaseModel | list | dict)
+APP_JSON_SCHEMA_SPECS_KEY: Final = web.AppKey(
+ "APP_JSON_SCHEMA_SPECS_KEY", dict[str, object]
+)
+
@contextmanager
def handle_validation_as_http_error(
- *, error_msg_template: str, resource_name: str, use_error_v1: bool
+ *, error_msg_template: str, resource_name: str
) -> Iterator[None]:
"""Context manager to handle ValidationError and reraise them as HTTPUnprocessableEntity error
Arguments:
error_msg_template -- _description_
resource_name --
- use_error_v1 -- If True, it uses new error response
Raises:
web.HTTPUnprocessableEntity: (422) raised from a ValidationError
@@ -43,53 +47,40 @@ def handle_validation_as_http_error(
yield
except ValidationError as err:
- details = [
+ # SEE https://github.com/ITISFoundation/osparc-simcore/issues/443
+ _details = [
{
- "loc": ".".join(map(str, e["loc"])),
+ "loc": ".".join(map(str, e["loc"])), # e.g. "body.name"
"msg": e["msg"],
"type": e["type"],
}
for e in err.errors()
]
- reason_msg = error_msg_template.format(
- failed=", ".join(d["loc"] for d in details)
- )
-
- if use_error_v1:
- # NOTE: keeps backwards compatibility until ligher error response is implemented in the entire API
- # Implements servicelib.aiohttp.rest_responses.ErrorItemType
- errors = [
- {
- "code": e["type"],
- "message": e["msg"],
- "resource": resource_name,
- "field": e["loc"],
- }
- for e in details
- ]
- error_str = json_dumps(
- {
- "error": {
- "status": status.HTTP_422_UNPROCESSABLE_ENTITY,
- "errors": errors,
- }
- }
- )
- else:
- # NEW proposed error for https://github.com/ITISFoundation/osparc-simcore/issues/443
- error_str = json_dumps(
- {
- "error": {
- "msg": reason_msg,
- "resource": resource_name, # optional
- "details": details, # optional
- }
+
+ errors_details = [
+ {
+ "code": e["type"],
+ "message": e["msg"],
+ "resource": resource_name,
+ "field": e["loc"],
+ }
+ for e in _details
+ ]
+
+ error_json_str = EnvelopedError.model_validate(
+ {
+ "error": {
+ "message": error_msg_template.format(
+ failed=", ".join(e["field"] for e in errors_details)
+ ),
+ "status": status.HTTP_422_UNPROCESSABLE_ENTITY,
+ "errors": errors_details,
}
- )
+ }
+ ).model_dump_json(exclude_unset=True, exclude_none=True)
raise web.HTTPUnprocessableEntity( # 422
- reason=reason_msg,
- text=error_str,
+ text=error_json_str,
content_type=MIMETYPE_APPLICATION_JSON,
) from err
@@ -105,15 +96,10 @@ def handle_validation_as_http_error(
def parse_request_path_parameters_as(
parameters_schema_cls: type[ModelClass],
request: web.Request,
- *,
- use_enveloped_error_v1: bool = True,
) -> ModelClass:
"""Parses path parameters from 'request' and validates against 'parameters_schema'
- Keyword Arguments:
- use_enveloped_error_v1 -- new enveloped error model (default: {True})
-
Raises:
web.HTTPUnprocessableEntity: (422) if validation of parameters fail
@@ -122,9 +108,10 @@ def parse_request_path_parameters_as(
"""
with handle_validation_as_http_error(
- error_msg_template="Invalid parameter/s '{failed}' in request path",
+ error_msg_template=user_message(
+ "Invalid parameter/s '{failed}' in request path"
+ ),
resource_name=request.rel_url.path,
- use_error_v1=use_enveloped_error_v1,
):
data = dict(request.match_info)
return parameters_schema_cls.model_validate(data)
@@ -133,15 +120,10 @@ def parse_request_path_parameters_as(
def parse_request_query_parameters_as(
parameters_schema_cls: type[ModelClass],
request: web.Request,
- *,
- use_enveloped_error_v1: bool = True,
) -> ModelClass:
"""Parses query parameters from 'request' and validates against 'parameters_schema'
- Keyword Arguments:
- use_enveloped_error_v1 -- new enveloped error model (default: {True})
-
Raises:
web.HTTPUnprocessableEntity: (422) if validation of parameters fail
@@ -150,9 +132,10 @@ def parse_request_query_parameters_as(
"""
with handle_validation_as_http_error(
- error_msg_template="Invalid parameter/s '{failed}' in request query",
+ error_msg_template=user_message(
+ "Invalid parameter/s '{failed}' in request query"
+ ),
resource_name=request.rel_url.path,
- use_error_v1=use_enveloped_error_v1,
):
# NOTE: Currently, this does not take into consideration cases where there are multiple
# query parameters with the same key. However, we are not using such cases anywhere at the moment.
@@ -167,13 +150,12 @@ def parse_request_query_parameters_as(
def parse_request_headers_as(
parameters_schema_cls: type[ModelClass],
request: web.Request,
- *,
- use_enveloped_error_v1: bool = True,
) -> ModelClass:
with handle_validation_as_http_error(
- error_msg_template="Invalid parameter/s '{failed}' in request headers",
+ error_msg_template=user_message(
+ "Invalid parameter/s '{failed}' in request headers"
+ ),
resource_name=request.rel_url.path,
- use_error_v1=use_enveloped_error_v1,
):
data = dict(request.headers)
return parameters_schema_cls.model_validate(data)
@@ -182,8 +164,6 @@ def parse_request_headers_as(
async def parse_request_body_as(
model_schema_cls: type[ModelOrListOrDictType],
request: web.Request,
- *,
- use_enveloped_error_v1: bool = True,
) -> ModelOrListOrDictType:
"""Parses and validates request body against schema
@@ -198,9 +178,8 @@ async def parse_request_body_as(
Validated model of request body
"""
with handle_validation_as_http_error(
- error_msg_template="Invalid field/s '{failed}' in request body",
+ error_msg_template=user_message("Invalid field/s '{failed}' in request body"),
resource_name=request.rel_url.path,
- use_error_v1=use_enveloped_error_v1,
):
if not request.can_read_body:
# requests w/o body e.g. when model-schema is fully optional
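For orientation, here is a minimal, hypothetical sketch (not part of this PR) of how an aiohttp handler would use the simplified helper now that the `use_enveloped_error_v1` flag is gone; the `ProjectPathParams` model is an assumption:

```python
# Hypothetical usage sketch; only parse_request_path_parameters_as comes from this diff
from aiohttp import web
from pydantic import BaseModel
from servicelib.aiohttp.requests_validation import parse_request_path_parameters_as


class ProjectPathParams(BaseModel):  # assumed model, for illustration only
    project_id: str


async def get_project(request: web.Request) -> web.Response:
    # on validation failure this now raises a 422 whose body is an EnvelopedError
    path_params = parse_request_path_parameters_as(ProjectPathParams, request)
    return web.json_response({"data": {"project_id": path_params.project_id}})
```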
diff --git a/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py b/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py
index 064355fdbd2e..0c8a6da57cfe 100644
--- a/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py
+++ b/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py
@@ -5,32 +5,38 @@
import logging
from collections.abc import Awaitable, Callable
-from typing import Any
+from typing import Any, Final
from aiohttp import web
+from aiohttp.web_exceptions import HTTPError
from aiohttp.web_request import Request
from aiohttp.web_response import StreamResponse
-from common_library.error_codes import create_error_code
+from common_library.error_codes import ErrorCodeStr, create_error_code
from common_library.json_serialization import json_dumps, json_loads
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
+from common_library.user_messages import user_message
+from models_library.basic_types import IDStr
from models_library.rest_error import ErrorGet, ErrorItemType, LogMessageType
-from ..logging_errors import create_troubleshotting_log_kwargs
from ..mimetype_constants import MIMETYPE_APPLICATION_JSON
-from ..rest_responses import is_enveloped_from_map, is_enveloped_from_text
-from ..utils import is_production_environ
+from ..rest_constants import RESPONSE_MODEL_POLICY
+from ..rest_responses import is_enveloped_from_text
+from ..status_codes_utils import get_code_description, is_5xx_server_error
+from . import status
from .rest_responses import (
create_data_response,
create_http_error,
safe_status_message,
wrap_as_envelope,
)
-from .rest_utils import EnvelopeFactory
from .typing_extension import Handler, Middleware
+from .web_exceptions_extension import get_http_error_class_or_none
DEFAULT_API_VERSION = "v0"
-_FMSG_INTERNAL_ERROR_USER_FRIENDLY = (
+_FMSG_INTERNAL_ERROR_USER_FRIENDLY = user_message(
"We apologize for the inconvenience. "
- "The issue has been recorded, please report it if it persists."
+ "The issue has been recorded, please report it if it persists.",
+ _version=1,
)
@@ -42,110 +48,198 @@ def is_api_request(request: web.Request, api_version: str) -> bool:
return bool(request.path.startswith(base_path))
-def error_middleware_factory( # noqa: C901
- api_version: str,
-) -> Middleware:
- _is_prod: bool = is_production_environ()
-
- def _process_and_raise_unexpected_error(request: web.BaseRequest, err: Exception):
- error_code = create_error_code(err)
- error_context: dict[str, Any] = {
- "request.remote": f"{request.remote}",
- "request.method": f"{request.method}",
- "request.path": f"{request.path}",
- }
-
- user_error_msg = _FMSG_INTERNAL_ERROR_USER_FRIENDLY
- http_error = create_http_error(
- err,
+def _create_error_context(
+ request: web.BaseRequest, exception: Exception
+) -> tuple[ErrorCodeStr, dict[str, Any]]:
+ """Create error code and context for logging purposes.
+
+ Returns:
+ Tuple of (error_code, error_context)
+ """
+ error_code = create_error_code(exception)
+ error_context: dict[str, Any] = {
+ "request.remote": f"{request.remote}",
+ "request.method": f"{request.method}",
+ "request.path": f"{request.path}",
+ }
+ return error_code, error_context
+
+
+def _log_5xx_server_error(
+ request: web.BaseRequest, exception: Exception, user_error_msg: str
+) -> ErrorCodeStr:
+ """Log 5XX server errors with error code and context."""
+ error_code, error_context = _create_error_context(request, exception)
+
+ _logger.exception(
+ **create_troubleshooting_log_kwargs(
user_error_msg,
- web.HTTPInternalServerError,
- skip_internal_error_details=_is_prod,
+ error=exception,
+ error_context=error_context,
error_code=error_code,
)
- _logger.exception(
- **create_troubleshotting_log_kwargs(
- user_error_msg,
- error=err,
- error_context=error_context,
- error_code=error_code,
+ )
+ return error_code
+
+
+def _handle_unexpected_exception_as_500(
+ request: web.BaseRequest, exception: Exception
+) -> web.HTTPInternalServerError:
+ """Process unexpected exceptions and return them as HTTP errors with proper formatting.
+
+    IMPORTANT: this function cannot throw exceptions, as it is called as the last-resort error handler in the middleware.
+ """
+ error_code, error_context = _create_error_context(request, exception)
+ user_error_msg = _FMSG_INTERNAL_ERROR_USER_FRIENDLY
+
+ error_context["http_error"] = http_error = create_http_error(
+ exception,
+ user_error_msg,
+ web.HTTPInternalServerError,
+ error_code=error_code,
+ )
+
+ _log_5xx_server_error(request, exception, user_error_msg)
+
+ return http_error
+
+
+def handle_aiohttp_web_http_error(
+ request: web.BaseRequest, exception: web.HTTPError
+) -> web.HTTPError:
+ """Handle standard HTTP errors by ensuring they're properly formatted.
+
+ NOTE: this needs further refactoring to avoid code duplication
+ """
+ assert request # nosec
+ assert not exception.empty_body, "HTTPError should not have an empty body" # nosec
+
+ exception.content_type = MIMETYPE_APPLICATION_JSON
+ if exception.reason:
+ exception.set_status(
+ exception.status, reason=safe_status_message(message=exception.reason)
+ )
+
+ if not exception.text or not is_enveloped_from_text(exception.text):
+ # NOTE: aiohttp.HTTPException creates `text = f"{self.status}: {self.reason}"`
+ user_error_msg = exception.text or "Unexpected error"
+
+ error_code: IDStr | None = None
+ if is_5xx_server_error(exception.status):
+ error_code = IDStr(
+ _log_5xx_server_error(request, exception, user_error_msg)
+ )
+
+ error_model = ErrorGet(
+ errors=[
+ ErrorItemType(
+ code=exception.__class__.__name__,
+ message=user_error_msg,
+ resource=None,
+ field=None,
+ ),
+ ],
+ status=exception.status,
+ logs=[
+ LogMessageType(message=user_error_msg, level="ERROR"),
+ ],
+ message=user_error_msg,
+ support_id=error_code,
+ )
+ exception.text = json_dumps(
+ wrap_as_envelope(
+ error=error_model.model_dump(mode="json", **RESPONSE_MODEL_POLICY)
)
)
- raise http_error
+ return exception
+
+
+def _handle_aiohttp_web_http_successful(
+ request: web.Request, exception: web.HTTPSuccessful
+) -> web.HTTPSuccessful:
+ """Handle successful HTTP responses, ensuring they're properly enveloped."""
+ assert request # nosec
+
+ exception.content_type = MIMETYPE_APPLICATION_JSON
+ if exception.reason:
+ exception.set_status(
+ exception.status, reason=safe_status_message(message=exception.reason)
+ )
+
+ if exception.text and not is_enveloped_from_text(exception.text):
+ # Ensures that the response is enveloped
+ data = json_loads(exception.text)
+ exception.text = json_dumps({"data": data})
+
+ return exception
+
+
+def _handle_exception_as_http_error(
+ request: web.Request,
+ exception: NotImplementedError | TimeoutError,
+ status_code: int,
+) -> HTTPError:
+ """
+ Generic handler for exceptions that map to specific HTTP status codes.
+ Converts the status code to the appropriate HTTP error class and creates a response.
+ """
+ assert request # nosec
+
+ http_error_cls = get_http_error_class_or_none(status_code)
+ if http_error_cls is None:
+        msg = f"No HTTP error class found for status code {status_code}, falling back to 500"
+ raise ValueError(msg)
+
+ user_error_msg = get_code_description(status_code)
+
+ if is_5xx_server_error(status_code):
+ _log_5xx_server_error(request, exception, user_error_msg)
+
+ return create_http_error(exception, user_error_msg, http_error_cls)
+
+
+def error_middleware_factory(api_version: str) -> Middleware:
@web.middleware
- async def _middleware_handler(request: web.Request, handler: Handler): # noqa: C901
+ async def _middleware_handler(request: web.Request, handler: Handler):
"""
Ensure all error raised are properly enveloped and json responses
"""
if not is_api_request(request, api_version):
return await handler(request)
- # FIXME: review when to send info to client and when not!
try:
- return await handler(request)
+ try:
+ result = await handler(request)
- except web.HTTPError as err:
-
- err.content_type = MIMETYPE_APPLICATION_JSON
- if err.reason:
- err.set_status(err.status, safe_status_message(message=err.reason))
-
- if not err.text or not is_enveloped_from_text(err.text):
- error_message = err.text or err.reason or "Unexpected error"
- error_model = ErrorGet(
- errors=[
- ErrorItemType.from_error(err),
- ],
- status=err.status,
- logs=[
- LogMessageType(message=error_message, level="ERROR"),
- ],
- message=error_message,
+ except web.HTTPError as exc: # 4XX and 5XX raised as exceptions
+ result = handle_aiohttp_web_http_error(request, exc)
+
+            except web.HTTPSuccessful as exc:  # 2XX raised as exceptions
+ result = _handle_aiohttp_web_http_successful(request, exc)
+
+ except web.HTTPRedirection as exc: # 3XX raised as exceptions
+ result = exc
+
+ except NotImplementedError as exc:
+ result = _handle_exception_as_http_error(
+ request, exc, status.HTTP_501_NOT_IMPLEMENTED
)
- err.text = EnvelopeFactory(error=error_model).as_text()
-
- raise
-
- except web.HTTPSuccessful as err:
- err.content_type = MIMETYPE_APPLICATION_JSON
- if err.reason:
- err.set_status(err.status, safe_status_message(message=err.reason))
-
- if err.text:
- try:
- payload = json_loads(err.text)
- if not is_enveloped_from_map(payload):
- payload = wrap_as_envelope(data=payload)
- err.text = json_dumps(payload)
- except Exception as other_error: # pylint: disable=broad-except
- _process_and_raise_unexpected_error(request, other_error)
- raise
-
- except web.HTTPRedirection as err:
- _logger.debug("Redirected to %s", err)
- raise
-
- except NotImplementedError as err:
- http_error = create_http_error(
- err,
- f"{err}",
- web.HTTPNotImplemented,
- skip_internal_error_details=_is_prod,
- )
- raise http_error from err
-
- except TimeoutError as err:
- http_error = create_http_error(
- err,
- f"{err}",
- web.HTTPGatewayTimeout,
- skip_internal_error_details=_is_prod,
- )
- raise http_error from err
- except Exception as err: # pylint: disable=broad-except
- _process_and_raise_unexpected_error(request, err)
+ except TimeoutError as exc:
+ result = _handle_exception_as_http_error(
+ request, exc, status.HTTP_504_GATEWAY_TIMEOUT
+ )
+
+ except Exception as exc: # pylint: disable=broad-except
+ #
+            # Last resort for unexpected exceptions (including those raised by the exception handlers!)
+ #
+ result = _handle_unexpected_exception_as_500(request, exc)
+
+ return result
# adds identifier (mostly for debugging)
setattr( # noqa: B010
@@ -164,7 +258,6 @@ def envelope_middleware_factory(
api_version: str,
) -> Callable[..., Awaitable[StreamResponse]]:
# FIXME: This data conversion is very error-prone. Use decorators instead!
- _is_prod: bool = is_production_environ()
@web.middleware
async def _middleware_handler(
@@ -205,3 +298,8 @@ def append_rest_middlewares(
"""Helper that appends rest-middlewares in the correct order"""
app.middlewares.append(error_middleware_factory(api_version))
app.middlewares.append(envelope_middleware_factory(api_version))
+
+
+APP_JSONSCHEMA_SPECS_KEY: Final = web.AppKey(
+ "APP_JSONSCHEMA_SPECS_KEY", dict[str, object]
+)
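A minimal sketch, assuming the service mounts its REST API under `/v0`, of how the refactored middlewares are typically wired; only `append_rest_middlewares` is taken from this diff:

```python
# Sketch only: wiring the error and envelope middlewares (api_version value is an assumption)
from aiohttp import web
from servicelib.aiohttp.rest_middlewares import append_rest_middlewares

app = web.Application()
append_rest_middlewares(app, api_version="v0")  # error middleware first, then envelope middleware
```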
diff --git a/packages/service-library/src/servicelib/aiohttp/rest_responses.py b/packages/service-library/src/servicelib/aiohttp/rest_responses.py
index 3986de59700c..7e3214dac2f2 100644
--- a/packages/service-library/src/servicelib/aiohttp/rest_responses.py
+++ b/packages/service-library/src/servicelib/aiohttp/rest_responses.py
@@ -1,4 +1,4 @@
-from typing import Any, Final, TypedDict
+from typing import Any, Final, TypedDict, TypeVar
from aiohttp import web
from aiohttp.web_exceptions import HTTPError
@@ -10,7 +10,7 @@
from ..mimetype_constants import MIMETYPE_APPLICATION_JSON
from ..rest_constants import RESPONSE_MODEL_POLICY
from ..rest_responses import is_enveloped
-from ..status_codes_utils import get_code_description, is_error
+from ..status_codes_utils import get_code_description, get_code_display_name, is_error
class EnvelopeDict(TypedDict):
@@ -69,32 +69,37 @@ def safe_status_message(
return flat_message[: max_length - 3] + "..."
+T_HTTPError = TypeVar("T_HTTPError", bound=HTTPError)
+
+
def create_http_error(
errors: list[Exception] | Exception,
error_message: str | None = None,
- http_error_cls: type[HTTPError] = web.HTTPInternalServerError,
+ http_error_cls: type[
+ T_HTTPError
+ ] = web.HTTPInternalServerError, # type: ignore[assignment]
*,
status_reason: str | None = None,
- skip_internal_error_details: bool = False,
error_code: ErrorCodeStr | None = None,
-) -> HTTPError:
+) -> T_HTTPError:
"""
- Response body conforms OAS schema model
- Can skip internal details when 500 status e.g. to avoid transmitting server
exceptions to the client in production
"""
- if not isinstance(errors, list):
- errors = [errors]
-
- is_internal_error = bool(http_error_cls == web.HTTPInternalServerError)
- status_reason = status_reason or get_code_description(http_error_cls.status_code)
+ status_reason = status_reason or get_code_display_name(http_error_cls.status_code)
error_message = error_message or get_code_description(http_error_cls.status_code)
assert len(status_reason) < MAX_STATUS_MESSAGE_LENGTH # nosec
- if is_internal_error and skip_internal_error_details:
- error = ErrorGet.model_validate(
+    # WARNING: do not refactor this function too much without considering how
+    # the front-end handles errors, i.e. please sync with front-end developers before
+    # changing the workflows in this function
+
+ is_internal_error = bool(http_error_cls == web.HTTPInternalServerError)
+ if is_internal_error:
+ error_model = ErrorGet.model_validate(
{
"status": http_error_cls.status_code,
"message": error_message,
@@ -102,8 +107,11 @@ def create_http_error(
}
)
else:
+ if not isinstance(errors, list):
+ errors = [errors]
+
items = [ErrorItemType.from_error(err) for err in errors]
- error = ErrorGet.model_validate(
+ error_model = ErrorGet.model_validate(
{
"errors": items, # NOTE: deprecated!
"status": http_error_cls.status_code,
@@ -113,26 +121,31 @@ def create_http_error(
)
assert not http_error_cls.empty_body # nosec
+
payload = wrap_as_envelope(
- error=error.model_dump(mode="json", **RESPONSE_MODEL_POLICY)
+ error=error_model.model_dump(mode="json", **RESPONSE_MODEL_POLICY)
)
return http_error_cls(
reason=safe_status_message(status_reason),
- text=json_dumps(
- payload,
- ),
+ text=json_dumps(payload),
content_type=MIMETYPE_APPLICATION_JSON,
)
-def exception_to_response(exc: HTTPError) -> web.Response:
+def exception_to_response(exception: HTTPError) -> web.Response:
# Returning web.HTTPException is deprecated so here we have a converter to a response
# so it can be used as
# SEE https://github.com/aio-libs/aiohttp/issues/2415
+
+ if exception.reason:
+ reason = safe_status_message(exception.reason)
+ else:
+ reason = get_code_description(exception.status)
+
return web.Response(
- status=exc.status,
- headers=exc.headers,
- reason=exc.reason,
- text=exc.text,
+ status=exception.status,
+ headers=exception.headers,
+ reason=reason,
+ text=exception.text,
)
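As a rough illustration of the now-generic `create_http_error` and of `exception_to_response` (the exception instance and the message below are made up):

```python
# Illustrative only; the helper names come from this diff, the values do not
from aiohttp import web
from servicelib.aiohttp.rest_responses import create_http_error, exception_to_response

http_error = create_http_error(
    RuntimeError("boom"),
    error_message="The service is temporarily unavailable",
    http_error_cls=web.HTTPServiceUnavailable,  # return type is narrowed to this class
)
response = exception_to_response(http_error)  # plain web.Response with the enveloped JSON body
```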
diff --git a/packages/service-library/src/servicelib/aiohttp/status.py b/packages/service-library/src/servicelib/aiohttp/status.py
index 2a38913adcc6..695ddb0c6d56 100644
--- a/packages/service-library/src/servicelib/aiohttp/status.py
+++ b/packages/service-library/src/servicelib/aiohttp/status.py
@@ -17,6 +17,10 @@
from __future__ import annotations
+from typing import Final
+
+from aiohttp import web
+
__all__ = (
"HTTP_100_CONTINUE",
"HTTP_101_SWITCHING_PROTOCOLS",
@@ -146,3 +150,5 @@
HTTP_508_LOOP_DETECTED = 508
HTTP_510_NOT_EXTENDED = 510
HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511
+
+APP_HEALTH_KEY: Final = web.AppKey("APP_HEALTH_KEY", str)
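For context, a small sketch of how such a typed `web.AppKey` is meant to be used (the stored value is illustrative):

```python
# Sketch: typed application state via aiohttp's AppKey (the value is an assumption)
from aiohttp import web
from servicelib.aiohttp.status import APP_HEALTH_KEY

app = web.Application()
app[APP_HEALTH_KEY] = "healthy"   # type checkers see the value as `str`
assert app[APP_HEALTH_KEY] == "healthy"
```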
diff --git a/packages/service-library/src/servicelib/aiohttp/tracing.py b/packages/service-library/src/servicelib/aiohttp/tracing.py
index 1e41aab20f09..140a24852e4c 100644
--- a/packages/service-library/src/servicelib/aiohttp/tracing.py
+++ b/packages/service-library/src/servicelib/aiohttp/tracing.py
@@ -2,6 +2,7 @@
import logging
from collections.abc import AsyncIterator, Callable
+from typing import Final
from aiohttp import web
from opentelemetry import trace
@@ -15,12 +16,14 @@
middleware as aiohttp_server_opentelemetry_middleware, # pylint:disable=no-name-in-module
)
from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace import SpanProcessor, TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
-from servicelib.logging_utils import log_context
from settings_library.tracing import TracingSettings
from yarl import URL
+from ..logging_utils import log_context
+from ..tracing import get_trace_id_header
+
_logger = logging.getLogger(__name__)
try:
from opentelemetry.instrumentation.botocore import ( # type: ignore[import-not-found]
@@ -36,6 +39,13 @@
HAS_AIOPG = True
except ImportError:
HAS_AIOPG = False
+try:
+ from opentelemetry.instrumentation.asyncpg import AsyncPGInstrumentor
+
+ HAS_ASYNCPG = True
+except ImportError:
+ HAS_ASYNCPG = False
+
try:
from opentelemetry.instrumentation.requests import RequestsInstrumentor
@@ -50,11 +60,24 @@
except ImportError:
HAS_AIO_PIKA = False
+APP_OPENTELEMETRY_INSTRUMENTOR_KEY: Final = web.AppKey(
+ "APP_OPENTELEMETRY_INSTRUMENTOR_KEY", dict[str, object]
+)
+
+
+def _create_span_processor(tracing_destination: str) -> SpanProcessor:
+ otlp_exporter = OTLPSpanExporterHTTP(
+ endpoint=tracing_destination,
+ )
+ return BatchSpanProcessor(otlp_exporter)
+
def _startup(
+ *,
app: web.Application,
tracing_settings: TracingSettings,
service_name: str,
+ add_response_trace_id_header: bool = False,
) -> None:
"""
Sets up this service for a distributed tracing system (opentelemetry)
@@ -90,12 +113,8 @@ def _startup(
tracing_destination,
)
- otlp_exporter = OTLPSpanExporterHTTP(
- endpoint=tracing_destination,
- )
-
# Add the span processor to the tracer provider
- tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter)) # type: ignore[attr-defined] # https://github.com/open-telemetry/opentelemetry-python/issues/3713
+ tracer_provider.add_span_processor(_create_span_processor(tracing_destination)) # type: ignore[attr-defined] # https://github.com/open-telemetry/opentelemetry-python/issues/3713
# Instrument aiohttp server
# Explanation for custom middleware call DK 10/2024:
# OpenTelemetry Aiohttp autoinstrumentation is meant to be used by only calling `AioHttpServerInstrumentor().instrument()`
@@ -106,6 +125,8 @@ def _startup(
#
# Since the code that is provided (monkeypatched) in the __init__ that the opentelemetry-autoinstrumentation-library provides is only 4 lines,
# just adding a middleware, we are free to simply execute this "missed call" [since we can't call the monkeypatch'ed __init__()] in this following line:
+ if add_response_trace_id_header:
+ app.middlewares.insert(0, response_trace_id_header_middleware)
app.middlewares.insert(0, aiohttp_server_opentelemetry_middleware)
# Code of the aiohttp server instrumentation: github.com/open-telemetry/opentelemetry-python-contrib/blob/eccb05c808a7d797ef5b6ecefed3590664426fbf/instrumentation/opentelemetry-instrumentation-aiohttp-server/src/opentelemetry/instrumentation/aiohttp_server/__init__.py#L246
# For reference, the above statement was written for:
@@ -122,6 +143,13 @@ def _startup(
msg="Attempting to add aio-pg opentelemetry autoinstrumentation...",
):
AiopgInstrumentor().instrument()
+ if HAS_ASYNCPG:
+ with log_context(
+ _logger,
+ logging.INFO,
+ msg="Attempting to add asyncpg opentelemetry autoinstrumentation...",
+ ):
+ AsyncPGInstrumentor().instrument()
if HAS_BOTOCORE:
with log_context(
_logger,
@@ -146,6 +174,21 @@ def _startup(
AioPikaInstrumentor().instrument()
+@web.middleware
+async def response_trace_id_header_middleware(request: web.Request, handler):
+ headers = get_trace_id_header()
+
+ try:
+ response = await handler(request)
+ except web.HTTPException as exc:
+ if headers:
+ exc.headers.update(headers)
+ raise
+ if headers:
+ response.headers.update(headers)
+ return response
+
+
def _shutdown() -> None:
"""Uninstruments all opentelemetry instrumentors that were instrumented."""
try:
@@ -157,6 +200,11 @@ def _shutdown() -> None:
AiopgInstrumentor().uninstrument()
except Exception: # pylint:disable=broad-exception-caught
_logger.exception("Failed to uninstrument AiopgInstrumentor")
+ if HAS_ASYNCPG:
+ try:
+ AsyncPGInstrumentor().uninstrument()
+ except Exception: # pylint:disable=broad-exception-caught
+ _logger.exception("Failed to uninstrument AsyncPGInstrumentor")
if HAS_BOTOCORE:
try:
BotocoreInstrumentor().uninstrument()
@@ -175,9 +223,18 @@ def _shutdown() -> None:
def get_tracing_lifespan(
- app: web.Application, tracing_settings: TracingSettings, service_name: str
+ *,
+ app: web.Application,
+ tracing_settings: TracingSettings,
+ service_name: str,
+ add_response_trace_id_header: bool = False,
) -> Callable[[web.Application], AsyncIterator]:
- _startup(app=app, tracing_settings=tracing_settings, service_name=service_name)
+ _startup(
+ app=app,
+ tracing_settings=tracing_settings,
+ service_name=service_name,
+ add_response_trace_id_header=add_response_trace_id_header,
+ )
async def tracing_lifespan(app: web.Application):
assert app # nosec
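A minimal, hypothetical wiring sketch for the new `add_response_trace_id_header` opt-in; the settings construction via `create_from_envs()` and the service name are assumptions:

```python
# Sketch only; get_tracing_lifespan and the new flag come from this diff
from aiohttp import web
from servicelib.aiohttp.tracing import get_tracing_lifespan
from settings_library.tracing import TracingSettings


def setup_tracing(app: web.Application) -> None:
    tracing_settings = TracingSettings.create_from_envs()  # assumed construction
    app.cleanup_ctx.append(
        get_tracing_lifespan(
            app=app,
            tracing_settings=tracing_settings,
            service_name="my-service",  # illustrative name
            add_response_trace_id_header=True,
        )
    )
```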
diff --git a/packages/service-library/src/servicelib/archiving_utils/_interface_7zip.py b/packages/service-library/src/servicelib/archiving_utils/_interface_7zip.py
index 1e642895f1dd..9fab723c1edb 100644
--- a/packages/service-library/src/servicelib/archiving_utils/_interface_7zip.py
+++ b/packages/service-library/src/servicelib/archiving_utils/_interface_7zip.py
@@ -11,10 +11,10 @@
import tqdm
from pydantic import NonNegativeInt
-from servicelib.logging_utils import log_catch
from tqdm.contrib.logging import tqdm_logging_redirect
from ..file_utils import shutil_move
+from ..logging_utils import log_catch
from ..progress_bar import ProgressBarData
from ._errors import (
CouldNotFindValueError,
diff --git a/packages/service-library/src/servicelib/async_utils.py b/packages/service-library/src/servicelib/async_utils.py
index c6466df0a708..fc84fda55acc 100644
--- a/packages/service-library/src/servicelib/async_utils.py
+++ b/packages/service-library/src/servicelib/async_utils.py
@@ -1,14 +1,13 @@
import asyncio
-import contextlib
-import datetime
import logging
from collections import deque
-from collections.abc import Awaitable, Callable, Coroutine
-from contextlib import suppress
+from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from functools import wraps
from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar
+from common_library.async_tools import cancel_wait_task
+
from . import tracing
from .utils_profiling_middleware import dont_profile, is_profiling, profile_context
@@ -56,9 +55,7 @@ async def _safe_cancel(context: Context) -> None:
try:
await context.in_queue.put(None)
if context.task is not None:
- context.task.cancel()
- with suppress(asyncio.CancelledError):
- await context.task
+ await cancel_wait_task(context.task, max_delay=None)
except RuntimeError as e:
if "Event loop is closed" in f"{e}":
_logger.warning("event loop is closed and could not cancel %s", context)
@@ -212,40 +209,3 @@ async def worker(in_q: Queue[QueueElement], out_q: Queue) -> None:
return wrapper
return decorator
-
-
-def delayed_start(
- delay: datetime.timedelta,
-) -> Callable[
- [Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]
-]:
- def _decorator(
- func: Callable[P, Coroutine[Any, Any, R]],
- ) -> Callable[P, Coroutine[Any, Any, R]]:
- @wraps(func)
- async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
- await asyncio.sleep(delay.total_seconds())
- return await func(*args, **kwargs)
-
- return _wrapper
-
- return _decorator
-
-
-async def cancel_wait_task(
- task: asyncio.Task,
- *,
- max_delay: float | None = None,
-) -> None:
- """Cancel a asyncio.Task and waits for it to finish.
-
- :param task: task to be canceled
- :param max_delay: duration (in seconds) to wait before giving
- up the cancellation. If None it waits forever.
- :raises TimeoutError: raised if cannot cancel the task.
- """
-
- task.cancel()
- async with asyncio.timeout(max_delay):
- with contextlib.suppress(asyncio.CancelledError):
- await task
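The helper removed here now lives in `common_library.async_tools`; a small sketch of the relocated import and call (the sleeping task is illustrative):

```python
# Sketch of the relocated helper; only the import path and cancel_wait_task come from this diff
import asyncio

from common_library.async_tools import cancel_wait_task


async def main() -> None:
    task = asyncio.create_task(asyncio.sleep(3600))
    await cancel_wait_task(task, max_delay=5)  # cancels and waits at most 5 seconds


asyncio.run(main())
```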
diff --git a/packages/service-library/src/servicelib/background_task.py b/packages/service-library/src/servicelib/background_task.py
index 508f34b99eec..a283c78f6060 100644
--- a/packages/service-library/src/servicelib/background_task.py
+++ b/packages/service-library/src/servicelib/background_task.py
@@ -6,10 +6,10 @@
from collections.abc import AsyncIterator, Awaitable, Callable, Coroutine
from typing import Any, Final, ParamSpec, TypeVar
+from common_library.async_tools import cancel_wait_task, delayed_start
from tenacity import TryAgain, before_sleep_log, retry, retry_if_exception_type
from tenacity.wait import wait_fixed
-from .async_utils import cancel_wait_task, delayed_start
from .logging_utils import log_catch, log_context
_logger = logging.getLogger(__name__)
@@ -42,7 +42,7 @@ def periodic(
) -> Callable[
[Callable[P, Coroutine[Any, Any, None]]], Callable[P, Coroutine[Any, Any, None]]
]:
- """Calls the function periodically with a given interval.
+ """Calls the function periodically with a given interval or triggered by an early wake-up event.
Arguments:
interval -- the interval between calls
@@ -58,7 +58,7 @@ def periodic(
"""
def _decorator(
- func: Callable[P, Coroutine[Any, Any, None]],
+ async_fun: Callable[P, Coroutine[Any, Any, None]],
) -> Callable[P, Coroutine[Any, Any, None]]:
class _InternalTryAgain(TryAgain):
             # Local exception to prevent reacting to similar TryAgain exceptions raised by the wrapped func
@@ -82,10 +82,10 @@ class _InternalTryAgain(TryAgain):
),
before_sleep=before_sleep_log(_logger, logging.DEBUG),
)
- @functools.wraps(func)
+ @functools.wraps(async_fun)
async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> None:
with log_catch(_logger, reraise=True):
- await func(*args, **kwargs)
+ await async_fun(*args, **kwargs)
raise _InternalTryAgain
return _wrapper
@@ -142,4 +142,4 @@ async def periodic_task(
if asyncio_task is not None:
# NOTE: this stopping is shielded to prevent the cancellation to propagate
# into the stopping procedure
- await asyncio.shield(cancel_wait_task(asyncio_task, max_delay=stop_timeout))
+ await cancel_wait_task(asyncio_task, max_delay=stop_timeout)
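As a rough illustration of the `@periodic` decorator documented above (the interval and the body are assumptions; the early wake-up event is omitted):

```python
# Hypothetical usage sketch of @periodic; only the decorator itself comes from this code base
from datetime import timedelta

from servicelib.background_task import periodic


@periodic(interval=timedelta(seconds=10))
async def _check_health() -> None:
    ...  # runs every ~10 seconds until the wrapping asyncio task is cancelled
```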
diff --git a/packages/service-library/src/servicelib/background_task_utils.py b/packages/service-library/src/servicelib/background_task_utils.py
index 8313f6424303..bd70241b183f 100644
--- a/packages/service-library/src/servicelib/background_task_utils.py
+++ b/packages/service-library/src/servicelib/background_task_utils.py
@@ -3,11 +3,10 @@
from collections.abc import Callable, Coroutine
from typing import Any, ParamSpec, TypeVar
-from servicelib.exception_utils import silence_exceptions
-from servicelib.redis._errors import CouldNotAcquireLockError
-
from .background_task import periodic
+from .exception_utils import suppress_exceptions
from .redis import RedisClientSDK, exclusive
+from .redis._errors import CouldNotAcquireLockError
P = ParamSpec("P")
R = TypeVar("R")
@@ -39,10 +38,11 @@ def _decorator(
coro: Callable[P, Coroutine[Any, Any, None]],
) -> Callable[P, Coroutine[Any, Any, None]]:
@periodic(interval=retry_after)
- @silence_exceptions(
+ @suppress_exceptions(
# Replicas will raise CouldNotAcquireLockError
# SEE https://github.com/ITISFoundation/osparc-simcore/issues/7574
- (CouldNotAcquireLockError,)
+ (CouldNotAcquireLockError,),
+ reason=f"Multiple instances of the periodic task `{coro.__module__}.{coro.__name__}` are running.",
)
@exclusive(
redis_client,
@@ -53,6 +53,8 @@ def _decorator(
async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> None:
return await coro(*args, **kwargs)
+        # Marks the wrapper with an identifier (mostly to assert that a function has been decorated with this decorator)
+ setattr(_wrapper, "__exclusive_periodic__", True) # noqa: B010
return _wrapper
return _decorator
diff --git a/packages/service-library/src/servicelib/celery/__init__.py b/packages/service-library/src/servicelib/celery/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/packages/service-library/src/servicelib/celery/app_server.py b/packages/service-library/src/servicelib/celery/app_server.py
new file mode 100644
index 000000000000..c11d4b46acd1
--- /dev/null
+++ b/packages/service-library/src/servicelib/celery/app_server.py
@@ -0,0 +1,44 @@
+import asyncio
+import threading
+from abc import ABC, abstractmethod
+from asyncio import AbstractEventLoop
+from typing import Generic, TypeVar
+
+from ..celery.task_manager import TaskManager
+
+T = TypeVar("T")
+
+
+class BaseAppServer(ABC, Generic[T]):
+ def __init__(self, app: T) -> None:
+ self._app: T = app
+ self._shutdown_event: asyncio.Event = asyncio.Event()
+
+ @property
+ def app(self) -> T:
+ return self._app
+
+ @property
+ def event_loop(self) -> AbstractEventLoop:
+ return self._event_loop
+
+ @event_loop.setter
+ def event_loop(self, loop: AbstractEventLoop) -> None:
+ self._event_loop = loop
+
+ @property
+ def shutdown_event(self) -> asyncio.Event:
+ return self._shutdown_event
+
+ @property
+ @abstractmethod
+ def task_manager(self) -> TaskManager:
+ raise NotImplementedError
+
+ @abstractmethod
+ async def run_until_shutdown(
+ self,
+ startup_completed_event: threading.Event,
+ ) -> None:
+ """Used to initialize the app server until shutdown event is set."""
+ raise NotImplementedError
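For orientation, a deliberately trivial subclass sketch (the `dict` app type and the method bodies are assumptions) showing the contract of `run_until_shutdown`:

```python
# Illustrative-only subclass; BaseAppServer comes from this diff, everything else is assumed
import threading

from servicelib.celery.app_server import BaseAppServer
from servicelib.celery.task_manager import TaskManager


class DummyAppServer(BaseAppServer[dict]):
    @property
    def task_manager(self) -> TaskManager:
        raise NotImplementedError  # a real server returns its concrete TaskManager here

    async def run_until_shutdown(self, startup_completed_event: threading.Event) -> None:
        startup_completed_event.set()     # tell the worker thread that startup is done
        await self.shutdown_event.wait()  # keep serving until shutdown is requested
```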
diff --git a/packages/service-library/src/servicelib/celery/models.py b/packages/service-library/src/servicelib/celery/models.py
new file mode 100644
index 000000000000..2f37e9b70c5d
--- /dev/null
+++ b/packages/service-library/src/servicelib/celery/models.py
@@ -0,0 +1,236 @@
+import datetime
+from enum import StrEnum
+from typing import Annotated, Final, Literal, Protocol, Self, TypeAlias, TypeVar
+from uuid import UUID
+
+import orjson
+from common_library.json_serialization import json_dumps, json_loads
+from models_library.progress_bar import ProgressReport
+from pydantic import BaseModel, ConfigDict, Field, StringConstraints, model_validator
+from pydantic.config import JsonDict
+
+ModelType = TypeVar("ModelType", bound=BaseModel)
+
+TaskID: TypeAlias = str
+TaskName: TypeAlias = Annotated[
+ str, StringConstraints(strip_whitespace=True, min_length=1)
+]
+TaskUUID: TypeAlias = UUID
+_TASK_ID_KEY_DELIMITATOR: Final[str] = ":"
+_FORBIDDEN_KEYS = ("*", _TASK_ID_KEY_DELIMITATOR, "=")
+_FORBIDDEN_VALUES = (_TASK_ID_KEY_DELIMITATOR, "=")
+AllowedTypes = (
+ int | float | bool | str | None | list[str] | list[int] | list[float] | list[bool]
+)
+
+Wildcard: TypeAlias = Literal["*"]
+WILDCARD: Final[Wildcard] = "*"
+
+
+class OwnerMetadata(BaseModel):
+ """
+    Class for associating metadata with a celery task. The implementation is flexible and allows the task owner to define its own metadata.
+    This could be metadata for validating whether a user has access to a given task (e.g. user_id or product_name) or metadata for keeping track of how to handle a task,
+    e.g. which schema the result of the task will have.
+
+ The class exposes a filtering mechanism to list tasks using wildcards.
+
+ Example usage:
+ class StorageOwnerMetadata(OwnerMetadata):
+ user_id: int | Wildcard
+            product_name: str | Wildcard
+ owner = APP_NAME
+
+    Where APP_NAME is the name of the service. Listing tasks using the filter
+    `StorageOwnerMetadata(user_id=123, product_name=WILDCARD)` will return all tasks
+    submitted from the service with user_id 123, regardless of their product_name.
+
+    If the metadata schema is known, the class allows deserializing the metadata (see ``model_validate_task_id``), i.e. one can recover the metadata from the task_id:
+    metadata -> task_id -> metadata
+
+ """
+
+ model_config = ConfigDict(extra="allow", frozen=True)
+ owner: Annotated[
+ str,
+ StringConstraints(min_length=1, pattern=r"^[a-z_-]+$"),
+ Field(
+ description='Identifies the service owning the task. Should be the "APP_NAME" of the service.'
+ ),
+ ]
+
+ @model_validator(mode="after")
+ def _check_valid_filters(self) -> Self:
+ for key, value in self.model_dump().items():
+ # forbidden keys
+ if any(x in key for x in _FORBIDDEN_KEYS):
+ raise ValueError(f"Invalid filter key: '{key}'")
+ # forbidden values
+ if any(x in f"{value}" for x in _FORBIDDEN_VALUES):
+ raise ValueError(f"Invalid filter value for key '{key}': '{value}'")
+
+ class _TypeValidationModel(BaseModel):
+ filters: dict[str, AllowedTypes]
+
+ _TypeValidationModel.model_validate({"filters": self.model_dump()})
+ return self
+
+ def model_dump_task_id(self, task_uuid: TaskUUID | Wildcard) -> TaskID:
+ data = self.model_dump(mode="json")
+ data.update({"task_uuid": f"{task_uuid}"})
+ return _TASK_ID_KEY_DELIMITATOR.join(
+ [f"{k}={json_dumps(v)}" for k, v in sorted(data.items())]
+ )
+
+ @classmethod
+ def model_validate_task_id(cls, task_id: TaskID) -> Self:
+ data = cls._deserialize_task_id(task_id)
+ data.pop("task_uuid", None)
+ return cls.model_validate(data)
+
+ @classmethod
+ def _deserialize_task_id(cls, task_id: TaskID) -> dict[str, AllowedTypes]:
+ key_value_pairs = [
+ item.split("=") for item in task_id.split(_TASK_ID_KEY_DELIMITATOR)
+ ]
+ try:
+ return {key: json_loads(value) for key, value in key_value_pairs}
+ except orjson.JSONDecodeError as err:
+ raise ValueError(f"Invalid task_id format: {task_id}") from err
+
+ @classmethod
+ def get_task_uuid(cls, task_id: TaskID) -> TaskUUID:
+ data = cls._deserialize_task_id(task_id)
+ try:
+ uuid_string = data["task_uuid"]
+ if not isinstance(uuid_string, str):
+ raise ValueError(f"Invalid task_id format: {task_id}")
+ return TaskUUID(uuid_string)
+ except ValueError as err:
+ raise ValueError(f"Invalid task_id format: {task_id}") from err
+
+
+class TaskState(StrEnum):
+ PENDING = "PENDING"
+ STARTED = "STARTED"
+ RETRY = "RETRY"
+ SUCCESS = "SUCCESS"
+ FAILURE = "FAILURE"
+
+
+TASK_DONE_STATES: Final[tuple[TaskState, ...]] = (
+ TaskState.SUCCESS,
+ TaskState.FAILURE,
+)
+
+
+class TasksQueue(StrEnum):
+ CPU_BOUND = "cpu_bound"
+ DEFAULT = "default"
+ API_WORKER_QUEUE = "api_worker_queue"
+
+
+class ExecutionMetadata(BaseModel):
+ name: TaskName
+ ephemeral: bool = True
+ queue: TasksQueue = TasksQueue.DEFAULT
+
+
+class Task(BaseModel):
+ uuid: TaskUUID
+ metadata: ExecutionMetadata
+
+ @staticmethod
+ def _update_json_schema_extra(schema: JsonDict) -> None:
+ schema.update(
+ {
+ "examples": [
+ {
+ "uuid": "123e4567-e89b-12d3-a456-426614174000",
+ "metadata": {
+ "name": "task1",
+ "ephemeral": True,
+ "queue": "default",
+ },
+ },
+ {
+ "uuid": "223e4567-e89b-12d3-a456-426614174001",
+ "metadata": {
+ "name": "task2",
+ "ephemeral": False,
+ "queue": "cpu_bound",
+ },
+ },
+ {
+ "uuid": "323e4567-e89b-12d3-a456-426614174002",
+ "metadata": {
+ "name": "task3",
+ "ephemeral": True,
+ "queue": "default",
+ },
+ },
+ ]
+ }
+ )
+
+ model_config = ConfigDict(json_schema_extra=_update_json_schema_extra)
+
+
+class TaskInfoStore(Protocol):
+ async def create_task(
+ self,
+ task_id: TaskID,
+ execution_metadata: ExecutionMetadata,
+ expiry: datetime.timedelta,
+ ) -> None: ...
+
+ async def task_exists(self, task_id: TaskID) -> bool: ...
+
+ async def get_task_metadata(self, task_id: TaskID) -> ExecutionMetadata | None: ...
+
+ async def get_task_progress(self, task_id: TaskID) -> ProgressReport | None: ...
+
+ async def list_tasks(self, owner_metadata: OwnerMetadata) -> list[Task]: ...
+
+ async def remove_task(self, task_id: TaskID) -> None: ...
+
+ async def set_task_progress(
+ self, task_id: TaskID, report: ProgressReport
+ ) -> None: ...
+
+
+class TaskStatus(BaseModel):
+ task_uuid: TaskUUID
+ task_state: TaskState
+ progress_report: ProgressReport
+
+ @staticmethod
+ def _update_json_schema_extra(schema: JsonDict) -> None:
+
+ schema.update(
+ {
+ "examples": [
+ {
+ "task_uuid": "123e4567-e89b-12d3-a456-426614174000",
+ "task_state": "SUCCESS",
+ "progress_report": {
+ "actual_value": 0.5,
+ "total": 1.0,
+ "attempts": 1,
+ "unit": "Byte",
+ "message": {
+ "description": "some description",
+ "current": 12.2,
+ "total": 123,
+ },
+ },
+ }
+ ]
+ }
+ )
+
+ model_config = ConfigDict(json_schema_extra=_update_json_schema_extra)
+
+ @property
+ def is_done(self) -> bool:
+ return self.task_state in TASK_DONE_STATES
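A small sketch of the task-id round trip described in the `OwnerMetadata` docstring (the `StorageOwnerMetadata` subclass and the field values are illustrative):

```python
# Sketch of the metadata -> task_id -> metadata round trip; the values are assumptions
from uuid import uuid4

from servicelib.celery.models import OwnerMetadata, TaskUUID


class StorageOwnerMetadata(OwnerMetadata):
    user_id: int


metadata = StorageOwnerMetadata(owner="storage", user_id=123)
task_uuid: TaskUUID = uuid4()

task_id = metadata.model_dump_task_id(task_uuid)
assert OwnerMetadata.get_task_uuid(task_id) == task_uuid
assert StorageOwnerMetadata.model_validate_task_id(task_id) == metadata
```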
diff --git a/packages/service-library/src/servicelib/celery/task_manager.py b/packages/service-library/src/servicelib/celery/task_manager.py
new file mode 100644
index 000000000000..78722dd66454
--- /dev/null
+++ b/packages/service-library/src/servicelib/celery/task_manager.py
@@ -0,0 +1,43 @@
+from typing import Any, Protocol, runtime_checkable
+
+from models_library.progress_bar import ProgressReport
+
+from ..celery.models import (
+ ExecutionMetadata,
+ OwnerMetadata,
+ Task,
+ TaskID,
+ TaskStatus,
+ TaskUUID,
+)
+
+
+@runtime_checkable
+class TaskManager(Protocol):
+ async def submit_task(
+ self,
+ execution_metadata: ExecutionMetadata,
+ *,
+ owner_metadata: OwnerMetadata,
+ **task_param
+ ) -> TaskUUID: ...
+
+ async def cancel_task(
+ self, owner_metadata: OwnerMetadata, task_uuid: TaskUUID
+ ) -> None: ...
+
+ async def task_exists(self, task_id: TaskID) -> bool: ...
+
+ async def get_task_result(
+ self, owner_metadata: OwnerMetadata, task_uuid: TaskUUID
+ ) -> Any: ...
+
+ async def get_task_status(
+ self, owner_metadata: OwnerMetadata, task_uuid: TaskUUID
+ ) -> TaskStatus: ...
+
+ async def list_tasks(self, owner_metadata: OwnerMetadata) -> list[Task]: ...
+
+ async def set_task_progress(
+ self, task_id: TaskID, report: ProgressReport
+ ) -> None: ...
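Because `TaskManager` is a runtime-checkable Protocol, callers can be typed against it without depending on a concrete Celery implementation; a hypothetical sketch (the task name and helper are assumptions):

```python
# Sketch only; the protocol and models come from this diff, the helper is made up
from servicelib.celery.models import ExecutionMetadata, OwnerMetadata, TaskUUID
from servicelib.celery.task_manager import TaskManager


async def start_export(task_manager: TaskManager, owner_metadata: OwnerMetadata) -> TaskUUID:
    return await task_manager.submit_task(
        ExecutionMetadata(name="export_data"),  # illustrative task name
        owner_metadata=owner_metadata,
    )
```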
diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/container_utils.py b/packages/service-library/src/servicelib/container_utils.py
similarity index 77%
rename from services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/container_utils.py
rename to packages/service-library/src/servicelib/container_utils.py
index 64b91bf938e2..03d468b4a977 100644
--- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/container_utils.py
+++ b/packages/service-library/src/servicelib/container_utils.py
@@ -1,19 +1,35 @@
import asyncio
import logging
from collections.abc import Sequence
-from typing import Any
+from typing import Any, Final
from aiodocker import Docker, DockerError
from aiodocker.execs import Exec
from aiodocker.stream import Stream
+from common_library.errors_classes import OsparcErrorMixin
from pydantic import NonNegativeFloat
-from starlette import status
-from ..core.errors import (
- ContainerExecCommandFailedError,
- ContainerExecContainerNotFoundError,
- ContainerExecTimeoutError,
-)
+
+class BaseContainerUtilsError(OsparcErrorMixin, Exception):
+ pass
+
+
+class ContainerExecContainerNotFoundError(BaseContainerUtilsError):
+ msg_template = "Container '{container_name}' was not found"
+
+
+class ContainerExecTimeoutError(BaseContainerUtilsError):
+ msg_template = "Timed out after {timeout} while executing: '{command}'"
+
+
+class ContainerExecCommandFailedError(BaseContainerUtilsError):
+ msg_template = (
+ "Command '{command}' exited with code '{exit_code}'"
+ "and output: '{command_result}'"
+ )
+
+
+_HTTP_404_NOT_FOUND: Final[int] = 404
_logger = logging.getLogger(__name__)
@@ -77,10 +93,10 @@ async def run_command_in_container(
_execute_command(container_name, command), timeout
)
except DockerError as e:
- if e.status == status.HTTP_404_NOT_FOUND:
+ if e.status == _HTTP_404_NOT_FOUND:
raise ContainerExecContainerNotFoundError(
container_name=container_name
) from e
raise
- except asyncio.TimeoutError as e:
+ except TimeoutError as e:
raise ContainerExecTimeoutError(timeout=timeout, command=command) from e
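The new error classes build on `OsparcErrorMixin`, so their messages are rendered from `msg_template` and the keyword arguments; a small illustrative sketch (the values are made up):

```python
# Sketch of how an OsparcErrorMixin-based error renders its message; values are assumptions
from servicelib.container_utils import ContainerExecCommandFailedError

err = ContainerExecCommandFailedError(command="ls /data", exit_code=2, command_result="No such file")
print(err)  # -> Command 'ls /data' exited with code '2' and output: 'No such file'
```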
diff --git a/packages/service-library/src/servicelib/db_asyncpg_utils.py b/packages/service-library/src/servicelib/db_asyncpg_utils.py
index f9dfd27c2d8c..d72f77d95159 100644
--- a/packages/service-library/src/servicelib/db_asyncpg_utils.py
+++ b/packages/service-library/src/servicelib/db_asyncpg_utils.py
@@ -18,7 +18,7 @@
@retry(**PostgresRetryPolicyUponInitialization(_logger).kwargs)
async def create_async_engine_and_database_ready(
- settings: PostgresSettings,
+ settings: PostgresSettings, application_name: str
) -> AsyncEngine:
"""
- creates asyncio engine
@@ -30,17 +30,17 @@ async def create_async_engine_and_database_ready(
raise_if_migration_not_ready,
)
- server_settings = None
- if settings.POSTGRES_CLIENT_NAME:
- assert isinstance(settings.POSTGRES_CLIENT_NAME, str) # nosec
- server_settings = {
- "application_name": settings.POSTGRES_CLIENT_NAME,
- }
+ server_settings = {
+ "jit": "off",
+ "application_name": settings.client_name(
+ f"{application_name}", suffix="asyncpg"
+ ),
+ }
engine = create_async_engine(
settings.dsn_with_async_sqlalchemy,
- pool_size=settings.POSTGRES_MINSIZE,
- max_overflow=settings.POSTGRES_MAXSIZE - settings.POSTGRES_MINSIZE,
+ pool_size=settings.POSTGRES_MAX_POOLSIZE,
+ max_overflow=settings.POSTGRES_MAX_OVERFLOW,
connect_args={"server_settings": server_settings},
pool_pre_ping=True, # https://docs.sqlalchemy.org/en/14/core/pooling.html#dealing-with-disconnects
future=True, # this uses sqlalchemy 2.0 API, shall be removed when sqlalchemy 2.0 is released
@@ -71,7 +71,7 @@ async def check_postgres_liveness(engine: AsyncEngine) -> LivenessResult:
@contextlib.asynccontextmanager
async def with_async_pg_engine(
- settings: PostgresSettings,
+ settings: PostgresSettings, *, application_name: str
) -> AsyncIterator[AsyncEngine]:
"""
Creates an asyncpg engine and ensures it is properly closed after use.
@@ -82,14 +82,16 @@ async def with_async_pg_engine(
logging.DEBUG,
f"connection to db {settings.dsn_with_async_sqlalchemy}",
):
- server_settings = None
- if settings.POSTGRES_CLIENT_NAME:
- assert isinstance(settings.POSTGRES_CLIENT_NAME, str)
+ server_settings = {
+ "application_name": settings.client_name(
+ application_name, suffix="asyncpg"
+ ),
+ }
engine = create_async_engine(
settings.dsn_with_async_sqlalchemy,
- pool_size=settings.POSTGRES_MINSIZE,
- max_overflow=settings.POSTGRES_MAXSIZE - settings.POSTGRES_MINSIZE,
+ pool_size=settings.POSTGRES_MAX_POOLSIZE,
+ max_overflow=settings.POSTGRES_MAX_OVERFLOW,
connect_args={"server_settings": server_settings},
pool_pre_ping=True, # https://docs.sqlalchemy.org/en/14/core/pooling.html#dealing-with-disconnects
future=True, # this uses sqlalchemy 2.0 API, shall be removed when sqlalchemy 2.0 is released
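A hedged usage sketch of the updated signature, which now requires an explicit `application_name` (the settings construction and the name are assumptions):

```python
# Sketch only; create_async_engine_and_database_ready and the new parameter come from this diff
from servicelib.db_asyncpg_utils import create_async_engine_and_database_ready
from settings_library.postgres import PostgresSettings
from sqlalchemy.ext.asyncio import AsyncEngine


async def setup_db() -> AsyncEngine:
    settings = PostgresSettings.create_from_envs()  # assumed construction
    return await create_async_engine_and_database_ready(settings, application_name="my-service")
```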
diff --git a/packages/service-library/src/servicelib/deferred_tasks/__init__.py b/packages/service-library/src/servicelib/deferred_tasks/__init__.py
index dd57b0838103..be2491ffb6f5 100644
--- a/packages/service-library/src/servicelib/deferred_tasks/__init__.py
+++ b/packages/service-library/src/servicelib/deferred_tasks/__init__.py
@@ -22,14 +22,16 @@
- `cancel`: (called by the user) [optional]:
send a message to cancel the current task. A warning will be logged but no call to either
`on_result` or `on_finished_with_error` will occur.
+- `on_cancelled` (called by state `ManuallyCancelled`) [optional] {can be overridden by the user}:
+    called after the cancellation is handled by the worker executing `run`
## DeferredHandler lifecycle
```mermaid
stateDiagram-v2
- * --> Scheduled: via [start]
- ** --> ManuallyCancelled: via [cancel]
+ (1) --> Scheduled: via [start]
+ (2) --> ManuallyCancelled: via [cancel]
ManuallyCancelled --> Worker: attempts to cancel task in
@@ -41,9 +43,10 @@
ErrorResult --> FinishedWithError: gives up when out of retries or if cancelled
Worker --> DeferredResult: success
- DeferredResult --> °: calls [on_result]
- FinishedWithError --> °°: calls [on_finished_with_error]
- Worker --> °°°: task cancelled
+ DeferredResult --> (3): calls [on_result]
+ FinishedWithError --> (4): calls [on_finished_with_error]
+ Worker --> Removed*: task cancelled
+ Removed* --> (5): calls [on_cancelled]
```
### States
@@ -57,6 +60,7 @@
- `FinishedWIthError`: logs error, invokes `on_finished_with_error` and removes the schedule
- `DeferredResult`: invokes `on_result` and removes the schedule
- `ManuallyCancelled`: sends message to all instances to cancel. The instance handling the task will cancel the task and remove the schedule
+- `Removed*`: a fake state (it does not actually exist), used only to convey that the cancellation event is triggered after the removal
"""
from ._base_deferred_handler import (
diff --git a/packages/service-library/src/servicelib/deferred_tasks/_base_deferred_handler.py b/packages/service-library/src/servicelib/deferred_tasks/_base_deferred_handler.py
index 3c5110ef8f83..cf42eff26f47 100644
--- a/packages/service-library/src/servicelib/deferred_tasks/_base_deferred_handler.py
+++ b/packages/service-library/src/servicelib/deferred_tasks/_base_deferred_handler.py
@@ -45,6 +45,21 @@ async def get_retries(cls, context: DeferredContext) -> NonNegativeInt:
assert context # nosec
return 0
+ @classmethod
+ async def get_retry_delay(
+ cls,
+ context: DeferredContext,
+ remaining_attempts: NonNegativeInt,
+ total_attempts: NonNegativeInt,
+ ) -> timedelta:
+ """
+        returns: the delay between each retry attempt (default: 0s)
+ """
+ assert context # nosec
+ assert remaining_attempts # nosec
+ assert total_attempts # nosec
+ return timedelta(seconds=0)
+
@classmethod
@abstractmethod
async def get_timeout(cls, context: DeferredContext) -> timedelta:
@@ -84,6 +99,11 @@ async def on_finished_with_error(
NOTE: by design the default action is to do nothing
"""
+ @classmethod
+ @abstractmethod
+ async def on_cancelled(cls, context: DeferredContext) -> None:
+ """called after handling ``cancel`` request by the copy executing ``run``"""
+
@classmethod
async def cancel(cls, task_uid: TaskUID) -> None:
"""cancels a deferred"""
diff --git a/packages/service-library/src/servicelib/deferred_tasks/_deferred_manager.py b/packages/service-library/src/servicelib/deferred_tasks/_deferred_manager.py
index b49990a78341..c1b26a3d8478 100644
--- a/packages/service-library/src/servicelib/deferred_tasks/_deferred_manager.py
+++ b/packages/service-library/src/servicelib/deferred_tasks/_deferred_manager.py
@@ -2,18 +2,24 @@
import inspect
import logging
from collections.abc import Awaitable, Callable, Iterable
-from datetime import timedelta
+from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Final
import arrow
from faststream.exceptions import NackMessage, RejectMessage
-from faststream.rabbit import ExchangeType, RabbitBroker, RabbitExchange, RabbitRouter
+from faststream.rabbit import (
+ ExchangeType,
+ RabbitBroker,
+ RabbitExchange,
+ RabbitQueue,
+ RabbitRouter,
+)
from pydantic import NonNegativeInt
-from servicelib.logging_utils import log_catch, log_context
-from servicelib.redis import RedisClientSDK
from settings_library.rabbit import RabbitSettings
+from ..logging_utils import log_catch, log_context
+from ..redis import RedisClientSDK
from ._base_deferred_handler import (
BaseDeferredHandler,
DeferredContext,
@@ -112,6 +118,14 @@ def _raise_if_not_type(task_result: Any, expected_types: Iterable[type]) -> None
raise TypeError(msg)
+async def _wait_until_future_date(possible_future_date: datetime) -> None:
+ while True:
+ now = arrow.utcnow().datetime
+ if now >= possible_future_date:
+ return
+ await asyncio.sleep(1)
+
+
class DeferredManager: # pylint:disable=too-many-instance-attributes
def __init__(
self,
@@ -149,10 +163,14 @@ def __init__(
self._global_resources_prefix = f"{calling_module_name}"
self.common_exchange = RabbitExchange(
- f"{self._global_resources_prefix}_common", type=ExchangeType.DIRECT
+ f"{self._global_resources_prefix}_common",
+ durable=True,
+ type=ExchangeType.DIRECT,
)
self.cancellation_exchange = RabbitExchange(
- f"{self._global_resources_prefix}_cancellation", type=ExchangeType.FANOUT
+ f"{self._global_resources_prefix}_cancellation",
+ durable=True,
+ type=ExchangeType.FANOUT,
)
def patch_based_deferred_handlers(self) -> None:
@@ -243,8 +261,10 @@ def un_patch_base_deferred_handlers(cls) -> None:
subclass.is_present.original_is_present # type: ignore
)
- def _get_global_queue_name(self, queue_name: _FastStreamRabbitQueue) -> str:
- return f"{self._global_resources_prefix}_{queue_name}"
+ def _get_global_queue(self, queue_name: _FastStreamRabbitQueue) -> RabbitQueue:
+ return RabbitQueue(
+ f"{self._global_resources_prefix}_{queue_name}", durable=True
+ )
def __get_subclass(
self, class_unique_reference: ClassUniqueReference
@@ -259,7 +279,7 @@ async def __publish_to_queue(
) -> None:
await self.broker.publish(
task_uid,
- queue=self._get_global_queue_name(queue),
+ queue=self._get_global_queue(queue),
exchange=(
self.cancellation_exchange
if queue == _FastStreamRabbitQueue.MANUALLY_CANCELLED
@@ -285,18 +305,21 @@ async def __start(
subclass = self.__get_subclass(class_unique_reference)
deferred_context = self.__get_deferred_context(start_context)
+ retry_count = await subclass.get_retries(deferred_context)
task_schedule = TaskScheduleModel(
timeout=await subclass.get_timeout(deferred_context),
- execution_attempts=await subclass.get_retries(deferred_context) + 1,
+ total_attempts=retry_count,
+ execution_attempts=retry_count + 1,
class_unique_reference=class_unique_reference,
start_context=start_context,
state=TaskState.SCHEDULED,
)
+ await self._task_tracker.save(task_uid, task_schedule)
+
with log_catch(_logger, reraise=False):
await subclass.on_created(task_uid, deferred_context)
- await self._task_tracker.save(task_uid, task_schedule)
_logger.debug("Scheduled task '%s' with entry: %s", task_uid, task_schedule)
await self.__publish_to_queue(task_uid, _FastStreamRabbitQueue.SCHEDULED)
@@ -448,7 +471,29 @@ async def _fs_handle_error_result( # pylint:disable=method-hidden
task_schedule.result, TaskResultCancelledError
):
_logger.debug("Schedule retry attempt for task_uid '%s'", task_uid)
- # does not retry if task was cancelled
+
+            # resilient wait before retrying
+ if task_schedule.wait_cancellation_until is None:
+                # compute and persist the wait deadline before retrying
+ subclass = self.__get_subclass(task_schedule.class_unique_reference)
+ deferred_context = self.__get_deferred_context(
+ task_schedule.start_context
+ )
+ sleep_interval = await subclass.get_retry_delay(
+ context=deferred_context,
+ remaining_attempts=task_schedule.execution_attempts,
+ total_attempts=task_schedule.total_attempts,
+ )
+ task_schedule.wait_cancellation_until = (
+ arrow.utcnow().datetime + sleep_interval
+ )
+ await self._task_tracker.save(task_uid, task_schedule)
+
+ await _wait_until_future_date(task_schedule.wait_cancellation_until)
+ task_schedule.wait_cancellation_until = None
+ await self._task_tracker.save(task_uid, task_schedule)
+
+            # waiting is done, we can proceed with the retry
task_schedule.state = TaskState.SUBMIT_TASK
await self._task_tracker.save(task_uid, task_schedule)
await self.__publish_to_queue(task_uid, _FastStreamRabbitQueue.SUBMIT_TASK)
@@ -558,6 +603,11 @@ async def _fs_handle_manually_cancelled( # pylint:disable=method-hidden
_logger.info("Found and cancelled run for '%s'", task_uid)
await self.__remove_task(task_uid, task_schedule)
+ subclass = self.__get_subclass(task_schedule.class_unique_reference)
+ deferred_context = self.__get_deferred_context(task_schedule.start_context)
+ with log_catch(_logger, reraise=False):
+ await subclass.on_cancelled(deferred_context)
+
async def __is_present(self, task_uid: TaskUID) -> bool:
task_schedule: TaskScheduleModel | None = await self._task_tracker.get(task_uid)
return task_schedule is not None
@@ -569,47 +619,43 @@ def _register_subscribers(self) -> None:
# pylint:disable=unexpected-keyword-arg
# pylint:disable=no-value-for-parameter
self._fs_handle_scheduled = self.router.subscriber(
- queue=self._get_global_queue_name(_FastStreamRabbitQueue.SCHEDULED),
+ queue=self._get_global_queue(_FastStreamRabbitQueue.SCHEDULED),
exchange=self.common_exchange,
retry=True,
)(self._fs_handle_scheduled)
self._fs_handle_submit_task = self.router.subscriber(
- queue=self._get_global_queue_name(_FastStreamRabbitQueue.SUBMIT_TASK),
+ queue=self._get_global_queue(_FastStreamRabbitQueue.SUBMIT_TASK),
exchange=self.common_exchange,
retry=True,
)(self._fs_handle_submit_task)
self._fs_handle_worker = self.router.subscriber(
- queue=self._get_global_queue_name(_FastStreamRabbitQueue.WORKER),
+ queue=self._get_global_queue(_FastStreamRabbitQueue.WORKER),
exchange=self.common_exchange,
retry=True,
)(self._fs_handle_worker)
self._fs_handle_error_result = self.router.subscriber(
- queue=self._get_global_queue_name(_FastStreamRabbitQueue.ERROR_RESULT),
+ queue=self._get_global_queue(_FastStreamRabbitQueue.ERROR_RESULT),
exchange=self.common_exchange,
retry=True,
)(self._fs_handle_error_result)
self._fs_handle_finished_with_error = self.router.subscriber(
- queue=self._get_global_queue_name(
- _FastStreamRabbitQueue.FINISHED_WITH_ERROR
- ),
+ queue=self._get_global_queue(_FastStreamRabbitQueue.FINISHED_WITH_ERROR),
exchange=self.common_exchange,
retry=True,
)(self._fs_handle_finished_with_error)
self._fs_handle_deferred_result = self.router.subscriber(
- queue=self._get_global_queue_name(_FastStreamRabbitQueue.DEFERRED_RESULT),
+ queue=self._get_global_queue(_FastStreamRabbitQueue.DEFERRED_RESULT),
exchange=self.common_exchange,
retry=True,
)(self._fs_handle_deferred_result)
self._fs_handle_manually_cancelled = self.router.subscriber(
- queue=self._get_global_queue_name(
- _FastStreamRabbitQueue.MANUALLY_CANCELLED
- ),
+ queue=self._get_global_queue(_FastStreamRabbitQueue.MANUALLY_CANCELLED),
exchange=self.cancellation_exchange,
retry=True,
)(self._fs_handle_manually_cancelled)
diff --git a/packages/service-library/src/servicelib/deferred_tasks/_task_schedule.py b/packages/service-library/src/servicelib/deferred_tasks/_task_schedule.py
index 5a88b99568b3..8d34e1081637 100644
--- a/packages/service-library/src/servicelib/deferred_tasks/_task_schedule.py
+++ b/packages/service-library/src/servicelib/deferred_tasks/_task_schedule.py
@@ -1,7 +1,9 @@
from datetime import datetime, timedelta
from enum import Enum
+from typing import Annotated
import arrow
+from common_library.basic_types import DEFAULT_FACTORY
from pydantic import BaseModel, Field, NonNegativeInt
from ._base_deferred_handler import StartContext
@@ -23,37 +25,63 @@ class TaskState(str, Enum):
class TaskScheduleModel(BaseModel):
- timeout: timedelta = Field(
- ..., description="Amount of time after which the task execution will time out"
- )
- class_unique_reference: ClassUniqueReference = Field(
- ...,
- description="reference to the class containing the code and handlers for the execution of the task",
- )
- start_context: StartContext = Field(
- ...,
- description="data used to assemble the ``StartContext``",
- )
-
- state: TaskState = Field(
- ..., description="represents the execution step of the task"
- )
-
- execution_attempts: NonNegativeInt = Field(
- ...,
- description="remaining attempts to run the code, only retries if this is > 0",
- )
-
- time_started: datetime = Field(
- default_factory=lambda: arrow.utcnow().datetime,
- description="time when task schedule was created, used for statistics",
- )
-
- result: TaskExecutionResult | None = Field(
- default=None,
- description=(
- f"Populated by {TaskState.WORKER}. It always has a value after worker handles it."
- "Will be used "
+ timeout: Annotated[
+ timedelta,
+ Field(
+ description="Amount of time after which the task execution will time out"
),
- discriminator="result_type",
- )
+ ]
+ class_unique_reference: Annotated[
+ ClassUniqueReference,
+ Field(
+ description="reference to the class containing the code and handlers for the execution of the task",
+ ),
+ ]
+ start_context: Annotated[
+ StartContext,
+ Field(
+ description="data used to assemble the ``StartContext``",
+ ),
+ ]
+
+ state: Annotated[
+ TaskState, Field(description="represents the execution step of the task")
+ ]
+
+ total_attempts: Annotated[
+ NonNegativeInt,
+ Field(
+ description="maximum number of attempts before giving up (0 means no retries)"
+ ),
+ ]
+
+ execution_attempts: Annotated[
+ NonNegativeInt,
+ Field(
+ description="remaining attempts to run the code, only retries if this is > 0",
+ ),
+ ]
+
+ wait_cancellation_until: Annotated[
+ datetime | None,
+        Field(description="when set, wait until this moment before cancelling the task"),
+ ] = None
+
+ time_started: Annotated[
+ datetime,
+ Field(
+ default_factory=lambda: arrow.utcnow().datetime,
+ description="time when task schedule was created, used for statistics",
+ ),
+ ] = DEFAULT_FACTORY
+
+ result: Annotated[
+ TaskExecutionResult | None,
+ Field(
+ description=(
+ f"Populated by {TaskState.WORKER}. It always has a value after worker handles it."
+ "Will be used "
+ ),
+ discriminator="result_type",
+ ),
+ ] = None
diff --git a/packages/service-library/src/servicelib/deferred_tasks/_worker_tracker.py b/packages/service-library/src/servicelib/deferred_tasks/_worker_tracker.py
index bcf9ce5ec2ab..1ac791e1ade5 100644
--- a/packages/service-library/src/servicelib/deferred_tasks/_worker_tracker.py
+++ b/packages/service-library/src/servicelib/deferred_tasks/_worker_tracker.py
@@ -57,6 +57,12 @@ async def handle_run(
result_to_return = TaskResultSuccess(value=task_result)
except asyncio.CancelledError:
result_to_return = TaskResultCancelledError()
+            # NOTE: if the task itself is being cancelled, the CancelledError must be re-raised: see https://superfastpython.com/asyncio-cancellederror-consumed/
+ current_task = asyncio.current_task()
+ assert current_task is not None # nosec
+ if current_task.cancelling() > 0:
+ # owner function is being cancelled -> propagate cancellation
+ raise
except Exception as e: # pylint:disable=broad-exception-caught
result_to_return = TaskResultError(
error=_format_exception(e),
diff --git a/packages/service-library/src/servicelib/docker_utils.py b/packages/service-library/src/servicelib/docker_utils.py
index 552a6d936047..a919cb9487d7 100644
--- a/packages/service-library/src/servicelib/docker_utils.py
+++ b/packages/service-library/src/servicelib/docker_utils.py
@@ -22,6 +22,7 @@
)
from settings_library.docker_registry import RegistrySettings
from tenacity import (
+ before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
@@ -275,6 +276,7 @@ def _reset_progress_from_previous_attempt() -> None:
stop=stop_after_attempt(retry_upon_error_count),
reraise=True,
retry=retry_if_exception_type(asyncio.TimeoutError),
+ before_sleep=before_sleep_log(_logger, logging.WARNING),
)
async def _pull_image_with_retry() -> None:
nonlocal attempt
diff --git a/packages/service-library/src/servicelib/exception_utils.py b/packages/service-library/src/servicelib/exception_utils.py
index 2de33fd98e65..76bfb149606f 100644
--- a/packages/service-library/src/servicelib/exception_utils.py
+++ b/packages/service-library/src/servicelib/exception_utils.py
@@ -5,6 +5,7 @@
from functools import wraps
from typing import Any, Final, ParamSpec, TypeVar
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
from pydantic import BaseModel, Field, NonNegativeFloat, PrivateAttr
_logger = logging.getLogger(__name__)
@@ -76,9 +77,65 @@ def else_reset(self) -> None:
F = TypeVar("F", bound=Callable[..., Any])
-def silence_exceptions(exceptions: tuple[type[BaseException], ...]) -> Callable[[F], F]:
- def _decorator(func_or_coro: F) -> F:
+def _should_suppress_exception(
+ exc: BaseException,
+ predicate: Callable[[BaseException], bool] | None,
+ func_name: str,
+) -> bool:
+ if predicate is None:
+ # No predicate provided, suppress all exceptions
+ return True
+
+ try:
+ return predicate(exc)
+ except Exception as predicate_exc: # pylint: disable=broad-except
+ # the predicate function raised an exception
+ # log it and do not suppress the original exception
+ _logger.warning(
+ **create_troubleshooting_log_kwargs(
+ f"Predicate function raised exception {type(predicate_exc).__name__}:{predicate_exc} in {func_name}. "
+ f"Original exception will be re-raised: {type(exc).__name__}",
+ error=predicate_exc,
+ error_context={
+ "func_name": func_name,
+ "original_exception": f"{type(exc).__name__}",
+ },
+ tip="Predicate raised, please fix it.",
+ )
+ )
+ return False
+
+
+def suppress_exceptions(
+ exceptions: tuple[type[BaseException], ...],
+ *,
+ reason: str,
+ predicate: Callable[[BaseException], bool] | None = None,
+) -> Callable[[F], F]:
+ """
+ Decorator to suppress specified exceptions.
+
+ Args:
+ exceptions: Tuple of exception types to suppress
+ reason: Reason for suppression (for logging)
+ predicate: Optional function to check exception attributes.
+ If provided, exception is only suppressed if predicate returns True.
+
+ Example:
+ # Suppress all ConnectionError exceptions
+ @suppress_exceptions((ConnectionError,), reason="Network issues")
+ def my_func(): ...
+
+ # Suppress only ConnectionError with specific errno
+ @suppress_exceptions(
+ (ConnectionError,),
+ reason="Specific network error",
+ predicate=lambda e: hasattr(e, 'errno') and e.errno == 104
+ )
+ def my_func(): ...
+ """
+ def _decorator(func_or_coro: F) -> F:
if inspect.iscoroutinefunction(func_or_coro):
@wraps(func_or_coro)
@@ -86,7 +143,19 @@ async def _async_wrapper(*args, **kwargs) -> Any:
try:
assert inspect.iscoroutinefunction(func_or_coro) # nosec
return await func_or_coro(*args, **kwargs)
- except exceptions:
+ except exceptions as exc:
+ # Check if exception should be suppressed
+ if not _should_suppress_exception(
+ exc, predicate, func_or_coro.__name__
+ ):
+ raise # Re-raise if predicate returns False or fails
+
+ _logger.debug(
+ "Caught suppressed exception %s in %s: TIP: %s",
+ exc,
+ func_or_coro.__name__,
+ reason,
+ )
return None
return _async_wrapper # type: ignore[return-value] # decorators typing is hard
@@ -95,7 +164,19 @@ async def _async_wrapper(*args, **kwargs) -> Any:
def _sync_wrapper(*args, **kwargs) -> Any:
try:
return func_or_coro(*args, **kwargs)
- except exceptions:
+ except exceptions as exc:
+ # Check if exception should be suppressed
+ if not _should_suppress_exception(
+ exc, predicate, func_or_coro.__name__
+ ):
+ raise # Re-raise if predicate returns False or fails
+
+ _logger.debug(
+ "Caught suppressed exception %s in %s: TIP: %s",
+ exc,
+ func_or_coro.__name__,
+ reason,
+ )
return None
return _sync_wrapper # type: ignore[return-value] # decorators typing is hard
diff --git a/packages/service-library/src/servicelib/fastapi/cancellation_middleware.py b/packages/service-library/src/servicelib/fastapi/cancellation_middleware.py
index 8116869af5db..0f7003137bbe 100644
--- a/packages/service-library/src/servicelib/fastapi/cancellation_middleware.py
+++ b/packages/service-library/src/servicelib/fastapi/cancellation_middleware.py
@@ -10,7 +10,7 @@
_logger = logging.getLogger(__name__)
-class _TerminateTaskGroupError(Exception):
+class _ClientDisconnectedError(Exception):
pass
@@ -21,9 +21,9 @@ async def _message_poller(
message = await receive()
if message["type"] == "http.disconnect":
_logger.debug(
- "client disconnected, terminating request to %s!", request.url
+ "client disconnected the request to %s!", request.url, stacklevel=2
)
- raise _TerminateTaskGroupError
+ raise _ClientDisconnectedError
# Puts the message in the queue
await queue.put(message)
@@ -72,9 +72,9 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
)
await handler_task
poller_task.cancel()
- except* _TerminateTaskGroupError:
+ except* _ClientDisconnectedError:
if not handler_task.done():
_logger.info(
- "The client disconnected. request to %s was cancelled.",
+ "The client disconnected. The request to %s was cancelled.",
request.url,
)
diff --git a/packages/service-library/src/servicelib/fastapi/celery/__init__.py b/packages/service-library/src/servicelib/fastapi/celery/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/packages/service-library/src/servicelib/fastapi/celery/app_server.py b/packages/service-library/src/servicelib/fastapi/celery/app_server.py
new file mode 100644
index 000000000000..3c42aa9144d0
--- /dev/null
+++ b/packages/service-library/src/servicelib/fastapi/celery/app_server.py
@@ -0,0 +1,35 @@
+import datetime
+import logging
+import threading
+from typing import Final
+
+from asgi_lifespan import LifespanManager
+from fastapi import FastAPI
+
+from ...celery.app_server import BaseAppServer
+from ...celery.task_manager import TaskManager
+
+_SHUTDOWN_TIMEOUT: Final[float] = datetime.timedelta(seconds=10).total_seconds()
+
+_logger = logging.getLogger(__name__)
+
+
+class FastAPIAppServer(BaseAppServer[FastAPI]):
+ @property
+ def task_manager(self) -> TaskManager:
+ task_manager = self.app.state.task_manager
+ assert task_manager, "Task manager is not initialized" # nosec
+ assert isinstance(task_manager, TaskManager)
+ return task_manager
+
+ async def run_until_shutdown(
+ self, startup_completed_event: threading.Event
+ ) -> None:
+ async with LifespanManager(
+ self.app,
+ startup_timeout=None, # waits for full app initialization (DB migrations, etc.)
+ shutdown_timeout=_SHUTDOWN_TIMEOUT,
+ ):
+ _logger.info("fastapi app initialized")
+ startup_completed_event.set()
+ await self.shutdown_event.wait() # NOTE: wait here until shutdown is requested
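+
+
+# Illustrative usage sketch only (assumes ``BaseAppServer`` keeps the FastAPI instance passed
+# at construction; ``create_app`` is a hypothetical application factory of the hosting service):
+#     app_server = FastAPIAppServer(app=create_app())
+#     # the Celery worker bootstrap is then expected to run ``run_until_shutdown`` in a
+#     # dedicated thread and set ``shutdown_event`` when the worker is asked to stop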
diff --git a/packages/service-library/src/servicelib/fastapi/client_session.py b/packages/service-library/src/servicelib/fastapi/client_session.py
index b92dcc2d525c..f9c126272eec 100644
--- a/packages/service-library/src/servicelib/fastapi/client_session.py
+++ b/packages/service-library/src/servicelib/fastapi/client_session.py
@@ -2,13 +2,17 @@
import httpx
from fastapi import FastAPI
+from settings_library.tracing import TracingSettings
+
+from .tracing import setup_httpx_client_tracing
def setup_client_session(
app: FastAPI,
*,
default_timeout: datetime.timedelta = datetime.timedelta(seconds=20),
- max_keepalive_connections: int = 20
+ max_keepalive_connections: int = 20,
+ tracing_settings: TracingSettings | None,
) -> None:
async def on_startup() -> None:
session = httpx.AsyncClient(
@@ -16,6 +20,8 @@ async def on_startup() -> None:
limits=httpx.Limits(max_keepalive_connections=max_keepalive_connections),
timeout=default_timeout.total_seconds(),
)
+ if tracing_settings:
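+            # instruments this client so outgoing requests propagate the current trace context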
+ setup_httpx_client_tracing(session)
app.state.aiohttp_client_session = session
async def on_shutdown() -> None:
diff --git a/packages/service-library/src/servicelib/fastapi/db_asyncpg_engine.py b/packages/service-library/src/servicelib/fastapi/db_asyncpg_engine.py
index 8f472dc9b518..c089b81e034a 100644
--- a/packages/service-library/src/servicelib/fastapi/db_asyncpg_engine.py
+++ b/packages/service-library/src/servicelib/fastapi/db_asyncpg_engine.py
@@ -14,7 +14,9 @@
_logger = logging.getLogger(__name__)
-async def connect_to_db(app: FastAPI, settings: PostgresSettings) -> None:
+async def connect_to_db(
+ app: FastAPI, settings: PostgresSettings, application_name: str
+) -> None:
warnings.warn(
"The 'connect_to_db' function is deprecated and will be removed in a future release. "
"Please use 'postgres_lifespan' instead for managing the database connection lifecycle.",
@@ -27,7 +29,9 @@ async def connect_to_db(app: FastAPI, settings: PostgresSettings) -> None:
logging.DEBUG,
f"Connecting and migraging {settings.dsn_with_async_sqlalchemy}",
):
- engine = await create_async_engine_and_database_ready(settings)
+ engine = await create_async_engine_and_database_ready(
+ settings, application_name
+ )
app.state.engine = engine
_logger.debug(
diff --git a/packages/service-library/src/servicelib/fastapi/http_client_thin.py b/packages/service-library/src/servicelib/fastapi/http_client_thin.py
index e4806f88bcfb..a62461f00091 100644
--- a/packages/service-library/src/servicelib/fastapi/http_client_thin.py
+++ b/packages/service-library/src/servicelib/fastapi/http_client_thin.py
@@ -8,7 +8,6 @@
from common_library.errors_classes import OsparcErrorMixin
from httpx import AsyncClient, ConnectError, HTTPError, PoolTimeout, Response
from httpx._types import TimeoutTypes, URLTypes
-from servicelib.fastapi.tracing import setup_httpx_client_tracing
from settings_library.tracing import TracingSettings
from tenacity import RetryCallState
from tenacity.asyncio import AsyncRetrying
@@ -18,6 +17,7 @@
from tenacity.wait import wait_exponential
from .http_client import BaseHTTPApi
+from .tracing import setup_httpx_client_tracing
_logger = logging.getLogger(__name__)
@@ -128,7 +128,7 @@ def retry_on_errors(
"""
def decorator(
- request_func: Callable[..., Awaitable[Response]]
+ request_func: Callable[..., Awaitable[Response]],
) -> Callable[..., Awaitable[Response]]:
assert asyncio.iscoroutinefunction(request_func)
@@ -178,7 +178,7 @@ def expect_status(
"""
def decorator(
- request_func: Callable[..., Awaitable[Response]]
+ request_func: Callable[..., Awaitable[Response]],
) -> Callable[..., Awaitable[Response]]:
assert asyncio.iscoroutinefunction(request_func)
diff --git a/packages/service-library/src/servicelib/fastapi/http_error.py b/packages/service-library/src/servicelib/fastapi/http_error.py
index 2cc9814dc8fa..1b078111a7d9 100644
--- a/packages/service-library/src/servicelib/fastapi/http_error.py
+++ b/packages/service-library/src/servicelib/fastapi/http_error.py
@@ -2,6 +2,7 @@
from collections.abc import Awaitable, Callable
from typing import TypeVar
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
from fastapi import FastAPI, HTTPException, status
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError
@@ -11,7 +12,6 @@
from fastapi.responses import JSONResponse
from pydantic import ValidationError
-from ..logging_errors import create_troubleshotting_log_kwargs
from ..status_codes_utils import is_5xx_server_error
validation_error_response_definition["properties"] = {
@@ -23,7 +23,7 @@
}
-TException = TypeVar("TException")
+TException = TypeVar("TException", bound=BaseException)
_logger = logging.getLogger(__name__)
@@ -48,24 +48,35 @@ async def _http_error_handler(request: Request, exc: Exception) -> JSONResponse:
"errors": error_extractor(exc) if error_extractor else [f"{exc}"]
}
+ response = JSONResponse(
+ content=jsonable_encoder(
+ {"error": error_content} if envelope_error else error_content
+ ),
+ status_code=status_code,
+ )
+
if is_5xx_server_error(status_code):
_logger.exception(
- create_troubleshotting_log_kwargs(
- "Unexpected error happened in the Resource Usage Tracker. Please contact support.",
+ create_troubleshooting_log_kwargs(
+                f"A 5XX server error happened in the current service. Responding with {error_content} and status code {status_code}",
error=exc,
error_context={
"request": request,
- "request.method": f"{request.method}",
+ "request.client_host": (
+ request.client.host if request.client else "unknown"
+ ),
+ "request.method": request.method,
+ "request.url_path": request.url.path,
+ "request.query_params": dict(request.query_params),
+ "request.headers": dict(request.headers),
+ "response": response,
+ "response.error_content": error_content,
+ "response.status_code": status_code,
},
)
)
- return JSONResponse(
- content=jsonable_encoder(
- {"error": error_content} if envelope_error else error_content
- ),
- status_code=status_code,
- )
+ return response
return _http_error_handler
diff --git a/packages/service-library/src/servicelib/fastapi/lifespan_utils.py b/packages/service-library/src/servicelib/fastapi/lifespan_utils.py
index 4ccf04109304..894d91788550 100644
--- a/packages/service-library/src/servicelib/fastapi/lifespan_utils.py
+++ b/packages/service-library/src/servicelib/fastapi/lifespan_utils.py
@@ -1,6 +1,6 @@
import contextlib
-from collections.abc import Iterator
-from typing import Final
+from collections.abc import AsyncIterator, Callable, Iterator
+from typing import Final, TypeAlias
from common_library.errors_classes import OsparcErrorMixin
from fastapi import FastAPI
@@ -8,6 +8,8 @@
from ..logging_utils import log_context
+Lifespan: TypeAlias = Callable[[FastAPI], AsyncIterator[None]]
+
class LifespanError(OsparcErrorMixin, RuntimeError): ...
diff --git a/packages/service-library/src/servicelib/fastapi/logging_lifespan.py b/packages/service-library/src/servicelib/fastapi/logging_lifespan.py
new file mode 100644
index 000000000000..035d9bc10aa7
--- /dev/null
+++ b/packages/service-library/src/servicelib/fastapi/logging_lifespan.py
@@ -0,0 +1,78 @@
+import logging
+from collections.abc import AsyncIterator, Awaitable, Callable
+from contextlib import AsyncExitStack
+
+from common_library.logging.logging_utils_filtering import LoggerName, MessageSubstring
+from fastapi import FastAPI
+from settings_library.tracing import TracingSettings
+
+from ..logging_utils import (
+ LogLevelInt,
+ async_loggers,
+ log_context,
+)
+from .lifespan_utils import Lifespan
+
+_logger = logging.getLogger(__name__)
+
+
+def create_logging_lifespan(
+ *,
+ log_format_local_dev_enabled: bool,
+ logger_filter_mapping: dict[LoggerName, list[MessageSubstring]],
+ tracing_settings: TracingSettings | None,
+ log_base_level: LogLevelInt,
+ noisy_loggers: tuple[str, ...] | None,
+) -> Lifespan:
+ """Returns a FastAPI-compatible lifespan handler to set up async logging."""
+ exit_stack = AsyncExitStack()
+ exit_stack.enter_context(
+ async_loggers(
+ log_base_level=log_base_level,
+ noisy_loggers=noisy_loggers,
+ log_format_local_dev_enabled=log_format_local_dev_enabled,
+ logger_filter_mapping=logger_filter_mapping,
+ tracing_settings=tracing_settings,
+ )
+ )
+
+ async def _logging_lifespan(app: FastAPI) -> AsyncIterator[None]:
+ assert app is not None, "app must be provided"
+ yield
+ with log_context(_logger, logging.INFO, "Re-enable Blocking logger"):
+ await exit_stack.aclose()
+
+ return _logging_lifespan
+
+
+def create_logging_shutdown_event(
+ *,
+ log_format_local_dev_enabled: bool,
+ logger_filter_mapping: dict[LoggerName, list[MessageSubstring]],
+ tracing_settings: TracingSettings | None,
+ log_base_level: LogLevelInt,
+ noisy_loggers: tuple[str, ...] | None,
+) -> Callable[[], Awaitable[None]]:
+    """Returns a FastAPI-compatible shutdown event handler to be used with old-style
+    startup/shutdown event handlers. This is useful for applications that do not use the
+    async lifespan handlers introduced in FastAPI 0.100.0.
+
+    Note: This function is for backwards compatibility only and will be removed in the future.
+    `create_logging_lifespan` should be used instead for new-style lifespan handlers.
+ """
+ exit_stack = AsyncExitStack()
+ exit_stack.enter_context(
+ async_loggers(
+ log_base_level=log_base_level,
+ noisy_loggers=noisy_loggers,
+ log_format_local_dev_enabled=log_format_local_dev_enabled,
+ logger_filter_mapping=logger_filter_mapping,
+ tracing_settings=tracing_settings,
+ )
+ )
+
+ async def _on_shutdown_event() -> None:
+ with log_context(_logger, logging.INFO, "Re-enable Blocking logger"):
+ await exit_stack.aclose()
+
+ return _on_shutdown_event
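+
+
+# Example (sketch; the argument values are illustrative) of registering the returned
+# handler with old-style FastAPI events:
+#     app.add_event_handler(
+#         "shutdown",
+#         create_logging_shutdown_event(
+#             log_format_local_dev_enabled=False,
+#             logger_filter_mapping={},
+#             tracing_settings=None,
+#             log_base_level=logging.INFO,
+#             noisy_loggers=None,
+#         ),
+#     )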
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_client.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_client.py
index 4593bbf7b01d..de44c393d8e5 100644
--- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_client.py
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_client.py
@@ -1,7 +1,6 @@
import asyncio
import functools
import logging
-import warnings
from collections.abc import Awaitable, Callable
from typing import Any, Final
@@ -14,10 +13,10 @@
from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_exponential
-from ...long_running_tasks._errors import GenericClientError
-from ...long_running_tasks._models import ClientConfiguration, TaskId, TaskStatus
+from ...long_running_tasks.errors import GenericClientError
+from ...long_running_tasks.models import ClientConfiguration, TaskId, TaskStatus
-DEFAULT_HTTP_REQUESTS_TIMEOUT: Final[PositiveFloat] = 15
+_DEFAULT_HTTP_REQUESTS_TIMEOUT: Final[PositiveFloat] = 15
logger = logging.getLogger(__name__)
@@ -89,7 +88,7 @@ def retry_on_http_errors(
assert asyncio.iscoroutinefunction(request_func)
@functools.wraps(request_func)
- async def request_wrapper(zelf: "Client", *args, **kwargs) -> Any:
+ async def request_wrapper(zelf: "HttpClient", *args, **kwargs) -> Any:
async for attempt in AsyncRetrying(
stop=stop_after_attempt(max_attempt_number=3),
wait=wait_exponential(min=1),
@@ -107,7 +106,7 @@ async def request_wrapper(zelf: "Client", *args, **kwargs) -> Any:
return request_wrapper
-class Client:
+class HttpClient:
"""
This is a client that aims to simplify the requests to get the
status, result and/or cancel of a long running task.
@@ -121,7 +120,7 @@ def __init__(self, app: FastAPI, async_client: AsyncClient, base_url: str):
"""
self.app = app
self._async_client = async_client
- self._base_url = base_url
+ self.base_url = base_url
@property
def _client_configuration(self) -> ClientConfiguration:
@@ -130,7 +129,7 @@ def _client_configuration(self) -> ClientConfiguration:
def _get_url(self, path: str) -> str:
url_path = f"{self._client_configuration.router_prefix}{path}".lstrip("/")
- url = TypeAdapter(AnyHttpUrl).validate_python(f"{self._base_url}{url_path}")
+ url = TypeAdapter(AnyHttpUrl).validate_python(f"{self.base_url}{url_path}")
return f"{url}"
@retry_on_http_errors
@@ -172,7 +171,7 @@ async def get_task_result(
return result.json()
@retry_on_http_errors
- async def cancel_and_delete_task(
+ async def remove_task(
self, task_id: TaskId, *, timeout: PositiveFloat | None = None # noqa: ASYNC109
) -> None:
timeout = timeout or self._client_configuration.default_timeout
@@ -181,16 +180,6 @@ async def cancel_and_delete_task(
timeout=timeout,
)
- if result.status_code == status.HTTP_200_OK:
- warnings.warn(
- "returning a 200 when cancelling a task has been deprecated with PR#3236"
- "and will be removed after 11.2022"
- "please do close your studies at least once before that date, so that the dy-sidecar"
- "get replaced",
- category=DeprecationWarning,
- )
- return
-
if result.status_code not in (
status.HTTP_204_NO_CONTENT,
status.HTTP_404_NOT_FOUND,
@@ -207,7 +196,7 @@ def setup(
app: FastAPI,
*,
router_prefix: str = "",
- http_requests_timeout: PositiveFloat = DEFAULT_HTTP_REQUESTS_TIMEOUT,
+ http_requests_timeout: PositiveFloat = _DEFAULT_HTTP_REQUESTS_TIMEOUT,
):
"""
- `router_prefix` by default it is assumed the server mounts the APIs on
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_context_manager.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_context_manager.py
index c16fadd8be2b..35e534fcc02a 100644
--- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_context_manager.py
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_context_manager.py
@@ -1,20 +1,24 @@
import asyncio
-from asyncio.log import logger
+import logging
+import warnings
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from typing import Any, Final
+from common_library.logging.logging_errors import create_troubleshooting_log_message
from pydantic import PositiveFloat
-from ...long_running_tasks._errors import TaskClientTimeoutError
-from ...long_running_tasks._models import (
+from ...long_running_tasks.errors import TaskClientTimeoutError, TaskExceptionError
+from ...long_running_tasks.models import (
ProgressCallback,
ProgressMessage,
ProgressPercent,
TaskId,
TaskStatus,
)
-from ._client import Client
+from ._client import HttpClient
+
+_logger = logging.getLogger(__name__)
# NOTE: very short running requests are involved
MAX_CONCURRENCY: Final[int] = 10
@@ -66,7 +70,7 @@ async def update(
@asynccontextmanager
async def periodic_task_result(
- client: Client,
+ client: HttpClient,
task_id: TaskId,
*,
task_timeout: PositiveFloat,
@@ -92,11 +96,18 @@ async def periodic_task_result(
raises: `asyncio.TimeoutError` NOTE: the remote task will also be removed
"""
+ warnings.warn(
+ "This context manager is deprecated and will be removed in future releases. "
+ "Please use the `servicelib.long_running_tasks.lrt_api` instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
progress_manager = _ProgressManager(progress_callback)
async def _status_update() -> TaskStatus:
task_status: TaskStatus = await client.get_task_status(task_id)
- logger.debug("Task status %s", task_status.model_dump_json())
+ _logger.debug("Task status %s", task_status.model_dump_json())
await progress_manager.update(
task_id=task_id,
message=task_status.task_progress.message,
@@ -114,13 +125,22 @@ async def _wait_for_task_result() -> Any:
try:
result = await asyncio.wait_for(_wait_for_task_result(), timeout=task_timeout)
- logger.debug("%s, %s", f"{task_id=}", f"{result=}")
+ _logger.debug("%s, %s", f"{task_id=}", f"{result=}")
yield result
except TimeoutError as e:
- await client.cancel_and_delete_task(task_id)
+ await client.remove_task(task_id)
raise TaskClientTimeoutError(
task_id=task_id,
timeout=task_timeout,
exception=e,
) from e
+ except Exception as e:
+ _logger.warning(
+ create_troubleshooting_log_message(
+ user_error_msg=f"{task_id=} raised an exception",
+ error=e,
+ tip=f"Check the logs of the service responding to '{client.base_url}'",
+ )
+ )
+ raise TaskExceptionError(task_id=task_id, exception=e, traceback="") from e
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_dependencies.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_dependencies.py
index 937ddcf33d17..ced9efa4f16b 100644
--- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_dependencies.py
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_dependencies.py
@@ -1,8 +1,10 @@
from fastapi import Request
-from ...long_running_tasks._task import TasksManager
+from ._manager import FastAPILongRunningManager
-def get_tasks_manager(request: Request) -> TasksManager:
- output: TasksManager = request.app.state.long_running_task_manager
- return output
+def get_long_running_manager(request: Request) -> FastAPILongRunningManager:
+ assert isinstance(
+ request.app.state.long_running_manager, FastAPILongRunningManager
+ ) # nosec
+ return request.app.state.long_running_manager
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_error_handlers.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_error_handlers.py
index e5f1ef7d9eea..0214e0092176 100644
--- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_error_handlers.py
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_error_handlers.py
@@ -5,7 +5,7 @@
from starlette.requests import Request
from starlette.responses import JSONResponse
-from ...long_running_tasks._errors import (
+from ...long_running_tasks.errors import (
BaseLongRunningError,
TaskNotCompletedError,
TaskNotFoundError,
@@ -18,10 +18,10 @@ async def base_long_running_error_handler(
_: Request, exception: BaseLongRunningError
) -> JSONResponse:
_logger.debug("%s", exception, stack_info=True)
- error_fields = dict(code=exception.code, message=f"{exception}")
+ error_fields = {"code": exception.code, "message": f"{exception}"}
status_code = (
status.HTTP_404_NOT_FOUND
- if isinstance(exception, (TaskNotFoundError, TaskNotCompletedError))
+ if isinstance(exception, TaskNotFoundError | TaskNotCompletedError)
else status.HTTP_400_BAD_REQUEST
)
return JSONResponse(content=jsonable_encoder(error_fields), status_code=status_code)
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_manager.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_manager.py
new file mode 100644
index 000000000000..535dd97eaf81
--- /dev/null
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_manager.py
@@ -0,0 +1,11 @@
+from fastapi import Request
+
+from ...long_running_tasks.manager import LongRunningManager
+from ...long_running_tasks.models import TaskContext
+
+
+class FastAPILongRunningManager(LongRunningManager):
+ @staticmethod
+ def get_task_context(request: Request) -> TaskContext:
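+        # NOTE: no per-request task context is used on the FastAPI side (an empty
+        # context applies no extra filtering)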
+ _ = request
+ return {}
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_routes.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_routes.py
index b56ba3d21ddd..95b28ceec2fb 100644
--- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_routes.py
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_routes.py
@@ -2,11 +2,11 @@
from fastapi import APIRouter, Depends, Request, status
-from ...long_running_tasks._errors import TaskNotCompletedError, TaskNotFoundError
-from ...long_running_tasks._models import TaskGet, TaskId, TaskResult, TaskStatus
-from ...long_running_tasks._task import TasksManager
+from ...long_running_tasks import lrt_api
+from ...long_running_tasks.models import TaskGet, TaskId, TaskResult, TaskStatus
from ..requests_decorators import cancel_on_disconnect
-from ._dependencies import get_tasks_manager
+from ._dependencies import get_long_running_manager
+from ._manager import FastAPILongRunningManager
router = APIRouter(prefix="/task")
@@ -14,18 +14,24 @@
@router.get("", response_model=list[TaskGet])
@cancel_on_disconnect
async def list_tasks(
- request: Request, tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)]
+ request: Request,
+ long_running_manager: Annotated[
+ FastAPILongRunningManager, Depends(get_long_running_manager)
+ ],
) -> list[TaskGet]:
assert request # nosec
return [
TaskGet(
task_id=t.task_id,
- task_name=t.task_name,
- status_href="",
- result_href="",
- abort_href="",
+ status_href=str(request.url_for("get_task_status", task_id=t.task_id)),
+ result_href=str(request.url_for("get_task_result", task_id=t.task_id)),
+ abort_href=str(request.url_for("remove_task", task_id=t.task_id)),
+ )
+ for t in await lrt_api.list_tasks(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ long_running_manager.get_task_context(request),
)
- for t in tasks_manager.list_tasks(with_task_context=None)
]
@@ -39,11 +45,18 @@ async def list_tasks(
@cancel_on_disconnect
async def get_task_status(
request: Request,
+ long_running_manager: Annotated[
+ FastAPILongRunningManager, Depends(get_long_running_manager)
+ ],
task_id: TaskId,
- tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)],
) -> TaskStatus:
assert request # nosec
- return tasks_manager.get_task_status(task_id=task_id, with_task_context=None)
+ return await lrt_api.get_task_status(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ long_running_manager.get_task_context(request),
+ task_id=task_id,
+ )
@router.get(
@@ -58,29 +71,23 @@ async def get_task_status(
@cancel_on_disconnect
async def get_task_result(
request: Request,
+ long_running_manager: Annotated[
+ FastAPILongRunningManager, Depends(get_long_running_manager)
+ ],
task_id: TaskId,
- tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)],
) -> TaskResult | Any:
assert request # nosec
- try:
- task_result = tasks_manager.get_task_result(task_id, with_task_context=None)
- await tasks_manager.remove_task(
- task_id, with_task_context=None, reraise_errors=False
- )
- return task_result
- except (TaskNotFoundError, TaskNotCompletedError):
- raise
- except Exception:
- # the task shall be removed in this case
- await tasks_manager.remove_task(
- task_id, with_task_context=None, reraise_errors=False
- )
- raise
+ return await lrt_api.get_task_result(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ long_running_manager.get_task_context(request),
+ task_id=task_id,
+ )
@router.delete(
"/{task_id}",
- summary="Cancel and deletes a task",
+ summary="Cancels and removes a task",
response_model=None,
status_code=status.HTTP_204_NO_CONTENT,
responses={
@@ -88,10 +95,17 @@ async def get_task_result(
},
)
@cancel_on_disconnect
-async def cancel_and_delete_task(
+async def remove_task(
request: Request,
+ long_running_manager: Annotated[
+ FastAPILongRunningManager, Depends(get_long_running_manager)
+ ],
task_id: TaskId,
- tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)],
) -> None:
assert request # nosec
- await tasks_manager.remove_task(task_id, with_task_context=None)
+ await lrt_api.remove_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ long_running_manager.get_task_context(request),
+ task_id=task_id,
+ )
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_server.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_server.py
index e8306b6d1874..9cf4c526acee 100644
--- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_server.py
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_server.py
@@ -1,31 +1,40 @@
-from typing import Final
+import datetime
from fastapi import APIRouter, FastAPI
-from pydantic import PositiveFloat
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
-from ...long_running_tasks._errors import BaseLongRunningError
-from ...long_running_tasks._task import TasksManager
+from ...long_running_tasks.constants import (
+ DEFAULT_STALE_TASK_CHECK_INTERVAL,
+ DEFAULT_STALE_TASK_DETECT_TIMEOUT,
+)
+from ...long_running_tasks.errors import BaseLongRunningError
+from ...long_running_tasks.models import LRTNamespace
from ._error_handlers import base_long_running_error_handler
+from ._manager import FastAPILongRunningManager
from ._routes import router
-_MINUTE: Final[PositiveFloat] = 60
-
def setup(
app: FastAPI,
*,
router_prefix: str = "",
- stale_task_check_interval_s: PositiveFloat = 1 * _MINUTE,
- stale_task_detect_timeout_s: PositiveFloat = 5 * _MINUTE,
+ redis_settings: RedisSettings,
+ rabbit_settings: RabbitSettings,
+ lrt_namespace: LRTNamespace,
+ stale_task_check_interval: datetime.timedelta = DEFAULT_STALE_TASK_CHECK_INTERVAL,
+ stale_task_detect_timeout: datetime.timedelta = DEFAULT_STALE_TASK_DETECT_TIMEOUT,
) -> None:
"""
- - `router_prefix` APIs are mounted on `/task/...`, this
- will change them to be mounted as `{router_prefix}/task/...`
- - `stale_task_check_interval_s` interval at which the
+ - `router_prefix` APIs are mounted on `/...`, this
+ will change them to be mounted as `{router_prefix}/...`
+ - `redis_settings` settings for Redis connection
+ - `rabbit_settings` settings for RabbitMQ connection
+ - `lrt_namespace` namespace for the long-running tasks
+ - `stale_task_check_interval` interval at which the
TaskManager checks for tasks which are no longer being
actively monitored by a client
- - `stale_task_detect_timeout_s` interval after which a
- task is considered stale
+    - `stale_task_detect_timeout` interval after which a task is considered stale
"""
async def on_startup() -> None:
@@ -35,19 +44,27 @@ async def on_startup() -> None:
app.include_router(main_router)
# add components to state
- app.state.long_running_task_manager = TasksManager(
- stale_task_check_interval_s=stale_task_check_interval_s,
- stale_task_detect_timeout_s=stale_task_detect_timeout_s,
+ app.state.long_running_manager = long_running_manager = (
+ FastAPILongRunningManager(
+ stale_task_check_interval=stale_task_check_interval,
+ stale_task_detect_timeout=stale_task_detect_timeout,
+ redis_settings=redis_settings,
+ rabbit_settings=rabbit_settings,
+ lrt_namespace=lrt_namespace,
+ )
)
+ await long_running_manager.setup()
async def on_shutdown() -> None:
- if app.state.long_running_task_manager:
- task_manager: TasksManager = app.state.long_running_task_manager
- await task_manager.close()
+ if app.state.long_running_manager:
+ long_running_manager: FastAPILongRunningManager = (
+ app.state.long_running_manager
+ )
+ await long_running_manager.teardown()
app.add_event_handler("startup", on_startup)
app.add_event_handler("shutdown", on_shutdown)
# add error handlers
# NOTE: Exception handler can not be added during the on_startup script, otherwise not working correctly
- app.add_exception_handler(BaseLongRunningError, base_long_running_error_handler) # type: ignore[arg-type]
+ app.add_exception_handler(BaseLongRunningError, base_long_running_error_handler) # type: ignore[arg-type]
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/client.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/client.py
index 62b72256000c..ca72f24e4417 100644
--- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/client.py
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/client.py
@@ -2,164 +2,11 @@
Provides a convenient way to return the result given a TaskId.
"""
-import asyncio
-import logging
-from collections.abc import AsyncGenerator
-from typing import Any
-
-import httpx
-from fastapi import status
-from models_library.api_schemas_long_running_tasks.base import TaskProgress
-from models_library.api_schemas_long_running_tasks.tasks import (
- TaskGet,
- TaskResult,
- TaskStatus,
-)
-from tenacity import (
- AsyncRetrying,
- TryAgain,
- before_sleep_log,
- retry,
- retry_if_exception_type,
- stop_after_delay,
- wait_random_exponential,
-)
-from yarl import URL
-
-from ...long_running_tasks._constants import DEFAULT_POLL_INTERVAL_S, HOUR
-from ...long_running_tasks._models import (
- ClientConfiguration,
- LRTask,
- ProgressCallback,
- ProgressMessage,
- ProgressPercent,
- RequestBody,
-)
-from ...long_running_tasks._task import TaskId
-from ...rest_responses import unwrap_envelope_if_required
-from ._client import DEFAULT_HTTP_REQUESTS_TIMEOUT, Client, setup
-from ._context_manager import periodic_task_result
-
-_logger = logging.getLogger(__name__)
-
-
-_DEFAULT_FASTAPI_RETRY_POLICY: dict[str, Any] = {
- "retry": retry_if_exception_type(httpx.RequestError),
- "wait": wait_random_exponential(max=20),
- "stop": stop_after_delay(60),
- "reraise": True,
- "before_sleep": before_sleep_log(_logger, logging.INFO),
-}
-
-
-@retry(**_DEFAULT_FASTAPI_RETRY_POLICY)
-async def _start(
- session: httpx.AsyncClient, url: URL, json: RequestBody | None
-) -> TaskGet:
- response = await session.post(f"{url}", json=json)
- response.raise_for_status()
- data = unwrap_envelope_if_required(response.json())
- return TaskGet.model_validate(data)
-
-
-@retry(**_DEFAULT_FASTAPI_RETRY_POLICY)
-async def _wait_for_completion(
- session: httpx.AsyncClient,
- task_id: TaskId,
- status_url: URL,
- client_timeout: int,
-) -> AsyncGenerator[TaskProgress, None]:
- try:
- async for attempt in AsyncRetrying(
- stop=stop_after_delay(client_timeout),
- reraise=True,
- retry=retry_if_exception_type(TryAgain),
- before_sleep=before_sleep_log(_logger, logging.DEBUG),
- ):
- with attempt:
- response = await session.get(f"{status_url}")
- response.raise_for_status()
- data = unwrap_envelope_if_required(response.json())
- task_status = TaskStatus.model_validate(data)
-
- yield task_status.task_progress
- if not task_status.done:
- await asyncio.sleep(
- float(
- response.headers.get("retry-after", DEFAULT_POLL_INTERVAL_S)
- )
- )
- msg = f"{task_id=}, {task_status.started=} has status: '{task_status.task_progress.message}' {task_status.task_progress.percent}%"
- raise TryAgain(msg) # noqa: TRY301
-
- except TryAgain as exc:
- # this is a timeout
- msg = f"Long running task {task_id}, calling to {status_url} timed-out after {client_timeout} seconds"
- raise TimeoutError(msg) from exc
-
-
-@retry(**_DEFAULT_FASTAPI_RETRY_POLICY)
-async def _task_result(session: httpx.AsyncClient, result_url: URL) -> Any:
- response = await session.get(f"{result_url}")
- response.raise_for_status()
- if response.status_code != status.HTTP_204_NO_CONTENT:
- return unwrap_envelope_if_required(response.json())
- return None
-
-
-@retry(**_DEFAULT_FASTAPI_RETRY_POLICY)
-async def _abort_task(session: httpx.AsyncClient, abort_url: URL) -> None:
- response = await session.delete(f"{abort_url}")
- response.raise_for_status()
-
-
-async def long_running_task_request(
- session: httpx.AsyncClient,
- url: URL,
- json: RequestBody | None = None,
- client_timeout: int = 1 * HOUR,
-) -> AsyncGenerator[LRTask, None]:
- """Will use the passed `httpx.AsyncClient` to call an oSparc long
- running task `url` passing `json` as request body.
- NOTE: this follows the usual aiohttp client syntax, and will raise the same errors
-
- Raises:
- [https://docs.aiohttp.org/en/stable/client_reference.html#hierarchy-of-exceptions]
- """
- task = None
- try:
- task = await _start(session, url, json)
- last_progress = None
- async for task_progress in _wait_for_completion(
- session,
- task.task_id,
- URL(task.status_href),
- client_timeout,
- ):
- last_progress = task_progress
- yield LRTask(progress=task_progress)
- assert last_progress # nosec
- yield LRTask(
- progress=last_progress,
- _result=_task_result(session, URL(task.result_href)),
- )
-
- except (TimeoutError, asyncio.CancelledError):
- if task:
- await _abort_task(session, URL(task.abort_href))
- raise
-
+from ._client import HttpClient, setup
+from ._context_manager import periodic_task_result # attach to the same object!
__all__: tuple[str, ...] = (
- "DEFAULT_HTTP_REQUESTS_TIMEOUT",
- "Client",
- "ClientConfiguration",
- "LRTask",
- "ProgressCallback",
- "ProgressMessage",
- "ProgressPercent",
- "TaskId",
- "TaskResult",
+ "HttpClient",
"periodic_task_result",
"setup",
)
diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/server.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/server.py
index b9a29d1d90a4..b7cf0fba60ad 100644
--- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/server.py
+++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/server.py
@@ -6,30 +6,12 @@
running task. The client will take care of recovering the result from it.
"""
-from models_library.api_schemas_long_running_tasks.tasks import TaskResult
-
-from ...long_running_tasks._errors import TaskAlreadyRunningError, TaskCancelledError
-from ...long_running_tasks._task import (
- TaskId,
- TaskProgress,
- TasksManager,
- TaskStatus,
- start_task,
-)
-from ._dependencies import get_tasks_manager
+from ._dependencies import get_long_running_manager
from ._server import setup
__all__: tuple[str, ...] = (
- "get_tasks_manager",
+ "get_long_running_manager",
"setup",
- "start_task",
- "TaskAlreadyRunningError",
- "TaskCancelledError",
- "TaskId",
- "TasksManager",
- "TaskProgress",
- "TaskResult",
- "TaskStatus",
)
# nopycln: file
diff --git a/packages/service-library/src/servicelib/fastapi/monitoring.py b/packages/service-library/src/servicelib/fastapi/monitoring.py
index 32dd26f53d6b..a9c33f0d2162 100644
--- a/packages/service-library/src/servicelib/fastapi/monitoring.py
+++ b/packages/service-library/src/servicelib/fastapi/monitoring.py
@@ -13,12 +13,6 @@
CONTENT_TYPE_LATEST,
generate_latest,
)
-from servicelib.prometheus_metrics import (
- PrometheusMetrics,
- get_prometheus_metrics,
- record_request_metrics,
- record_response_metrics,
-)
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.types import ASGIApp
@@ -26,6 +20,12 @@
UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE,
X_SIMCORE_USER_AGENT,
)
+from ..prometheus_metrics import (
+ PrometheusMetrics,
+ get_prometheus_metrics,
+ record_request_metrics,
+ record_response_metrics,
+)
_logger = logging.getLogger(__name__)
_PROMETHEUS_METRICS = "prometheus_metrics"
diff --git a/packages/service-library/src/servicelib/fastapi/postgres_lifespan.py b/packages/service-library/src/servicelib/fastapi/postgres_lifespan.py
index 319a7121896a..e532d4a435fc 100644
--- a/packages/service-library/src/servicelib/fastapi/postgres_lifespan.py
+++ b/packages/service-library/src/servicelib/fastapi/postgres_lifespan.py
@@ -28,7 +28,9 @@ def create_postgres_database_input_state(settings: PostgresSettings) -> State:
return {PostgresLifespanState.POSTGRES_SETTINGS: settings}
-async def postgres_database_lifespan(_: FastAPI, state: State) -> AsyncIterator[State]:
+async def postgres_database_lifespan(
+ app: FastAPI, state: State
+) -> AsyncIterator[State]:
_lifespan_name = f"{__name__}.{postgres_database_lifespan.__name__}"
@@ -43,7 +45,7 @@ async def postgres_database_lifespan(_: FastAPI, state: State) -> AsyncIterator[
# connect to database
async_engine: AsyncEngine = await create_async_engine_and_database_ready(
- settings
+ settings, app.title
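+            # NOTE: the FastAPI app title is used to identify this service's DB connections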
)
try:
diff --git a/packages/service-library/src/servicelib/fastapi/profiler.py b/packages/service-library/src/servicelib/fastapi/profiler.py
index cb3e7c5c0840..9010c6296f09 100644
--- a/packages/service-library/src/servicelib/fastapi/profiler.py
+++ b/packages/service-library/src/servicelib/fastapi/profiler.py
@@ -1,11 +1,11 @@
from typing import Any, Final
from fastapi import FastAPI
-from servicelib.aiohttp import status
-from servicelib.mimetype_constants import MIMETYPE_APPLICATION_JSON
from starlette.requests import Request
from starlette.types import ASGIApp, Receive, Scope, Send
+from ..aiohttp import status
+from ..mimetype_constants import MIMETYPE_APPLICATION_JSON
from ..utils_profiling_middleware import (
_is_profiling,
_profiler,
diff --git a/packages/service-library/src/servicelib/fastapi/redis_lifespan.py b/packages/service-library/src/servicelib/fastapi/redis_lifespan.py
index b1ac98e9d6ca..b8955d2c8ae8 100644
--- a/packages/service-library/src/servicelib/fastapi/redis_lifespan.py
+++ b/packages/service-library/src/servicelib/fastapi/redis_lifespan.py
@@ -51,6 +51,7 @@ async def redis_client_sdk_lifespan(_: FastAPI, state: State) -> AsyncIterator[S
redis_dsn_with_secrets,
client_name=redis_state.REDIS_CLIENT_NAME,
)
+ await redis_client.setup()
try:
yield {"REDIS_CLIENT_SDK": redis_client, **called_state}
diff --git a/packages/service-library/src/servicelib/fastapi/requests_decorators.py b/packages/service-library/src/servicelib/fastapi/requests_decorators.py
index ae5f1ea047c6..b9116d9c1d5a 100644
--- a/packages/service-library/src/servicelib/fastapi/requests_decorators.py
+++ b/packages/service-library/src/servicelib/fastapi/requests_decorators.py
@@ -4,6 +4,7 @@
from functools import wraps
from typing import Any, Protocol
+from common_library.async_tools import cancel_wait_task
from fastapi import Request, status
from fastapi.exceptions import HTTPException
@@ -13,8 +14,7 @@
class _HandlerWithRequestArg(Protocol):
__name__: str
- async def __call__(self, request: Request, *args: Any, **kwargs: Any) -> Any:
- ...
+ async def __call__(self, request: Request, *args: Any, **kwargs: Any) -> Any: ...
def _validate_signature(handler: _HandlerWithRequestArg):
@@ -75,13 +75,8 @@ async def wrapper(request: Request, *args, **kwargs):
# One has completed, cancel the other
for t in pending:
- t.cancel()
-
try:
- await asyncio.wait_for(t, timeout=3)
-
- except asyncio.CancelledError:
- pass
+ await cancel_wait_task(t, max_delay=3)
except Exception: # pylint: disable=broad-except
if t is handler_task:
raise
diff --git a/packages/service-library/src/servicelib/fastapi/rest_pagination.py b/packages/service-library/src/servicelib/fastapi/rest_pagination.py
index 0a199152acea..0ef84d61ca7c 100644
--- a/packages/service-library/src/servicelib/fastapi/rest_pagination.py
+++ b/packages/service-library/src/servicelib/fastapi/rest_pagination.py
@@ -4,6 +4,7 @@
from fastapi_pagination.cursor import CursorPage # type: ignore[import-not-found]
from fastapi_pagination.customization import ( # type: ignore[import-not-found]
CustomizedPage,
+ UseIncludeTotal,
UseParamsFields,
)
from models_library.api_schemas_storage.storage_schemas import (
@@ -24,5 +25,8 @@
description="Page size",
)
),
+ UseIncludeTotal(
+ include_total=False
+    ),  # make the total field optional, as S3 does not provide a total count
]
CustomizedPathsCursorPageParams: TypeAlias = CustomizedPathsCursorPage.__params_type__ # type: ignore
diff --git a/packages/service-library/src/servicelib/fastapi/tracing.py b/packages/service-library/src/servicelib/fastapi/tracing.py
index 5b2cba5434d6..50c8aeab1d7d 100644
--- a/packages/service-library/src/servicelib/fastapi/tracing.py
+++ b/packages/service-library/src/servicelib/fastapi/tracing.py
@@ -3,7 +3,7 @@
import logging
from collections.abc import AsyncIterator
-from fastapi import FastAPI
+from fastapi import FastAPI, Request
from fastapi_lifespan_manager import State
from httpx import AsyncClient, Client
from opentelemetry import trace
@@ -13,16 +13,19 @@
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor
from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace import SpanProcessor, TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
-from servicelib.logging_utils import log_context
from settings_library.tracing import TracingSettings
+from starlette.middleware.base import BaseHTTPMiddleware
from yarl import URL
+from ..logging_utils import log_context
+from ..tracing import get_trace_id_header
+
_logger = logging.getLogger(__name__)
try:
- from opentelemetry.instrumentation.asyncpg import ( # type: ignore[import-not-found]
+ from opentelemetry.instrumentation.asyncpg import (
AsyncPGInstrumentor,
)
@@ -70,6 +73,11 @@
HAS_AIOPIKA_INSTRUMENTOR = False
+def _create_span_processor(tracing_destination: str) -> SpanProcessor:
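+    # batches finished spans and exports them via OTLP/HTTP to the collector endpoint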
+ otlp_exporter = OTLPSpanExporterHTTP(endpoint=tracing_destination)
+ return BatchSpanProcessor(otlp_exporter)
+
+
def _startup(tracing_settings: TracingSettings, service_name: str) -> None:
if (
not tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT
@@ -96,10 +104,10 @@ def _startup(tracing_settings: TracingSettings, service_name: str) -> None:
service_name,
tracing_destination,
)
- # Configure OTLP exporter to send spans to the collector
- otlp_exporter = OTLPSpanExporterHTTP(endpoint=tracing_destination)
- span_processor = BatchSpanProcessor(otlp_exporter)
- global_tracer_provider.add_span_processor(span_processor)
+ # Add the span processor to the tracer provider
+ global_tracer_provider.add_span_processor(
+ _create_span_processor(tracing_destination)
+ )
if HAS_AIOPG:
with log_context(
@@ -180,7 +188,11 @@ def _shutdown() -> None:
_logger.exception("Failed to uninstrument RequestsInstrumentor")
-def initialize_fastapi_app_tracing(app: FastAPI):
+def initialize_fastapi_app_tracing(
+ app: FastAPI, *, add_response_trace_id_header: bool = False
+):
+ if add_response_trace_id_header:
+ app.add_middleware(ResponseTraceIdHeaderMiddleware)
FastAPIInstrumentor.instrument_app(app)
@@ -216,3 +228,13 @@ async def tracing_instrumentation_lifespan(
_shutdown()
return tracing_instrumentation_lifespan
+
+
+class ResponseTraceIdHeaderMiddleware(BaseHTTPMiddleware):
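+    """Adds the current trace id to the response headers, when a trace is active."""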
+
+ async def dispatch(self, request: Request, call_next):
+ response = await call_next(request)
+ trace_id_header = get_trace_id_header()
+ if trace_id_header:
+ response.headers.update(trace_id_header)
+ return response
diff --git a/packages/service-library/src/servicelib/logging_utils.py b/packages/service-library/src/servicelib/logging_utils.py
index 7ef3bc28e94f..27bd08dd87f6 100644
--- a/packages/service-library/src/servicelib/logging_utils.py
+++ b/packages/service-library/src/servicelib/logging_utils.py
@@ -8,22 +8,34 @@
import asyncio
import functools
import logging
+import logging.handlers
+import queue
from asyncio import iscoroutinefunction
from collections.abc import Callable, Iterator
from contextlib import contextmanager
+from dataclasses import dataclass
from datetime import datetime
from inspect import getframeinfo, stack
from pathlib import Path
-from typing import Any, NotRequired, TypeAlias, TypedDict, TypeVar
-
+from typing import Any, Final, TypeAlias, TypedDict, TypeVar
+
+from common_library.json_serialization import json_dumps
+from common_library.logging.logging_base import LogExtra
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
+from common_library.logging.logging_utils_filtering import (
+ GeneralLogFilter,
+ LoggerName,
+ MessageSubstring,
+)
from settings_library.tracing import TracingSettings
-from .logging_utils_filtering import GeneralLogFilter, LoggerName, MessageSubstring
from .tracing import setup_log_tracing
from .utils_secrets import mask_sensitive_data
_logger = logging.getLogger(__name__)
+LogLevelInt: TypeAlias = int
+LogMessageStr: TypeAlias = str
BLACK = "\033[0;30m"
BLUE = "\033[0;34m"
@@ -54,27 +66,6 @@
}
-class LogExtra(TypedDict):
- log_uid: NotRequired[str]
- log_oec: NotRequired[str]
-
-
-def get_log_record_extra(
- *,
- user_id: int | str | None = None,
- error_code: str | None = None,
-) -> LogExtra | None:
- extra: LogExtra = {}
-
- if user_id:
- assert int(user_id) > 0 # nosec
- extra["log_uid"] = f"{user_id}"
- if error_code:
- extra["log_oec"] = error_code
-
- return extra or None
-
-
class CustomFormatter(logging.Formatter):
"""Custom Formatter does these 2 things:
1. Overrides 'funcName' with the value of 'func_name_override', if it exists.
@@ -87,11 +78,19 @@ def __init__(self, fmt: str, *, log_format_local_dev_enabled: bool) -> None:
def format(self, record) -> str:
if hasattr(record, "func_name_override"):
- record.funcName = record.func_name_override
+ record.funcName = (
+ record.func_name_override
+ ) # pyright: ignore[reportAttributeAccessIssue]
if hasattr(record, "file_name_override"):
- record.filename = record.file_name_override
+ record.filename = (
+ record.file_name_override
+ ) # pyright: ignore[reportAttributeAccessIssue]
- for name in LogExtra.__optional_keys__: # pylint: disable=no-member
+ # pylint: disable=no-member
+ optional_keys = LogExtra.__optional_keys__ | frozenset(
+ ["otelTraceID", "otelSpanID"]
+ )
+ for name in optional_keys:
if not hasattr(record, name):
setattr(record, name, None)
@@ -106,68 +105,51 @@ def format(self, record) -> str:
# SEE https://docs.python.org/3/library/logging.html#logrecord-attributes
-DEFAULT_FORMATTING = (
- "log_level=%(levelname)s "
- "| log_timestamp=%(asctime)s "
- "| log_source=%(name)s:%(funcName)s(%(lineno)d) "
- "| log_uid=%(log_uid)s "
- "| log_oec=%(log_oec)s"
- "| log_msg=%(message)s"
+_DEFAULT_FORMATTING: Final[str] = " | ".join(
+ [
+ "log_level=%(levelname)s",
+ "log_timestamp=%(asctime)s",
+ "log_source=%(name)s:%(funcName)s(%(lineno)d)",
+ "log_uid=%(log_uid)s",
+ "log_oec=%(log_oec)s",
+ "log_trace_id=%(otelTraceID)s",
+ "log_span_id=%(otelSpanID)s",
+ "log_msg=%(message)s",
+ ]
+)
+
+_LOCAL_FORMATTING: Final[str] = (
+ "%(levelname)s: [%(asctime)s/%(processName)s] "
+ "[log_trace_id=%(otelTraceID)s|log_span_id=%(otelSpanID)s] "
+ "[%(name)s:%(funcName)s(%(lineno)d)] - %(message)s"
)
-LOCAL_FORMATTING = "%(levelname)s: [%(asctime)s/%(processName)s] [%(name)s:%(funcName)s(%(lineno)d)] - %(message)s"
# Graylog Grok pattern extractor:
-# log_level=%{WORD:log_level} \| log_timestamp=%{TIMESTAMP_ISO8601:log_timestamp} \| log_source=%{DATA:log_source} \| (log_uid=%{WORD:log_uid} \| )?log_msg=%{GREEDYDATA:log_msg}
+# log_level=%{WORD:log_level} \| log_timestamp=%{TIMESTAMP_ISO8601:log_timestamp} \| log_source=%{NOTSPACE:log_source} \| log_uid=%{NOTSPACE:log_uid} \| log_oec=%{NOTSPACE:log_oec} \| log_trace_id=%{NOTSPACE:log_trace_id} \| log_span_id=%{NOTSPACE:log_span_id} \| log_msg=%{GREEDYDATA:log_msg}
-def config_all_loggers(
+def _setup_logging_formatter(
*,
log_format_local_dev_enabled: bool,
- logger_filter_mapping: dict[LoggerName, list[MessageSubstring]],
- tracing_settings: TracingSettings | None,
-) -> None:
- """
- Applies common configuration to ALL registered loggers
- """
- the_manager: logging.Manager = logging.Logger.manager
- root_logger = logging.getLogger()
+) -> logging.Formatter:
+ fmt = _LOCAL_FORMATTING if log_format_local_dev_enabled else _DEFAULT_FORMATTING
- loggers = [root_logger] + [
- logging.getLogger(name) for name in the_manager.loggerDict
- ]
+ return CustomFormatter(
+ fmt, log_format_local_dev_enabled=log_format_local_dev_enabled
+ )
- fmt = DEFAULT_FORMATTING
- if tracing_settings is not None:
- fmt = (
- "log_level=%(levelname)s "
- "| log_timestamp=%(asctime)s "
- "| log_source=%(name)s:%(funcName)s(%(lineno)d) "
- "| log_uid=%(log_uid)s "
- "| log_oec=%(log_oec)s"
- "| log_trace_id=%(otelTraceID)s "
- "| log_span_id=%(otelSpanID)s "
- "| log_resource.service.name=%(otelServiceName)s "
- "| log_trace_sampled=%(otelTraceSampled)s] "
- "| log_msg=%(message)s"
- )
- setup_log_tracing(tracing_settings=tracing_settings)
- if log_format_local_dev_enabled:
- fmt = LOCAL_FORMATTING
- if tracing_settings is not None:
- fmt = (
- "%(levelname)s: [%(asctime)s/%(processName)s] "
- "[log_trace_id=%(otelTraceID)s log_span_id=%(otelSpanID)s log_resource.service.name=%(otelServiceName)s log_trace_sampled=%(otelTraceSampled)s] "
- "[%(name)s:%(funcName)s(%(lineno)d)] - %(message)s"
- )
- for logger in loggers:
- _set_logging_handler(
- logger, fmt=fmt, log_format_local_dev_enabled=log_format_local_dev_enabled
- )
+def _get_all_loggers() -> list[logging.Logger]:
+ manager = logging.Logger.manager
+ root_logger = logging.getLogger()
+ return [root_logger] + [logging.getLogger(name) for name in manager.loggerDict]
+
+def _apply_logger_filters(
+ logger_filter_mapping: dict[LoggerName, list[MessageSubstring]],
+) -> None:
for logger_name, filtered_routes in logger_filter_mapping.items():
logger = logging.getLogger(logger_name)
- # Check if the logger has any handlers or is in active use
if not logger.hasHandlers():
_logger.warning(
"Logger %s does not have any handlers. Filter will not be added.",
@@ -179,43 +161,239 @@ def config_all_loggers(
logger.addFilter(log_filter)
-def _set_logging_handler(
- logger: logging.Logger,
+def _setup_base_logging_level(log_level: LogLevelInt) -> None:
+ logging.basicConfig(level=log_level)
+ logging.root.setLevel(log_level)
+
+
+def _dampen_noisy_loggers(
+ noisy_loggers: tuple[str, ...],
+) -> None:
+ """Sets a less verbose level for noisy loggers."""
+ quiet_level: int = max(
+ min(logging.root.level + logging.CRITICAL - logging.ERROR, logging.CRITICAL),
+ logging.WARNING,
+ )
+
+ for name in noisy_loggers:
+ logging.getLogger(name).setLevel(quiet_level)
+
+
+def _configure_common_logging_settings(
*,
- fmt: str,
log_format_local_dev_enabled: bool,
+ tracing_settings: TracingSettings | None,
+ log_base_level: LogLevelInt,
+ noisy_loggers: tuple[str, ...] | None,
+) -> logging.Formatter:
+ """
+ Common configuration logic shared by both sync and async logging setups.
+
+ Returns the configured formatter to be used with the appropriate handler.
+ """
+ _setup_base_logging_level(log_base_level)
+ if noisy_loggers is not None:
+ _dampen_noisy_loggers(noisy_loggers)
+ if tracing_settings is not None:
+ setup_log_tracing(tracing_settings=tracing_settings)
+
+ return _setup_logging_formatter(
+ log_format_local_dev_enabled=log_format_local_dev_enabled,
+ )
+
+
+def _apply_logging_configuration(
+ handler: logging.Handler,
+ logger_filter_mapping: dict[LoggerName, list[MessageSubstring]],
) -> None:
- for handler in logger.handlers:
- handler.setFormatter(
- CustomFormatter(
- fmt, log_format_local_dev_enabled=log_format_local_dev_enabled
- )
- )
+ """
+ Apply the logging configuration with the given handler.
+ """
+ _clean_all_handlers()
+ _set_root_handler(handler)
+ if logger_filter_mapping:
+ _apply_logger_filters(logger_filter_mapping)
-def test_logger_propagation(logger: logging.Logger) -> None:
- """log propagation and levels can sometimes be daunting to get it right.
- This function uses the `logger`` passed as argument to log the same message at different levels
+def setup_loggers(
+ *,
+ log_format_local_dev_enabled: bool,
+ logger_filter_mapping: dict[LoggerName, list[MessageSubstring]],
+ tracing_settings: TracingSettings | None,
+ log_base_level: LogLevelInt,
+ noisy_loggers: tuple[str, ...] | None,
+) -> None:
+ """
+ Applies comprehensive configuration to ALL registered loggers.
+
+ Flow Diagram (Synchronous Logging):
+ ┌─────────────────┐ ┌─────────────────┐
+ │ Application │ │ Root Logger │
+ │ Thread │───────────────────▶│ StreamHandler │
+ │ │ │ ├─ Formatter │
+ │ logger.info() │ │ └─ Output │
+ │ logger.error() │ │ │
+ │ (blocking I/O) │ │ │
+ └─────────────────┘ └─────────────────┘
+ │ │
+ │ ▼
+ │ ┌─────────────┐
+ │ │ Console/ │
+ │ │ Terminal │
+ │ └─────────────┘
+ │
+ └─ Blocks until I/O completes
+
+ This function uses a comprehensive approach:
+ - Removes all handlers from all loggers
+ - Ensures all loggers propagate to root
+ - Sets up root logger with properly formatted handler
+ - All logging calls are synchronous and may block on I/O
+
+ For async/non-blocking logging, use `async_loggers` context manager instead.
+
+ Args:
+ log_format_local_dev_enabled: Enable local development formatting
+ logger_filter_mapping: Mapping of logger names to filtered message substrings
+ tracing_settings: OpenTelemetry tracing configuration
+ log_base_level: Base logging level to set
+ noisy_loggers: Loggers to set to a quieter level
+ """
+ formatter = _configure_common_logging_settings(
+ log_format_local_dev_enabled=log_format_local_dev_enabled,
+ tracing_settings=tracing_settings,
+ log_base_level=log_base_level,
+ noisy_loggers=noisy_loggers,
+ )
+
+ # Create a properly formatted handler for the root logger
+ stream_handler = logging.StreamHandler()
+ stream_handler.setFormatter(formatter)
- This should help to visually test a given configuration
+ _store_logger_state(_get_all_loggers())
+ _apply_logging_configuration(stream_handler, logger_filter_mapping)
+
+
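For reference, a minimal usage sketch of `setup_loggers` as defined above (the import path mirrors this module's location; the noisy-logger names are purely illustrative, not part of this change):

```python
import logging

# assumed import path, following this package's layout
from servicelib.logging_utils import setup_loggers

setup_loggers(
    log_format_local_dev_enabled=True,        # human-readable local format
    logger_filter_mapping={},                 # no per-logger message filters
    tracing_settings=None,                    # skip OpenTelemetry log correlation
    log_base_level=logging.INFO,
    noisy_loggers=("botocore", "urllib3"),    # hypothetical chatty libraries to quiet down
)

logging.getLogger(__name__).info("handled by the single root StreamHandler")
```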
+@contextmanager
+def _queued_logging_handler(
+ log_formatter: logging.Formatter,
+) -> Iterator[logging.Handler]:
+ log_queue: queue.Queue[logging.LogRecord] = queue.Queue()
+ # Create handler with proper formatting
+ handler = logging.StreamHandler()
+ handler.setFormatter(log_formatter)
+
+ # Create and start the queue listener
+ listener = logging.handlers.QueueListener(
+ log_queue, handler, respect_handler_level=True
+ )
+ listener.start()
- USAGE:
- from .logging_utils import test_logger_propagation
- for n in ("aiohttp.access", "gunicorn.access"):
- test_logger_propagation(logging.getLogger(n))
+ queue_handler = logging.handlers.QueueHandler(log_queue)
+
+ yield queue_handler
+
+ # cleanup
+ with log_context(
+ _logger,
+ level=logging.DEBUG,
+ msg="Shutdown async logging listener",
+ ):
+ listener.stop()
+
+
+def _clean_all_handlers() -> None:
+ """
+ Cleans all handlers from all loggers.
+ This is useful for resetting the logging configuration.
"""
- msg = f"TESTING %s log using {logger=}"
- logger.critical(msg, "critical")
- logger.error(msg, "error")
- logger.info(msg, "info")
- logger.warning(msg, "warning")
- logger.debug(msg, "debug")
+ root_logger = logging.getLogger()
+ all_loggers = _get_all_loggers()
+ for logger in all_loggers:
+ if logger is root_logger:
+ continue
+ logger.handlers.clear()
+ logger.propagate = True # Ensure propagation is enabled
+
+
+def _set_root_handler(handler: logging.Handler) -> None:
+ root_logger = logging.getLogger()
+ root_logger.handlers.clear() # Clear existing handlers
+ root_logger.addHandler(handler) # Add the new handler
+
+
+@contextmanager
+def async_loggers(
+ *,
+ log_format_local_dev_enabled: bool,
+ logger_filter_mapping: dict[LoggerName, list[MessageSubstring]],
+ tracing_settings: TracingSettings | None,
+ log_base_level: LogLevelInt,
+ noisy_loggers: tuple[str, ...] | None,
+) -> Iterator[None]:
+ """
+ Context manager for non-blocking logging infrastructure.
+
+ Flow Diagram:
+ ┌─────────────────┐ ┌──────────────┐ ┌─────────────────┐
+ │ Application │ │ Queue │ │ Background │
+ │ Thread │───▶│ (unlimited) │───▶│ Listener Thread │
+ │ │ │ │ │ │
+ │ logger.info() │ │ LogRecord │ │ StreamHandler │
+ │ logger.error() │ │ LogRecord │ │ ├─ Formatter │
+ │ (non-blocking) │ │ LogRecord │ │ └─ Output │
+ └─────────────────┘ └──────────────┘ └─────────────────┘
+ │ │ │
+ │ │ ▼
+ │ │ ┌─────────────┐
+ │ │ │ Console/ │
+ │ │ │ Terminal │
+ │ │ └─────────────┘
+ │ │
+ └───────────────────────┴─ No blocking, immediate return
+
+ The async logging setup ensures that:
+ 1. All log calls return immediately (non-blocking)
+ 2. Log records are queued in an unlimited queue
+ 3. A background thread processes the queue and handles actual I/O
+ 4. All loggers propagate to root for centralized handling
+
+ For more details on the underlying implementation, see:
+ https://docs.python.org/3/library/logging.handlers.html#queuehandler
+
+ Usage:
+        with async_loggers(log_format_local_dev_enabled=True, logger_filter_mapping={}, tracing_settings=None, log_base_level=logging.INFO, noisy_loggers=None):
+ # Your async application code here
+ logger.info("This is non-blocking!")
+
+ Args:
+ log_format_local_dev_enabled: Enable local development formatting
+ logger_filter_mapping: Mapping of logger names to filtered message substrings
+ tracing_settings: OpenTelemetry tracing configuration
+ log_base_level: Base logging level to set
+ noisy_loggers: Loggers to set to a quieter level
+ """
+ formatter = _configure_common_logging_settings(
+ log_format_local_dev_enabled=log_format_local_dev_enabled,
+ tracing_settings=tracing_settings,
+ log_base_level=log_base_level,
+ noisy_loggers=noisy_loggers,
+ )
+
+ with (
+ _queued_logging_handler(formatter) as queue_handler,
+ _stored_logger_states(_get_all_loggers()),
+ ):
+ _apply_logging_configuration(queue_handler, logger_filter_mapping)
+
+ with log_context(_logger, logging.INFO, "Asynchronous logging"):
+ yield
class LogExceptionsKwargsDict(TypedDict, total=True):
logger: logging.Logger
- level: int
+ level: LogLevelInt
msg_prefix: str
exc_info: bool
stack_info: bool
@@ -224,7 +402,7 @@ class LogExceptionsKwargsDict(TypedDict, total=True):
@contextmanager
def log_exceptions(
logger: logging.Logger,
- level: int,
+ level: LogLevelInt,
msg_prefix: str = "",
*,
exc_info: bool = False,
@@ -264,7 +442,7 @@ def log_exceptions(
def _log_before_call(
- logger_obj: logging.Logger, level: int, func: Callable, *args, **kwargs
+ logger_obj: logging.Logger, level: LogLevelInt, func: Callable, *args, **kwargs
) -> dict[str, str]:
# NOTE: We should avoid logging arguments but in the meantime, we are trying to
# avoid exposing sensitive data in the logs. For `args` is more difficult. We could eventually
@@ -302,7 +480,7 @@ def _log_before_call(
def _log_after_call(
logger_obj: logging.Logger,
- level: int,
+ level: LogLevelInt,
func: Callable,
result: Any,
extra_args: dict[str, str],
@@ -322,7 +500,7 @@ def _log_after_call(
def log_decorator(
logger: logging.Logger | None,
- level: int = logging.DEBUG,
+ level: LogLevelInt = logging.DEBUG,
*,
# NOTE: default defined by legacy: ANE defined full stack tracebacks
# on exceptions
@@ -339,7 +517,6 @@ def log_decorator(
logger_obj = logger or _logger
def _decorator(func_or_coro: F) -> F:
-
_log_exc_kwargs = LogExceptionsKwargsDict(
logger=logger_obj,
level=level,
@@ -385,15 +562,16 @@ def log_catch(logger: logging.Logger, *, reraise: bool = True) -> Iterator[None]
logger.debug("call was cancelled")
raise
except Exception as exc: # pylint: disable=broad-except
- logger.exception("Unhandled exception:")
+ logger.exception(
+ **create_troubleshooting_log_kwargs(
+ "Caught unhandled exception",
+ error=exc,
+ )
+ )
if reraise:
raise exc from exc
-LogLevelInt: TypeAlias = int
-LogMessageStr: TypeAlias = str
-
-
def _un_capitalize(s: str) -> str:
return s[:1].lower() + s[1:] if s else ""
@@ -420,7 +598,7 @@ def log_context(
logger.log(level, log_msg, *args, **kwargs, stacklevel=stackelvel)
yield
duration = (
- f" in {(datetime.now() - start ).total_seconds()}s" # noqa: DTZ005
+ f" in {(datetime.now() - start).total_seconds()}s" # noqa: DTZ005
if log_duration
else ""
)
@@ -456,6 +634,58 @@ def guess_message_log_level(message: str) -> LogLevelInt:
return logging.INFO
-def set_parent_module_log_level(current_module: str, desired_log_level: int) -> None:
+def set_parent_module_log_level(
+ current_module: str, desired_log_level: LogLevelInt
+) -> None:
parent_module = ".".join(current_module.split(".")[:-1])
logging.getLogger(parent_module).setLevel(desired_log_level)
+
+
+@dataclass(frozen=True)
+class _LoggerState:
+ logger: logging.Logger
+ handlers: list[logging.Handler]
+ propagate: bool
+
+
+@contextmanager
+def _stored_logger_states(
+ loggers: list[logging.Logger],
+) -> Iterator[list[_LoggerState]]:
+ """
+ Context manager to store and restore the state of loggers.
+ It captures the current handlers and propagation state of each logger.
+ """
+ original_state = _store_logger_state(loggers)
+
+ try:
+ yield original_state
+ finally:
+ _restore_logger_state(original_state)
+
+
+def _store_logger_state(loggers: list[logging.Logger]) -> list[_LoggerState]:
+ logger_states = [
+ _LoggerState(logger, logger.handlers.copy(), logger.propagate)
+ for logger in loggers
+ if logger.handlers or not logger.propagate
+ ]
+ # log which loggers states were stored
+ _logger.info(
+ "Stored logger states: %s. TIP: these loggers configuration will be restored later.",
+ json_dumps(
+ [
+ f"{state.logger.name}(handlers={len(state.handlers)}, propagate={state.propagate})"
+ for state in logger_states
+ ]
+ ),
+ )
+ return logger_states
+
+
+def _restore_logger_state(original_state: list[_LoggerState]) -> None:
+ for state in original_state:
+ logger = state.logger
+ logger.handlers.clear()
+ logger.handlers.extend(state.handlers)
+ logger.propagate = state.propagate
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_constants.py b/packages/service-library/src/servicelib/long_running_tasks/_constants.py
deleted file mode 100644
index 5cc87208a369..000000000000
--- a/packages/service-library/src/servicelib/long_running_tasks/_constants.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from typing import Final
-
-MINUTE: Final[int] = 60 # in secs
-HOUR: Final[int] = 60 * MINUTE # in secs
-DEFAULT_POLL_INTERVAL_S: Final[float] = 1
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_models.py b/packages/service-library/src/servicelib/long_running_tasks/_models.py
deleted file mode 100644
index 89fb8b1b3997..000000000000
--- a/packages/service-library/src/servicelib/long_running_tasks/_models.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# mypy: disable-error-code=truthy-function
-from asyncio import Task
-from collections.abc import Awaitable, Callable, Coroutine
-from dataclasses import dataclass
-from datetime import datetime
-from typing import Any, TypeAlias
-
-from models_library.api_schemas_long_running_tasks.base import (
- ProgressMessage,
- ProgressPercent,
- TaskId,
- TaskProgress,
-)
-from models_library.api_schemas_long_running_tasks.tasks import (
- TaskGet,
- TaskResult,
- TaskStatus,
-)
-from pydantic import BaseModel, ConfigDict, Field, PositiveFloat
-
-TaskName: TypeAlias = str
-
-TaskType: TypeAlias = Callable[..., Coroutine[Any, Any, Any]]
-
-ProgressCallback: TypeAlias = Callable[
- [ProgressMessage, ProgressPercent | None, TaskId], Awaitable[None]
-]
-
-RequestBody: TypeAlias = Any
-
-
-class TrackedTask(BaseModel):
- task_id: str
- task: Task
- task_name: TaskName
- task_progress: TaskProgress
- # NOTE: this context lifetime is with the tracked task (similar to aiohttp storage concept)
- task_context: dict[str, Any]
- fire_and_forget: bool = Field(
- ...,
- description="if True then the task will not be auto-cancelled if no one enquires of its status",
- )
-
- started: datetime = Field(default_factory=datetime.utcnow)
- last_status_check: datetime | None = Field(
- default=None,
- description=(
- "used to detect when if the task is not actively "
- "polled by the client who created it"
- ),
- )
- model_config = ConfigDict(
- arbitrary_types_allowed=True,
- )
-
-
-class ClientConfiguration(BaseModel):
- router_prefix: str
- default_timeout: PositiveFloat
-
-
-@dataclass(frozen=True)
-class LRTask:
- progress: TaskProgress
- _result: Coroutine[Any, Any, Any] | None = None
-
- def done(self) -> bool:
- return self._result is not None
-
- async def result(self) -> Any:
- if not self._result:
- msg = "No result ready!"
- raise ValueError(msg)
- return await self._result
-
-
-# explicit export of models for api-schemas
-
-assert TaskResult # nosec
-assert TaskGet # nosec
-assert TaskStatus # nosec
-
-__all__: tuple[str, ...] = (
- "ProgressMessage",
- "ProgressPercent",
- "TaskGet",
- "TaskId",
- "TaskProgress",
- "TaskResult",
- "TaskStatus",
-)
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_rabbit_namespace.py b/packages/service-library/src/servicelib/long_running_tasks/_rabbit_namespace.py
new file mode 100644
index 000000000000..7ace2e53a3dd
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/_rabbit_namespace.py
@@ -0,0 +1,8 @@
+from models_library.rabbitmq_basic_types import RPCNamespace
+from pydantic import TypeAdapter
+
+from .models import LRTNamespace
+
+
+def get_rabbit_namespace(namespace: LRTNamespace) -> RPCNamespace:
+ return TypeAdapter(RPCNamespace).validate_python(f"lrt-{namespace}")
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_redis_store.py b/packages/service-library/src/servicelib/long_running_tasks/_redis_store.py
new file mode 100644
index 000000000000..fbed41205a95
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/_redis_store.py
@@ -0,0 +1,127 @@
+from typing import Any, Final
+
+import redis.asyncio as aioredis
+from common_library.json_serialization import json_dumps, json_loads
+from pydantic import TypeAdapter
+from settings_library.redis import RedisDatabase, RedisSettings
+
+from ..redis._client import RedisClientSDK
+from ..redis._utils import handle_redis_returns_union_types
+from ..utils import limited_gather
+from .models import LRTNamespace, TaskData, TaskId
+
+_STORE_TYPE_TASK_DATA: Final[str] = "TD"
+_LIST_CONCURRENCY: Final[int] = 3
+_MARKED_FOR_REMOVAL_FIELD: Final[str] = "marked_for_removal"
+
+
+def _to_redis_hash_mapping(data: dict[str, Any]) -> dict[str, str]:
+ return {k: json_dumps(v) for k, v in data.items()}
+
+
+def _load_from_redis_hash(data: dict[str, str]) -> dict[str, Any]:
+ return {k: json_loads(v) for k, v in data.items()}
+
+
+class RedisStore:
+ def __init__(self, redis_settings: RedisSettings, namespace: LRTNamespace):
+ self.redis_settings = redis_settings
+ self.namespace: LRTNamespace = namespace.upper()
+
+ self._client: RedisClientSDK | None = None
+
+ async def setup(self) -> None:
+ self._client = RedisClientSDK(
+ self.redis_settings.build_redis_dsn(RedisDatabase.LONG_RUNNING_TASKS),
+ client_name=f"long_running_tasks_store_{self.namespace}",
+ )
+ await self._client.setup()
+
+ async def shutdown(self) -> None:
+ if self._client:
+ await self._client.shutdown()
+
+ @property
+ def _redis(self) -> aioredis.Redis:
+ assert self._client # nosec
+ return self._client.redis
+
+ def _get_redis_key_task_data_match(self) -> str:
+ return f"{self.namespace}:{_STORE_TYPE_TASK_DATA}*"
+
+ def _get_redis_task_data_key(self, task_id: TaskId) -> str:
+ return f"{self.namespace}:{_STORE_TYPE_TASK_DATA}:{task_id}"
+
+ async def get_task_data(self, task_id: TaskId) -> TaskData | None:
+ result: dict[str, Any] = await handle_redis_returns_union_types(
+ self._redis.hgetall(
+ self._get_redis_task_data_key(task_id),
+ )
+ )
+ return (
+ TypeAdapter(TaskData).validate_python(_load_from_redis_hash(result))
+ if result and len(result)
+ else None
+ )
+
+ async def add_task_data(self, task_id: TaskId, value: TaskData) -> None:
+ await handle_redis_returns_union_types(
+ self._redis.hset(
+ self._get_redis_task_data_key(task_id),
+ mapping=_to_redis_hash_mapping(value.model_dump()),
+ )
+ )
+
+ async def update_task_data(
+ self,
+ task_id: TaskId,
+ *,
+ updates: dict[str, Any],
+ ) -> None:
+ await handle_redis_returns_union_types(
+ self._redis.hset(
+ self._get_redis_task_data_key(task_id),
+ mapping=_to_redis_hash_mapping(updates),
+ )
+ )
+
+ async def list_tasks_data(self) -> list[TaskData]:
+ hash_keys: list[str] = [
+ x
+ async for x in self._redis.scan_iter(self._get_redis_key_task_data_match())
+ ]
+
+ result = await limited_gather(
+ *[
+ handle_redis_returns_union_types(self._redis.hgetall(key))
+ for key in hash_keys
+ ],
+ limit=_LIST_CONCURRENCY,
+ )
+
+ return [
+ TypeAdapter(TaskData).validate_python(_load_from_redis_hash(item))
+ for item in result
+ if item
+ ]
+
+ async def delete_task_data(self, task_id: TaskId) -> None:
+ await handle_redis_returns_union_types(
+ self._redis.delete(self._get_redis_task_data_key(task_id))
+ )
+
+ async def mark_for_removal(self, task_id: TaskId) -> None:
+ await handle_redis_returns_union_types(
+ self._redis.hset(
+ self._get_redis_task_data_key(task_id),
+ mapping=_to_redis_hash_mapping({_MARKED_FOR_REMOVAL_FIELD: True}),
+ )
+ )
+
+ async def is_marked_for_removal(self, task_id: TaskId) -> bool:
+ result = await handle_redis_returns_union_types(
+ self._redis.hget(
+ self._get_redis_task_data_key(task_id), _MARKED_FOR_REMOVAL_FIELD
+ )
+ )
+ return False if result is None else json_loads(result)
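A minimal usage sketch of the store above, assuming a reachable Redis instance and that `RedisSettings` can be built from environment variables (as done elsewhere in this repository); the namespace and task id are placeholders:

```python
import asyncio

from settings_library.redis import RedisSettings


async def _demo() -> None:
    store = RedisStore(RedisSettings.create_from_envs(), namespace="demo")
    await store.setup()
    try:
        # task data lives in a Redis hash keyed "<NAMESPACE>:TD:<task_id>"
        await store.mark_for_removal("some-task-id")
        assert await store.is_marked_for_removal("some-task-id") is True
    finally:
        await store.shutdown()


asyncio.run(_demo())
```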
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_rpc_client.py b/packages/service-library/src/servicelib/long_running_tasks/_rpc_client.py
new file mode 100644
index 000000000000..6ad3fe9785ba
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/_rpc_client.py
@@ -0,0 +1,130 @@
+import logging
+from datetime import timedelta
+from typing import Any, Final
+
+from models_library.rabbitmq_basic_types import RPCMethodName
+from pydantic import PositiveInt, TypeAdapter
+
+from ..logging_utils import log_decorator
+from ..rabbitmq._client_rpc import RabbitMQRPCClient
+from ._rabbit_namespace import get_rabbit_namespace
+from ._serialization import loads
+from .errors import RPCTransferrableTaskError
+from .models import (
+ LRTNamespace,
+ RegisteredTaskName,
+ TaskBase,
+ TaskContext,
+ TaskId,
+ TaskStatus,
+)
+
+_logger = logging.getLogger(__name__)
+
+_RPC_TIMEOUT_SHORT_REQUESTS: Final[PositiveInt] = int(
+ timedelta(seconds=20).total_seconds()
+)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def start_task(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ namespace: LRTNamespace,
+ *,
+ registered_task_name: RegisteredTaskName,
+ unique: bool = False,
+ task_context: TaskContext | None = None,
+ task_name: str | None = None,
+ fire_and_forget: bool = False,
+ **task_kwargs: Any,
+) -> TaskId:
+ result = await rabbitmq_rpc_client.request(
+ get_rabbit_namespace(namespace),
+ TypeAdapter(RPCMethodName).validate_python("start_task"),
+ registered_task_name=registered_task_name,
+ unique=unique,
+ task_context=task_context,
+ task_name=task_name,
+ fire_and_forget=fire_and_forget,
+ **task_kwargs,
+ timeout_s=_RPC_TIMEOUT_SHORT_REQUESTS,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def list_tasks(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ namespace: LRTNamespace,
+ *,
+ task_context: TaskContext,
+) -> list[TaskBase]:
+ result = await rabbitmq_rpc_client.request(
+ get_rabbit_namespace(namespace),
+ TypeAdapter(RPCMethodName).validate_python("list_tasks"),
+ task_context=task_context,
+ timeout_s=_RPC_TIMEOUT_SHORT_REQUESTS,
+ )
+ return TypeAdapter(list[TaskBase]).validate_python(result)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_task_status(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ namespace: LRTNamespace,
+ *,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> TaskStatus:
+ result = await rabbitmq_rpc_client.request(
+ get_rabbit_namespace(namespace),
+ TypeAdapter(RPCMethodName).validate_python("get_task_status"),
+ task_context=task_context,
+ task_id=task_id,
+ timeout_s=_RPC_TIMEOUT_SHORT_REQUESTS,
+ )
+ assert isinstance(result, TaskStatus) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_task_result(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ namespace: LRTNamespace,
+ *,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> Any:
+ try:
+ serialized_result = await rabbitmq_rpc_client.request(
+ get_rabbit_namespace(namespace),
+ TypeAdapter(RPCMethodName).validate_python("get_task_result"),
+ task_context=task_context,
+ task_id=task_id,
+ timeout_s=_RPC_TIMEOUT_SHORT_REQUESTS,
+ )
+ assert isinstance(serialized_result, str) # nosec
+ return loads(serialized_result)
+ except RPCTransferrableTaskError as e:
+ decoded_error = loads(f"{e}")
+ raise decoded_error from e
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def remove_task(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ namespace: LRTNamespace,
+ *,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> None:
+ result = await rabbitmq_rpc_client.request(
+ get_rabbit_namespace(namespace),
+ TypeAdapter(RPCMethodName).validate_python("remove_task"),
+ task_context=task_context,
+ task_id=task_id,
+ timeout_s=_RPC_TIMEOUT_SHORT_REQUESTS,
+ )
+ assert result is None # nosec
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_rpc_server.py b/packages/service-library/src/servicelib/long_running_tasks/_rpc_server.py
new file mode 100644
index 000000000000..2d7ff79ac087
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/_rpc_server.py
@@ -0,0 +1,104 @@
+import logging
+from contextlib import suppress
+from typing import TYPE_CHECKING, Any
+
+from ..rabbitmq import RPCRouter
+from .errors import BaseLongRunningError, RPCTransferrableTaskError, TaskNotFoundError
+from .models import (
+ RegisteredTaskName,
+ TaskBase,
+ TaskContext,
+ TaskId,
+ TaskStatus,
+)
+
+_logger = logging.getLogger(__name__)
+
+if TYPE_CHECKING:
+ from .manager import LongRunningManager
+
+
+router = RPCRouter()
+
+
+@router.expose(reraise_if_error_type=(BaseLongRunningError,))
+async def start_task(
+ long_running_manager: "LongRunningManager",
+ *,
+ registered_task_name: RegisteredTaskName,
+ unique: bool = False,
+ task_context: TaskContext | None = None,
+ task_name: str | None = None,
+ fire_and_forget: bool = False,
+ **task_kwargs: Any,
+) -> TaskId:
+ return await long_running_manager.tasks_manager.start_task(
+ registered_task_name,
+ unique=unique,
+ task_context=task_context,
+ task_name=task_name,
+ fire_and_forget=fire_and_forget,
+ **task_kwargs,
+ )
+
+
+@router.expose(reraise_if_error_type=(BaseLongRunningError,))
+async def list_tasks(
+ long_running_manager: "LongRunningManager", *, task_context: TaskContext
+) -> list[TaskBase]:
+ return await long_running_manager.tasks_manager.list_tasks(
+ with_task_context=task_context
+ )
+
+
+@router.expose(reraise_if_error_type=(BaseLongRunningError,))
+async def get_task_status(
+ long_running_manager: "LongRunningManager",
+ *,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> TaskStatus:
+ return await long_running_manager.tasks_manager.get_task_status(
+ task_id=task_id, with_task_context=task_context
+ )
+
+
+@router.expose(reraise_if_error_type=(BaseLongRunningError, RPCTransferrableTaskError))
+async def get_task_result(
+ long_running_manager: "LongRunningManager",
+ *,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> str:
+ try:
+ result_field = await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=task_context
+ )
+ if result_field.str_error is not None:
+ raise RPCTransferrableTaskError(result_field.str_error)
+
+ if result_field.str_result is not None:
+ return result_field.str_result
+
+ msg = f"Please check {result_field=}, both fields should never be None"
+ raise ValueError(msg)
+ finally:
+ # Ensure the task is removed regardless of the result
+ with suppress(TaskNotFoundError):
+ await long_running_manager.tasks_manager.remove_task(
+ task_id,
+ with_task_context=task_context,
+ wait_for_removal=False,
+ )
+
+
+@router.expose(reraise_if_error_type=(BaseLongRunningError,))
+async def remove_task(
+ long_running_manager: "LongRunningManager",
+ *,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> None:
+ await long_running_manager.tasks_manager.remove_task(
+ task_id, with_task_context=task_context, wait_for_removal=False
+ )
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_serialization.py b/packages/service-library/src/servicelib/long_running_tasks/_serialization.py
new file mode 100644
index 000000000000..16460f7ceeed
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/_serialization.py
@@ -0,0 +1,83 @@
+import base64
+import pickle
+from abc import ABC, abstractmethod
+from typing import Any, Final, Generic, TypeVar
+
+T = TypeVar("T")
+
+
+class BaseObjectSerializer(ABC, Generic[T]):
+
+ @classmethod
+ @abstractmethod
+ def get_init_kwargs_from_object(cls, obj: T) -> dict:
+ """dictionary reppreseting the kwargs passed to the __init__ method"""
+
+ @classmethod
+ @abstractmethod
+ def prepare_object_init_kwargs(cls, data: dict) -> dict:
+ """cleanup data to be used as kwargs for the __init__ method if required"""
+
+
+_SERIALIZERS: Final[dict[type, type[BaseObjectSerializer]]] = {}
+
+
+def register_custom_serialization(
+ object_type: type, object_serializer: type[BaseObjectSerializer]
+) -> None:
+ """Register a custom serializer for a specific object type.
+
+ Arguments:
+ object_type -- the type or parent class of the object to be serialized
+ object_serializer -- custom implementation of BaseObjectSerializer for the object type
+ """
+ _SERIALIZERS[object_type] = object_serializer
+
+
+_TYPE_FIELD: Final[str] = "__pickle__type__field__"
+_MODULE_FIELD: Final[str] = "__pickle__module__field__"
+
+
+def dumps(obj: Any) -> str:
+ """Serialize object to base64-encoded string."""
+ to_serialize: Any | dict = obj
+ object_class = type(obj)
+
+ for registered_class, object_serializer in _SERIALIZERS.items():
+ if issubclass(object_class, registered_class):
+ to_serialize = {
+ _TYPE_FIELD: type(obj).__name__,
+ _MODULE_FIELD: type(obj).__module__,
+ **object_serializer.get_init_kwargs_from_object(obj),
+ }
+ break
+
+ return base64.b85encode(pickle.dumps(to_serialize)).decode("utf-8")
+
+
+def loads(obj_str: str) -> Any:
+ """Deserialize object from base64-encoded string."""
+ data = pickle.loads(base64.b85decode(obj_str)) # noqa: S301
+
+ if isinstance(data, dict) and _TYPE_FIELD in data and _MODULE_FIELD in data:
+ try:
+ # Import the module and get the exception class
+ module = __import__(data[_MODULE_FIELD], fromlist=[data[_TYPE_FIELD]])
+ exception_class = getattr(module, data[_TYPE_FIELD])
+
+ for registered_class, object_serializer in _SERIALIZERS.items():
+ if issubclass(exception_class, registered_class):
+                    # remove the bookkeeping fields before reconstructing the object
+ data.pop(_TYPE_FIELD)
+ data.pop(_MODULE_FIELD)
+
+ raise exception_class(
+ **object_serializer.prepare_object_init_kwargs(data)
+ )
+ except (ImportError, AttributeError, TypeError) as e:
+ msg = f"Could not reconstruct object from data: {data}"
+ raise ValueError(msg) from e
+
+ if isinstance(data, Exception):
+ raise data
+ return data
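To illustrate the registration hook above, here is a hypothetical serializer for a custom exception; the exception type and its `limit` field are invented for the example, while `BaseObjectSerializer`, `register_custom_serialization`, `dumps` and `loads` are the ones defined in this module:

```python
class QuotaExceededError(Exception):
    def __init__(self, limit: int) -> None:
        super().__init__(f"quota exceeded (limit={limit})")
        self.limit = limit


class QuotaExceededErrorSerializer(BaseObjectSerializer[QuotaExceededError]):
    @classmethod
    def get_init_kwargs_from_object(cls, obj: QuotaExceededError) -> dict:
        return {"limit": obj.limit}

    @classmethod
    def prepare_object_init_kwargs(cls, data: dict) -> dict:
        return data  # nothing to clean up for this example


register_custom_serialization(QuotaExceededError, QuotaExceededErrorSerializer)

payload = dumps(QuotaExceededError(limit=3))  # base64-encoded pickle of the init kwargs
# loads(payload) reconstructs and re-raises QuotaExceededError(limit=3) on the caller's side
```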
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_task.py b/packages/service-library/src/servicelib/long_running_tasks/_task.py
deleted file mode 100644
index b1b0bedfcc05..000000000000
--- a/packages/service-library/src/servicelib/long_running_tasks/_task.py
+++ /dev/null
@@ -1,423 +0,0 @@
-import asyncio
-import inspect
-import logging
-import traceback
-import urllib.parse
-from collections import deque
-from contextlib import suppress
-from datetime import datetime
-from typing import Any, Protocol
-from uuid import uuid4
-
-from models_library.api_schemas_long_running_tasks.base import (
- ProgressPercent,
- TaskProgress,
-)
-from pydantic import PositiveFloat
-
-from ._errors import (
- TaskAlreadyRunningError,
- TaskCancelledError,
- TaskExceptionError,
- TaskNotCompletedError,
- TaskNotFoundError,
-)
-from ._models import TaskId, TaskName, TaskStatus, TrackedTask
-
-logger = logging.getLogger(__name__)
-
-
-async def _await_task(task: asyncio.Task) -> None:
- await task
-
-
-def _mark_task_to_remove_if_required(
- task_id: TaskId,
- tasks_to_remove: list[TaskId],
- tracked_task: TrackedTask,
- utc_now: datetime,
- stale_timeout_s: float,
-) -> None:
- if tracked_task.fire_and_forget:
- return
-
- if tracked_task.last_status_check is None:
- # the task just added or never received a poll request
- elapsed_from_start = (utc_now - tracked_task.started).seconds
- if elapsed_from_start > stale_timeout_s:
- tasks_to_remove.append(task_id)
- else:
- # the task status was already queried by the client
- elapsed_from_last_poll = (utc_now - tracked_task.last_status_check).seconds
- if elapsed_from_last_poll > stale_timeout_s:
- tasks_to_remove.append(task_id)
-
-
-TrackedTaskGroupDict = dict[TaskId, TrackedTask]
-TaskContext = dict[str, Any]
-
-
-class TasksManager:
- """
- Monitors execution and results retrieval of a collection of asyncio.Tasks
- """
-
- def __init__(
- self,
- stale_task_check_interval_s: PositiveFloat,
- stale_task_detect_timeout_s: PositiveFloat,
- ):
- # Task groups: Every taskname maps to multiple asyncio.Task within TrackedTask model
- self._tasks_groups: dict[TaskName, TrackedTaskGroupDict] = {}
-
- self._cancel_task_timeout_s: PositiveFloat = 1.0
-
- self.stale_task_check_interval_s = stale_task_check_interval_s
- self.stale_task_detect_timeout_s = stale_task_detect_timeout_s
- self._stale_tasks_monitor_task: asyncio.Task = asyncio.create_task(
- self._stale_tasks_monitor_worker(),
- name=f"{__name__}.stale_task_monitor_worker",
- )
-
- def get_task_group(self, task_name: TaskName) -> TrackedTaskGroupDict:
- return self._tasks_groups[task_name]
-
- async def _stale_tasks_monitor_worker(self) -> None:
- """
- A task is considered stale, if the task status is not queried
- in the last `stale_task_detect_timeout_s` and it is not a fire and forget type of task.
-
- This helps detect clients who:
- - started tasks and did not remove them
- - crashed without removing the task
- - did not fetch the result
- """
- # NOTE:
- # When a task has finished with a result or error and its
- # status is being polled it would appear that there is
- # an issue with the client.
- # Since we own the client, we assume (for now) this
- # will not be the case.
-
- while await asyncio.sleep(self.stale_task_check_interval_s, result=True):
- utc_now = datetime.utcnow()
-
- tasks_to_remove: list[TaskId] = []
- for tasks in self._tasks_groups.values():
- for task_id, tracked_task in tasks.items():
- _mark_task_to_remove_if_required(
- task_id,
- tasks_to_remove,
- tracked_task,
- utc_now,
- self.stale_task_detect_timeout_s,
- )
-
- # finally remove tasks and warn
- for task_id in tasks_to_remove:
- # NOTE: task can be in the following cases:
- # - still ongoing
- # - finished with a result
- # - finished with errors
- # we just print the status from where one can infer the above
- logger.warning(
- "Removing stale task '%s' with status '%s'",
- task_id,
- self.get_task_status(
- task_id, with_task_context=None
- ).model_dump_json(),
- )
- await self.remove_task(
- task_id, with_task_context=None, reraise_errors=False
- )
-
- @staticmethod
- def create_task_id(task_name: TaskName) -> str:
- assert len(task_name) > 0
- return f"{task_name}.{uuid4()}"
-
- def is_task_running(self, task_name: TaskName) -> bool:
- """returns True if a task named `task_name` is running"""
- if task_name not in self._tasks_groups:
- return False
-
- managed_tasks_ids = list(self._tasks_groups[task_name].keys())
- return len(managed_tasks_ids) > 0
-
- def list_tasks(self, with_task_context: TaskContext | None) -> list[TrackedTask]:
- tasks: list[TrackedTask] = []
- for task_group in self._tasks_groups.values():
- if not with_task_context:
- tasks.extend(task_group.values())
- else:
- tasks.extend(
- [
- task
- for task in task_group.values()
- if task.task_context == with_task_context
- ]
- )
- return tasks
-
- def add_task(
- self,
- task_name: TaskName,
- task: asyncio.Task,
- task_progress: TaskProgress,
- task_context: TaskContext,
- task_id: TaskId,
- *,
- fire_and_forget: bool,
- ) -> TrackedTask:
- if task_name not in self._tasks_groups:
- self._tasks_groups[task_name] = {}
-
- tracked_task = TrackedTask(
- task_id=task_id,
- task=task,
- task_name=task_name,
- task_progress=task_progress,
- task_context=task_context,
- fire_and_forget=fire_and_forget,
- )
- self._tasks_groups[task_name][task_id] = tracked_task
-
- return tracked_task
-
- def _get_tracked_task(
- self, task_id: TaskId, with_task_context: TaskContext | None
- ) -> TrackedTask:
- for tasks in self._tasks_groups.values():
- if task_id in tasks:
- if with_task_context and (
- tasks[task_id].task_context != with_task_context
- ):
- raise TaskNotFoundError(task_id=task_id)
- return tasks[task_id]
-
- raise TaskNotFoundError(task_id=task_id)
-
- def get_task_status(
- self, task_id: TaskId, with_task_context: TaskContext | None
- ) -> TaskStatus:
- """
- returns: the status of the task, along with updates
- form the progress
-
- raises TaskNotFoundError if the task cannot be found
- """
- tracked_task: TrackedTask = self._get_tracked_task(task_id, with_task_context)
- tracked_task.last_status_check = datetime.utcnow()
-
- task = tracked_task.task
- done = task.done()
-
- return TaskStatus.model_validate(
- {
- "task_progress": tracked_task.task_progress,
- "done": done,
- "started": tracked_task.started,
- }
- )
-
- def get_task_result(
- self, task_id: TaskId, with_task_context: TaskContext | None
- ) -> Any:
- """
- returns: the result of the task
-
- raises TaskNotFoundError if the task cannot be found
- raises TaskCancelledError if the task was cancelled
- raises TaskNotCompletedError if the task is not completed
- """
- tracked_task = self._get_tracked_task(task_id, with_task_context)
-
- try:
- return tracked_task.task.result()
- except asyncio.InvalidStateError as exc:
- # the task is not ready
- raise TaskNotCompletedError(task_id=task_id) from exc
- except asyncio.CancelledError as exc:
- # the task was cancelled
- raise TaskCancelledError(task_id=task_id) from exc
-
- async def cancel_task(
- self, task_id: TaskId, with_task_context: TaskContext | None
- ) -> None:
- """
- cancels the task
-
- raises TaskNotFoundError if the task cannot be found
- """
- tracked_task = self._get_tracked_task(task_id, with_task_context)
- await self._cancel_tracked_task(tracked_task.task, task_id, reraise_errors=True)
-
- async def _cancel_asyncio_task(
- self, task: asyncio.Task, reference: str, *, reraise_errors: bool
- ) -> None:
- task.cancel()
- with suppress(asyncio.CancelledError):
- try:
- try:
- await asyncio.wait_for(
- _await_task(task), timeout=self._cancel_task_timeout_s
- )
- except asyncio.TimeoutError:
- logger.warning(
- "Timed out while awaiting for cancellation of '%s'", reference
- )
- except Exception: # pylint:disable=broad-except
- if reraise_errors:
- raise
-
- async def _cancel_tracked_task(
- self, task: asyncio.Task, task_id: TaskId, *, reraise_errors: bool
- ) -> None:
- try:
- await self._cancel_asyncio_task(
- task, task_id, reraise_errors=reraise_errors
- )
- except Exception as e: # pylint:disable=broad-except
- formatted_traceback = "".join(traceback.format_exception(e))
- raise TaskExceptionError(
- task_id=task_id, exception=e, traceback=formatted_traceback
- ) from e
-
- async def remove_task(
- self,
- task_id: TaskId,
- with_task_context: TaskContext | None,
- *,
- reraise_errors: bool = True,
- ) -> None:
- """cancels and removes task"""
- try:
- tracked_task = self._get_tracked_task(task_id, with_task_context)
- except TaskNotFoundError:
- if reraise_errors:
- raise
- return
- try:
- await self._cancel_tracked_task(
- tracked_task.task, task_id, reraise_errors=reraise_errors
- )
- finally:
- del self._tasks_groups[tracked_task.task_name][task_id]
-
- async def close(self) -> None:
- """
- cancels all pending tasks and removes them before closing
- """
- task_ids_to_remove: deque[TaskId] = deque()
-
- for tasks_dict in self._tasks_groups.values():
- for tracked_task in tasks_dict.values():
- task_ids_to_remove.append(tracked_task.task_id)
-
- for task_id in task_ids_to_remove:
- # when closing we do not care about pending errors
- await self.remove_task(task_id, None, reraise_errors=False)
-
- await self._cancel_asyncio_task(
- self._stale_tasks_monitor_task, "stale_monitor", reraise_errors=False
- )
-
-
-class TaskProtocol(Protocol):
- async def __call__(
- self, progress: TaskProgress, *args: Any, **kwargs: Any
- ) -> Any: ...
-
- @property
- def __name__(self) -> str: ...
-
-
-def start_task(
- tasks_manager: TasksManager,
- task: TaskProtocol,
- *,
- unique: bool = False,
- task_context: TaskContext | None = None,
- task_name: str | None = None,
- fire_and_forget: bool = False,
- **task_kwargs: Any,
-) -> TaskId:
- """
- Creates a background task from an async function.
-
- An asyncio task will be created out of it by injecting a `TaskProgress` as the first
- positional argument and adding all `handler_kwargs` as named parameters.
-
- NOTE: the progress is automatically bounded between 0 and 1
- NOTE: the `task` name must be unique in the module, otherwise when using
- the unique parameter is True, it will not be able to distinguish between
- them.
-
- Args:
- tasks_manager (TasksManager): the tasks manager
- task (TaskProtocol): the tasks to be run in the background
- unique (bool, optional): If True, then only one such named task may be run. Defaults to False.
- task_context (Optional[TaskContext], optional): a task context storage can be retrieved during the task lifetime. Defaults to None.
- task_name (Optional[str], optional): optional task name. Defaults to None.
- fire_and_forget: if True, then the task will not be cancelled if the status is never called
-
- Raises:
- TaskAlreadyRunningError: if unique is True, will raise if more than 1 such named task is started
-
- Returns:
- TaskId: the task unique identifier
- """
-
- # NOTE: If not task name is given, it will be composed of the handler's module and it's name
- # to keep the urls shorter and more meaningful.
- handler_module = inspect.getmodule(task)
- handler_module_name = handler_module.__name__ if handler_module else ""
- task_name = task_name or f"{handler_module_name}.{task.__name__}"
- task_name = urllib.parse.quote(task_name, safe="")
-
- # only one unique task can be running
- if unique and tasks_manager.is_task_running(task_name):
- managed_tasks_ids = list(tasks_manager.get_task_group(task_name).keys())
- assert len(managed_tasks_ids) == 1 # nosec
- managed_task: TrackedTask = tasks_manager.get_task_group(task_name)[
- managed_tasks_ids[0]
- ]
- raise TaskAlreadyRunningError(task_name=task_name, managed_task=managed_task)
-
- task_id = tasks_manager.create_task_id(task_name=task_name)
- task_progress = TaskProgress.create(task_id=task_id)
-
- # bind the task with progress 0 and 1
- async def _progress_task(progress: TaskProgress, handler: TaskProtocol):
- progress.update(message="starting", percent=ProgressPercent(0))
- try:
- return await handler(progress, **task_kwargs)
- finally:
- progress.update(message="finished", percent=ProgressPercent(1))
-
- async_task = asyncio.create_task(
- _progress_task(task_progress, task), name=f"{task_name}"
- )
-
- tracked_task = tasks_manager.add_task(
- task_name=task_name,
- task=async_task,
- task_progress=task_progress,
- task_context=task_context or {},
- fire_and_forget=fire_and_forget,
- task_id=task_id,
- )
-
- return tracked_task.task_id
-
-
-__all__: tuple[str, ...] = (
- "TaskAlreadyRunningError",
- "TaskCancelledError",
- "TaskId",
- "TasksManager",
- "TaskProgress",
- "TaskProtocol",
- "TaskStatus",
- "TrackedTask",
-)
diff --git a/packages/service-library/src/servicelib/long_running_tasks/constants.py b/packages/service-library/src/servicelib/long_running_tasks/constants.py
new file mode 100644
index 000000000000..b5e729665ccd
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/constants.py
@@ -0,0 +1,7 @@
+from datetime import timedelta
+from typing import Final
+
+DEFAULT_POLL_INTERVAL_S: Final[float] = 1
+
+DEFAULT_STALE_TASK_CHECK_INTERVAL: Final[timedelta] = timedelta(minutes=1)
+DEFAULT_STALE_TASK_DETECT_TIMEOUT: Final[timedelta] = timedelta(minutes=5)
diff --git a/packages/service-library/src/servicelib/long_running_tasks/_errors.py b/packages/service-library/src/servicelib/long_running_tasks/errors.py
similarity index 60%
rename from packages/service-library/src/servicelib/long_running_tasks/_errors.py
rename to packages/service-library/src/servicelib/long_running_tasks/errors.py
index 33439c6436f3..127a7ae1d5a3 100644
--- a/packages/service-library/src/servicelib/long_running_tasks/_errors.py
+++ b/packages/service-library/src/servicelib/long_running_tasks/errors.py
@@ -5,6 +5,13 @@ class BaseLongRunningError(OsparcErrorMixin, Exception):
"""base exception for this module"""
+class TaskNotRegisteredError(BaseLongRunningError):
+ msg_template: str = (
+ "no task with task_name='{task_name}' was found in the task registry tasks={tasks}. "
+ "Make sure it's registered before starting it."
+ )
+
+
class TaskAlreadyRunningError(BaseLongRunningError):
msg_template: str = "{task_name} must be unique, found: '{managed_task}'"
@@ -27,6 +34,14 @@ class TaskExceptionError(BaseLongRunningError):
)
+class TaskRaisedUnserializableError(BaseLongRunningError):
+ msg_template: str = (
+ "Task {task_id} raised an exception that could not be serialized.\n"
+ "Original exception: '{original_exception_str}'\n"
+ "As a consequence, the following error was raised: '{exception}'"
+ )
+
+
class TaskClientTimeoutError(BaseLongRunningError):
msg_template: str = (
"Timed out after {timeout} seconds while awaiting '{task_id}' to complete"
@@ -37,3 +52,10 @@ class GenericClientError(BaseLongRunningError):
msg_template: str = (
"Unexpected error while '{action}' for '{task_id}': status={status} body={body}"
)
+
+
+class RPCTransferrableTaskError(Exception):
+ """
+ The message contains the task's exception serialized as string.
+ Decode it and raise to obtain the task's original exception.
+ """
diff --git a/packages/service-library/src/servicelib/long_running_tasks/long_running_client_helper.py b/packages/service-library/src/servicelib/long_running_tasks/long_running_client_helper.py
new file mode 100644
index 000000000000..b51acaf1b862
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/long_running_client_helper.py
@@ -0,0 +1,46 @@
+import logging
+
+import redis.asyncio as aioredis
+from settings_library.redis import RedisDatabase, RedisSettings
+
+from ..logging_utils import log_context
+from ..redis._client import RedisClientSDK
+from .models import LRTNamespace
+
+_logger = logging.getLogger(__name__)
+
+
+class LongRunningClientHelper:
+ def __init__(self, redis_settings: RedisSettings):
+ self.redis_settings = redis_settings
+
+ self._client: RedisClientSDK | None = None
+
+ async def setup(self) -> None:
+ self._client = RedisClientSDK(
+ self.redis_settings.build_redis_dsn(RedisDatabase.LONG_RUNNING_TASKS),
+ client_name="long_running_tasks_cleanup_client",
+ )
+ await self._client.setup()
+
+ async def shutdown(self) -> None:
+ if self._client:
+ await self._client.shutdown()
+
+ @property
+ def _redis(self) -> aioredis.Redis:
+ assert self._client # nosec
+ return self._client.redis
+
+ async def cleanup(self, lrt_namespace: LRTNamespace) -> None:
+ """removes Redis keys associated to the LRTNamespace if they exist"""
+ keys_to_remove: list[str] = [
+ x async for x in self._redis.scan_iter(f"{lrt_namespace}*")
+ ]
+ with log_context(
+ _logger,
+ logging.DEBUG,
+ msg=f"Removing {keys_to_remove=} from Redis for {lrt_namespace=}",
+ ):
+ if len(keys_to_remove) > 0:
+ await self._redis.delete(*keys_to_remove)
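A short sketch of using the helper above during application shutdown; the settings are assumed to come from the environment and the namespace value is a placeholder:

```python
import asyncio

from settings_library.redis import RedisSettings


async def _cleanup_namespace() -> None:
    helper = LongRunningClientHelper(RedisSettings.create_from_envs())
    await helper.setup()
    try:
        await helper.cleanup("DEMO")  # removes any leftover "DEMO*" keys
    finally:
        await helper.shutdown()


asyncio.run(_cleanup_namespace())
```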
diff --git a/packages/service-library/src/servicelib/long_running_tasks/lrt_api.py b/packages/service-library/src/servicelib/long_running_tasks/lrt_api.py
new file mode 100644
index 000000000000..02f4c5265f38
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/lrt_api.py
@@ -0,0 +1,115 @@
+from typing import Any
+
+from ..rabbitmq._client_rpc import RabbitMQRPCClient
+from . import _rpc_client
+from .models import (
+ LRTNamespace,
+ RegisteredTaskName,
+ TaskBase,
+ TaskContext,
+ TaskId,
+ TaskStatus,
+)
+
+
+async def start_task(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ lrt_namespace: LRTNamespace,
+ registered_task_name: RegisteredTaskName,
+ *,
+ unique: bool = False,
+ task_context: TaskContext | None = None,
+ task_name: str | None = None,
+ fire_and_forget: bool = False,
+ **task_kwargs: Any,
+) -> TaskId:
+ """
+ Creates a background task from an async function.
+
+    On the server side, an asyncio task is created from the registered handler by injecting
+    a `TaskProgress` as the first positional argument and passing all `task_kwargs` as named
+    parameters.
+
+    NOTE: the progress is automatically bounded between 0 and 1
+    NOTE: the handler's name must be unique in its module, otherwise, when the `unique`
+    parameter is True, it will not be possible to distinguish between the tasks.
+
+    Args:
+        rabbitmq_rpc_client (RabbitMQRPCClient): RPC client used to reach the remote tasks manager
+        lrt_namespace (LRTNamespace): namespace of the remote tasks manager handling the task
+        registered_task_name (RegisteredTaskName): name under which the task handler was registered
+        unique (bool, optional): If True, then only one such named task may be run. Defaults to False.
+        task_context (Optional[TaskContext], optional): a task context storage that can be retrieved during the task lifetime. Defaults to None.
+        task_name (Optional[str], optional): optional task name. Defaults to None.
+        fire_and_forget: if True, then the task will not be auto-cancelled if its status is never queried
+
+ Raises:
+ TaskAlreadyRunningError: if unique is True, will raise if more than 1 such named task is started
+
+ Returns:
+ TaskId: the task unique identifier
+ """
+
+ return await _rpc_client.start_task(
+ rabbitmq_rpc_client,
+ lrt_namespace,
+ registered_task_name=registered_task_name,
+ unique=unique,
+ task_context=task_context,
+ task_name=task_name,
+ fire_and_forget=fire_and_forget,
+ **task_kwargs,
+ )
+
+
+async def list_tasks(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ lrt_namespace: LRTNamespace,
+ task_context: TaskContext,
+) -> list[TaskBase]:
+ return await _rpc_client.list_tasks(
+ rabbitmq_rpc_client, lrt_namespace, task_context=task_context
+ )
+
+
+async def get_task_status(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ lrt_namespace: LRTNamespace,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> TaskStatus:
+ """returns the status of a task"""
+ return await _rpc_client.get_task_status(
+ rabbitmq_rpc_client, lrt_namespace, task_id=task_id, task_context=task_context
+ )
+
+
+async def get_task_result(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ lrt_namespace: LRTNamespace,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> Any:
+ return await _rpc_client.get_task_result(
+ rabbitmq_rpc_client,
+ lrt_namespace,
+ task_context=task_context,
+ task_id=task_id,
+ )
+
+
+async def remove_task(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ lrt_namespace: LRTNamespace,
+ task_context: TaskContext,
+ task_id: TaskId,
+) -> None:
+ """cancels and removes a task
+
+    The underlying RPC call uses `_RPC_TIMEOUT_SHORT_REQUESTS` as timeout and does not wait for the removal to complete.
+ """
+ await _rpc_client.remove_task(
+ rabbitmq_rpc_client,
+ lrt_namespace,
+ task_id=task_id,
+ task_context=task_context,
+ )
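Sketch of a client-side flow built on the helpers above; import paths follow this package's layout, while the RPC client construction, task name and context values are placeholders:

```python
import asyncio

from servicelib.long_running_tasks import lrt_api
from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient


async def _run(rpc_client: RabbitMQRPCClient, lrt_namespace: str) -> None:
    ctx = {"user_id": 42}  # hypothetical task context
    task_id = await lrt_api.start_task(
        rpc_client,
        lrt_namespace,
        "my_registered_task",  # must be registered on the server side beforehand
        task_context=ctx,
    )
    while True:
        status = await lrt_api.get_task_status(rpc_client, lrt_namespace, ctx, task_id)
        if status.done:
            break
        await asyncio.sleep(1)
    # also removes the task server-side once the result is fetched
    print(await lrt_api.get_task_result(rpc_client, lrt_namespace, ctx, task_id))
```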
diff --git a/packages/service-library/src/servicelib/long_running_tasks/manager.py b/packages/service-library/src/servicelib/long_running_tasks/manager.py
new file mode 100644
index 000000000000..6e6ebe6a820f
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/manager.py
@@ -0,0 +1,87 @@
+import datetime
+from abc import ABC, abstractmethod
+
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
+
+from ..rabbitmq._client_rpc import RabbitMQRPCClient
+from ._rabbit_namespace import get_rabbit_namespace
+from ._rpc_server import router
+from .models import LRTNamespace, TaskContext
+from .task import TasksManager
+
+
+class LongRunningManager(ABC):
+ """
+    Provides a common interface for aiohttp and FastAPI services
+ """
+
+ def __init__(
+ self,
+ stale_task_check_interval: datetime.timedelta,
+ stale_task_detect_timeout: datetime.timedelta,
+ redis_settings: RedisSettings,
+ rabbit_settings: RabbitSettings,
+ lrt_namespace: LRTNamespace,
+ ):
+ self._tasks_manager = TasksManager(
+ stale_task_check_interval=stale_task_check_interval,
+ stale_task_detect_timeout=stale_task_detect_timeout,
+ redis_settings=redis_settings,
+ lrt_namespace=lrt_namespace,
+ )
+ self._lrt_namespace = lrt_namespace
+ self.rabbit_settings = rabbit_settings
+ self._rpc_server: RabbitMQRPCClient | None = None
+ self._rpc_client: RabbitMQRPCClient | None = None
+
+ @property
+ def tasks_manager(self) -> TasksManager:
+ return self._tasks_manager
+
+ @property
+ def rpc_server(self) -> RabbitMQRPCClient:
+ assert self._rpc_server is not None # nosec
+ return self._rpc_server
+
+ @property
+ def rpc_client(self) -> RabbitMQRPCClient:
+ assert self._rpc_client is not None # nosec
+ return self._rpc_client
+
+ @property
+ def lrt_namespace(self) -> LRTNamespace:
+ return self._lrt_namespace
+
+ async def setup(self) -> None:
+ await self._tasks_manager.setup()
+ self._rpc_server = await RabbitMQRPCClient.create(
+ client_name=f"lrt-server-{self.lrt_namespace}",
+ settings=self.rabbit_settings,
+ )
+ self._rpc_client = await RabbitMQRPCClient.create(
+ client_name=f"lrt-client-{self.lrt_namespace}",
+ settings=self.rabbit_settings,
+ )
+
+ await self.rpc_server.register_router(
+ router,
+ get_rabbit_namespace(self.lrt_namespace),
+ self,
+ )
+
+ async def teardown(self) -> None:
+ await self._tasks_manager.teardown()
+
+ if self._rpc_server is not None:
+ await self._rpc_server.close()
+ self._rpc_server = None
+
+ if self._rpc_client is not None:
+ await self._rpc_client.close()
+ self._rpc_client = None
+
+ @staticmethod
+ @abstractmethod
+ def get_task_context(request) -> TaskContext:
+ """return the task context based on the current request"""
diff --git a/packages/service-library/src/servicelib/long_running_tasks/models.py b/packages/service-library/src/servicelib/long_running_tasks/models.py
new file mode 100644
index 000000000000..193c5eadbde3
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/models.py
@@ -0,0 +1,137 @@
+# mypy: disable-error-code=truthy-function
+from collections.abc import Awaitable, Callable, Coroutine
+from dataclasses import dataclass
+from datetime import UTC, datetime
+from typing import Annotated, Any, TypeAlias
+
+from common_library.basic_types import DEFAULT_FACTORY
+from models_library.api_schemas_long_running_tasks.base import (
+ ProgressMessage,
+ ProgressPercent,
+ TaskId,
+ TaskProgress,
+)
+from models_library.api_schemas_long_running_tasks.tasks import (
+ TaskBase,
+ TaskGet,
+ TaskResult,
+ TaskStatus,
+)
+from pydantic import BaseModel, ConfigDict, Field, PositiveFloat, model_validator
+
+TaskType: TypeAlias = Callable[..., Coroutine[Any, Any, Any]]
+
+ProgressCallback: TypeAlias = Callable[
+ [ProgressMessage, ProgressPercent | None, TaskId], Awaitable[None]
+]
+
+RequestBody: TypeAlias = Any
+TaskContext: TypeAlias = dict[str, Any]
+
+LRTNamespace: TypeAlias = str
+
+RegisteredTaskName: TypeAlias = str
+
+
+class ResultField(BaseModel):
+ str_result: str | None = None
+ str_error: str | None = None
+
+ @model_validator(mode="after")
+ def validate_mutually_exclusive(self) -> "ResultField":
+ if self.str_result is not None and self.str_error is not None:
+ msg = "Cannot set both 'result' and 'error' - they are mutually exclusive"
+ raise ValueError(msg)
+ return self
+
+
+class TaskData(BaseModel):
+ registered_task_name: RegisteredTaskName
+ task_id: str
+ task_progress: TaskProgress
+ # NOTE: this context lifetime is with the tracked task (similar to aiohttp storage concept)
+ task_context: TaskContext
+ fire_and_forget: Annotated[
+ bool,
+ Field(
+ description="if True then the task will not be auto-cancelled if no one enquires of its status"
+ ),
+ ]
+
+ started: Annotated[datetime, Field(default_factory=lambda: datetime.now(UTC))] = (
+ DEFAULT_FACTORY
+ )
+ last_status_check: Annotated[
+ datetime | None,
+ Field(
+ description=(
+ "used to detect when if the task is not actively "
+ "polled by the client who created it"
+ )
+ ),
+ ] = None
+
+ is_done: Annotated[
+ bool,
+ Field(description="True when the task finished running with or without errors"),
+ ] = False
+ result_field: Annotated[
+ ResultField | None, Field(description="the result of the task")
+ ] = None
+ marked_for_removal: Annotated[
+ bool,
+ Field(description=("if True, indicates the task is marked for removal")),
+ ] = False
+
+ model_config = ConfigDict(
+ arbitrary_types_allowed=True,
+ json_schema_extra={
+ "examples": [
+ {
+ "registered_task_name": "a-task-name",
+ "task_id": "1a119618-7186-4bc1-b8de-7e3ff314cb7e",
+ "task_name": "running-task",
+ "task_status": "running",
+ "task_progress": {
+ "task_id": "1a119618-7186-4bc1-b8de-7e3ff314cb7e"
+ },
+ "task_context": {"key": "value"},
+ "fire_and_forget": False,
+ }
+ ]
+ },
+ )
+
+
+class ClientConfiguration(BaseModel):
+ router_prefix: str
+ default_timeout: PositiveFloat
+
+
+@dataclass(frozen=True)
+class LRTask:
+ progress: TaskProgress
+ _result: Coroutine[Any, Any, Any] | None = None
+
+ def done(self) -> bool:
+ return self._result is not None
+
+ async def result(self) -> Any:
+ if not self._result:
+ msg = "No result ready!"
+ raise ValueError(msg)
+ return await self._result
+
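+# NOTE: (assumed usage) LRTask is yielded by client-side polling helpers:
+# intermediate items carry only `progress`, while the final item also carries
+# the awaitable `_result` (see `done()`/`result()` above).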
+
+__all__: tuple[str, ...] = (
+ "ProgressMessage",
+ "ProgressPercent",
+ "TaskBase",
+ "TaskGet",
+ "TaskId",
+ "TaskProgress",
+ "TaskResult",
+ "TaskStatus",
+)
+
+# nopycln: file
diff --git a/packages/service-library/src/servicelib/long_running_tasks/task.py b/packages/service-library/src/servicelib/long_running_tasks/task.py
new file mode 100644
index 000000000000..adb76dc0699e
--- /dev/null
+++ b/packages/service-library/src/servicelib/long_running_tasks/task.py
@@ -0,0 +1,628 @@
+import asyncio
+import datetime
+import functools
+import inspect
+import logging
+import urllib.parse
+from contextlib import suppress
+from typing import Any, ClassVar, Final, Protocol, TypeAlias
+from uuid import uuid4
+
+from common_library.async_tools import cancel_wait_task
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
+from models_library.api_schemas_long_running_tasks.base import TaskProgress
+from pydantic import NonNegativeFloat, PositiveFloat
+from settings_library.redis import RedisDatabase, RedisSettings
+from tenacity import (
+ AsyncRetrying,
+ retry_unless_exception_type,
+ stop_after_delay,
+ wait_exponential,
+)
+
+from ..background_task import create_periodic_task
+from ..logging_utils import log_catch, log_context
+from ..redis import RedisClientSDK, exclusive
+from ..utils import limited_gather
+from ._redis_store import RedisStore
+from ._serialization import dumps
+from .errors import (
+ TaskAlreadyRunningError,
+ TaskCancelledError,
+ TaskNotCompletedError,
+ TaskNotFoundError,
+ TaskNotRegisteredError,
+ TaskRaisedUnserializableError,
+)
+from .models import (
+ LRTNamespace,
+ RegisteredTaskName,
+ ResultField,
+ TaskBase,
+ TaskContext,
+ TaskData,
+ TaskId,
+ TaskStatus,
+)
+
+_logger = logging.getLogger(__name__)
+
+
+_CANCEL_TASKS_CHECK_INTERVAL: Final[datetime.timedelta] = datetime.timedelta(seconds=5)
+_STATUS_UPDATE_CHECK_INTERVAL: Final[datetime.timedelta] = datetime.timedelta(seconds=1)
+_MAX_EXCLUSIVE_TASK_CANCEL_TIMEOUT: Final[NonNegativeFloat] = 5
+_TASK_REMOVAL_MAX_WAIT: Final[NonNegativeFloat] = 60
+_PARALLEL_TASKS_CANCELLATION: Final[int] = 5
+
+AllowedErrrors: TypeAlias = tuple[type[BaseException], ...]
+
+
+class TaskProtocol(Protocol):
+ async def __call__(
+ self, progress: TaskProgress, *args: Any, **kwargs: Any
+ ) -> Any: ...
+
+ @property
+ def __name__(self) -> str: ...
+
+
+class TaskRegistry:
+ _REGISTERED_TASKS: ClassVar[
+ dict[RegisteredTaskName, tuple[AllowedErrrors, TaskProtocol]]
+ ] = {}
+
+ @classmethod
+ def register(
+ cls,
+ task: TaskProtocol,
+ allowed_errors: AllowedErrrors = (),
+ **partial_kwargs,
+ ) -> None:
+ partial_task = functools.partial(task, **partial_kwargs)
+ # allows calling the partial via its original name
+ partial_task.__name__ = task.__name__ # type: ignore[attr-defined]
+ cls._REGISTERED_TASKS[task.__name__] = (allowed_errors, partial_task) # type: ignore[assignment]
+
+ @classmethod
+ def get_registered_tasks(
+ cls,
+ ) -> dict[RegisteredTaskName, tuple[AllowedErrrors, TaskProtocol]]:
+ return cls._REGISTERED_TASKS
+
+ @classmethod
+ def get_task(cls, task_name: RegisteredTaskName) -> TaskProtocol:
+ return cls._REGISTERED_TASKS[task_name][1]
+
+ @classmethod
+ def get_allowed_errors(cls, task_name: RegisteredTaskName) -> AllowedErrrors:
+ return cls._REGISTERED_TASKS[task_name][0]
+
+ @classmethod
+ def unregister(cls, task: TaskProtocol) -> None:
+ if task.__name__ in cls._REGISTERED_TASKS:
+ del cls._REGISTERED_TASKS[task.__name__]
+
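+# Example (hypothetical usage): tasks are registered once, typically at import
+# time, and later started by name via TasksManager.start_task, e.g.
+#
+#   async def my_task(progress: TaskProgress, *, value: int) -> int:
+#       await progress.update(message="working", percent=0.5)
+#       return value * 2
+#
+#   TaskRegistry.register(my_task, allowed_errors=(ValueError,))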
+
+async def _get_tasks_to_remove(
+ tracked_tasks: RedisStore,
+ stale_task_detect_timeout_s: PositiveFloat,
+) -> list[tuple[TaskId, TaskContext]]:
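+ """Returns the (task_id, task_context) pairs of tasks that are not
+ fire-and-forget and whose status was not queried within the last
+ `stale_task_detect_timeout_s` seconds."""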
+ utc_now = datetime.datetime.now(tz=datetime.UTC)
+
+ tasks_to_remove: list[tuple[TaskId, TaskContext]] = []
+
+ for tracked_task in await tracked_tasks.list_tasks_data():
+ if tracked_task.fire_and_forget:
+ continue
+
+ if tracked_task.last_status_check is None:
+ # the task was just added or never received a poll request
+ elapsed_from_start = (utc_now - tracked_task.started).total_seconds()
+ if elapsed_from_start > stale_task_detect_timeout_s:
+ tasks_to_remove.append(
+ (tracked_task.task_id, tracked_task.task_context)
+ )
+ else:
+ # the task status was already queried by the client
+ elapsed_from_last_poll = (
+ utc_now - tracked_task.last_status_check
+ ).total_seconds()
+ if elapsed_from_last_poll > stale_task_detect_timeout_s:
+ tasks_to_remove.append(
+ (tracked_task.task_id, tracked_task.task_context)
+ )
+ return tasks_to_remove
+
+
+class TasksManager: # pylint:disable=too-many-instance-attributes
+ """
+ Monitors execution and results retrieval of a collection of asyncio.Tasks
+ """
+
+ def __init__(
+ self,
+ redis_settings: RedisSettings,
+ stale_task_check_interval: datetime.timedelta,
+ stale_task_detect_timeout: datetime.timedelta,
+ lrt_namespace: LRTNamespace,
+ ):
+ # Task tracking: TaskData entries are persisted in the Redis store; asyncio.Tasks created by this process are kept in-memory
+ self._tasks_data = RedisStore(redis_settings, lrt_namespace)
+ self._created_tasks: dict[TaskId, asyncio.Task] = {}
+
+ self.stale_task_check_interval = stale_task_check_interval
+ self.stale_task_detect_timeout_s: PositiveFloat = (
+ stale_task_detect_timeout.total_seconds()
+ )
+ self.lrt_namespace = lrt_namespace
+ self.redis_settings = redis_settings
+
+ self.locks_redis_client_sdk: RedisClientSDK | None = None
+
+ # stale_tasks_monitor
+ self._task_stale_tasks_monitor: asyncio.Task | None = None
+ self._started_event_task_stale_tasks_monitor = asyncio.Event()
+
+ # cancelled_tasks_removal
+ self._task_cancelled_tasks_removal: asyncio.Task | None = None
+ self._started_event_task_cancelled_tasks_removal = asyncio.Event()
+
+ # tasks_monitor
+ self._task_tasks_monitor: asyncio.Task | None = None
+ self._started_event_task_tasks_monitor = asyncio.Event()
+
+ async def setup(self) -> None:
+ await self._tasks_data.setup()
+
+ self.locks_redis_client_sdk = RedisClientSDK(
+ self.redis_settings.build_redis_dsn(RedisDatabase.LOCKS),
+ client_name=f"{__name__}_{self.lrt_namespace}_lock",
+ )
+ await self.locks_redis_client_sdk.setup()
+
+ # stale_tasks_monitor
+ self._task_stale_tasks_monitor = create_periodic_task(
+ task=exclusive(
+ self.locks_redis_client_sdk,
+ lock_key=f"{__name__}_{self.lrt_namespace}_stale_tasks_monitor",
+ )(self._stale_tasks_monitor),
+ interval=self.stale_task_check_interval,
+ task_name=f"{__name__}.{self._stale_tasks_monitor.__name__}",
+ )
+ await self._started_event_task_stale_tasks_monitor.wait()
+
+ # cancelled_tasks_removal
+ self._task_cancelled_tasks_removal = create_periodic_task(
+ task=self._cancelled_tasks_removal,
+ interval=_CANCEL_TASKS_CHECK_INTERVAL,
+ task_name=f"{__name__}.{self._cancelled_tasks_removal.__name__}",
+ )
+ await self._started_event_task_cancelled_tasks_removal.wait()
+
+ # tasks_monitor
+ self._task_tasks_monitor = create_periodic_task(
+ task=self._tasks_monitor,
+ interval=_STATUS_UPDATE_CHECK_INTERVAL,
+ task_name=f"{__name__}.{self._tasks_monitor.__name__}",
+ )
+ await self._started_event_task_tasks_monitor.wait()
+
+ async def teardown(self) -> None:
+ # stop cancelled_tasks_removal
+ if self._task_cancelled_tasks_removal:
+ await cancel_wait_task(self._task_cancelled_tasks_removal)
+
+ # stop only tasks that are handled by this manager,
+ # otherwise it would cancel long running tasks running in different processes
+ async def _remove_local_task(task_data: TaskData) -> None:
+ with log_catch(_logger, reraise=False):
+ await self.remove_task(
+ task_data.task_id,
+ task_data.task_context,
+ wait_for_removal=False,
+ )
+ await self._attempt_to_remove_local_task(task_data.task_id)
+
+ await limited_gather(
+ *[
+ _remove_local_task(tracked_task)
+ for task_id in self._created_tasks
+ if (tracked_task := await self._tasks_data.get_task_data(task_id))
+ is not None
+ ],
+ log=_logger,
+ limit=_PARALLEL_TASKS_CANCELLATION,
+ )
+
+ # stop stale_tasks_monitor
+ if self._task_stale_tasks_monitor:
+ await cancel_wait_task(
+ self._task_stale_tasks_monitor,
+ max_delay=_MAX_EXCLUSIVE_TASK_CANCEL_TIMEOUT,
+ )
+
+ # stop tasks_monitor
+ if self._task_tasks_monitor:
+ await cancel_wait_task(self._task_tasks_monitor)
+
+ if self.locks_redis_client_sdk is not None:
+ await self.locks_redis_client_sdk.shutdown()
+
+ await self._tasks_data.shutdown()
+
+ async def _stale_tasks_monitor(self) -> None:
+ """
+ A task is considered stale if its status has not been queried
+ within the last `stale_task_detect_timeout_s` seconds and it is not a fire-and-forget task.
+
+ This helps detect clients who:
+ - started tasks and did not remove them
+ - crashed without removing the task
+ - did not fetch the result
+ """
+ # NOTE:
+ # When a task has finished with a result or error and its
+ # status is no longer being polled, it would appear that there is
+ # an issue with the client.
+ # Since we own the client, we assume (for now) this
+ # will not be the case.
+
+ self._started_event_task_stale_tasks_monitor.set()
+
+ tasks_to_remove = await _get_tasks_to_remove(
+ self._tasks_data, self.stale_task_detect_timeout_s
+ )
+
+ # finally remove tasks and warn
+ for task_id, task_context in tasks_to_remove:
+ # NOTE: task can be in the following cases:
+ # - still ongoing
+ # - finished with a result
+ # - finished with errors
+ # we just print the status from where one can infer the above
+ with suppress(TaskNotFoundError):
+ task_status = await self.get_task_status(
+ task_id, with_task_context=task_context, exclude_to_remove=False
+ )
+ with log_context(
+ _logger,
+ logging.WARNING,
+ f"Removing stale task '{task_id}' with status '{task_status.model_dump_json()}'",
+ ):
+ await self.remove_task(
+ task_id, with_task_context=task_context, wait_for_removal=True
+ )
+
+ async def _cancelled_tasks_removal(self) -> None:
+ """
+ Periodically checks which tasks are marked for removal and attempts to remove the
+ task if it's handled by this process.
+ """
+ self._started_event_task_cancelled_tasks_removal.set()
+
+ tasks_data = await self._tasks_data.list_tasks_data()
+ await limited_gather(
+ *(
+ self._attempt_to_remove_local_task(x.task_id)
+ for x in tasks_data
+ if x.marked_for_removal is True
+ ),
+ limit=_PARALLEL_TASKS_CANCELLATION,
+ )
+
+ async def _tasks_monitor(self) -> None: # noqa: C901
+ """
+ A task which monitors locally running tasks and updates their status
+ in the Redis store when they are done.
+ """
+ self._started_event_task_tasks_monitor.set()
+ task_id: TaskId
+ for task_id in set(self._created_tasks.keys()):
+ if task := self._created_tasks.get(task_id, None):
+ is_done = task.done()
+ if not is_done:
+ # task is still running, do not update
+ continue
+
+ # write to redis only when done
+ task_data = await self._tasks_data.get_task_data(task_id)
+ if task_data is None or task_data.is_done:
+ # already done and data updated in Redis
+ continue
+
+ result_field: ResultField | None = None
+ # get task result
+ try:
+ result_field = ResultField(str_result=dumps(task.result()))
+ except asyncio.InvalidStateError:
+ # task result not yet available; try again next time and see if it is done
+ continue
+ except asyncio.CancelledError:
+ result_field = ResultField(
+ str_error=dumps(TaskCancelledError(task_id=task_id))
+ )
+ # NOTE: if the task is itself cancelled it shall re-raise: see https://superfastpython.com/asyncio-cancellederror-consumed/
+ current_task = asyncio.current_task()
+ assert current_task is not None # nosec
+ if current_task.cancelling() > 0:
+ # owner function is being cancelled -> propagate cancellation
+ raise
+ except Exception as e: # pylint:disable=broad-except
+ allowed_errors = TaskRegistry.get_allowed_errors(
+ task_data.registered_task_name
+ )
+ if type(e) not in allowed_errors:
+ _logger.exception(
+ **create_troubleshooting_log_kwargs(
+ (
+ f"Execution of {task_id=} finished with unexpected error, "
+ f"only the following {allowed_errors=} are permitted"
+ ),
+ error=e,
+ error_context={
+ "task_id": task_id,
+ "task_data": task_data,
+ "namespace": self.lrt_namespace,
+ },
+ ),
+ )
+ try:
+ result_field = ResultField(str_error=dumps(e))
+ except (
+ Exception # pylint:disable=broad-except
+ ) as serialization_error:
+ _logger.exception(
+ **create_troubleshooting_log_kwargs(
+ (
+ f"Execution of {task_id=} finished with an error "
+ f"which could not be serialized"
+ ),
+ error=serialization_error,
+ tip="Check the error above for more details",
+ ),
+ )
+ result_field = ResultField(
+ str_error=dumps(
+ TaskRaisedUnserializableError(
+ task_id=task_id,
+ exception=serialization_error,
+ original_exception_str=f"{e}",
+ )
+ )
+ )
+
+ # update and store in Redis
+ updates = {"is_done": is_done, "result_field": task_data.result_field}
+ if result_field is not None:
+ updates["result_field"] = result_field
+ await self._tasks_data.update_task_data(task_id, updates=updates)
+
+ async def list_tasks(self, with_task_context: TaskContext | None) -> list[TaskBase]:
+ if not with_task_context:
+ return [
+ TaskBase(task_id=task.task_id)
+ for task in (await self._tasks_data.list_tasks_data())
+ if task.marked_for_removal is False
+ ]
+
+ return [
+ TaskBase(task_id=task.task_id)
+ for task in (await self._tasks_data.list_tasks_data())
+ if task.task_context == with_task_context
+ and task.marked_for_removal is False
+ ]
+
+ async def _get_tracked_task(
+ self, task_id: TaskId, with_task_context: TaskContext
+ ) -> TaskData:
+ task_data = await self._tasks_data.get_task_data(task_id)
+
+ if task_data is None:
+ raise TaskNotFoundError(task_id=task_id)
+
+ if with_task_context and task_data.task_context != with_task_context:
+ raise TaskNotFoundError(task_id=task_id)
+
+ return task_data
+
+ async def get_task_status(
+ self,
+ task_id: TaskId,
+ with_task_context: TaskContext,
+ *,
+ exclude_to_remove: bool = True,
+ ) -> TaskStatus:
+ """
+ returns: the status of the task, along with updates
+ from the progress
+
+ raises TaskNotFoundError if the task cannot be found
+ """
+ if exclude_to_remove and await self._tasks_data.is_marked_for_removal(task_id):
+ raise TaskNotFoundError(task_id=task_id)
+
+ task_data = await self._get_tracked_task(task_id, with_task_context)
+
+ await self._tasks_data.update_task_data(
+ task_id,
+ updates={"last_status_check": datetime.datetime.now(tz=datetime.UTC)},
+ )
+ return TaskStatus.model_validate(
+ {
+ "task_progress": task_data.task_progress,
+ "done": task_data.is_done,
+ "started": task_data.started,
+ }
+ )
+
+ async def get_allowed_errors(
+ self, task_id: TaskId, with_task_context: TaskContext
+ ) -> AllowedErrrors:
+ """
+ returns: the allowed errors for the task
+
+ raises TaskNotFoundError if the task cannot be found
+ """
+ task_data = await self._get_tracked_task(task_id, with_task_context)
+ return TaskRegistry.get_allowed_errors(task_data.registered_task_name)
+
+ async def get_task_result(
+ self, task_id: TaskId, with_task_context: TaskContext
+ ) -> ResultField:
+ """
+ returns: the result of the task wrapped in ResultField
+
+ raises TaskNotFoundError if the task cannot be found
+ raises TaskNotCompletedError if the task is not completed
+ """
+ if await self._tasks_data.is_marked_for_removal(task_id):
+ raise TaskNotFoundError(task_id=task_id)
+
+ tracked_task = await self._get_tracked_task(task_id, with_task_context)
+
+ if not tracked_task.is_done or tracked_task.result_field is None:
+ raise TaskNotCompletedError(task_id=task_id)
+
+ return tracked_task.result_field
+
+ async def _attempt_to_remove_local_task(self, task_id: TaskId) -> None:
+ """if task is running in the local process, try to remove it"""
+
+ task_to_cancel = self._created_tasks.pop(task_id, None)
+ if task_to_cancel is not None:
+ await cancel_wait_task(task_to_cancel)
+ await self._tasks_data.delete_task_data(task_id)
+
+ async def remove_task(
+ self,
+ task_id: TaskId,
+ with_task_context: TaskContext,
+ *,
+ wait_for_removal: bool,
+ ) -> None:
+ """
+ cancels and removes task
+ raises TaskNotFoundError if the task cannot be found
+ """
+ if await self._tasks_data.is_marked_for_removal(task_id):
+ raise TaskNotFoundError(task_id=task_id)
+
+ tracked_task = await self._get_tracked_task(task_id, with_task_context)
+
+ await self._tasks_data.mark_for_removal(tracked_task.task_id)
+
+ if not wait_for_removal:
+ return
+
+ # wait for task to be removed since it might not have been running
+ # in this process
+ with suppress(TaskNotFoundError):
+ async for attempt in AsyncRetrying(
+ wait=wait_exponential(max=1),
+ stop=stop_after_delay(_TASK_REMOVAL_MAX_WAIT),
+ retry=retry_unless_exception_type(TaskNotFoundError),
+ ):
+ with attempt:
+ await self._get_tracked_task(
+ tracked_task.task_id, tracked_task.task_context
+ )
+
+ def _get_task_id(self, task_name: str, *, is_unique: bool) -> TaskId:
+ suffix = "unique" if is_unique else f"{uuid4()}"
+ return f"{self.lrt_namespace}.{task_name}.{suffix}"
+
+ async def _update_progress(
+ self,
+ task_id: TaskId,
+ task_context: TaskContext,
+ task_progress: TaskProgress,
+ ) -> None:
+ # NOTE: avoids errors while updating progress, since the task could have been
+ # cancelled and its data removed
+ try:
+ tracked_data = await self._get_tracked_task(task_id, task_context)
+ tracked_data.task_progress = task_progress
+ await self._tasks_data.update_task_data(
+ task_id, updates={"task_progress": task_progress.model_dump()}
+ )
+ except TaskNotFoundError:
+ _logger.debug(
+ "Task '%s' not found while updating progress %s",
+ task_id,
+ task_progress,
+ )
+
+ async def start_task(
+ self,
+ registered_task_name: RegisteredTaskName,
+ *,
+ unique: bool,
+ task_context: TaskContext | None,
+ task_name: str | None,
+ fire_and_forget: bool,
+ **task_kwargs: Any,
+ ) -> TaskId:
+ registered_tasks = TaskRegistry.get_registered_tasks()
+ if registered_task_name not in registered_tasks:
+ raise TaskNotRegisteredError(
+ task_name=registered_task_name, tasks=registered_tasks
+ )
+
+ task = TaskRegistry.get_task(registered_task_name)
+
+ # NOTE: if no task name is given, it is composed of the handler's module and its name
+ # to keep the URLs shorter and more meaningful.
+ handler_module = inspect.getmodule(task)
+ handler_module_name = handler_module.__name__ if handler_module else ""
+ task_name = task_name or f"{handler_module_name}.{task.__name__}"
+ task_name = urllib.parse.quote(task_name, safe="")
+
+ task_id = self._get_task_id(task_name, is_unique=unique)
+
+ # only one unique task can be running
+ queried_task = await self._tasks_data.get_task_data(task_id)
+ if unique and queried_task is not None:
+ raise TaskAlreadyRunningError(
+ task_name=task_name, managed_task=queried_task
+ )
+
+ context_to_use = task_context or {}
+ task_progress = TaskProgress.create(task_id=task_id)
+ # set update callback
+ task_progress.set_update_callback(
+ functools.partial(self._update_progress, task_id, context_to_use)
+ )
+
+ async def _task_with_progress(progress: TaskProgress, handler: TaskProtocol):
+ # bracket the task with progress updates at 0 (starting) and 1 (finished)
+ await progress.update(message="starting", percent=0)
+ try:
+ return await handler(progress, **task_kwargs)
+ finally:
+ await progress.update(message="finished", percent=1)
+
+ self._created_tasks[task_id] = asyncio.create_task(
+ _task_with_progress(task_progress, task), name=task_name
+ )
+
+ tracked_task = TaskData(
+ registered_task_name=registered_task_name,
+ task_id=task_id,
+ task_progress=task_progress,
+ task_context=context_to_use,
+ fire_and_forget=fire_and_forget,
+ )
+ await self._tasks_data.add_task_data(task_id, tracked_task)
+ return tracked_task.task_id
+
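+# Example (hypothetical lifecycle): a service creates one TasksManager per
+# process and drives it explicitly, e.g.
+#
+#   manager = TasksManager(
+#       redis_settings=redis_settings,
+#       stale_task_check_interval=datetime.timedelta(seconds=10),
+#       stale_task_detect_timeout=datetime.timedelta(minutes=5),
+#       lrt_namespace="my-service",
+#   )
+#   await manager.setup()
+#   task_id = await manager.start_task(
+#       "my_task",  # must be registered via TaskRegistry.register
+#       unique=False,
+#       task_context=None,
+#       task_name=None,
+#       fire_and_forget=False,
+#   )
+#   ...
+#   await manager.teardown()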
+
+__all__: tuple[str, ...] = (
+ "TaskAlreadyRunningError",
+ "TaskCancelledError",
+ "TaskData",
+ "TaskId",
+ "TaskProgress",
+ "TaskProtocol",
+ "TaskStatus",
+ "TasksManager",
+)
diff --git a/packages/service-library/src/servicelib/rabbitmq/_client.py b/packages/service-library/src/servicelib/rabbitmq/_client.py
index ccf1445c231d..097da98b8063 100644
--- a/packages/service-library/src/servicelib/rabbitmq/_client.py
+++ b/packages/service-library/src/servicelib/rabbitmq/_client.py
@@ -6,6 +6,8 @@
from uuid import uuid4
import aio_pika
+from aiormq import ChannelInvalidStateError
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
from pydantic import NonNegativeInt
from ..logging_utils import log_catch, log_context
@@ -46,14 +48,13 @@ def _get_x_death_count(message: aio_pika.abc.AbstractIncomingMessage) -> int:
and isinstance(x_death[0], dict)
and "count" in x_death[0]
):
-
assert isinstance(x_death[0]["count"], int) # nosec
count = x_death[0]["count"]
return count
-async def _safe_nack(
+async def _nack_message(
message_handler: MessageHandler,
max_retries_upon_error: int,
message: aio_pika.abc.AbstractIncomingMessage,
@@ -73,7 +74,7 @@ async def _safe_nack(
# NOTE: puts message to the Dead Letter Exchange
await message.nack(requeue=False)
else:
- _logger.exception(
+ _logger.error(
"Handler '%s' is giving up on message '%s' with body '%s'",
message_handler,
message,
@@ -86,19 +87,49 @@ async def _on_message(
max_retries_upon_error: int,
message: aio_pika.abc.AbstractIncomingMessage,
) -> None:
- async with message.process(requeue=True, ignore_processed=True):
- try:
- with log_context(
- _logger,
- logging.DEBUG,
- msg=f"Received message from {message.exchange=}, {message.routing_key=}",
- ):
- if not await message_handler(message.body):
- await _safe_nack(message_handler, max_retries_upon_error, message)
- except Exception: # pylint: disable=broad-exception-caught
- _logger.exception("Exception raised when handling message")
- with log_catch(_logger, reraise=False):
- await _safe_nack(message_handler, max_retries_upon_error, message)
+ log_error_context = {
+ "message_id": message.message_id,
+ "message_body": message.body,
+ "message_handler": f"{message_handler}",
+ }
+ try:
+ async with message.process(requeue=True, ignore_processed=True):
+ try:
+ with log_context(
+ _logger,
+ logging.DEBUG,
+ msg=f"Received message from {message.exchange=}, {message.routing_key=}",
+ ):
+ if not await message_handler(message.body):
+ await _nack_message(
+ message_handler, max_retries_upon_error, message
+ )
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ _logger.exception(
+ **create_troubleshooting_log_kwargs(
+ "Unhandled exception raised in message handler or when nacking message",
+ error=exc,
+ error_context=log_error_context,
+ tip="This could indicate an error in the message handler, please check the message handler code",
+ )
+ )
+ with log_catch(_logger, reraise=False):
+ await _nack_message(
+ message_handler, max_retries_upon_error, message
+ )
+ except ChannelInvalidStateError as exc:
+ # NOTE: this error can happen as can be seen in aio-pika code
+ # see https://github.com/mosquito/aio-pika/blob/master/aio_pika/robust_queue.py
+ _logger.exception(
+ **create_troubleshooting_log_kwargs(
+ "Cannot process message because channel is closed. Message will be requeued by RabbitMQ",
+ error=exc,
+ error_context=log_error_context,
+ tip="This could indicate the message handler takes > 30 minutes to complete "
+ "(default time the RabbitMQ broker waits to close a channel when a "
+ "message is not acknowledged) or an issue in RabbitMQ broker itself.",
+ )
+ )
@dataclass
@@ -143,6 +174,7 @@ async def close(self) -> None:
async def _get_channel(self) -> aio_pika.abc.AbstractChannel:
assert self._connection_pool # nosec
async with self._connection_pool.acquire() as connection:
+ assert isinstance(connection, aio_pika.RobustConnection) # nosec
channel: aio_pika.abc.AbstractChannel = await connection.channel()
channel.close_callbacks.add(self._channel_close_callback)
return channel
diff --git a/packages/service-library/src/servicelib/rabbitmq/_client_base.py b/packages/service-library/src/servicelib/rabbitmq/_client_base.py
index 69720659e503..ecc483f784b5 100644
--- a/packages/service-library/src/servicelib/rabbitmq/_client_base.py
+++ b/packages/service-library/src/servicelib/rabbitmq/_client_base.py
@@ -6,9 +6,11 @@
import aio_pika
import aiormq
-from servicelib.logging_utils import log_catch
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
from settings_library.rabbit import RabbitSettings
+from ..logging_utils import log_catch
+
_DEFAULT_RABBITMQ_SERVER_HEARTBEAT_S: Final[int] = 60
_logger = logging.getLogger(__name__)
@@ -28,33 +30,49 @@ def _connection_close_callback(
exc: BaseException | None,
) -> None:
if exc:
- if isinstance(exc, asyncio.CancelledError):
- _logger.info("Rabbit connection cancelled")
- elif isinstance(exc, aiormq.exceptions.ConnectionClosed):
- _logger.info("Rabbit connection closed: %s", exc)
+ if isinstance(
+ exc, asyncio.CancelledError | aiormq.exceptions.ConnectionClosed
+ ):
+ _logger.info(
+ **create_troubleshooting_log_kwargs(
+ "RabbitMQ connection closed",
+ error=exc,
+ error_context={"sender": sender},
+ )
+ )
else:
_logger.error(
- "Rabbit connection closed with exception from %s:%s",
- type(exc),
- exc,
+ **create_troubleshooting_log_kwargs(
+ "RabbitMQ connection closed with unexpected error",
+ error=exc,
+ error_context={"sender": sender},
+ )
)
self._healthy_state = False
def _channel_close_callback(
self,
- sender: Any, # pylint: disable=unused-argument # noqa: ARG002
+ sender: Any,
exc: BaseException | None,
) -> None:
if exc:
- if isinstance(exc, asyncio.CancelledError):
- _logger.info("Rabbit channel cancelled")
- elif isinstance(exc, aiormq.exceptions.ChannelClosed):
- _logger.info("Rabbit channel closed")
+ if isinstance(
+ exc, asyncio.CancelledError | aiormq.exceptions.ChannelClosed
+ ):
+ _logger.info(
+ **create_troubleshooting_log_kwargs(
+ "RabbitMQ channel closed",
+ error=exc,
+ error_context={"sender": sender},
+ )
+ )
else:
_logger.error(
- "Rabbit channel closed with exception from %s:%s",
- type(exc),
- exc,
+ **create_troubleshooting_log_kwargs(
+ "RabbitMQ channel closed with unexpected error",
+ error=exc,
+ error_context={"sender": sender},
+ )
)
self._healthy_state = False
diff --git a/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py b/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py
index 53d9f1326585..6064be929230 100644
--- a/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py
+++ b/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py
@@ -49,7 +49,7 @@ async def _rpc_initialize(self) -> None:
self._channel = await self._connection.channel()
self._rpc = aio_pika.patterns.RPC(self._channel)
- await self._rpc.initialize()
+ await self._rpc.initialize(durable=True, auto_delete=True)
async def close(self) -> None:
with log_context(
@@ -134,6 +134,7 @@ async def register_handler(
RPCNamespacedMethodName.from_namespace_and_method(namespace, method_name),
handler,
auto_delete=True,
+ durable=True,
)
async def register_router(
diff --git a/packages/service-library/src/servicelib/rabbitmq/_errors.py b/packages/service-library/src/servicelib/rabbitmq/_errors.py
index ce58b62fd5cb..9705173203d5 100644
--- a/packages/service-library/src/servicelib/rabbitmq/_errors.py
+++ b/packages/service-library/src/servicelib/rabbitmq/_errors.py
@@ -24,7 +24,7 @@ class RemoteMethodNotRegisteredError(BaseRPCError):
class RPCServerError(BaseRPCError):
msg_template = (
"While running method '{method_name}' raised "
- "'{exc_type}': '{exc_message}'\n{traceback}"
+ "'{exc_type}' [{error_code}]: '{exc_message}'\n{traceback}"
)
diff --git a/packages/service-library/src/servicelib/rabbitmq/_rpc_router.py b/packages/service-library/src/servicelib/rabbitmq/_rpc_router.py
index 49cab08f79b2..b7902bbd409e 100644
--- a/packages/service-library/src/servicelib/rabbitmq/_rpc_router.py
+++ b/packages/service-library/src/servicelib/rabbitmq/_rpc_router.py
@@ -6,6 +6,8 @@
from dataclasses import dataclass, field
from typing import Any, TypeVar
+from common_library.error_codes import create_error_code
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
from models_library.rabbitmq_basic_types import RPCMethodName
from ..logging_utils import log_context
@@ -13,8 +15,11 @@
DecoratedCallable = TypeVar("DecoratedCallable", bound=Callable[..., Any])
-# NOTE: this is equivalent to http access logs
-_logger = logging.getLogger("rpc.access")
+
+_logger = logging.getLogger(
+ # NOTE: this logger is equivalent to http access logs
+ "rpc.access"
+)
def _create_func_msg(func, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
@@ -64,9 +69,19 @@ async def _wrapper(*args, **kwargs):
):
raise
+ error_code = create_error_code(exc)
_logger.exception(
- "Unhandled exception on the rpc-server side."
- " Re-raising as RPCServerError."
+ # NOTE: equivalent to a 500 http status code error
+ **create_troubleshooting_log_kwargs(
+ f"Unhandled exception on the rpc-server side for '{func.__name__}'",
+ error=exc,
+ error_code=error_code,
+ error_context={
+ "rpc_method": func.__name__,
+ "args": args,
+ "kwargs": kwargs,
+ },
+ )
)
# NOTE: we do not return internal exceptions over RPC
formatted_traceback = "\n".join(
@@ -77,6 +92,7 @@ async def _wrapper(*args, **kwargs):
exc_type=f"{exc.__class__.__module__}.{exc.__class__.__name__}",
exc_message=f"{exc}",
traceback=f"{formatted_traceback}",
+ error_code=error_code,
) from None
self.routes[RPCMethodName(func.__name__)] = _wrapper
diff --git a/packages/service-library/src/servicelib/rabbitmq/_utils.py b/packages/service-library/src/servicelib/rabbitmq/_utils.py
index 404adb1b6525..2fae056f38c5 100644
--- a/packages/service-library/src/servicelib/rabbitmq/_utils.py
+++ b/packages/service-library/src/servicelib/rabbitmq/_utils.py
@@ -85,6 +85,19 @@ async def declare_queue(
# NOTE: setting a name will ensure multiple instance will take their data here
queue_parameters |= {"name": queue_name}
+ # avoids deprecated `transient_nonexcl_queues` warning in RabbitMQ
+ if (
+ queue_parameters.get("durable", False) is False
+ and queue_parameters.get("exclusive", False) is False
+ ):
+ msg = (
+ "Queue must be `durable` or `exclusive`, but not both. "
+ "This is to avoid the `transient_nonexcl_queues` warning. "
+ "NOTE: if both `durable` and `exclusive` are missing they are considered False. "
+ f"{queue_parameters=}"
+ )
+ raise ValueError(msg)
+
# NOTE: if below line raises something similar to ``ChannelPreconditionFailed: PRECONDITION_FAILED``
# most likely someone changed the signature of the queues (parameters etc...)
# Safest way to deal with it:
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/containers.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/containers.py
index 2049f0a409f7..0e64ff625061 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/containers.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/containers.py
@@ -6,8 +6,9 @@
from models_library.projects_nodes_io import NodeID
from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
from pydantic import NonNegativeInt, TypeAdapter
-from servicelib.logging_utils import log_decorator
-from servicelib.rabbitmq import RabbitMQRPCClient
+
+from ....logging_utils import log_decorator
+from ....rabbitmq import RabbitMQRPCClient
_logger = logging.getLogger(__name__)
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/volumes.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/volumes.py
index 41cf2ffd8b84..07f8f9617508 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/volumes.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/volumes.py
@@ -6,8 +6,9 @@
from models_library.projects_nodes_io import NodeID
from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
from pydantic import NonNegativeInt, TypeAdapter
-from servicelib.logging_utils import log_decorator
-from servicelib.rabbitmq import RabbitMQRPCClient
+
+from ....logging_utils import log_decorator
+from ....rabbitmq import RabbitMQRPCClient
_logger = logging.getLogger(__name__)
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/async_jobs.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/async_jobs.py
index f6e1954c9368..1d2da04185c8 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/async_jobs.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/async_jobs.py
@@ -8,7 +8,6 @@
from models_library.api_schemas_rpc_async_jobs.async_jobs import (
AsyncJobGet,
AsyncJobId,
- AsyncJobNameData,
AsyncJobResult,
AsyncJobStatus,
)
@@ -27,6 +26,7 @@
wait_random_exponential,
)
+from ....celery.models import OwnerMetadata
from ....rabbitmq import RemoteMethodNotRegisteredError
from ... import RabbitMQRPCClient
@@ -41,13 +41,13 @@ async def cancel(
*,
rpc_namespace: RPCNamespace,
job_id: AsyncJobId,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
) -> None:
await rabbitmq_rpc_client.request(
rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("cancel"),
job_id=job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
timeout_s=_DEFAULT_TIMEOUT_S,
)
@@ -57,13 +57,13 @@ async def status(
*,
rpc_namespace: RPCNamespace,
job_id: AsyncJobId,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
) -> AsyncJobStatus:
_result = await rabbitmq_rpc_client.request(
rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("status"),
job_id=job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
timeout_s=_DEFAULT_TIMEOUT_S,
)
assert isinstance(_result, AsyncJobStatus)
@@ -75,13 +75,13 @@ async def result(
*,
rpc_namespace: RPCNamespace,
job_id: AsyncJobId,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
) -> AsyncJobResult:
_result = await rabbitmq_rpc_client.request(
rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("result"),
job_id=job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
timeout_s=_DEFAULT_TIMEOUT_S,
)
assert isinstance(_result, AsyncJobResult)
@@ -92,14 +92,12 @@ async def list_jobs(
rabbitmq_rpc_client: RabbitMQRPCClient,
*,
rpc_namespace: RPCNamespace,
- filter_: str,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
) -> list[AsyncJobGet]:
_result: list[AsyncJobGet] = await rabbitmq_rpc_client.request(
rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("list_jobs"),
- filter_=filter_,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
timeout_s=_DEFAULT_TIMEOUT_S,
)
return _result
@@ -110,13 +108,13 @@ async def submit(
*,
rpc_namespace: RPCNamespace,
method_name: str,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
**kwargs,
) -> AsyncJobGet:
_result = await rabbitmq_rpc_client.request(
rpc_namespace,
TypeAdapter(RPCMethodName).validate_python(method_name),
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
**kwargs,
timeout_s=_DEFAULT_TIMEOUT_S,
)
@@ -140,7 +138,7 @@ async def _wait_for_completion(
rpc_namespace: RPCNamespace,
method_name: RPCMethodName,
job_id: AsyncJobId,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
client_timeout: datetime.timedelta,
) -> AsyncGenerator[AsyncJobStatus, None]:
try:
@@ -156,7 +154,7 @@ async def _wait_for_completion(
rabbitmq_rpc_client,
rpc_namespace=rpc_namespace,
job_id=job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
)
yield job_status
if not job_status.done:
@@ -191,7 +189,7 @@ async def wait_and_get_result(
rpc_namespace: RPCNamespace,
method_name: str,
job_id: AsyncJobId,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
client_timeout: datetime.timedelta,
) -> AsyncGenerator[AsyncJobComposedResult, None]:
"""when a job is already submitted this will wait for its completion
@@ -203,7 +201,7 @@ async def wait_and_get_result(
rpc_namespace=rpc_namespace,
method_name=method_name,
job_id=job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
client_timeout=client_timeout,
):
assert job_status is not None # nosec
@@ -217,7 +215,7 @@ async def wait_and_get_result(
rabbitmq_rpc_client,
rpc_namespace=rpc_namespace,
job_id=job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
),
)
except (TimeoutError, CancelledError) as error:
@@ -226,7 +224,7 @@ async def wait_and_get_result(
rabbitmq_rpc_client,
rpc_namespace=rpc_namespace,
job_id=job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
)
except Exception as exc:
raise exc from error # NOSONAR
@@ -238,7 +236,7 @@ async def submit_and_wait(
*,
rpc_namespace: RPCNamespace,
method_name: str,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
client_timeout: datetime.timedelta,
**kwargs,
) -> AsyncGenerator[AsyncJobComposedResult, None]:
@@ -248,7 +246,7 @@ async def submit_and_wait(
rabbitmq_rpc_client,
rpc_namespace=rpc_namespace,
method_name=method_name,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
**kwargs,
)
except (TimeoutError, CancelledError) as error:
@@ -258,7 +256,7 @@ async def submit_and_wait(
rabbitmq_rpc_client,
rpc_namespace=rpc_namespace,
job_id=async_job_rpc_get.job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
)
except Exception as exc:
raise exc from error
@@ -269,7 +267,7 @@ async def submit_and_wait(
rpc_namespace=rpc_namespace,
method_name=method_name,
job_id=async_job_rpc_get.job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
client_timeout=client_timeout,
):
yield wait_and_
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/clusters.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/clusters.py
index ada0c66d26d9..ca409aa7e651 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/clusters.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/clusters.py
@@ -6,6 +6,7 @@
from models_library.rabbitmq_basic_types import RPCMethodName
from models_library.users import UserID
from models_library.wallets import WalletID
+from pydantic import TypeAdapter
from ....async_utils import run_sequentially_in_context
from ..._client_rpc import RabbitMQRPCClient
@@ -13,6 +14,10 @@
_TTL_CACHE_ON_CLUSTERS_S: Final[int] = 5
+_GET_OR_CREATE_CLUSTER_METHOD_NAME: Final[RPCMethodName] = TypeAdapter(
+ RPCMethodName
+).validate_python("get_or_create_cluster")
+
@run_sequentially_in_context(target_args=["user_id", "wallet_id"])
@cached(
@@ -32,7 +37,7 @@ async def get_or_create_cluster(
# the 2nd decorator ensure that many calls in a short time will return quickly the same value
on_demand_cluster: OnDemandCluster = await client.request(
CLUSTERS_KEEPER_RPC_NAMESPACE,
- RPCMethodName("get_or_create_cluster"),
+ _GET_OR_CREATE_CLUSTER_METHOD_NAME,
timeout_s=RPC_REMOTE_METHOD_TIMEOUT_S,
user_id=user_id,
wallet_id=wallet_id,
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations.py
index a24ed19aba90..b1b90b999a26 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations.py
@@ -6,9 +6,12 @@
DIRECTOR_V2_RPC_NAMESPACE,
)
from models_library.api_schemas_directorv2.comp_runs import (
+ ComputationCollectionRunRpcGetPage,
+ ComputationCollectionRunTaskRpcGetPage,
ComputationRunRpcGetPage,
ComputationTaskRpcGetPage,
)
+from models_library.computations import CollectionRunID
from models_library.products import ProductName
from models_library.projects import ProjectID
from models_library.rabbitmq_basic_types import RPCMethodName
@@ -114,3 +117,62 @@ async def list_computations_latest_iteration_tasks_page(
)
assert isinstance(result, ComputationTaskRpcGetPage) # nosec
return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def list_computation_collection_runs_page(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ product_name: ProductName,
+ user_id: UserID,
+ project_ids: list[ProjectID] | None,
+ filter_only_running: bool = False,
+ # pagination
+ offset: int = 0,
+ limit: int = 20,
+) -> ComputationCollectionRunRpcGetPage:
+ result = await rabbitmq_rpc_client.request(
+ DIRECTOR_V2_RPC_NAMESPACE,
+ _RPC_METHOD_NAME_ADAPTER.validate_python(
+ "list_computation_collection_runs_page"
+ ),
+ product_name=product_name,
+ user_id=user_id,
+ project_ids=project_ids,
+ filter_only_running=filter_only_running,
+ offset=offset,
+ limit=limit,
+ timeout_s=_DEFAULT_TIMEOUT_S,
+ )
+ assert isinstance(result, ComputationCollectionRunRpcGetPage) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def list_computation_collection_run_tasks_page(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ product_name: ProductName,
+ user_id: UserID,
+ collection_run_id: CollectionRunID,
+ # pagination
+ offset: int = 0,
+ limit: int = 20,
+ # ordering
+ order_by: OrderBy | None = None,
+) -> ComputationCollectionRunTaskRpcGetPage:
+ result = await rabbitmq_rpc_client.request(
+ DIRECTOR_V2_RPC_NAMESPACE,
+ _RPC_METHOD_NAME_ADAPTER.validate_python(
+ "list_computation_collection_run_tasks_page"
+ ),
+ product_name=product_name,
+ user_id=user_id,
+ collection_run_id=collection_run_id,
+ offset=offset,
+ limit=limit,
+ order_by=order_by,
+ timeout_s=_DEFAULT_TIMEOUT_S,
+ )
+ assert isinstance(result, ComputationCollectionRunTaskRpcGetPage) # nosec
+ return result
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations_tasks.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations_tasks.py
new file mode 100644
index 000000000000..5d12960444cd
--- /dev/null
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations_tasks.py
@@ -0,0 +1,41 @@
+# pylint: disable=too-many-arguments
+import logging
+from typing import Final
+
+from models_library.api_schemas_directorv2 import (
+ DIRECTOR_V2_RPC_NAMESPACE,
+)
+from models_library.api_schemas_directorv2.computations import TaskLogFileIdGet
+from models_library.projects import ProjectID
+from models_library.rabbitmq_basic_types import RPCMethodName
+from pydantic import TypeAdapter
+
+from ....logging_utils import log_decorator
+from ... import RabbitMQRPCClient
+
+_logger = logging.getLogger(__name__)
+
+
+_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName)
+
+_GET_COMPUTATION_TASK_LOG_FILE_IDS: Final[RPCMethodName] = (
+ _RPC_METHOD_NAME_ADAPTER.validate_python("get_computation_task_log_file_ids")
+)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_computation_task_log_file_ids(
+ rabbitmq_rpc_client: RabbitMQRPCClient, *, project_id: ProjectID
+) -> list[TaskLogFileIdGet]:
+ """
+ Raises:
+ ComputationalTaskMissingError
+ """
+ result = await rabbitmq_rpc_client.request(
+ DIRECTOR_V2_RPC_NAMESPACE,
+ _GET_COMPUTATION_TASK_LOG_FILE_IDS,
+ project_id=project_id,
+ )
+ assert isinstance(result, list) # nosec
+ assert all(isinstance(item, TaskLogFileIdGet) for item in result) # nosec
+ return result
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/errors.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/errors.py
new file mode 100644
index 000000000000..7d58603d1522
--- /dev/null
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/errors.py
@@ -0,0 +1,9 @@
+from ..._errors import RPCInterfaceError
+
+
+class BaseRpcError(RPCInterfaceError): # pylint: disable=too-many-ancestors
+ pass
+
+
+class ComputationalTaskMissingError(BaseRpcError): # pylint: disable=too-many-ancestors
+ msg_template = "Computational run not found for project {project_id}"
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/services.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/services.py
index fb3276ae670c..edf4a480c1fc 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/services.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/services.py
@@ -18,8 +18,9 @@
from models_library.services_types import ServicePortKey
from models_library.users import UserID
from pydantic import NonNegativeInt, TypeAdapter
-from servicelib.logging_utils import log_decorator
-from servicelib.rabbitmq import RabbitMQRPCClient
+
+from ....logging_utils import log_decorator
+from ....rabbitmq import RabbitMQRPCClient
_logger = logging.getLogger(__name__)
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/container_extensions.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/container_extensions.py
new file mode 100644
index 000000000000..ec7201a2091a
--- /dev/null
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/container_extensions.py
@@ -0,0 +1,84 @@
+import logging
+
+from models_library.projects_nodes_io import NodeID
+from models_library.rabbitmq_basic_types import RPCMethodName
+from models_library.services import ServiceOutput
+from pydantic import TypeAdapter
+
+from ....logging_utils import log_decorator
+from ... import RabbitMQRPCClient
+from ._utils import get_rpc_namespace
+
+_logger = logging.getLogger(__name__)
+
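+# NOTE: thin RPC client wrappers: the RPC namespace of every request is derived
+# from the `node_id` of the dynamic-sidecar being addressed (see `get_rpc_namespace`).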
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def toggle_ports_io(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ enable_outputs: bool,
+ enable_inputs: bool
+) -> None:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("toggle_ports_io"),
+ enable_outputs=enable_outputs,
+ enable_inputs=enable_inputs,
+ )
+ assert result is None # nosec
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def create_output_dirs(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ outputs_labels: dict[str, ServiceOutput]
+) -> None:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("create_output_dirs"),
+ outputs_labels=outputs_labels,
+ )
+ assert result is None # nosec
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def attach_container_to_network(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ container_id: str,
+ network_id: str,
+ network_aliases: list[str]
+) -> None:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("attach_container_to_network"),
+ container_id=container_id,
+ network_id=network_id,
+ network_aliases=network_aliases,
+ )
+ assert result is None # nosec
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def detach_container_from_network(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ container_id: str,
+ network_id: str
+) -> None:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("detach_container_from_network"),
+ container_id=container_id,
+ network_id=network_id,
+ )
+ assert result is None # nosec
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/containers.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/containers.py
new file mode 100644
index 000000000000..2aace161a1cc
--- /dev/null
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/containers.py
@@ -0,0 +1,87 @@
+import logging
+from typing import Any
+
+from models_library.api_schemas_directorv2.dynamic_services import ContainersComposeSpec
+from models_library.api_schemas_dynamic_sidecar.containers import ActivityInfoOrNone
+from models_library.projects_nodes_io import NodeID
+from models_library.rabbitmq_basic_types import RPCMethodName
+from pydantic import TypeAdapter
+
+from ....logging_utils import log_decorator
+from ... import RabbitMQRPCClient
+from ._utils import get_rpc_namespace
+
+_logger = logging.getLogger(__name__)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def create_compose_spec(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ containers_compose_spec: ContainersComposeSpec,
+) -> None:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("create_compose_spec"),
+ containers_compose_spec=containers_compose_spec,
+ )
+ assert result is None # nosec
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def containers_docker_inspect(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ only_status: bool,
+) -> dict[str, Any]:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("containers_docker_inspect"),
+ only_status=only_status,
+ )
+ assert isinstance(result, dict) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_containers_activity(
+ rabbitmq_rpc_client: RabbitMQRPCClient, *, node_id: NodeID
+) -> ActivityInfoOrNone:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("get_containers_activity"),
+ )
+ return TypeAdapter(ActivityInfoOrNone).validate_python(result) if result else None
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_containers_name(
+ rabbitmq_rpc_client: RabbitMQRPCClient, *, node_id: NodeID, filters: str
+) -> str:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("get_containers_name"),
+ filters=filters,
+ )
+ assert isinstance(result, str) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def inspect_container(
+ rabbitmq_rpc_client: RabbitMQRPCClient, *, node_id: NodeID, container_id: str
+) -> dict[str, Any]:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("inspect_container"),
+ container_id=container_id,
+ )
+ assert isinstance(result, dict) # nosec
+ return result
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/containers_long_running_tasks.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/containers_long_running_tasks.py
new file mode 100644
index 000000000000..b8136ebc49b1
--- /dev/null
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/containers_long_running_tasks.py
@@ -0,0 +1,172 @@
+import logging
+
+from models_library.api_schemas_directorv2.dynamic_services import ContainersCreate
+from models_library.projects_nodes_io import NodeID
+from models_library.rabbitmq_basic_types import RPCMethodName
+from pydantic import TypeAdapter
+from servicelib.long_running_tasks.models import LRTNamespace, TaskId
+
+from ....logging_utils import log_decorator
+from ... import RabbitMQRPCClient
+from ._utils import get_rpc_namespace
+
+_logger = logging.getLogger(__name__)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def pull_user_services_images(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("pull_user_services_images"),
+ lrt_namespace=lrt_namespace,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def create_user_services(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+ containers_create: ContainersCreate,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("create_user_services"),
+ lrt_namespace=lrt_namespace,
+ containers_create=containers_create,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def remove_user_services(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("remove_user_services"),
+ lrt_namespace=lrt_namespace,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def restore_user_services_state_paths(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("restore_user_services_state_paths"),
+ lrt_namespace=lrt_namespace,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def save_user_services_state_paths(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("save_user_services_state_paths"),
+ lrt_namespace=lrt_namespace,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def pull_user_services_input_ports(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+ port_keys: list[str] | None,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("pull_user_services_input_ports"),
+ lrt_namespace=lrt_namespace,
+ port_keys=port_keys,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def pull_user_services_output_ports(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+ port_keys: list[str] | None,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("pull_user_services_output_ports"),
+ lrt_namespace=lrt_namespace,
+ port_keys=port_keys,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def push_user_services_output_ports(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("push_user_services_output_ports"),
+ lrt_namespace=lrt_namespace,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def restart_user_services(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ node_id: NodeID,
+ lrt_namespace: LRTNamespace,
+) -> TaskId:
+ rpc_namespace = get_rpc_namespace(node_id)
+ result = await rabbitmq_rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("restart_user_services"),
+ lrt_namespace=lrt_namespace,
+ )
+ assert isinstance(result, TaskId) # nosec
+ return result
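+
+
+# Illustrative usage sketch (comment only, not part of the public API): each helper
+# above merely submits the RPC and returns a TaskId. How the resulting long-running
+# task is then tracked, awaited or cancelled depends on the caller's LRT tooling and
+# is an assumption not shown in this diff.
+#
+#   task_id = await restart_user_services(
+#       rabbitmq_rpc_client,
+#       node_id=node_id,
+#       lrt_namespace=lrt_namespace,
+#   )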
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/volumes.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/volumes.py
index 00fb9e78d72e..f714ed6ba3f9 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/volumes.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/volumes.py
@@ -13,7 +13,7 @@
@log_decorator(_logger, level=logging.DEBUG)
-async def save_volume_state(
+async def update_volume_status(
rabbitmq_rpc_client: RabbitMQRPCClient,
*,
node_id: NodeID,
@@ -23,7 +23,7 @@ async def save_volume_state(
rpc_namespace = get_rpc_namespace(node_id)
result = await rabbitmq_rpc_client.request(
rpc_namespace,
- TypeAdapter(RPCMethodName).validate_python("save_volume_state"),
+ TypeAdapter(RPCMethodName).validate_python("update_volume_status"),
status=status,
category=category,
)
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/paths.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/paths.py
index c1049bfc1bbb..c03be37d3937 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/paths.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/paths.py
@@ -2,14 +2,14 @@
from models_library.api_schemas_rpc_async_jobs.async_jobs import (
AsyncJobGet,
- AsyncJobNameData,
)
from models_library.api_schemas_storage import STORAGE_RPC_NAMESPACE
-from models_library.products import ProductName
from models_library.projects_nodes_io import LocationID
from models_library.rabbitmq_basic_types import RPCMethodName
from models_library.users import UserID
+from pydantic import TypeAdapter
+from ....celery.models import OwnerMetadata
from ..._client_rpc import RabbitMQRPCClient
from ..async_jobs.async_jobs import submit
@@ -17,38 +17,38 @@
async def compute_path_size(
client: RabbitMQRPCClient,
*,
- user_id: UserID,
- product_name: ProductName,
location_id: LocationID,
path: Path,
-) -> tuple[AsyncJobGet, AsyncJobNameData]:
- job_id_data = AsyncJobNameData(user_id=user_id, product_name=product_name)
+ owner_metadata: OwnerMetadata,
+    user_id: UserID,
+) -> tuple[AsyncJobGet, OwnerMetadata]:
async_job_rpc_get = await submit(
rabbitmq_rpc_client=client,
rpc_namespace=STORAGE_RPC_NAMESPACE,
- method_name=RPCMethodName("compute_path_size"),
- job_id_data=job_id_data,
+ method_name=TypeAdapter(RPCMethodName).validate_python("compute_path_size"),
+ owner_metadata=owner_metadata,
location_id=location_id,
path=path,
+ user_id=user_id,
)
- return async_job_rpc_get, job_id_data
+ return async_job_rpc_get, owner_metadata
async def delete_paths(
client: RabbitMQRPCClient,
*,
- user_id: UserID,
- product_name: ProductName,
location_id: LocationID,
paths: set[Path],
-) -> tuple[AsyncJobGet, AsyncJobNameData]:
- job_id_data = AsyncJobNameData(user_id=user_id, product_name=product_name)
+ owner_metadata: OwnerMetadata,
+    user_id: UserID,
+) -> tuple[AsyncJobGet, OwnerMetadata]:
async_job_rpc_get = await submit(
rabbitmq_rpc_client=client,
rpc_namespace=STORAGE_RPC_NAMESPACE,
- method_name=RPCMethodName("delete_paths"),
- job_id_data=job_id_data,
+ method_name=TypeAdapter(RPCMethodName).validate_python("delete_paths"),
+ owner_metadata=owner_metadata,
location_id=location_id,
paths=paths,
+ user_id=user_id,
)
- return async_job_rpc_get, job_id_data
+ return async_job_rpc_get, owner_metadata
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/simcore_s3.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/simcore_s3.py
index df78448a5752..31ca1d11440c 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/simcore_s3.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/simcore_s3.py
@@ -1,14 +1,15 @@
+from typing import Literal
+
from models_library.api_schemas_rpc_async_jobs.async_jobs import (
AsyncJobGet,
- AsyncJobNameData,
)
from models_library.api_schemas_storage import STORAGE_RPC_NAMESPACE
from models_library.api_schemas_storage.storage_schemas import FoldersBody
from models_library.api_schemas_webserver.storage import PathToExport
-from models_library.products import ProductName
from models_library.rabbitmq_basic_types import RPCMethodName
from models_library.users import UserID
from pydantic import TypeAdapter
+from servicelib.celery.models import OwnerMetadata
from ... import RabbitMQRPCClient
from ..async_jobs.async_jobs import submit
@@ -17,34 +18,38 @@
async def copy_folders_from_project(
client: RabbitMQRPCClient,
*,
- user_id: UserID,
- product_name: ProductName,
body: FoldersBody,
-) -> tuple[AsyncJobGet, AsyncJobNameData]:
- job_id_data = AsyncJobNameData(user_id=user_id, product_name=product_name)
+ owner_metadata: OwnerMetadata,
+    user_id: UserID,
+) -> tuple[AsyncJobGet, OwnerMetadata]:
async_job_rpc_get = await submit(
rabbitmq_rpc_client=client,
rpc_namespace=STORAGE_RPC_NAMESPACE,
- method_name=RPCMethodName("copy_folders_from_project"),
- job_id_data=job_id_data,
+ method_name=TypeAdapter(RPCMethodName).validate_python(
+ "copy_folders_from_project"
+ ),
+ owner_metadata=owner_metadata,
body=body,
+ user_id=user_id,
)
- return async_job_rpc_get, job_id_data
+ return async_job_rpc_get, owner_metadata
async def start_export_data(
rabbitmq_rpc_client: RabbitMQRPCClient,
*,
- user_id: UserID,
- product_name: ProductName,
paths_to_export: list[PathToExport],
-) -> tuple[AsyncJobGet, AsyncJobNameData]:
- job_id_data = AsyncJobNameData(user_id=user_id, product_name=product_name)
+ export_as: Literal["path", "download_link"],
+ owner_metadata: OwnerMetadata,
+    user_id: UserID,
+) -> tuple[AsyncJobGet, OwnerMetadata]:
async_job_rpc_get = await submit(
rabbitmq_rpc_client,
rpc_namespace=STORAGE_RPC_NAMESPACE,
method_name=TypeAdapter(RPCMethodName).validate_python("start_export_data"),
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
paths_to_export=paths_to_export,
+ export_as=export_as,
+ user_id=user_id,
)
- return async_job_rpc_get, job_id_data
+ return async_job_rpc_get, owner_metadata
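+
+
+# Illustrative usage sketch (comment only; assumes a connected RabbitMQRPCClient and a
+# prepared OwnerMetadata instance, names below are placeholders):
+#
+#   async_job_get, owner = await start_export_data(
+#       rpc_client,
+#       paths_to_export=paths,
+#       export_as="download_link",
+#       owner_metadata=owner_metadata,
+#       user_id=user_id,
+#   )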
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/api_keys.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/api_keys.py
index 0358a0e3b6ad..c84dbf84993c 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/api_keys.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/api_keys.py
@@ -1,13 +1,13 @@
import logging
-from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE
from models_library.basic_types import IDStr
-from models_library.rabbitmq_basic_types import RPCMethodName
+from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
from models_library.rpc.webserver.auth.api_keys import ApiKeyCreate, ApiKeyGet
from models_library.users import UserID
from pydantic import TypeAdapter
-from servicelib.logging_utils import log_decorator
-from servicelib.rabbitmq import RabbitMQRPCClient
+
+from .....logging_utils import log_decorator
+from .....rabbitmq import RabbitMQRPCClient
_logger = logging.getLogger(__name__)
@@ -15,13 +15,14 @@
@log_decorator(_logger, level=logging.DEBUG)
async def create_api_key(
rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
user_id: UserID,
product_name: str,
api_key: ApiKeyCreate,
) -> ApiKeyGet:
result: ApiKeyGet = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("create_api_key"),
user_id=user_id,
product_name=product_name,
@@ -35,13 +36,14 @@ async def create_api_key(
@log_decorator(_logger, level=logging.DEBUG)
async def get_api_key(
rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
user_id: UserID,
product_name: str,
api_key_id: IDStr,
) -> ApiKeyGet:
result: ApiKeyGet = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("get_api_key"),
user_id=user_id,
product_name=product_name,
@@ -54,13 +56,14 @@ async def get_api_key(
@log_decorator(_logger, level=logging.DEBUG)
async def delete_api_key_by_key(
rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
user_id: UserID,
product_name: str,
api_key: str,
) -> None:
result = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("delete_api_key_by_key"),
user_id=user_id,
product_name=product_name,
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/errors.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/errors.py
index e0c3fc2419a2..a5f494191aec 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/errors.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/errors.py
@@ -1,7 +1,11 @@
from ..._errors import RPCInterfaceError
-class ProjectNotFoundRpcError(RPCInterfaceError): ...
+class ProjectNotFoundRpcError( # pylint: disable=too-many-ancestors
+ RPCInterfaceError
+): ...
-class ProjectForbiddenRpcError(RPCInterfaceError): ...
+class ProjectForbiddenRpcError( # pylint: disable=too-many-ancestors
+ RPCInterfaceError
+): ...
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/functions_rpc_interface.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/functions_rpc_interface.py
index 0ab7e17756aa..4f017023a727 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/functions_rpc_interface.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/functions_rpc_interface.py
@@ -1,4 +1,5 @@
import logging
+from typing import Literal
from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE
from models_library.api_schemas_webserver.functions import (
@@ -16,8 +17,19 @@
RegisteredFunctionJob,
RegisteredFunctionJobCollection,
)
+from models_library.functions import (
+ FunctionClass,
+ FunctionGroupAccessRights,
+ FunctionJobStatus,
+ FunctionOutputs,
+ FunctionUserAccessRights,
+ FunctionUserApiAccessRights,
+ RegisteredFunctionJobPatch,
+ RegisteredFunctionJobWithStatus,
+)
from models_library.products import ProductName
-from models_library.rabbitmq_basic_types import RPCMethodName
+from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
+from models_library.rest_ordering import OrderBy
from models_library.rest_pagination import PageMetaInfoLimitOffset
from models_library.users import UserID
from pydantic import TypeAdapter
@@ -30,34 +42,34 @@
@log_decorator(_logger, level=logging.DEBUG)
async def register_function(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
user_id: UserID,
product_name: ProductName,
function: Function,
) -> RegisteredFunction:
- result = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("register_function"),
function=function,
user_id=user_id,
product_name=product_name,
)
- return TypeAdapter(RegisteredFunction).validate_python(
- result
- ) # Validates the result as a RegisteredFunction
+ return TypeAdapter(RegisteredFunction).validate_python(result)
@log_decorator(_logger, level=logging.DEBUG)
async def get_function(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
user_id: UserID,
product_name: ProductName,
function_id: FunctionID,
) -> RegisteredFunction:
- result = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("get_function"),
function_id=function_id,
user_id=user_id,
@@ -68,14 +80,15 @@ async def get_function(
@log_decorator(_logger, level=logging.DEBUG)
async def get_function_input_schema(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
function_id: FunctionID,
user_id: UserID,
product_name: ProductName,
) -> FunctionInputSchema:
- result = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("get_function_input_schema"),
function_id=function_id,
user_id=user_id,
@@ -86,14 +99,15 @@ async def get_function_input_schema(
@log_decorator(_logger, level=logging.DEBUG)
async def get_function_output_schema(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
function_id: FunctionID,
user_id: UserID,
product_name: ProductName,
) -> FunctionOutputSchema:
- result = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("get_function_output_schema"),
function_id=function_id,
user_id=user_id,
@@ -104,14 +118,15 @@ async def get_function_output_schema(
@log_decorator(_logger, level=logging.DEBUG)
async def delete_function(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
function_id: FunctionID,
user_id: UserID,
product_name: ProductName,
) -> None:
- result = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("delete_function"),
function_id=function_id,
user_id=user_id,
@@ -123,21 +138,29 @@ async def delete_function(
@log_decorator(_logger, level=logging.DEBUG)
async def list_functions(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
pagination_offset: int,
pagination_limit: int,
+ order_by: OrderBy | None = None,
+ filter_by_function_class: FunctionClass | None = None,
+ search_by_function_title: str | None = None,
+ search_by_multi_columns: str | None = None,
) -> tuple[list[RegisteredFunction], PageMetaInfoLimitOffset]:
result: tuple[list[RegisteredFunction], PageMetaInfoLimitOffset] = (
- await rabbitmq_rpc_client.request(
+ await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("list_functions"),
pagination_offset=pagination_offset,
pagination_limit=pagination_limit,
user_id=user_id,
product_name=product_name,
+ order_by=order_by,
+ filter_by_function_class=filter_by_function_class,
+ search_by_function_title=search_by_function_title,
+ search_by_multi_columns=search_by_multi_columns,
)
)
return TypeAdapter(
@@ -147,16 +170,18 @@ async def list_functions(
@log_decorator(_logger, level=logging.DEBUG)
async def list_function_jobs(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
pagination_limit: int,
pagination_offset: int,
filter_by_function_id: FunctionID | None = None,
+ filter_by_function_job_ids: list[FunctionJobID] | None = None,
+ filter_by_function_job_collection_id: FunctionJobCollectionID | None = None,
) -> tuple[list[RegisteredFunctionJob], PageMetaInfoLimitOffset]:
result: tuple[list[RegisteredFunctionJob], PageMetaInfoLimitOffset] = (
- await rabbitmq_rpc_client.request(
+ await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("list_function_jobs"),
user_id=user_id,
@@ -164,6 +189,8 @@ async def list_function_jobs(
pagination_offset=pagination_offset,
pagination_limit=pagination_limit,
filter_by_function_id=filter_by_function_id,
+ filter_by_function_job_ids=filter_by_function_job_ids,
+ filter_by_function_job_collection_id=filter_by_function_job_collection_id,
)
)
return TypeAdapter(
@@ -171,9 +198,43 @@ async def list_function_jobs(
).validate_python(result)
+@log_decorator(_logger, level=logging.DEBUG)
+async def list_function_jobs_with_status(
+ rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ product_name: ProductName,
+ pagination_offset: int,
+ pagination_limit: int,
+ filter_by_function_id: FunctionID | None = None,
+ filter_by_function_job_ids: list[FunctionJobID] | None = None,
+ filter_by_function_job_collection_id: FunctionJobCollectionID | None = None,
+) -> tuple[
+ list[RegisteredFunctionJobWithStatus],
+ PageMetaInfoLimitOffset,
+]:
+ result = await rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python("list_function_jobs_with_status"),
+ user_id=user_id,
+ product_name=product_name,
+ pagination_offset=pagination_offset,
+ pagination_limit=pagination_limit,
+ filter_by_function_id=filter_by_function_id,
+ filter_by_function_job_ids=filter_by_function_job_ids,
+ filter_by_function_job_collection_id=filter_by_function_job_collection_id,
+ )
+ return TypeAdapter(
+ tuple[
+ list[RegisteredFunctionJobWithStatus],
+ PageMetaInfoLimitOffset,
+ ]
+ ).validate_python(result)
+
+
@log_decorator(_logger, level=logging.DEBUG)
async def list_function_job_collections(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
@@ -181,7 +242,7 @@ async def list_function_job_collections(
pagination_offset: int,
filters: FunctionJobCollectionsListFilters | None = None,
) -> tuple[list[RegisteredFunctionJobCollection], PageMetaInfoLimitOffset]:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("list_function_job_collections"),
pagination_offset=pagination_offset,
@@ -197,14 +258,14 @@ async def list_function_job_collections(
@log_decorator(_logger, level=logging.DEBUG)
async def update_function_title(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
function_id: FunctionID,
title: str,
) -> RegisteredFunction:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("update_function_title"),
function_id=function_id,
@@ -217,14 +278,14 @@ async def update_function_title(
@log_decorator(_logger, level=logging.DEBUG)
async def update_function_description(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
function_id: FunctionID,
description: str,
) -> RegisteredFunction:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("update_function_description"),
function_id=function_id,
@@ -237,14 +298,14 @@ async def update_function_description(
@log_decorator(_logger, level=logging.DEBUG)
async def run_function(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
function_id: FunctionID,
inputs: FunctionInputs,
user_id: UserID,
product_name: ProductName,
) -> RegisteredFunctionJob:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("run_function"),
function_id=function_id,
@@ -259,13 +320,13 @@ async def run_function(
@log_decorator(_logger, level=logging.DEBUG)
async def register_function_job(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
function_job: FunctionJob,
) -> RegisteredFunctionJob:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("register_function_job"),
function_job=function_job,
@@ -277,15 +338,37 @@ async def register_function_job(
) # Validates the result as a RegisteredFunctionJob
+@log_decorator(_logger, level=logging.DEBUG)
+async def patch_registered_function_job(
+ rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ product_name: ProductName,
+ function_job_uuid: FunctionJobID,
+ registered_function_job_patch: RegisteredFunctionJobPatch,
+) -> RegisteredFunctionJob:
+ result = await rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python("patch_registered_function_job"),
+ user_id=user_id,
+ product_name=product_name,
+ function_job_uuid=function_job_uuid,
+ registered_function_job_patch=registered_function_job_patch,
+ )
+ return TypeAdapter(RegisteredFunctionJob).validate_python(
+ result
+ ) # Validates the result as a RegisteredFunctionJob
+
+
@log_decorator(_logger, level=logging.DEBUG)
async def get_function_job(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
function_job_id: FunctionJobID,
product_name: ProductName,
) -> RegisteredFunctionJob:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("get_function_job"),
function_job_id=function_job_id,
@@ -297,14 +380,94 @@ async def get_function_job(
@log_decorator(_logger, level=logging.DEBUG)
-async def delete_function_job(
+async def get_function_job_status(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ function_job_id: FunctionJobID,
+ product_name: ProductName,
+) -> FunctionJobStatus:
+ result = await rabbitmq_rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python("get_function_job_status"),
+ function_job_id=function_job_id,
+ user_id=user_id,
+ product_name=product_name,
+ )
+ return TypeAdapter(FunctionJobStatus).validate_python(result)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_function_job_outputs(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ function_job_id: FunctionJobID,
+ product_name: ProductName,
+) -> FunctionOutputs:
+ result = await rabbitmq_rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python("get_function_job_outputs"),
+ function_job_id=function_job_id,
+ user_id=user_id,
+ product_name=product_name,
+ )
+ return TypeAdapter(FunctionOutputs).validate_python(result)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def update_function_job_status(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ product_name: ProductName,
+ function_job_id: FunctionJobID,
+ job_status: FunctionJobStatus,
+ check_write_permissions: bool = True,
+) -> FunctionJobStatus:
+ result = await rabbitmq_rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python("update_function_job_status"),
+ function_job_id=function_job_id,
+ job_status=job_status,
+ user_id=user_id,
+ product_name=product_name,
+ check_write_permissions=check_write_permissions,
+ )
+ return TypeAdapter(FunctionJobStatus).validate_python(result)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def update_function_job_outputs(
rabbitmq_rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
function_job_id: FunctionJobID,
+ outputs: FunctionOutputs,
+ check_write_permissions: bool = True,
+) -> FunctionOutputs:
+ result = await rabbitmq_rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python("update_function_job_outputs"),
+ function_job_id=function_job_id,
+ outputs=outputs,
+ user_id=user_id,
+ product_name=product_name,
+ check_write_permissions=check_write_permissions,
+ )
+ return TypeAdapter(FunctionOutputs).validate_python(result)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def delete_function_job(
+ rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ product_name: ProductName,
+ function_job_id: FunctionJobID,
) -> None:
- result: None = await rabbitmq_rpc_client.request(
+ result: None = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("delete_function_job"),
function_job_id=function_job_id,
@@ -316,14 +479,14 @@ async def delete_function_job(
@log_decorator(_logger, level=logging.DEBUG)
async def find_cached_function_jobs(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
function_id: FunctionID,
inputs: FunctionInputs,
) -> list[RegisteredFunctionJob] | None:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("find_cached_function_jobs"),
function_id=function_id,
@@ -338,13 +501,13 @@ async def find_cached_function_jobs(
@log_decorator(_logger, level=logging.DEBUG)
async def register_function_job_collection(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
function_job_collection: FunctionJobCollection,
) -> RegisteredFunctionJobCollection:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("register_function_job_collection"),
function_job_collection=function_job_collection,
@@ -356,13 +519,13 @@ async def register_function_job_collection(
@log_decorator(_logger, level=logging.DEBUG)
async def get_function_job_collection(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
function_job_collection_id: FunctionJobCollectionID,
product_name: ProductName,
) -> RegisteredFunctionJobCollection:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("get_function_job_collection"),
function_job_collection_id=function_job_collection_id,
@@ -374,13 +537,13 @@ async def get_function_job_collection(
@log_decorator(_logger, level=logging.DEBUG)
async def delete_function_job_collection(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
*,
user_id: UserID,
product_name: ProductName,
function_job_collection_id: FunctionJobCollectionID,
) -> None:
- result = await rabbitmq_rpc_client.request(
+ result = await rpc_client.request(
WEBSERVER_RPC_NAMESPACE,
TypeAdapter(RPCMethodName).validate_python("delete_function_job_collection"),
function_job_collection_id=function_job_collection_id,
@@ -388,3 +551,73 @@ async def delete_function_job_collection(
product_name=product_name,
)
assert result is None # nosec
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_function_user_permissions(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ product_name: ProductName,
+ function_id: FunctionID,
+) -> FunctionUserAccessRights:
+ result = await rabbitmq_rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python("get_function_user_permissions"),
+ function_id=function_id,
+ user_id=user_id,
+ product_name=product_name,
+ )
+ return TypeAdapter(FunctionUserAccessRights).validate_python(result)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_functions_user_api_access_rights(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ product_name: ProductName,
+) -> FunctionUserApiAccessRights:
+ result = await rabbitmq_rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python(
+ "get_functions_user_api_access_rights"
+ ),
+ user_id=user_id,
+ product_name=product_name,
+ )
+ return TypeAdapter(FunctionUserApiAccessRights).validate_python(result)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def set_group_permissions(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ *,
+ user_id: UserID,
+ product_name: ProductName,
+ object_type: Literal["function", "function_job", "function_job_collection"],
+ object_ids: list[FunctionID | FunctionJobID | FunctionJobCollectionID],
+ permission_group_id: int,
+ read: bool | None = None,
+ write: bool | None = None,
+ execute: bool | None = None,
+) -> list[
+ tuple[
+ FunctionID | FunctionJobID | FunctionJobCollectionID, FunctionGroupAccessRights
+ ]
+]:
+ result = await rabbitmq_rpc_client.request(
+ WEBSERVER_RPC_NAMESPACE,
+ TypeAdapter(RPCMethodName).validate_python("set_group_permissions"),
+ user_id=user_id,
+ product_name=product_name,
+ object_type=object_type,
+ object_ids=object_ids,
+ permission_group_id=permission_group_id,
+ read=read,
+ write=write,
+ execute=execute,
+ )
+ return TypeAdapter(
+        list[
+            tuple[
+                FunctionID | FunctionJobID | FunctionJobCollectionID,
+                FunctionGroupAccessRights,
+            ]
+        ]
+ ).validate_python(result)
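+
+
+# Illustrative usage sketch (comment only; placeholder values). Grants read/execute on
+# two functions to a single group and returns the resulting access rights per object:
+#
+#   updated = await set_group_permissions(
+#       rabbitmq_rpc_client,
+#       user_id=user_id,
+#       product_name=product_name,
+#       object_type="function",
+#       object_ids=[function_id_a, function_id_b],
+#       permission_group_id=group_id,
+#       read=True,
+#       execute=True,
+#   )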
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/licensed_items.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/licensed_items.py
index acb367de27b0..4bf2c0fec6d8 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/licensed_items.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/licensed_items.py
@@ -1,13 +1,12 @@
import logging
-from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE
from models_library.api_schemas_webserver.licensed_items import LicensedItemRpcGetPage
from models_library.api_schemas_webserver.licensed_items_checkouts import (
LicensedItemCheckoutRpcGet,
)
from models_library.licenses import LicensedItemID
from models_library.products import ProductName
-from models_library.rabbitmq_basic_types import RPCMethodName
+from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
from models_library.resource_tracker_licensed_items_checkouts import (
LicensedItemCheckoutID,
)
@@ -15,22 +14,24 @@
from models_library.users import UserID
from models_library.wallets import WalletID
from pydantic import TypeAdapter
-from servicelib.logging_utils import log_decorator
-from servicelib.rabbitmq import RabbitMQRPCClient
+
+from .....logging_utils import log_decorator
+from .....rabbitmq import RabbitMQRPCClient
_logger = logging.getLogger(__name__)
@log_decorator(_logger, level=logging.DEBUG)
async def get_licensed_items(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
product_name: str,
offset: int = 0,
limit: int = 20,
) -> LicensedItemRpcGetPage:
- result: LicensedItemRpcGetPage = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result: LicensedItemRpcGetPage = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("get_licensed_items"),
product_name=product_name,
offset=offset,
@@ -42,7 +43,8 @@ async def get_licensed_items(
@log_decorator(_logger, level=logging.DEBUG)
async def get_available_licensed_items_for_wallet(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
product_name: ProductName,
wallet_id: WalletID,
@@ -50,8 +52,8 @@ async def get_available_licensed_items_for_wallet(
offset: int = 0,
limit: int = 20,
) -> LicensedItemRpcGetPage:
- result: LicensedItemRpcGetPage = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result: LicensedItemRpcGetPage = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python(
"get_available_licensed_items_for_wallet"
),
@@ -67,7 +69,8 @@ async def get_available_licensed_items_for_wallet(
@log_decorator(_logger, level=logging.DEBUG)
async def checkout_licensed_item_for_wallet(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
product_name: ProductName,
user_id: UserID,
@@ -76,8 +79,8 @@ async def checkout_licensed_item_for_wallet(
num_of_seats: int,
service_run_id: ServiceRunID,
) -> LicensedItemCheckoutRpcGet:
- result = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("checkout_licensed_item_for_wallet"),
licensed_item_id=licensed_item_id,
product_name=product_name,
@@ -92,14 +95,15 @@ async def checkout_licensed_item_for_wallet(
@log_decorator(_logger, level=logging.DEBUG)
async def release_licensed_item_for_wallet(
- rabbitmq_rpc_client: RabbitMQRPCClient,
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
product_name: ProductName,
user_id: UserID,
licensed_item_checkout_id: LicensedItemCheckoutID,
) -> LicensedItemCheckoutRpcGet:
- result = await rabbitmq_rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ result = await rpc_client.request(
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("release_licensed_item_for_wallet"),
product_name=product_name,
user_id=user_id,
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/payments.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/payments.py
new file mode 100644
index 000000000000..685dd706d3ce
--- /dev/null
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/payments.py
@@ -0,0 +1,32 @@
+import logging
+from decimal import Decimal
+
+from models_library.payments import InvoiceDataGet
+from models_library.products import ProductName
+from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
+from models_library.users import UserID
+from pydantic import TypeAdapter
+from servicelib.logging_utils import log_decorator
+from servicelib.rabbitmq import RabbitMQRPCClient
+
+_logger = logging.getLogger(__name__)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_invoice_data(
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
+ *,
+ user_id: UserID,
+ dollar_amount: Decimal,
+ product_name: ProductName,
+) -> InvoiceDataGet:
+ result: InvoiceDataGet = await rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("get_invoice_data"),
+ user_id=user_id,
+ dollar_amount=dollar_amount,
+ product_name=product_name,
+ )
+ assert isinstance(result, InvoiceDataGet) # nosec
+ return result
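+
+
+# Illustrative usage sketch (comment only; which RPCNamespace to pass depends on the
+# deployment and is an assumption here):
+#
+#   invoice_data = await get_invoice_data(
+#       rpc_client,
+#       rpc_namespace,
+#       user_id=user_id,
+#       dollar_amount=Decimal("25.00"),
+#       product_name=product_name,
+#   )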
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/products.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/products.py
new file mode 100644
index 000000000000..6c1dc9744669
--- /dev/null
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/products.py
@@ -0,0 +1,41 @@
+import logging
+from decimal import Decimal
+
+from models_library.api_schemas_webserver.products import CreditResultRpcGet
+from models_library.products import ProductName
+from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
+from pydantic import TypeAdapter
+from servicelib.logging_utils import log_decorator
+from servicelib.rabbitmq import RabbitMQRPCClient
+
+_logger = logging.getLogger(__name__)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+async def get_credit_amount(
+ rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
+ *,
+ dollar_amount: Decimal,
+ product_name: ProductName,
+) -> CreditResultRpcGet:
+ """
+ Get credit amount for a specific dollar amount and product.
+
+ Args:
+ rpc_client: RPC client to communicate with the webserver
+ rpc_namespace: Namespace for the RPC call
+ dollar_amount: The amount in dollars to be converted to credits
+ product_name: The product for which to calculate the credit amount
+
+ Returns:
+ Credit result information containing the credit amount
+ """
+ result: CreditResultRpcGet = await rpc_client.request(
+ rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("get_credit_amount"),
+ dollar_amount=dollar_amount,
+ product_name=product_name,
+ )
+ assert isinstance(result, CreditResultRpcGet) # nosec
+ return result
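+
+
+# Illustrative usage sketch (comment only; placeholder values):
+#
+#   credit_result = await get_credit_amount(
+#       rpc_client,
+#       rpc_namespace,
+#       dollar_amount=Decimal("10"),
+#       product_name=product_name,
+#   )
+#   # the returned CreditResultRpcGet carries the computed credit amount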
diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/projects.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/projects.py
index 15f40d66011e..02781d1cff54 100644
--- a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/projects.py
+++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/projects.py
@@ -1,14 +1,14 @@
import logging
from typing import cast
-from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE
from models_library.products import ProductName
from models_library.projects import ProjectID
-from models_library.rabbitmq_basic_types import RPCMethodName
+from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
from models_library.rest_pagination import PageOffsetInt
from models_library.rpc.webserver.projects import (
ListProjectsMarkedAsJobRpcFilters,
PageRpcProjectJobRpcGet,
+ ProjectJobRpcGet,
)
from models_library.rpc_pagination import (
DEFAULT_NUMBER_OF_ITEMS_PER_PAGE,
@@ -16,8 +16,9 @@
)
from models_library.users import UserID
from pydantic import TypeAdapter, validate_call
-from servicelib.logging_utils import log_decorator
-from servicelib.rabbitmq import RabbitMQRPCClient
+
+from ....logging_utils import log_decorator
+from ....rabbitmq import RabbitMQRPCClient
_logger = logging.getLogger(__name__)
@@ -26,20 +27,23 @@
@validate_call(config={"arbitrary_types_allowed": True})
async def mark_project_as_job(
rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
product_name: ProductName,
user_id: UserID,
project_uuid: ProjectID,
job_parent_resource_name: str,
+ storage_assets_deleted: bool,
) -> None:
result = await rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("mark_project_as_job"),
product_name=product_name,
user_id=user_id,
project_uuid=project_uuid,
job_parent_resource_name=job_parent_resource_name,
+ storage_assets_deleted=storage_assets_deleted,
)
assert result is None
@@ -48,6 +52,7 @@ async def mark_project_as_job(
@validate_call(config={"arbitrary_types_allowed": True})
async def list_projects_marked_as_jobs(
rpc_client: RabbitMQRPCClient,
+ rpc_namespace: RPCNamespace,
*,
product_name: ProductName,
user_id: UserID,
@@ -57,7 +62,7 @@ async def list_projects_marked_as_jobs(
filters: ListProjectsMarkedAsJobRpcFilters | None = None,
) -> PageRpcProjectJobRpcGet:
result = await rpc_client.request(
- WEBSERVER_RPC_NAMESPACE,
+ rpc_namespace,
TypeAdapter(RPCMethodName).validate_python("list_projects_marked_as_jobs"),
product_name=product_name,
user_id=user_id,
@@ -67,3 +72,25 @@ async def list_projects_marked_as_jobs(
)
assert TypeAdapter(PageRpcProjectJobRpcGet).validate_python(result) # nosec
return cast(PageRpcProjectJobRpcGet, result)
+
+
+@log_decorator(_logger, level=logging.DEBUG)
+@validate_call(config={"arbitrary_types_allowed": True})
+async def get_project_marked_as_job(
+    rpc_client: RabbitMQRPCClient,
+    rpc_namespace: RPCNamespace,
+    *,
+    product_name: ProductName,
+    user_id: UserID,
+    project_uuid: ProjectID,
+    job_parent_resource_name: str,
+) -> ProjectJobRpcGet:
+    result = await rpc_client.request(
+        rpc_namespace,
+ TypeAdapter(RPCMethodName).validate_python("get_project_marked_as_job"),
+ product_name=product_name,
+ user_id=user_id,
+ project_uuid=project_uuid,
+ job_parent_resource_name=job_parent_resource_name,
+ )
+ assert TypeAdapter(ProjectJobRpcGet).validate_python(result) # nosec
+ return cast(ProjectJobRpcGet, result)
diff --git a/packages/service-library/src/servicelib/redis/__init__.py b/packages/service-library/src/servicelib/redis/__init__.py
index 9e63a9f6525c..08d1ff40c47d 100644
--- a/packages/service-library/src/servicelib/redis/__init__.py
+++ b/packages/service-library/src/servicelib/redis/__init__.py
@@ -6,28 +6,40 @@
CouldNotConnectToRedisError,
LockLostError,
ProjectLockError,
+ SemaphoreAcquisitionError,
+ SemaphoreNotAcquiredError,
)
from ._models import RedisManagerDBConfig
+from ._project_document_version import (
+ PROJECT_DB_UPDATE_REDIS_LOCK_KEY,
+ PROJECT_DOCUMENT_VERSION_KEY,
+ increment_and_return_project_document_version,
+)
from ._project_lock import (
get_project_locked_state,
is_project_locked,
with_project_locked,
)
+from ._semaphore_decorator import with_limited_concurrency
from ._utils import handle_redis_returns_union_types
__all__: tuple[str, ...] = (
+ "PROJECT_DB_UPDATE_REDIS_LOCK_KEY",
+ "PROJECT_DOCUMENT_VERSION_KEY",
"CouldNotAcquireLockError",
"CouldNotConnectToRedisError",
- "exclusive",
- "get_project_locked_state",
- "handle_redis_returns_union_types",
- "is_project_locked",
"LockLostError",
"ProjectLockError",
"RedisClientSDK",
"RedisClientsManager",
"RedisManagerDBConfig",
+ "SemaphoreAcquisitionError",
+ "SemaphoreNotAcquiredError",
+ "exclusive",
+ "get_project_locked_state",
+ "handle_redis_returns_union_types",
+ "increment_and_return_project_document_version",
+ "is_project_locked",
+ "with_limited_concurrency",
"with_project_locked",
)
-
-# nopycln: file
diff --git a/packages/service-library/src/servicelib/redis/_client.py b/packages/service-library/src/servicelib/redis/_client.py
index c2a081541104..ee4e9a2040e0 100644
--- a/packages/service-library/src/servicelib/redis/_client.py
+++ b/packages/service-library/src/servicelib/redis/_client.py
@@ -8,24 +8,34 @@
import redis.asyncio as aioredis
import redis.exceptions
+import tenacity
+from common_library.async_tools import cancel_wait_task
from redis.asyncio.lock import Lock
from redis.asyncio.retry import Retry
from redis.backoff import ExponentialBackoff
-from ..async_utils import cancel_wait_task
from ..background_task import periodic
from ..logging_utils import log_catch, log_context
from ._constants import (
DEFAULT_DECODE_RESPONSES,
DEFAULT_HEALTH_CHECK_INTERVAL,
DEFAULT_LOCK_TTL,
- DEFAULT_SOCKET_TIMEOUT,
)
_logger = logging.getLogger(__name__)
-# SEE https://github.com/ITISFoundation/osparc-simcore/pull/7077
-_HEALTHCHECK_TASK_TIMEOUT_S: Final[float] = 3.0
+_HEALTHCHECK_TIMEOUT_S: Final[float] = 3.0
+
+
+@tenacity.retry(
+ wait=tenacity.wait_fixed(2),
+ stop=tenacity.stop_after_delay(20),
+ before_sleep=tenacity.before_sleep_log(_logger, logging.INFO),
+ reraise=True,
+)
+async def wait_till_redis_is_responsive(client: aioredis.Redis) -> None:
+ if not await client.ping():
+ raise tenacity.TryAgain
@dataclass
@@ -36,8 +46,9 @@ class RedisClientSDK:
health_check_interval: datetime.timedelta = DEFAULT_HEALTH_CHECK_INTERVAL
_client: aioredis.Redis = field(init=False)
- _health_check_task: Task | None = None
- _health_check_task_started_event: asyncio.Event | None = None
+ _task_health_check: Task | None = None
+ _started_event_task_health_check: asyncio.Event | None = None
+ _cancelled_event_task_health_check: asyncio.Event | None = None
_is_healthy: bool = False
@property
@@ -54,26 +65,32 @@ def __post_init__(self) -> None:
redis.exceptions.ConnectionError,
],
retry_on_timeout=True,
- socket_timeout=DEFAULT_SOCKET_TIMEOUT.total_seconds(),
+            socket_timeout=None,  # NOTE: setting a timeout here can lead to issues with long-running commands
encoding="utf-8",
decode_responses=self.decode_responses,
client_name=self.client_name,
)
- # NOTE: connection is done here already
self._is_healthy = False
- self._health_check_task_started_event = asyncio.Event()
+ self._started_event_task_health_check = asyncio.Event()
+ self._cancelled_event_task_health_check = asyncio.Event()
+ async def setup(self) -> None:
@periodic(interval=self.health_check_interval)
async def _periodic_check_health() -> None:
- assert self._health_check_task_started_event # nosec
- self._health_check_task_started_event.set()
+ assert self._started_event_task_health_check # nosec
+ assert self._cancelled_event_task_health_check # nosec
+ self._started_event_task_health_check.set()
self._is_healthy = await self.ping()
+ if self._cancelled_event_task_health_check.is_set():
+ raise asyncio.CancelledError
- self._health_check_task = asyncio.create_task(
+ self._task_health_check = asyncio.create_task(
_periodic_check_health(),
name=f"redis_service_health_check_{self.redis_dsn}__{uuid4()}",
)
+ await wait_till_redis_is_responsive(self._client)
+
_logger.info(
"Connection to %s succeeded with %s",
f"redis at {self.redis_dsn=}",
@@ -84,21 +101,21 @@ async def shutdown(self) -> None:
with log_context(
_logger, level=logging.DEBUG, msg=f"Shutdown RedisClientSDK {self}"
):
- if self._health_check_task:
- assert self._health_check_task_started_event # nosec
- # NOTE: wait for the health check task to have started once before we can cancel it
- await self._health_check_task_started_event.wait()
- await cancel_wait_task(
- self._health_check_task, max_delay=_HEALTHCHECK_TASK_TIMEOUT_S
- )
+ if self._task_health_check:
+ assert self._started_event_task_health_check # nosec
+ await self._started_event_task_health_check.wait()
+ assert self._cancelled_event_task_health_check # nosec
+ self._cancelled_event_task_health_check.set()
+ await cancel_wait_task(self._task_health_check, max_delay=None)
await self._client.aclose(close_connection_pool=True)
async def ping(self) -> bool:
with log_catch(_logger, reraise=False):
# NOTE: retry_* input parameters from aioredis.from_url do not apply for the ping call
- await self._client.ping()
+ await asyncio.wait_for(self._client.ping(), timeout=_HEALTHCHECK_TIMEOUT_S)
return True
+
return False
@property
diff --git a/packages/service-library/src/servicelib/redis/_clients_manager.py b/packages/service-library/src/servicelib/redis/_clients_manager.py
index 60b93360b88d..758977f8526b 100644
--- a/packages/service-library/src/servicelib/redis/_clients_manager.py
+++ b/packages/service-library/src/servicelib/redis/_clients_manager.py
@@ -27,6 +27,7 @@ async def setup(self) -> None:
health_check_interval=config.health_check_interval,
client_name=f"{self.client_name}",
)
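+        # NOTE: RedisClientSDK no longer connects eagerly on construction; setup()
+        # below waits until Redis answers PING and starts the periodic health check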
+ await self._client_sdks[config.database].setup()
async def shutdown(self) -> None:
await asyncio.gather(
diff --git a/packages/service-library/src/servicelib/redis/_constants.py b/packages/service-library/src/servicelib/redis/_constants.py
index 6a10c6b75b0e..845e70d7fa8b 100644
--- a/packages/service-library/src/servicelib/redis/_constants.py
+++ b/packages/service-library/src/servicelib/redis/_constants.py
@@ -3,9 +3,16 @@
from pydantic import NonNegativeInt
+DEFAULT_EXPECTED_LOCK_OVERALL_TIME: Final[datetime.timedelta] = datetime.timedelta(
+ seconds=30
+)
DEFAULT_LOCK_TTL: Final[datetime.timedelta] = datetime.timedelta(seconds=10)
-DEFAULT_SOCKET_TIMEOUT: Final[datetime.timedelta] = datetime.timedelta(seconds=30)
+DEFAULT_SEMAPHORE_BLOCK_TIMEOUT: Final[datetime.timedelta] = datetime.timedelta(
+ seconds=30
+)
+DEFAULT_SEMAPHORE_TTL: Final[datetime.timedelta] = datetime.timedelta(seconds=10)
+SEMAPHORE_KEY_PREFIX: Final[str] = "semaphores:"
DEFAULT_DECODE_RESPONSES: Final[bool] = True
DEFAULT_HEALTH_CHECK_INTERVAL: Final[datetime.timedelta] = datetime.timedelta(seconds=5)
diff --git a/packages/service-library/src/servicelib/redis/_decorators.py b/packages/service-library/src/servicelib/redis/_decorators.py
index 6d686a33af59..63b1019ba656 100644
--- a/packages/service-library/src/servicelib/redis/_decorators.py
+++ b/packages/service-library/src/servicelib/redis/_decorators.py
@@ -1,5 +1,4 @@
import asyncio
-import contextlib
import functools
import logging
import socket
@@ -9,11 +8,13 @@
import arrow
import redis.exceptions
+from common_library.async_tools import cancel_wait_task
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
from redis.asyncio.lock import Lock
from ..background_task import periodic
from ._client import RedisClientSDK
-from ._constants import DEFAULT_LOCK_TTL
+from ._constants import DEFAULT_EXPECTED_LOCK_OVERALL_TIME, DEFAULT_LOCK_TTL
from ._errors import CouldNotAcquireLockError, LockLostError
from ._utils import auto_extend_lock
@@ -23,9 +24,9 @@
R = TypeVar("R")
_EXCLUSIVE_TASK_NAME: Final[str] = "exclusive/{module_name}.{func_name}"
-_EXCLUSIVE_AUTO_EXTEND_TASK_NAME: Final[
- str
-] = "exclusive/autoextend_lock_{redis_lock_key}"
+_EXCLUSIVE_AUTO_EXTEND_TASK_NAME: Final[str] = (
+ "exclusive/autoextend_lock_{redis_lock_key}"
+)
@periodic(interval=DEFAULT_LOCK_TTL / 2, raise_on_error=True)
@@ -94,6 +95,7 @@ async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
):
raise CouldNotAcquireLockError(lock=lock)
+ lock_acquisition_time = arrow.utcnow()
try:
async with asyncio.TaskGroup() as tg:
started_event = asyncio.Event()
@@ -116,10 +118,12 @@ async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
module_name=coro.__module__, func_name=coro.__name__
),
)
-
res = await work_task
- auto_extend_lock_task.cancel()
- return res
+ # cancel the auto-extend task (work is done)
+            # NOTE: if we do not explicitly await the task inside the context manager,
+            # it sometimes hangs forever (possibly a Python issue)
+ await cancel_wait_task(auto_extend_lock_task, max_delay=None)
+ return res
except BaseExceptionGroup as eg:
# Separate exceptions into LockLostError and others
@@ -134,10 +138,39 @@ async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
assert len(lock_lost_errors.exceptions) == 1 # nosec
raise lock_lost_errors.exceptions[0] from eg
finally:
- with contextlib.suppress(redis.exceptions.LockNotOwnedError):
+ try:
# in the case where the lock would have been lost,
# this would raise again and is not necessary
await lock.release()
+ except redis.exceptions.LockNotOwnedError as exc:
+ _logger.exception(
+ **create_troubleshooting_log_kwargs(
+ f"Unexpected error while releasing lock '{redis_lock_key}'",
+ error=exc,
+ error_context={
+ "redis_lock_key": redis_lock_key,
+ "lock_value": lock_value,
+ "client_name": client.client_name,
+ "hostname": socket.gethostname(),
+ "coroutine": coro.__name__,
+ },
+                    tip="This might happen if the lock was lost before releasing it. "
+                    "Look for synchronous code that prevents the lock from being refreshed, or for an overloaded asyncio event loop.",
+ )
+ )
+ finally:
+ lock_release_time = arrow.utcnow()
+ locking_time = lock_release_time - lock_acquisition_time
+ if locking_time > DEFAULT_EXPECTED_LOCK_OVERALL_TIME:
+ _logger.warning(
+ "Lock `%s' for %s was held for %s which is longer than the expected (%s). "
+ "TIP: consider reducing the locking time by optimizing the code inside "
+ "the critical section or increasing the default locking time",
+ redis_lock_key,
+ coro.__name__,
+ locking_time,
+ DEFAULT_EXPECTED_LOCK_OVERALL_TIME,
+ )
return _wrapper
diff --git a/packages/service-library/src/servicelib/redis/_errors.py b/packages/service-library/src/servicelib/redis/_errors.py
index 7fc3c7823ae0..2d09a3730869 100644
--- a/packages/service-library/src/servicelib/redis/_errors.py
+++ b/packages/service-library/src/servicelib/redis/_errors.py
@@ -4,8 +4,7 @@
from common_library.errors_classes import OsparcErrorMixin
-class BaseRedisError(OsparcErrorMixin, RuntimeError):
- ...
+class BaseRedisError(OsparcErrorMixin, RuntimeError): ...
class CouldNotAcquireLockError(BaseRedisError):
@@ -25,3 +24,25 @@ class LockLostError(BaseRedisError):
ProjectLockError: TypeAlias = redis.exceptions.LockError # NOTE: backwards compatible
+
+
+class SemaphoreError(BaseRedisError):
+ msg_template: str = (
+ "Unexpected error with semaphore '{name}' by this instance `{instance_id}`"
+ )
+
+
+class SemaphoreAcquisitionError(SemaphoreError):
+ msg_template: str = (
+ "Could not acquire semaphore '{name}' by this instance `{instance_id}`"
+ )
+
+
+class SemaphoreNotAcquiredError(SemaphoreError):
+ msg_template: str = (
+ "Semaphore '{name}' was not acquired by this instance `{instance_id}`"
+ )
+
+
+class SemaphoreLostError(SemaphoreError):
+ msg_template: str = "Semaphore '{name}' was lost by this instance `{instance_id}`"
diff --git a/packages/service-library/src/servicelib/redis/_project_document_version.py b/packages/service-library/src/servicelib/redis/_project_document_version.py
new file mode 100644
index 000000000000..7193adb8ca79
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/_project_document_version.py
@@ -0,0 +1,39 @@
+"""Project document versioning utilities.
+
+This module provides utilities for managing project document versions using Redis.
+The version is a per-project counter that is incremented atomically in Redis, so every
+client working on a project can tell whether it holds the latest document.
+"""
+
+from typing import Final
+
+from models_library.projects import ProjectID
+
+from ._client import RedisClientSDK
+
+# Redis key patterns
+PROJECT_DOCUMENT_VERSION_KEY: Final[str] = "projects:{}:version"
+PROJECT_DB_UPDATE_REDIS_LOCK_KEY: Final[str] = "project_db_update:{}"
+
+
+async def increment_and_return_project_document_version(
+ redis_client: RedisClientSDK, project_uuid: ProjectID
+) -> int:
+ """
+    Atomically increments and returns the project document version using Redis.
+
+    The increment relies on the Redis INCR command, which is atomic, so it is safe
+    under concurrent use across processes. The version starts at 1 for the first call.
+
+    Args:
+        redis_client: The Redis client SDK instance
+        project_uuid: The project UUID whose version is incremented
+
+ Returns:
+ The new (incremented) version number
+ """
+ version_key = PROJECT_DOCUMENT_VERSION_KEY.format(project_uuid)
+ # If key doesn't exist, it's created with value 0 and then incremented to 1
+ output = await redis_client.redis.incr(version_key)
+ return int(output)
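+
+
+# Illustrative usage sketch (comment only; how callers combine these helpers is an
+# assumption, names are placeholders):
+#
+#   lock_key = PROJECT_DB_UPDATE_REDIS_LOCK_KEY.format(project_uuid)
+#   version = await increment_and_return_project_document_version(
+#       redis_client, project_uuid
+#   )
+#   # receivers can then discard any project document older than `version`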
diff --git a/packages/service-library/src/servicelib/redis/_project_lock.py b/packages/service-library/src/servicelib/redis/_project_lock.py
index d618d88c58f0..3ea3cc2bcf19 100644
--- a/packages/service-library/src/servicelib/redis/_project_lock.py
+++ b/packages/service-library/src/servicelib/redis/_project_lock.py
@@ -6,8 +6,8 @@
from models_library.projects import ProjectID
from models_library.projects_access import Owner
from models_library.projects_state import ProjectLocked, ProjectStatus
-from servicelib.logging_utils import log_catch
+from ..logging_utils import log_catch
from ._client import RedisClientSDK
from ._decorators import exclusive
from ._errors import CouldNotAcquireLockError, ProjectLockError
diff --git a/packages/service-library/src/servicelib/redis/_semaphore.py b/packages/service-library/src/servicelib/redis/_semaphore.py
new file mode 100644
index 000000000000..a5ec957275f9
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/_semaphore.py
@@ -0,0 +1,549 @@
+import asyncio
+import contextlib
+import datetime
+import logging
+import socket
+import uuid
+from collections.abc import AsyncIterator
+from typing import Annotated, ClassVar
+
+import arrow
+import redis.exceptions
+from common_library.async_tools import cancel_wait_task
+from common_library.basic_types import DEFAULT_FACTORY
+from common_library.logging.logging_errors import create_troubleshooting_log_kwargs
+from pydantic import (
+ BaseModel,
+ Field,
+ PositiveInt,
+ computed_field,
+ field_validator,
+)
+from redis.commands.core import AsyncScript
+from tenacity import (
+ retry,
+ retry_if_exception_type,
+ wait_random_exponential,
+)
+
+from ..background_task import periodic
+from ._client import RedisClientSDK
+from ._constants import (
+ DEFAULT_EXPECTED_LOCK_OVERALL_TIME,
+ DEFAULT_SEMAPHORE_TTL,
+ SEMAPHORE_KEY_PREFIX,
+)
+from ._errors import (
+ SemaphoreAcquisitionError,
+ SemaphoreError,
+ SemaphoreLostError,
+ SemaphoreNotAcquiredError,
+)
+from ._semaphore_lua import (
+ ACQUIRE_SEMAPHORE_SCRIPT,
+ REGISTER_SEMAPHORE_TOKEN_SCRIPT,
+ RELEASE_SEMAPHORE_SCRIPT,
+ RENEW_SEMAPHORE_SCRIPT,
+ SCRIPT_BAD_EXIT_CODE,
+ SCRIPT_OK_EXIT_CODE,
+)
+from ._utils import handle_redis_returns_union_types
+
+_logger = logging.getLogger(__name__)
+
+
+class DistributedSemaphore(BaseModel):
+ """
+    Warning: prefer the `with_limited_concurrency` decorator over using this class directly
+
+ A distributed semaphore implementation using Redis.
+
+ This semaphore allows limiting the number of concurrent operations across
+ multiple processes/instances using Redis as the coordination backend.
+
+ Args:
+ redis_client: Redis client for coordination
+ key: Unique identifier for the semaphore
+ capacity: Maximum number of concurrent holders
+ ttl: Time-to-live for semaphore entries (auto-cleanup)
+ blocking: Whether acquire() should block until available
+ blocking_timeout: Maximum time to wait when blocking (None = no timeout)
+
+ Example:
+ # via the module-level context manager (DistributedSemaphore itself
+ # does not implement the async context manager protocol)
+ async with distributed_semaphore(
+ redis_client, key="my_resource", capacity=3
+ ):
+ # Only 3 instances can execute this block concurrently
+ await do_limited_work()
+ """
+
+ model_config = {
+ "arbitrary_types_allowed": True, # For RedisClientSDK
+ }
+
+ # Configuration fields with validation
+ redis_client: RedisClientSDK
+ key: Annotated[
+ str, Field(min_length=1, description="Unique identifier for the semaphore")
+ ]
+ capacity: Annotated[
+ PositiveInt, Field(description="Maximum number of concurrent holders")
+ ]
+ ttl: datetime.timedelta = DEFAULT_SEMAPHORE_TTL
+ blocking: Annotated[
+ bool, Field(description="Whether acquire() should block until available")
+ ] = True
+ blocking_timeout: Annotated[
+ datetime.timedelta | None,
+ Field(description="Maximum time to wait when blocking"),
+ ] = None
+ instance_id: Annotated[
+ str,
+ Field(
+ description="Unique instance identifier",
+ default_factory=lambda: f"{uuid.uuid4()}",
+ ),
+ ] = DEFAULT_FACTORY
+
+ # Class and/or Private state attributes (not part of the model)
+ register_semaphore: ClassVar[AsyncScript | None] = None
+ acquire_script: ClassVar[AsyncScript | None] = None
+ release_script: ClassVar[AsyncScript | None] = None
+ renew_script: ClassVar[AsyncScript | None] = None
+
+ _token: str | None = None # currently held token, if any
+
+ @classmethod
+ def _register_scripts(cls, redis_client: RedisClientSDK) -> None:
+ """Register Lua scripts with Redis if not already done.
+ This is done once per class, not per instance. Internally the Redis client
+ caches the script SHA, so this is efficient. Even if called multiple times,
+ the script is only registered once."""
+ if cls.acquire_script is None:
+ cls.register_semaphore = redis_client.redis.register_script(
+ REGISTER_SEMAPHORE_TOKEN_SCRIPT
+ )
+ cls.acquire_script = redis_client.redis.register_script(
+ ACQUIRE_SEMAPHORE_SCRIPT
+ )
+ cls.release_script = redis_client.redis.register_script(
+ RELEASE_SEMAPHORE_SCRIPT
+ )
+ cls.renew_script = redis_client.redis.register_script(
+ RENEW_SEMAPHORE_SCRIPT
+ )
+
+ def __init__(self, **data) -> None:
+ super().__init__(**data)
+ self.__class__._register_scripts(self.redis_client) # noqa: SLF001
+
+ @computed_field # type: ignore[prop-decorator]
+ @property
+ def semaphore_key(self) -> str:
+ """Redis key for the semaphore sorted set."""
+ return f"{SEMAPHORE_KEY_PREFIX}{self.key}_cap{self.capacity}"
+
+ @computed_field # type: ignore[prop-decorator]
+ @property
+ def tokens_key(self) -> str:
+ """Redis key for the token pool LIST."""
+ return f"{self.semaphore_key}:tokens"
+
+ @computed_field # type: ignore[prop-decorator]
+ @property
+ def holders_set(self) -> str:
+ """Redis key for the holders SET."""
+ return f"{self.semaphore_key}:holders_set"
+
+ @computed_field # type: ignore[prop-decorator]
+ @property
+ def holder_key(self) -> str:
+ """Redis key for this instance's holder entry."""
+ return f"{self.semaphore_key}:holders:{self.instance_id}"
+
+ @computed_field # type: ignore[prop-decorator]
+ @property
+ def holders_set_ttl(self) -> datetime.timedelta:
+ """TTL for the holders SET"""
+ return self.ttl * 5
+
+ @computed_field # type: ignore[prop-decorator]
+ @property
+ def tokens_set_ttl(self) -> datetime.timedelta:
+ """TTL for the tokens SET"""
+ return self.ttl * 5
+
+ @field_validator("ttl")
+ @classmethod
+ def validate_ttl(cls, v: datetime.timedelta) -> datetime.timedelta:
+ if v.total_seconds() < 1:
+ msg = "TTL must be positive"
+ raise ValueError(msg)
+ return v
+
+ @field_validator("blocking_timeout")
+ @classmethod
+ def validate_timeout(
+ cls, v: datetime.timedelta | None
+ ) -> datetime.timedelta | None:
+ if v is not None and v.total_seconds() <= 0:
+ msg = "Timeout must be positive"
+ raise ValueError(msg)
+ return v
+
+ async def _ensure_semaphore_initialized(self) -> None:
+ """Initializes the semaphore in Redis if not already done."""
+ assert self.register_semaphore is not None # nosec
+ result = await self.register_semaphore( # pylint: disable=not-callable
+ keys=[self.tokens_key, self.holders_set],
+ args=[self.capacity, self.holders_set_ttl.total_seconds()],
+ client=self.redis_client.redis,
+ )
+ assert isinstance(result, list) # nosec
+ exit_code, status = result
+ assert exit_code == SCRIPT_OK_EXIT_CODE # nosec
+ _logger.debug("Semaphore '%s' init status: %s", self.key, status)
+
+ async def _blocking_acquire(self) -> str | None:
+ @retry(
+ wait=wait_random_exponential(min=0.1, max=0.5),
+ retry=retry_if_exception_type(redis.exceptions.TimeoutError),
+ )
+ async def _acquire_forever_on_socket_timeout() -> list[str] | None:
+ # NOTE: brpop returns None on timeout
+
+ tokens_key_token: list[str] | None = await handle_redis_returns_union_types(
+ self.redis_client.redis.brpop(
+ [self.tokens_key],
+ timeout=None, # NOTE: block forever here; socket timeouts are retried by tenacity and blocking_timeout is enforced with asyncio.timeout below
+ )
+ )
+ return tokens_key_token
+
+ try:
+ # NOTE: the redis-py client raises a TimeoutError whenever its configured socket timeout triggers.
+ # The BRPOP command could time out on its own, but the client socket timeout would fire first and defeat the purpose,
+ # so we always block forever on BRPOP: tenacity retries whenever a socket timeout happens
+ # and asyncio.timeout enforces blocking_timeout, if defined
+ async with asyncio.timeout(
+ self.blocking_timeout.total_seconds() if self.blocking_timeout else None
+ ):
+ tokens_key_token = await _acquire_forever_on_socket_timeout()
+ assert tokens_key_token is not None # nosec
+ assert len(tokens_key_token) == 2 # nosec # noqa: PLR2004
+ assert tokens_key_token[0] == self.tokens_key # nosec
+ return tokens_key_token[1]
+ except TimeoutError as e:
+ raise SemaphoreAcquisitionError(
+ name=self.key, instance_id=self.instance_id
+ ) from e
+
+ async def _non_blocking_acquire(self) -> str | None:
+ token: str | list[str] | None = await handle_redis_returns_union_types(
+ self.redis_client.redis.rpop(self.tokens_key)
+ )
+ if token is None:
+ _logger.debug(
+ "Semaphore '%s' not acquired (no tokens available) (instance: %s)",
+ self.key,
+ self.instance_id,
+ )
+ return None
+
+ assert isinstance(token, str) # nosec
+ return token
+
+ async def acquire(self) -> bool:
+ """
+ Acquire the semaphore.
+
+ Returns:
+ True if acquired successfully, False if not acquired and non-blocking
+
+ Raises:
+ SemaphoreAcquisitionError: If acquisition fails and blocking=True
+ """
+ await self._ensure_semaphore_initialized()
+
+ if await self.is_acquired():
+ _logger.debug(
+ "Semaphore '%s' already acquired by this instance (instance: %s)",
+ self.key,
+ self.instance_id,
+ )
+ return True
+
+ if self.blocking is False:
+ self._token = await self._non_blocking_acquire()
+ if not self._token:
+ return False
+ else:
+ self._token = await self._blocking_acquire()
+
+ assert self._token is not None # nosec
+ # set up the semaphore holder with a TTL
+ assert self.acquire_script is not None # nosec
+ result = await self.acquire_script( # pylint: disable=not-callable
+ keys=[self.holders_set, self.holder_key],
+ args=[
+ self._token,
+ self.instance_id,
+ self.ttl.total_seconds(),
+ self.holders_set_ttl.total_seconds(),
+ ],
+ client=self.redis_client.redis,
+ )
+
+ # Lua script returns: [exit_code, status, token, current_count]
+ assert isinstance(result, list) # nosec
+ exit_code, status, token, current_count = result
+
+ assert exit_code == SCRIPT_OK_EXIT_CODE # nosec
+ assert status == "acquired" # nosec
+
+ _logger.debug(
+ "Acquired semaphore '%s' with token %s (instance: %s, count: %s)",
+ self.key,
+ token,
+ self.instance_id,
+ current_count,
+ )
+ return True
+
+ async def release(self) -> None:
+ """
+ Release the semaphore
+
+ Raises:
+ SemaphoreNotAcquiredError: If the semaphore was not acquired by this instance
+ SemaphoreLostError: If the holder entry expired before it could be released
+ """
+
+ # Execute the release Lua script atomically
+ assert self.release_script is not None # nosec
+ release_args = [self.instance_id]
+ if self._token is not None:
+ release_args.append(self._token)
+ result = await self.release_script( # pylint: disable=not-callable
+ keys=[self.tokens_key, self.holders_set, self.holder_key],
+ args=release_args,
+ client=self.redis_client.redis,
+ )
+ self._token = None
+
+ assert isinstance(result, list) # nosec
+ exit_code, status, current_count = result
+ if exit_code == SCRIPT_OK_EXIT_CODE:
+ assert status == "released" # nosec
+ _logger.debug(
+ "Released semaphore '%s' (instance: %s, count: %s)",
+ self.key,
+ self.instance_id,
+ current_count,
+ )
+ return
+
+ # Instance was already expired or not acquired
+ assert exit_code == SCRIPT_BAD_EXIT_CODE # nosec
+ _logger.error(
+ "Failed to release semaphore '%s' - %s (instance: %s, count: %s)",
+ self.key,
+ status,
+ self.instance_id,
+ current_count,
+ )
+ if status == "not_held":
+ raise SemaphoreNotAcquiredError(name=self.key, instance_id=self.instance_id)
+ assert status == "expired" # nosec
+ raise SemaphoreLostError(name=self.key, instance_id=self.instance_id)
+
+ async def reacquire(self) -> None:
+ """
+ Re-acquire a semaphore
+ This function is intended to be called by decorators or external renewal mechanisms.
+
+
+ Raises:
+ SemaphoreLostError: If the semaphore was lost or expired
+ """
+
+ ttl_seconds = self.ttl.total_seconds()
+
+ # Execute the renewal Lua script atomically
+ assert self.renew_script is not None # nosec
+ result = await self.renew_script( # pylint: disable=not-callable
+ keys=[self.holders_set, self.holder_key, self.tokens_key],
+ args=[
+ self.instance_id,
+ ttl_seconds,
+ self.holders_set_ttl.total_seconds(),
+ self.tokens_set_ttl.total_seconds(),
+ ],
+ client=self.redis_client.redis,
+ )
+
+ assert isinstance(result, list) # nosec
+ exit_code, status, current_count = result
+
+ if exit_code == SCRIPT_OK_EXIT_CODE:
+ assert status == "renewed" # nosec
+ _logger.debug(
+ "Renewed semaphore '%s' (instance: %s, count: %s)",
+ self.key,
+ self.instance_id,
+ current_count,
+ )
+ return
+ assert exit_code == SCRIPT_BAD_EXIT_CODE # nosec
+
+ _logger.warning(
+ "Semaphore '%s' holder key was lost (instance: %s, status: %s, count: %s)",
+ self.key,
+ self.instance_id,
+ status,
+ current_count,
+ )
+ if status == "not_held":
+ raise SemaphoreNotAcquiredError(name=self.key, instance_id=self.instance_id)
+ assert status == "expired" # nosec
+ raise SemaphoreLostError(name=self.key, instance_id=self.instance_id)
+
+ async def is_acquired(self) -> bool:
+ """Check if the semaphore is currently acquired by this instance."""
+ return bool(
+ await handle_redis_returns_union_types(
+ self.redis_client.redis.exists(self.holder_key)
+ )
+ == 1
+ )
+
+ async def current_count(self) -> int:
+ """Get the current number of semaphore holders"""
+ return await handle_redis_returns_union_types(
+ self.redis_client.redis.scard(self.holders_set)
+ )
+
+ async def available_tokens(self) -> int:
+ """Get the size of the semaphore (number of available tokens)"""
+ await self._ensure_semaphore_initialized()
+ return await handle_redis_returns_union_types(
+ self.redis_client.redis.llen(self.tokens_key)
+ )
+
+
+@contextlib.asynccontextmanager
+async def distributed_semaphore( # noqa: C901
+ redis_client: RedisClientSDK,
+ *,
+ key: str,
+ capacity: PositiveInt,
+ ttl: datetime.timedelta = DEFAULT_SEMAPHORE_TTL,
+ blocking: bool = True,
+ blocking_timeout: datetime.timedelta | None = None,
+ expected_lock_overall_time: datetime.timedelta = DEFAULT_EXPECTED_LOCK_OVERALL_TIME,
+) -> AsyncIterator[DistributedSemaphore]:
+ """
+ Async context manager for DistributedSemaphore.
+
+ Example:
+ async with distributed_semaphore(redis_client, key="my_resource", capacity=3) as sem:
+ # Only 3 instances can execute this block concurrently
+ await do_limited_work()
+ """
+ semaphore = DistributedSemaphore(
+ redis_client=redis_client,
+ key=key,
+ capacity=capacity,
+ ttl=ttl,
+ blocking=blocking,
+ blocking_timeout=blocking_timeout,
+ )
+
+ @periodic(interval=semaphore.ttl / 3, raise_on_error=True)
+ async def _periodic_reacquisition(
+ semaphore: DistributedSemaphore,
+ started: asyncio.Event,
+ cancellation_event: asyncio.Event,
+ ) -> None:
+ if cancellation_event.is_set():
+ raise asyncio.CancelledError
+ if not started.is_set():
+ started.set()
+ await semaphore.reacquire()
+
+ lock_acquisition_time = None
+ try:
+ if not await semaphore.acquire():
+ raise SemaphoreAcquisitionError(name=key, instance_id=semaphore.instance_id)
+
+ lock_acquisition_time = arrow.utcnow()
+
+ async with (
+ asyncio.TaskGroup() as tg
+ ): # NOTE: using task group ensures proper cancellation propagation of parent task
+ auto_reacquisition_started = asyncio.Event()
+ cancellation_event = asyncio.Event()
+ auto_reacquisition_task = tg.create_task(
+ _periodic_reacquisition(
+ semaphore, auto_reacquisition_started, cancellation_event
+ ),
+ name=f"semaphore/auto_reacquisition_task_{semaphore.key}_{semaphore.instance_id}",
+ )
+ await auto_reacquisition_started.wait()
+ try:
+ # NOTE: this try/finally ensures that cancellation_event is set when we exit the context
+ # even in case of exceptions
+ yield semaphore
+ finally:
+ cancellation_event.set() # NOTE: this ensures the periodic reacquisition task stops promptly
+ await cancel_wait_task(auto_reacquisition_task)
+ except BaseExceptionGroup as eg:
+ semaphore_errors, other_errors = eg.split(SemaphoreError)
+ if other_errors:
+ assert len(other_errors.exceptions) == 1 # nosec
+ raise other_errors.exceptions[0] from eg
+ assert semaphore_errors is not None # nosec
+ assert len(semaphore_errors.exceptions) == 1 # nosec
+ raise semaphore_errors.exceptions[0] from eg
+ finally:
+ try:
+ await semaphore.release()
+ except SemaphoreNotAcquiredError as exc:
+ _logger.exception(
+ **create_troubleshooting_log_kwargs(
+ f"Unexpected error while releasing semaphore '{semaphore.key}'",
+ error=exc,
+ error_context={
+ "semaphore_key": semaphore.key,
+ "semaphore_instance_id": semaphore.instance_id,
+ "hostname": socket.gethostname(),
+ },
+ tip="This indicates a logic error in the code using the semaphore",
+ )
+ )
+ except SemaphoreLostError as exc:
+ _logger.exception(
+ **create_troubleshooting_log_kwargs(
+ f"Unexpected error while releasing semaphore '{semaphore.key}'",
+ error=exc,
+ error_context={
+ "semaphore_key": semaphore.key,
+ "semaphore_instance_id": semaphore.instance_id,
+ "hostname": socket.gethostname(),
+ },
+ tip="This indicates that the semaphore was lost or expired before release. "
+ "Look for synchronouse code or the loop is very busy and cannot schedule the reacquisition task.",
+ )
+ )
+ if lock_acquisition_time is not None:
+ lock_release_time = arrow.utcnow()
+ locking_time = lock_release_time - lock_acquisition_time
+ if locking_time > expected_lock_overall_time:
+ _logger.warning(
+ "Semaphore '%s' was held for %s by %s which is longer than expected (%s). "
+ "TIP: consider reducing the locking time by optimizing the code inside "
+ "the critical section or increasing the default locking time",
+ semaphore.key,
+ locking_time,
+ semaphore.instance_id,
+ expected_lock_overall_time,
+ )
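
A minimal usage sketch for the context manager above, assuming an already-initialized RedisClientSDK and a caller that prefers to fail fast instead of waiting forever (imports use the private module paths introduced in this diff; the package may also re-export them):

    import datetime

    from servicelib.redis._client import RedisClientSDK
    from servicelib.redis._errors import SemaphoreAcquisitionError
    from servicelib.redis._semaphore import distributed_semaphore

    async def generate_thumbnails(redis_client: RedisClientSDK) -> None:
        try:
            async with distributed_semaphore(
                redis_client,
                key="thumbnail-generation",  # illustrative key
                capacity=3,
                blocking_timeout=datetime.timedelta(seconds=30),
            ):
                # at most 3 holders across all processes; the holder TTL is
                # renewed automatically every ttl/3 by the background task
                ...
        except SemaphoreAcquisitionError:
            # no token became available within blocking_timeout
            ...
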
diff --git a/packages/service-library/src/servicelib/redis/_semaphore_decorator.py b/packages/service-library/src/servicelib/redis/_semaphore_decorator.py
new file mode 100644
index 000000000000..72e7fd9d309e
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/_semaphore_decorator.py
@@ -0,0 +1,184 @@
+import datetime
+import functools
+import logging
+from collections.abc import AsyncIterator, Callable, Coroutine
+from contextlib import AbstractAsyncContextManager, asynccontextmanager
+from typing import Any, ParamSpec, TypeVar
+
+from ._client import RedisClientSDK
+from ._constants import (
+ DEFAULT_EXPECTED_LOCK_OVERALL_TIME,
+ DEFAULT_SEMAPHORE_TTL,
+)
+from ._semaphore import distributed_semaphore
+
+_logger = logging.getLogger(__name__)
+
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+def with_limited_concurrency(
+ redis_client: RedisClientSDK | Callable[..., RedisClientSDK],
+ *,
+ key: str | Callable[..., str],
+ capacity: int | Callable[..., int],
+ ttl: datetime.timedelta = DEFAULT_SEMAPHORE_TTL,
+ blocking: bool = True,
+ blocking_timeout: datetime.timedelta | None = None,
+ expected_lock_overall_time: datetime.timedelta = DEFAULT_EXPECTED_LOCK_OVERALL_TIME,
+) -> Callable[
+ [Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]
+]:
+ """
+ Decorator to limit concurrent execution of a function using a distributed semaphore.
+
+ This decorator ensures that only a specified number of instances of the decorated
+ function can run concurrently across multiple processes/instances using Redis
+ as the coordination backend.
+
+ Args:
+ redis_client: Redis client for coordination (can be callable)
+ key: Unique identifier for the semaphore (can be callable)
+ capacity: Maximum number of concurrent executions (can be callable)
+ ttl: Time-to-live for semaphore entries (default: 5 minutes)
+ blocking: Whether to block when semaphore is full (default: True)
+ blocking_timeout: Maximum time to wait when blocking (default: None, i.e. wait indefinitely)
+ expected_lock_overall_time: emit a warning if the semaphore is held longer than this duration
+
+ Example:
+ @with_limited_concurrency(
+ redis_client,
+ key=f"{user_id}-{wallet_id}",
+ capacity=20,
+ blocking=True,
+ blocking_timeout=None
+ )
+ async def process_user_wallet(user_id: str, wallet_id: str):
+ # Only 20 instances of this function can run concurrently
+ # for the same user_id-wallet_id combination
+ await do_processing()
+
+ Raises:
+ SemaphoreAcquisitionError: If semaphore cannot be acquired and blocking=True
+ """
+
+ def _decorator(
+ coro: Callable[P, Coroutine[Any, Any, R]],
+ ) -> Callable[P, Coroutine[Any, Any, R]]:
+ @functools.wraps(coro)
+ async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+ semaphore_key = key(*args, **kwargs) if callable(key) else key
+ semaphore_capacity = (
+ capacity(*args, **kwargs) if callable(capacity) else capacity
+ )
+ client = (
+ redis_client(*args, **kwargs)
+ if callable(redis_client)
+ else redis_client
+ )
+
+ assert isinstance(semaphore_key, str) # nosec
+ assert isinstance(semaphore_capacity, int) # nosec
+ assert isinstance(client, RedisClientSDK) # nosec
+
+ async with distributed_semaphore(
+ redis_client=client,
+ key=semaphore_key,
+ capacity=semaphore_capacity,
+ ttl=ttl,
+ blocking=blocking,
+ blocking_timeout=blocking_timeout,
+ expected_lock_overall_time=expected_lock_overall_time,
+ ):
+ return await coro(*args, **kwargs)
+
+ return _wrapper
+
+ return _decorator
+
+
+def with_limited_concurrency_cm(
+ redis_client: RedisClientSDK | Callable[..., RedisClientSDK],
+ *,
+ key: str | Callable[..., str],
+ capacity: int | Callable[..., int],
+ ttl: datetime.timedelta = DEFAULT_SEMAPHORE_TTL,
+ blocking: bool = True,
+ blocking_timeout: datetime.timedelta | None = None,
+ expected_lock_overall_time: datetime.timedelta = DEFAULT_EXPECTED_LOCK_OVERALL_TIME,
+) -> Callable[
+ [Callable[P, AbstractAsyncContextManager[R]]],
+ Callable[P, AbstractAsyncContextManager[R]],
+]:
+ """
+ Decorator to limit concurrent execution of async context managers using a distributed semaphore.
+
+ This decorator ensures that only a specified number of instances of the decorated
+ async context manager can be active concurrently across multiple processes/instances
+ using Redis as the coordination backend.
+
+ Args:
+ redis_client: Redis client for coordination (can be callable)
+ key: Unique identifier for the semaphore (can be callable)
+ capacity: Maximum number of concurrent executions (can be callable)
+ ttl: Time-to-live for semaphore entries (default: 5 minutes)
+ blocking: Whether to block when semaphore is full (default: True)
+ blocking_timeout: Maximum time to wait when blocking (default: None, i.e. wait indefinitely)
+ expected_lock_overall_time: emit a warning if the semaphore is held longer than this duration
+
+ Example:
+ @with_limited_concurrency_cm(
+ redis_client,
+ key="cluster:my-cluster",
+ capacity=5,
+ blocking=True,
+ blocking_timeout=None
+ )
+ @asynccontextmanager
+ async def get_cluster_client():
+ async with pool.acquire() as client:
+ yield client
+
+ Raises:
+ SemaphoreAcquisitionError: If semaphore cannot be acquired and blocking=True
+ """
+
+ def _decorator(
+ cm_func: Callable[P, AbstractAsyncContextManager[R]],
+ ) -> Callable[P, AbstractAsyncContextManager[R]]:
+ @functools.wraps(cm_func)
+ @asynccontextmanager
+ async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> AsyncIterator[R]:
+ semaphore_key = key(*args, **kwargs) if callable(key) else key
+ semaphore_capacity = (
+ capacity(*args, **kwargs) if callable(capacity) else capacity
+ )
+ client = (
+ redis_client(*args, **kwargs)
+ if callable(redis_client)
+ else redis_client
+ )
+
+ assert isinstance(semaphore_key, str) # nosec
+ assert isinstance(semaphore_capacity, int) # nosec
+ assert isinstance(client, RedisClientSDK) # nosec
+
+ async with (
+ distributed_semaphore(
+ redis_client=client,
+ key=semaphore_key,
+ capacity=semaphore_capacity,
+ ttl=ttl,
+ blocking=blocking,
+ blocking_timeout=blocking_timeout,
+ expected_lock_overall_time=expected_lock_overall_time,
+ ),
+ cm_func(*args, **kwargs) as value,
+ ):
+ yield value
+
+ return _wrapper
+
+ return _decorator
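
Both decorators accept callables for redis_client, key and capacity; they are invoked with the wrapped callable's own arguments, which makes it possible to derive the semaphore from instance or application state. A hedged sketch (the Scheduler class and its attributes are assumptions for illustration, not part of this diff):

    from servicelib.redis._client import RedisClientSDK
    from servicelib.redis._semaphore_decorator import with_limited_concurrency

    class Scheduler:
        def __init__(self, redis_client: RedisClientSDK, max_parallel_runs: int) -> None:
            self.redis_client = redis_client
            self.max_parallel_runs = max_parallel_runs

        # each callable receives the same (self, user_id) arguments as the wrapped method
        @with_limited_concurrency(
            lambda self, user_id: self.redis_client,
            key=lambda self, user_id: f"runs:{user_id}",
            capacity=lambda self, user_id: self.max_parallel_runs,
        )
        async def start_run(self, user_id: int) -> None:
            ...
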
diff --git a/packages/service-library/src/servicelib/redis/_semaphore_lua.py b/packages/service-library/src/servicelib/redis/_semaphore_lua.py
new file mode 100644
index 000000000000..71f29fa88817
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/_semaphore_lua.py
@@ -0,0 +1,38 @@
+"""used to load a lua script from the package resources in memory
+
+Example:
+ >>> from servicelib.redis._semaphore_lua import ACQUIRE_SEMAPHORE_SCRIPT
+ # This will register the script in redis and return a Script object
+ # which can be used to execute the script. Even from multiple processes
+ # the script will be loaded only once in redis as the redis server computes
+ # the SHA1 of the script and uses it to identify it.
+ >>> from redis.asyncio import Redis
+ >>> redis = Redis(...)
+ >>> my_acquire_script = redis.register_script(ACQUIRE_SEMAPHORE_SCRIPT)
+ >>> my_acquire_script(keys=[...], args=[...])
+"""
+
+from functools import lru_cache
+from importlib import resources
+from typing import Final
+
+
+@lru_cache
+def _load_script(script_name: str) -> str:
+ with resources.as_file(
+ resources.files("servicelib.redis.lua") / f"{script_name}.lua"
+ ) as script_file:
+ return script_file.read_text(encoding="utf-8").strip()
+
+
+# fair semaphore scripts (token pool based)
+REGISTER_SEMAPHORE_TOKEN_SCRIPT: Final[str] = _load_script("register_semaphore_tokens")
+ACQUIRE_SEMAPHORE_SCRIPT: Final[str] = _load_script("acquire_semaphore")
+RELEASE_SEMAPHORE_SCRIPT: Final[str] = _load_script("release_semaphore")
+CLEANUP_SEMAPHORE_SCRIPT: Final[str] = _load_script("cleanup_semaphore")
+RENEW_SEMAPHORE_SCRIPT: Final[str] = _load_script("renew_semaphore")
+
+
+SCRIPT_OK_EXIT_CODE: Final[int] = 0
+SCRIPT_BAD_EXIT_CODE: Final[int] = 255
diff --git a/packages/service-library/src/servicelib/redis/_utils.py b/packages/service-library/src/servicelib/redis/_utils.py
index 52d112ca4fee..cf695afef6aa 100644
--- a/packages/service-library/src/servicelib/redis/_utils.py
+++ b/packages/service-library/src/servicelib/redis/_utils.py
@@ -1,6 +1,6 @@
import logging
from collections.abc import Awaitable
-from typing import Any
+from typing import ParamSpec, TypeVar
import redis.exceptions
from redis.asyncio.lock import Lock
@@ -28,7 +28,11 @@ async def auto_extend_lock(lock: Lock) -> None:
raise LockLostError(lock=lock) from exc
-async def handle_redis_returns_union_types(result: Any | Awaitable[Any]) -> Any:
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+async def handle_redis_returns_union_types(result: R | Awaitable[R]) -> R:
"""Used to handle mypy issues with redis 5.x return types"""
if isinstance(result, Awaitable):
return await result
diff --git a/packages/service-library/src/servicelib/redis/lua/acquire_semaphore.lua b/packages/service-library/src/servicelib/redis/lua/acquire_semaphore.lua
new file mode 100644
index 000000000000..396a2ec34df4
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/lua/acquire_semaphore.lua
@@ -0,0 +1,33 @@
+-- Fair distributed semaphore using token pool (BRPOP-based)
+-- KEYS[1]: holders_key (SET of current holder instance IDs)
+-- KEYS[2]: holder_key (individual holder TTL key for this instance)
+
+-- ARGV[1]: token (the token received from BRPOP)
+-- ARGV[2]: instance_id (the instance trying to acquire the semaphore)
+-- ARGV[3]: ttl_seconds (for the holder_key)
+-- ARGV[4]: holders_set_ttl_seconds (to set expiry on holders set)
+--
+-- Returns: {exit_code, status, token, current_count}
+-- exit_code: 0 if acquired
+-- status: 'acquired'
+
+local holders_key = KEYS[1]
+local holder_key = KEYS[2]
+
+local token = ARGV[1]
+local instance_id = ARGV[2]
+local ttl_seconds = tonumber(ARGV[3])
+local holders_set_ttl_seconds = tonumber(ARGV[4])
+
+
+
+-- Step 1: Register as holder
+redis.call('SADD', holders_key, instance_id)
+redis.call('SETEX', holder_key, ttl_seconds, token)
+
+-- Step 2: Set expiry on holders set to prevent infinite growth
+redis.call('EXPIRE', holders_key, holders_set_ttl_seconds)
+
+local current_count = redis.call('SCARD', holders_key)
+
+return {0, 'acquired', token, current_count}
diff --git a/packages/service-library/src/servicelib/redis/lua/cleanup_semaphore.lua b/packages/service-library/src/servicelib/redis/lua/cleanup_semaphore.lua
new file mode 100644
index 000000000000..34a3b87dd28d
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/lua/cleanup_semaphore.lua
@@ -0,0 +1,56 @@
+-- Cleanup orphaned tokens from crashed clients
+-- KEYS[1]: tokens_key (LIST of available tokens)
+-- KEYS[2]: holders_key (SET of current holders)
+-- KEYS[3]: holder_prefix (prefix for holder keys, e.g. "semaphores:holders:key:")
+-- ARGV[1]: capacity (total semaphore capacity)
+--
+-- Returns: {recovered_tokens, missing_tokens, excess_tokens}
+-- This script should be run periodically to recover tokens from crashed clients
+
+local tokens_key = KEYS[1]
+local holders_key = KEYS[2]
+local holder_prefix = KEYS[3]
+
+local capacity = tonumber(ARGV[1])
+
+-- Step 1: Get all current holders
+local current_holders = redis.call('SMEMBERS', holders_key)
+local recovered_tokens = 0
+local cleaned_holders = {}
+
+-- Step 2: Check each holder to see if their TTL key still exists
+for i = 1, #current_holders do
+ local holder_id = current_holders[i]
+ local holder_key = holder_prefix .. holder_id
+ local exists = redis.call('EXISTS', holder_key)
+
+ if exists == 0 then
+ -- Holder key doesn't exist but holder is in SET
+ -- This indicates a crashed client - clean up and recover token
+ redis.call('SREM', holders_key, holder_id)
+ redis.call('LPUSH', tokens_key, 'token_recovered_' .. holder_id)
+ recovered_tokens = recovered_tokens + 1
+ table.insert(cleaned_holders, holder_id)
+ end
+end
+
+-- Step 3: Ensure we have the correct total number of tokens
+local remaining_holders = redis.call('SCARD', holders_key)
+local available_tokens_count = redis.call('LLEN', tokens_key)
+local total_tokens = remaining_holders + available_tokens_count
+
+-- If we're missing tokens (due to crashes or Redis issues), add them back
+local missing_tokens = capacity - total_tokens
+for i = 1, missing_tokens do
+ redis.call('LPUSH', tokens_key, 'token_missing_' .. i)
+ recovered_tokens = recovered_tokens + 1
+end
+
+-- If we somehow have too many tokens (shouldn't happen), remove extras
+local excess_tokens = total_tokens - capacity
+for i = 1, excess_tokens do
+ redis.call('RPOP', tokens_key)
+end
+
+
+return {recovered_tokens, missing_tokens, excess_tokens}
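
CLEANUP_SEMAPHORE_SCRIPT is loaded by _semaphore_lua.py but is not wired into DistributedSemaphore in this diff, so it would have to be invoked by some periodic janitor. A sketch of such a caller, under the assumption that the key layout mirrors the computed properties of DistributedSemaphore (semaphore_key, tokens_key, holders_set and the holder prefix):

    from servicelib.redis._client import RedisClientSDK
    from servicelib.redis._constants import SEMAPHORE_KEY_PREFIX
    from servicelib.redis._semaphore_lua import CLEANUP_SEMAPHORE_SCRIPT

    async def cleanup_semaphore(
        redis_client: RedisClientSDK, *, key: str, capacity: int
    ) -> tuple[int, int, int]:
        semaphore_key = f"{SEMAPHORE_KEY_PREFIX}{key}_cap{capacity}"
        cleanup = redis_client.redis.register_script(CLEANUP_SEMAPHORE_SCRIPT)
        recovered, missing, excess = await cleanup(
            keys=[
                f"{semaphore_key}:tokens",       # KEYS[1]: tokens LIST
                f"{semaphore_key}:holders_set",  # KEYS[2]: holders SET
                f"{semaphore_key}:holders:",     # KEYS[3]: holder key prefix
            ],
            args=[capacity],
        )
        return int(recovered), int(missing), int(excess)
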
diff --git a/packages/service-library/src/servicelib/redis/lua/register_semaphore_tokens.lua b/packages/service-library/src/servicelib/redis/lua/register_semaphore_tokens.lua
new file mode 100644
index 000000000000..36b2c769ef25
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/lua/register_semaphore_tokens.lua
@@ -0,0 +1,38 @@
+-- Simple token initialization and management for Python BRPOP
+-- KEYS[1]: tokens_key (LIST of available tokens)
+-- KEYS[2]: holders_key (SET of current holder instance IDs)
+
+-- ARGV[1]: capacity (max concurrent holders)
+-- ARGV[2]: ttl_seconds
+--
+-- Returns: {exit_code, status}
+-- exit_code: 0 if registered successfully
+-- status: 'initialized' or 'already_initialized'
+
+local tokens_key = KEYS[1]
+local holders_key = KEYS[2]
+
+local capacity = tonumber(ARGV[1])
+local ttl_seconds = tonumber(ARGV[2])
+
+-- Use an init marker (kept alive via TTL renewals) to track whether the semaphore was already initialized
+local init_marker_key = tokens_key .. ':initialized'
+
+-- Check if we've ever initialized this semaphore
+local was_initialized = redis.call('EXISTS', init_marker_key)
+
+if was_initialized == 0 then
+ -- First time initialization - set the init marker (its TTL is refreshed by the renew script)
+ redis.call('SET', init_marker_key, '1')
+ redis.call('EXPIRE', init_marker_key, ttl_seconds)
+
+ -- Initialize with capacity number of tokens
+ for i = 1, capacity do
+ redis.call('LPUSH', tokens_key, 'token_' .. i)
+ end
+ -- Set expiry on tokens list
+ redis.call('EXPIRE', tokens_key, ttl_seconds)
+ return {0, 'initialized'}
+end
+
+
+return {0, 'already_initialized'}
diff --git a/packages/service-library/src/servicelib/redis/lua/release_semaphore.lua b/packages/service-library/src/servicelib/redis/lua/release_semaphore.lua
new file mode 100644
index 000000000000..088662c83842
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/lua/release_semaphore.lua
@@ -0,0 +1,51 @@
+-- Release fair semaphore and return token to pool
+-- KEYS[1]: tokens_key (LIST of available tokens)
+-- KEYS[2]: holders_key (SET of current holders)
+-- KEYS[3]: holder_key (individual holder TTL key for this instance)
+
+-- ARGV[1]: instance_id
+-- ARGV[2]: passed_token (the token held by this instance or nil if unknown)
+--
+-- Returns: {exit_code, status, current_count}
+-- exit_code: 0 if released, 255 if failed
+-- status: 'released', 'not_held', or 'expired'
+
+local tokens_key = KEYS[1]
+local holders_key = KEYS[2]
+local holder_key = KEYS[3]
+
+local instance_id = ARGV[1]
+local passed_token = ARGV[2]
+
+-- Step 1: Check if this instance is currently a holder
+local is_holder = redis.call('SISMEMBER', holders_key, instance_id)
+if is_holder == 0 then
+ -- Not in holders set - nothing to release for this instance
+ return {255, 'not_held', redis.call('SCARD', holders_key)}
+end
+
+-- Step 2: Get the token from holder key
+local token = redis.call('GET', holder_key)
+if not token then
+ -- the token expired but we are still in the holders set
+ -- this indicates a lost semaphore (e.g. due to TTL expiry)
+ -- remove from holders set and return error
+ redis.call('SREM', holders_key, instance_id)
+ -- if the token was passed return it to the pool
+ if passed_token then
+ redis.call('LPUSH', tokens_key, passed_token)
+ end
+ -- Note: we do NOT push a recovered token since we don't know its state
+ return {255, 'expired', redis.call('SCARD', holders_key)}
+end
+
+-- Step 3: Release the semaphore
+redis.call('SREM', holders_key, instance_id)
+redis.call('DEL', holder_key)
+
+-- Step 4: Return token to available pool
+-- This automatically unblocks any waiting BRPOP calls
+redis.call('LPUSH', tokens_key, token)
+
+
+return {0, 'released', redis.call('SCARD', holders_key)}
diff --git a/packages/service-library/src/servicelib/redis/lua/renew_semaphore.lua b/packages/service-library/src/servicelib/redis/lua/renew_semaphore.lua
new file mode 100644
index 000000000000..35b290b29d92
--- /dev/null
+++ b/packages/service-library/src/servicelib/redis/lua/renew_semaphore.lua
@@ -0,0 +1,47 @@
+-- Renew semaphore holder TTL (simplified for token pool design)
+-- KEYS[1]: holders_key (SET of current holders)
+-- KEYS[2]: holder_key (individual holder TTL key for this instance)
+-- KEYS[3]: tokens_key (LIST of available tokens)
+-- ARGV[1]: instance_id
+-- ARGV[2]: ttl_seconds
+-- ARGV[3]: holders_ttl_seconds (to renew holders set)
+-- ARGV[4]: tokens_ttl_seconds (to renew tokens list)
+--
+-- Returns: {exit_code, status, current_count}
+-- exit_code: 0 if renewed, 255 if failed
+-- status: 'renewed', 'not_held', or 'expired'
+
+local holders_key = KEYS[1]
+local holder_key = KEYS[2]
+local tokens_key = KEYS[3]
+
+local instance_id = ARGV[1]
+local ttl_seconds = tonumber(ARGV[2])
+local holders_ttl_seconds = tonumber(ARGV[3])
+local tokens_ttl_seconds = tonumber(ARGV[4])
+
+-- Step 1: Check if this instance is currently a holder
+local is_holder = redis.call('SISMEMBER', holders_key, instance_id)
+if is_holder == 0 then
+ -- Not in holders set
+ return {255, 'not_held', redis.call('SCARD', holders_key)}
+end
+
+-- Step 2: Check if holder key exists (to detect if it expired)
+local exists = redis.call('EXISTS', holder_key)
+if exists == 0 then
+ -- Holder key expired
+ return {255, 'expired', redis.call('SCARD', holders_key)}
+end
+
+-- Step 3: Renew the holder key TTL
+local token = redis.call('GET', holder_key)
+redis.call('SETEX', holder_key, ttl_seconds, token)
+
+-- Step 4: Renew the holders set and tokens list TTLs to prevent infinite growth
+redis.call('EXPIRE', holders_key, holders_ttl_seconds)
+redis.call('EXPIRE', tokens_key, tokens_ttl_seconds)
+local init_marker_tokens_key = tokens_key .. ':initialized'
+redis.call('EXPIRE', init_marker_tokens_key, tokens_ttl_seconds)
+
+return {0, 'renewed', redis.call('SCARD', holders_key)}
diff --git a/packages/service-library/src/servicelib/rest_constants.py b/packages/service-library/src/servicelib/rest_constants.py
index d763657b6c97..4791b189df7f 100644
--- a/packages/service-library/src/servicelib/rest_constants.py
+++ b/packages/service-library/src/servicelib/rest_constants.py
@@ -23,3 +23,4 @@ class PydanticExportParametersDict(TypedDict):
# Headers keys
X_PRODUCT_NAME_HEADER: Final[str] = "X-Simcore-Products-Name"
+X_CLIENT_SESSION_ID_HEADER: Final[str] = "X-Client-Session-Id"
diff --git a/packages/service-library/src/servicelib/socketio_utils.py b/packages/service-library/src/servicelib/socketio_utils.py
index efc634367157..b6cce908d49a 100644
--- a/packages/service-library/src/servicelib/socketio_utils.py
+++ b/packages/service-library/src/servicelib/socketio_utils.py
@@ -1,4 +1,4 @@
-""" Common utilities for python-socketio library
+"""Common utilities for python-socketio library
NOTE: we intentionally avoided importing socketio here to avoid adding an extra dependency at
@@ -9,7 +9,6 @@
async def cleanup_socketio_async_pubsub_manager(server_manager):
-
# NOTE: this is ugly. It seems though that python-socketio does not
# cleanup its background tasks properly.
# https://github.com/miguelgrinberg/python-socketio/discussions/1092
@@ -35,6 +34,7 @@ async def cleanup_socketio_async_pubsub_manager(server_manager):
for coro_name in [
"AsyncServer._service_task",
"AsyncSocket.schedule_ping",
+ "AsyncSocket._send_ping",
"AsyncPubSubManager._thread",
]
):
diff --git a/packages/service-library/src/servicelib/tracing.py b/packages/service-library/src/servicelib/tracing.py
index e1b3b348a723..a95f386495e6 100644
--- a/packages/service-library/src/servicelib/tracing.py
+++ b/packages/service-library/src/servicelib/tracing.py
@@ -1,6 +1,11 @@
+from collections.abc import Callable, Coroutine
from contextlib import contextmanager
-from typing import TypeAlias
+from contextvars import Token
+from functools import wraps
+from typing import Any, Final, TypeAlias
+import pyinstrument
+import pyinstrument.renderers
from opentelemetry import context as otcontext
from opentelemetry import trace
from opentelemetry.instrumentation.logging import LoggingInstrumentor
@@ -8,6 +13,10 @@
TracingContext: TypeAlias = otcontext.Context | None
+_TRACER_NAME: Final[str] = "servicelib.tracing"
+_PROFILE_ATTRIBUTE_NAME: Final[str] = "pyinstrument.profile"
+_OSPARC_TRACE_ID_HEADER: Final[str] = "x-osparc-trace-id"
+
def _is_tracing() -> bool:
return trace.get_current_span().is_recording()
@@ -21,7 +30,7 @@ def get_context() -> TracingContext:
@contextmanager
def use_tracing_context(context: TracingContext):
- token: object | None = None
+ token: Token[otcontext.Context] | None = None
if context is not None:
token = otcontext.attach(context)
try:
@@ -34,3 +43,53 @@ def use_tracing_context(context: TracingContext):
def setup_log_tracing(tracing_settings: TracingSettings):
_ = tracing_settings
LoggingInstrumentor().instrument(set_logging_format=False)
+
+
+def get_trace_id_header() -> dict[str, str] | None:
+ """Generates a dictionary containing the trace ID header if tracing is active."""
+ span = trace.get_current_span()
+ if span.is_recording():
+ trace_id = span.get_span_context().trace_id
+ trace_id_hex = format(
+ trace_id, "032x"
+ ) # Convert trace_id to 32-character hex string
+ return {_OSPARC_TRACE_ID_HEADER: trace_id_hex}
+ return None
+
+
+def with_profiled_span(
+ func: Callable[..., Coroutine[Any, Any, Any]],
+) -> Callable[..., Coroutine[Any, Any, Any]]:
+ """Decorator that wraps an async function in an OpenTelemetry span with pyinstrument profiling."""
+
+ @wraps(func)
+ async def wrapper(*args: Any, **kwargs: Any) -> Any:
+ if not _is_tracing():
+ return await func(*args, **kwargs)
+
+ tracer = trace.get_tracer(_TRACER_NAME)
+ span_name = f"{func.__module__}.{func.__qualname__}"
+
+ with tracer.start_as_current_span(span_name) as span:
+ profiler = pyinstrument.Profiler(async_mode="enabled")
+ profiler.start()
+
+ try:
+ return await func(*args, **kwargs)
+
+ except Exception as e:
+ span.record_exception(e)
+ span.set_status(trace.Status(trace.StatusCode.ERROR, f"{e}"))
+ raise
+
+ finally:
+ profiler.stop()
+ renderer = pyinstrument.renderers.ConsoleRenderer(
+ unicode=True, color=False, show_all=True
+ )
+ span.set_attribute(
+ _PROFILE_ATTRIBUTE_NAME,
+ profiler.output(renderer=renderer),
+ )
+
+ return wrapper
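
A hedged usage sketch combining the two helpers above: with_profiled_span wraps an async call in a profiled span, and get_trace_id_header lets the caller propagate the current trace id to a downstream service (the httpx client and URL are illustrative assumptions):

    import httpx

    from servicelib.tracing import get_trace_id_header, with_profiled_span

    @with_profiled_span
    async def call_downstream(client: httpx.AsyncClient) -> int:
        # attach the x-osparc-trace-id header only when tracing is active
        headers = get_trace_id_header() or {}
        response = await client.get("https://example.invalid/healthz", headers=headers)
        return response.status_code
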
diff --git a/packages/service-library/src/servicelib/utils.py b/packages/service-library/src/servicelib/utils.py
index e6de282068cc..e6ae4b147c51 100644
--- a/packages/service-library/src/servicelib/utils.py
+++ b/packages/service-library/src/servicelib/utils.py
@@ -1,4 +1,4 @@
-""" General utils
+"""General utils
IMPORTANT: lowest level module
I order to avoid cyclic dependences, please
@@ -245,7 +245,7 @@ async def limited_as_completed(
future.set_name(f"{tasks_group_prefix}-{future.get_name()}")
pending_futures.add(future)
- except (StopIteration, StopAsyncIteration): # noqa: PERF203
+ except (StopIteration, StopAsyncIteration):
completed_all_awaitables = True
if not pending_futures:
return
@@ -294,8 +294,7 @@ async def limited_gather(
log: logging.Logger = _DEFAULT_LOGGER,
limit: int = _DEFAULT_LIMITED_CONCURRENCY,
tasks_group_prefix: str | None = None,
-) -> list[T]:
- ...
+) -> list[T]: ...
@overload
@@ -305,8 +304,7 @@ async def limited_gather(
log: logging.Logger = _DEFAULT_LOGGER,
limit: int = _DEFAULT_LIMITED_CONCURRENCY,
tasks_group_prefix: str | None = None,
-) -> list[T | BaseException]:
- ...
+) -> list[T | BaseException]: ...
async def limited_gather(
diff --git a/packages/service-library/src/servicelib/utils_meta.py b/packages/service-library/src/servicelib/utils_meta.py
index 6ee48fd4d56d..1109e3089c59 100644
--- a/packages/service-library/src/servicelib/utils_meta.py
+++ b/packages/service-library/src/servicelib/utils_meta.py
@@ -1,13 +1,16 @@
-""" Utilities to implement _meta.py
-
-"""
+"""Utilities to implement _meta.py"""
+import re
from importlib.metadata import distribution
from models_library.basic_types import VersionStr
from packaging.version import Version
from pydantic import TypeAdapter
+_APP_NAME_PATTERN = re.compile(
+ r"^[a-z0-9]+(-[a-z0-9]+)*$"
+) # matches a lowercase alphanumeric string with words separated by single dashes (no whitespace)
+
class PackageInfo:
"""Thin wrapper around pgk_resources.Distribution to access package distribution metadata
@@ -29,11 +32,32 @@ def __init__(self, package_name: str):
package_name: as defined in 'setup.name'
"""
self._distribution = distribution(package_name)
+ # property checks
+ if re.match(_APP_NAME_PATTERN, self.app_name) is None:
+ msg = (
+ f"Invalid package name {self.app_name}. "
+ "It must be all lowercase and words separated by dashes ('-')."
+ )
+ raise ValueError(msg)
@property
def project_name(self) -> str:
return self._distribution.metadata["Name"]
+ @property
+ def app_name(self) -> str:
+ """
+ Returns the application name as a lowercase string with words separated by dashes ('-').
+ """
+ return self._distribution.metadata["Name"]
+
+ @property
+ def prometheus_friendly_app_name(self) -> str:
+ """
+ Returns a version of the app name which is compatible with Prometheus metrics naming conventions (no dashes).
+ """
+ return self.app_name.replace("-", "_")
+
@property
def version(self) -> Version:
return Version(self._distribution.version)
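
A short sketch of how the new properties might be consumed in a service's _meta.py (the package name below is illustrative; any installed distribution whose name matches the lowercase-dash pattern works):

    from servicelib.utils_meta import PackageInfo

    info = PackageInfo(package_name="simcore-service-library")  # hypothetical package name
    APP_NAME: str = info.app_name                                 # e.g. "simcore-service-library"
    PROMETHEUS_APP_NAME: str = info.prometheus_friendly_app_name  # dashes replaced by underscores
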
diff --git a/packages/service-library/tests/aiohttp/conftest.py b/packages/service-library/tests/aiohttp/conftest.py
index 1891ee17d157..f2055a80b0c3 100644
--- a/packages/service-library/tests/aiohttp/conftest.py
+++ b/packages/service-library/tests/aiohttp/conftest.py
@@ -1,2 +1,20 @@
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
+
+
+from collections.abc import Iterator
+
+import pytest
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+from pytest_mock import MockerFixture
+
+
+@pytest.fixture
+def mock_otel_collector(mocker: MockerFixture) -> Iterator[InMemorySpanExporter]:
+ memory_exporter = InMemorySpanExporter()
+ span_processor = SimpleSpanProcessor(memory_exporter)
+ mocker.patch(
+ "servicelib.aiohttp.tracing._create_span_processor", return_value=span_processor
+ )
+ yield memory_exporter
diff --git a/packages/service-library/tests/aiohttp/long_running_tasks/conftest.py b/packages/service-library/tests/aiohttp/long_running_tasks/conftest.py
index 8fe29473cfcb..3bf527ab2c82 100644
--- a/packages/service-library/tests/aiohttp/long_running_tasks/conftest.py
+++ b/packages/service-library/tests/aiohttp/long_running_tasks/conftest.py
@@ -12,17 +12,26 @@
from pydantic import BaseModel, TypeAdapter
from pytest_simcore.helpers.assert_checks import assert_status
from servicelib.aiohttp import long_running_tasks, status
-from servicelib.aiohttp.long_running_tasks.server import TaskId
from servicelib.aiohttp.requests_validation import parse_request_query_parameters_as
-from servicelib.long_running_tasks._task import TaskContext
+from servicelib.long_running_tasks.models import (
+ TaskGet,
+ TaskId,
+ TaskProgress,
+ TaskStatus,
+)
+from servicelib.long_running_tasks.task import TaskContext, TaskRegistry
from tenacity.asyncio import AsyncRetrying
from tenacity.retry import retry_if_exception_type
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed
+class _TestingError(Exception):
+ pass
+
+
async def _string_list_task(
- task_progress: long_running_tasks.server.TaskProgress,
+ progress: TaskProgress,
num_strings: int,
sleep_time: float,
fail: bool,
@@ -31,10 +40,10 @@ async def _string_list_task(
for index in range(num_strings):
generated_strings.append(f"{index}")
await asyncio.sleep(sleep_time)
- task_progress.update(message="generated item", percent=index / num_strings)
+ await progress.update(message="generated item", percent=index / num_strings)
if fail:
msg = "We were asked to fail!!"
- raise RuntimeError(msg)
+ raise _TestingError(msg)
# NOTE: this code is used just for the sake of not returning the default 200
return web.json_response(
@@ -42,6 +51,9 @@ async def _string_list_task(
)
+TaskRegistry.register(_string_list_task, allowed_errors=(_TestingError,))
+
+
@pytest.fixture
def task_context(faker: Faker) -> TaskContext:
return {"user_id": faker.pyint(), "product": faker.pystr()}
@@ -68,7 +80,7 @@ async def generate_list_strings(request: web.Request) -> web.Response:
query_params = parse_request_query_parameters_as(_LongTaskQueryParams, request)
return await long_running_tasks.server.start_long_running_task(
request,
- _string_list_task,
+ _string_list_task.__name__,
num_strings=query_params.num_strings,
sleep_time=query_params.sleep_time,
fail=query_params.fail,
@@ -93,7 +105,7 @@ async def _caller(client: TestClient, **query_kwargs) -> TaskId:
data, error = await assert_status(resp, status.HTTP_202_ACCEPTED)
assert data
assert not error
- task_get = TypeAdapter(long_running_tasks.server.TaskGet).validate_python(data)
+ task_get = TypeAdapter(TaskGet).validate_python(data)
return task_get.task_id
return _caller
@@ -123,7 +135,7 @@ async def _waiter(
data, error = await assert_status(result, status.HTTP_200_OK)
assert data
assert not error
- task_status = long_running_tasks.server.TaskStatus.model_validate(data)
+ task_status = TaskStatus.model_validate(data)
assert task_status
assert task_status.done
diff --git a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks.py b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks.py
index 71d5501b2ed2..49604fd3a15e 100644
--- a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks.py
+++ b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks.py
@@ -20,22 +20,38 @@
from pydantic import TypeAdapter
from pytest_simcore.helpers.assert_checks import assert_status
from servicelib.aiohttp import long_running_tasks, status
-from servicelib.aiohttp.long_running_tasks.server import TaskGet, TaskId
from servicelib.aiohttp.rest_middlewares import append_rest_middlewares
-from servicelib.long_running_tasks._task import TaskContext
+from servicelib.long_running_tasks.models import TaskGet, TaskId, TaskStatus
+from servicelib.long_running_tasks.task import TaskContext
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
from tenacity.asyncio import AsyncRetrying
from tenacity.retry import retry_if_exception_type
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed
+pytest_simcore_core_services_selection = [
+ "rabbit",
+]
+
@pytest.fixture
-def app(server_routes: web.RouteTableDef) -> web.Application:
+def app(
+ server_routes: web.RouteTableDef,
+ use_in_memory_redis: RedisSettings,
+ rabbit_service: RabbitSettings,
+) -> web.Application:
app = web.Application()
app.add_routes(server_routes)
# this adds enveloping and error middlewares
append_rest_middlewares(app, api_version="")
- long_running_tasks.server.setup(app, router_prefix="/futures")
+ long_running_tasks.server.setup(
+ app,
+ redis_settings=use_in_memory_redis,
+ rabbit_settings=rabbit_service,
+ lrt_namespace="test",
+ router_prefix="/futures",
+ )
return app
@@ -70,7 +86,7 @@ async def test_workflow(
data, error = await assert_status(result, status.HTTP_200_OK)
assert data
assert not error
- task_status = long_running_tasks.server.TaskStatus.model_validate(data)
+ task_status = TaskStatus.model_validate(data)
assert task_status
progress_updates.append(
(task_status.task_progress.message, task_status.task_progress.percent)
@@ -97,7 +113,7 @@ async def test_workflow(
# now get the result
result_url = client.app.router["get_task_result"].url_for(task_id=task_id)
result = await client.get(f"{result_url}")
- task_result, error = await assert_status(result, status.HTTP_201_CREATED)
+ task_result, error = await assert_status(result, status.HTTP_200_OK)
assert task_result
assert not error
assert task_result == [f"{x}" for x in range(10)]
@@ -111,7 +127,7 @@ async def test_workflow(
[
("GET", "get_task_status"),
("GET", "get_task_result"),
- ("DELETE", "cancel_and_delete_task"),
+ ("DELETE", "remove_task"),
],
)
async def test_get_task_wrong_task_id_raises_not_found(
@@ -127,6 +143,7 @@ async def test_failing_task_returns_error(
client: TestClient,
start_long_running_task: Callable[[TestClient, Any], Awaitable[TaskId]],
wait_for_task: Callable[[TestClient, TaskId, TaskContext], Awaitable[None]],
+ caplog: pytest.LogCaptureFixture,
):
assert client.app
task_id = await start_long_running_task(client, fail=f"{True}")
@@ -138,10 +155,17 @@ async def test_failing_task_returns_error(
data, error = await assert_status(result, status.HTTP_500_INTERNAL_SERVER_ERROR)
assert not data
assert error
- assert "errors" in error
- assert len(error["errors"]) == 1
- assert error["errors"][0]["code"] == "RuntimeError"
- assert error["errors"][0]["message"] == "We were asked to fail!!"
+
+ # The error should contain a supportId field for tracking
+ assert "supportId" in error
+ assert isinstance(error["supportId"], str)
+ assert len(error["supportId"]) > 0
+
+ # The actual error details should be logged, not returned in response
+ log_messages = caplog.text
+ assert "OEC" in log_messages
+ assert "_TestingError" in log_messages
+ assert "We were asked to fail!!" in log_messages
async def test_get_results_before_tasks_finishes_returns_404(
@@ -164,7 +188,7 @@ async def test_cancel_task(
task_id = await start_long_running_task(client)
# cancel the task
- delete_url = client.app.router["cancel_and_delete_task"].url_for(task_id=task_id)
+ delete_url = client.app.router["remove_task"].url_for(task_id=task_id)
result = await client.delete(f"{delete_url}")
data, error = await assert_status(result, status.HTTP_204_NO_CONTENT)
assert not data
@@ -216,7 +240,9 @@ async def test_list_tasks(
# the task name is properly formatted
assert all(
- task.task_name == "POST /long_running_task:start?num_strings=10&sleep_time=0.2"
+ task.task_name.startswith(
+ "POST /long_running_task:start?num_strings=10&sleep_time="
+ )
for task in list_of_tasks
)
# now wait for them to finish
diff --git a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_client.py b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_client.py
index b211cc3d1cad..9e8c9204acef 100644
--- a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_client.py
+++ b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_client.py
@@ -15,16 +15,32 @@
long_running_task_request,
)
from servicelib.aiohttp.rest_middlewares import append_rest_middlewares
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
from yarl import URL
+pytest_simcore_core_services_selection = [
+ "rabbit",
+]
+
@pytest.fixture
-def app(server_routes: web.RouteTableDef) -> web.Application:
+def app(
+ server_routes: web.RouteTableDef,
+ use_in_memory_redis: RedisSettings,
+ rabbit_service: RabbitSettings,
+) -> web.Application:
app = web.Application()
app.add_routes(server_routes)
# this adds enveloping and error middlewares
append_rest_middlewares(app, api_version="")
- long_running_tasks.server.setup(app, router_prefix="/futures")
+ long_running_tasks.server.setup(
+ app,
+ redis_settings=use_in_memory_redis,
+ rabbit_settings=rabbit_service,
+ lrt_namespace="test",
+ router_prefix="/futures",
+ )
return app
@@ -50,7 +66,7 @@ async def test_long_running_task_request_raises_400(
client: TestClient, long_running_task_url: URL
):
# missing parameters raises
- with pytest.raises(ClientResponseError):
+ with pytest.raises(ClientResponseError): # noqa: PT012
async for _ in long_running_task_request(
client.session, long_running_task_url, None
):
@@ -87,7 +103,7 @@ async def test_long_running_task_request_timeout(
):
assert client.app
task: LRTask | None = None
- with pytest.raises(asyncio.TimeoutError):
+ with pytest.raises(asyncio.TimeoutError): # noqa: PT012
async for task in long_running_task_request(
client.session,
long_running_task_url.with_query(num_strings=10, sleep_time=1),
diff --git a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_with_task_context.py b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_with_task_context.py
index 0b37c9416692..cef4a845ab8d 100644
--- a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_with_task_context.py
+++ b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_with_task_context.py
@@ -22,12 +22,17 @@
from servicelib.aiohttp.long_running_tasks._server import (
RQT_LONG_RUNNING_TASKS_CONTEXT_KEY,
)
-from servicelib.aiohttp.long_running_tasks.server import TaskGet, TaskId
from servicelib.aiohttp.requests_validation import parse_request_query_parameters_as
from servicelib.aiohttp.rest_middlewares import append_rest_middlewares
from servicelib.aiohttp.typing_extension import Handler
-from servicelib.long_running_tasks._task import TaskContext
-
+from servicelib.long_running_tasks.models import TaskGet, TaskId
+from servicelib.long_running_tasks.task import TaskContext
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
+
+pytest_simcore_core_services_selection = [
+ "rabbit",
+]
# WITH TASK CONTEXT
# NOTE: as the long running task framework may be used in any number of services
# in some cases there might be specific so-called task contexts.
@@ -61,7 +66,10 @@ async def _test_task_context_decorator(
@pytest.fixture
def app_with_task_context(
- server_routes: web.RouteTableDef, task_context_decorator
+ server_routes: web.RouteTableDef,
+ task_context_decorator,
+ use_in_memory_redis: RedisSettings,
+ rabbit_service: RabbitSettings,
) -> web.Application:
app = web.Application()
app.add_routes(server_routes)
@@ -69,6 +77,9 @@ def app_with_task_context(
append_rest_middlewares(app, api_version="")
long_running_tasks.server.setup(
app,
+ redis_settings=use_in_memory_redis,
+ rabbit_settings=rabbit_service,
+ lrt_namespace="test",
router_prefix="/futures_with_task_context",
task_request_context_decorator=task_context_decorator,
)
@@ -151,7 +162,7 @@ async def test_get_task_result(
await assert_status(resp, status.HTTP_404_NOT_FOUND)
# calling with context should find the task
resp = await client_with_task_context.get(f"{result_url.with_query(task_context)}")
- await assert_status(resp, status.HTTP_201_CREATED)
+ await assert_status(resp, status.HTTP_200_OK)
async def test_cancel_task(
@@ -161,7 +172,7 @@ async def test_cancel_task(
):
assert client_with_task_context.app
task_id = await start_long_running_task(client_with_task_context)
- cancel_url = client_with_task_context.app.router["cancel_and_delete_task"].url_for(
+ cancel_url = client_with_task_context.app.router["remove_task"].url_for(
task_id=task_id
)
# calling cancel without task context should find nothing
diff --git a/packages/service-library/tests/aiohttp/test_application_setup.py b/packages/service-library/tests/aiohttp/test_application_setup.py
index 94af1c07a335..47b83da7576f 100644
--- a/packages/service-library/tests/aiohttp/test_application_setup.py
+++ b/packages/service-library/tests/aiohttp/test_application_setup.py
@@ -1,52 +1,30 @@
-# pylint:disable=unused-variable
-# pylint:disable=unused-argument
-# pylint:disable=redefined-outer-name
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
-from unittest.mock import Mock
+
+import logging
import pytest
from aiohttp import web
+from pytest_mock import MockerFixture, MockType
from servicelib.aiohttp.application_keys import APP_CONFIG_KEY
from servicelib.aiohttp.application_setup import (
DependencyError,
ModuleCategory,
SkipModuleSetupError,
app_module_setup,
+ ensure_single_setup,
is_setup_completed,
)
-log = Mock()
-
-
-@app_module_setup("package.bar", ModuleCategory.ADDON, logger=log)
-def setup_bar(app: web.Application, arg1, *, raise_skip: bool = False):
- return True
-
-
-@app_module_setup("package.foo", ModuleCategory.ADDON, logger=log)
-def setup_foo(app: web.Application, arg1, kargs=33, *, raise_skip: bool = False):
- if raise_skip:
- raise SkipModuleSetupError(reason="explicit skip")
- return True
-
-
-@app_module_setup(
- "package.zee", ModuleCategory.ADDON, config_enabled="main.zee_enabled", logger=log
-)
-def setup_zee(app: web.Application, arg1, kargs=55):
- return True
-
-@app_module_setup(
- "package.needs_foo",
- ModuleCategory.SYSTEM,
- depends=[
- "package.foo",
- ],
- logger=log,
-)
-def setup_needs_foo(app: web.Application, arg1, kargs=55):
- return True
+@pytest.fixture
+def mock_logger(mocker: MockerFixture) -> MockType:
+ logger_mock: MockType = mocker.create_autospec(logging.Logger, instance=True)
+ return logger_mock
@pytest.fixture
@@ -59,55 +37,150 @@ def app_config() -> dict:
@pytest.fixture
-def app(app_config):
+def app_settings_key() -> web.AppKey:
+ return web.AppKey("test_app_settings", object)
+
+
+@pytest.fixture
+def app(app_config: dict) -> web.Application:
_app = web.Application()
_app[APP_CONFIG_KEY] = app_config
return _app
-def test_setup_config_enabled(app_config, app):
+def test_setup_config_enabled(
+ app_config: dict, app: web.Application, app_settings_key: web.AppKey
+):
+
+ @app_module_setup(
+ "package.zee",
+ ModuleCategory.ADDON,
+ app_settings_key=app_settings_key,
+ # legacy support for config_enabled
+ config_enabled="main.zee_enabled",
+ )
+ def setup_zee(app: web.Application, arg) -> bool:
+ assert arg
+ return True
+
assert setup_zee(app, 1)
assert setup_zee.metadata()["config_enabled"] == "main.zee_enabled"
app_config["main"]["zee_enabled"] = False
+
assert not setup_zee(app, 2)
-def test_setup_dependencies(app_config, app):
+def test_setup_dependencies(app: web.Application, app_settings_key: web.AppKey):
+
+ @app_module_setup(
+ "package.foo", ModuleCategory.ADDON, app_settings_key=app_settings_key
+ )
+ def setup_foo(app: web.Application) -> bool:
+ return True
+ @app_module_setup(
+ "package.needs_foo",
+ ModuleCategory.SYSTEM,
+ app_settings_key=app_settings_key,
+ depends=[
+ # This module needs foo to be set up first
+ "package.foo",
+ ],
+ )
+ def setup_needs_foo(app: web.Application) -> bool:
+ return True
+
+ # setup_foo is not called yet
with pytest.raises(DependencyError):
- setup_needs_foo(app, 1)
+ setup_needs_foo(app)
- assert setup_foo(app, 1)
- assert setup_needs_foo(app, 2)
+ # ok
+ assert setup_foo(app)
+ assert setup_needs_foo(app)
+ # meta
assert setup_needs_foo.metadata()["dependencies"] == [
setup_foo.metadata()["module_name"],
]
-def test_marked_setup(app_config, app):
- assert setup_foo(app, 1)
+def test_marked_setup(
+ app_config: dict, app: web.Application, app_settings_key: web.AppKey
+):
+ @app_module_setup(
+ "package.foo", ModuleCategory.ADDON, app_settings_key=app_settings_key
+ )
+ def setup_foo(app: web.Application) -> bool:
+ return True
+ assert setup_foo(app)
assert setup_foo.metadata()["module_name"] == "package.foo"
assert is_setup_completed(setup_foo.metadata()["module_name"], app)
app_config["foo"]["enabled"] = False
- assert not setup_foo(app, 2)
+ assert not setup_foo(app)
+
+
+def test_skip_setup(
+ app: web.Application, mock_logger: MockType, app_settings_key: web.AppKey
+):
+ @app_module_setup(
+ "package.foo",
+ ModuleCategory.ADDON,
+ app_settings_key=app_settings_key,
+ logger=mock_logger,
+ )
+ def setup_foo(app: web.Application, *, raise_skip: bool = False) -> bool:
+ if raise_skip:
+ raise SkipModuleSetupError(reason="explicit skip")
+ return True
+
+ assert not setup_foo(app, raise_skip=True)
+ assert setup_foo(app)
+
+ assert mock_logger.info.called
+ args = [call.args[-1] for call in mock_logger.info.mock_calls]
+ assert any("explicit skip" in arg for arg in args)
+
+
+def setup_basic(app: web.Application) -> bool:
+ return True
+
+
+def setup_that_raises(app: web.Application) -> bool:
+ error_msg = "Setup failed"
+ raise ValueError(error_msg)
+
+
+def test_ensure_single_setup_runs_once(app: web.Application, mock_logger: MockType):
+ decorated = ensure_single_setup("test.module", logger=mock_logger)(setup_basic)
+
+ # First call succeeds
+ assert decorated(app)
+ assert is_setup_completed("test.module", app)
+
+ # Second call skips
+ assert not decorated(app)
+
+def test_ensure_single_setup_error_handling(
+ app: web.Application, mock_logger: MockType
+):
+ decorated = ensure_single_setup("test.error", logger=mock_logger)(setup_that_raises)
-def test_skip_setup(app_config, app):
- try:
- log.reset_mock()
+ with pytest.raises(ValueError, match="Setup failed"):
+ decorated(app)
+ assert not is_setup_completed("test.error", app)
- assert not setup_foo(app, 1, raise_skip=True)
- # FIXME: mock logger
- # assert log.warning.called
- # warn_msg = log.warning.call_args()[0]
- # assert "package.foo" in warn_msg
- # assert "explicit skip" in warn_msg
+def test_ensure_single_setup_multiple_modules(
+ app: web.Application, mock_logger: MockType
+):
+ decorated1 = ensure_single_setup("module1", logger=mock_logger)(setup_basic)
+ decorated2 = ensure_single_setup("module2", logger=mock_logger)(setup_basic)
- assert setup_foo(app, 1)
- finally:
- log.reset_mock()
+ assert decorated1(app)
+ assert decorated2(app)
+ assert is_setup_completed("module1", app)
+ assert is_setup_completed("module2", app)
diff --git a/packages/service-library/tests/aiohttp/test_client_session.py b/packages/service-library/tests/aiohttp/test_client_session.py
index 74b91655c31a..792c254ee73b 100644
--- a/packages/service-library/tests/aiohttp/test_client_session.py
+++ b/packages/service-library/tests/aiohttp/test_client_session.py
@@ -6,7 +6,7 @@
from collections.abc import Callable, Iterator
from typing import Any
-import pytest
+import pytest_asyncio
from aiohttp import web
from aiohttp.client import ClientSession
from aiohttp.test_utils import TestServer
@@ -18,8 +18,8 @@
)
-@pytest.fixture
-def server(event_loop, aiohttp_server: Callable) -> Iterator[TestServer]:
+@pytest_asyncio.fixture(loop_scope="function", scope="function")
+async def server(aiohttp_server: Callable) -> Iterator[TestServer]:
async def echo(request):
got = await request.json()
return web.json_response(data=got)
@@ -31,7 +31,7 @@ async def echo(request):
assert not app.get(APP_CLIENT_SESSION_KEY)
- test_server = event_loop.run_until_complete(aiohttp_server(app))
+ test_server = await aiohttp_server(app)
assert isinstance(app[APP_CLIENT_SESSION_KEY], ClientSession)
assert not app[APP_CLIENT_SESSION_KEY].closed
diff --git a/packages/service-library/tests/aiohttp/test_monitor_slow_callbacks.py b/packages/service-library/tests/aiohttp/test_monitor_slow_callbacks.py
index 6c428eb485d4..cb30d1f73385 100644
--- a/packages/service-library/tests/aiohttp/test_monitor_slow_callbacks.py
+++ b/packages/service-library/tests/aiohttp/test_monitor_slow_callbacks.py
@@ -8,33 +8,35 @@
from collections.abc import Iterable
import pytest
+import pytest_asyncio
from servicelib.aiohttp import monitor_slow_callbacks
-from servicelib.aiohttp.aiopg_utils import DatabaseError
from tenacity import retry
from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_fixed
-async def slow_task(delay):
- time.sleep(delay) # noqa: ASYNC101
+async def _slow_function(delay):
+ time.sleep(delay) # noqa: ASYNC251
@retry(wait=wait_fixed(1), stop=stop_after_attempt(2))
-async def fails_to_reach_pg_db():
- raise DatabaseError
+async def _raising_function():
+ msg = "This function is expected to raise an error"
+ raise RuntimeError(msg)
-@pytest.fixture
-def incidents_manager(event_loop) -> dict:
+@pytest_asyncio.fixture(loop_scope="function", scope="function")
+async def incidents_manager() -> dict:
incidents = []
monitor_slow_callbacks.enable(slow_duration_secs=0.2, incidents=incidents)
- asyncio.ensure_future(slow_task(0.3), loop=event_loop) # noqa: RUF006
- asyncio.ensure_future(slow_task(0.3), loop=event_loop) # noqa: RUF006
- asyncio.ensure_future(slow_task(0.4), loop=event_loop) # noqa: RUF006
+ event_loop = asyncio.get_running_loop()
+ asyncio.ensure_future(_slow_function(0.3), loop=event_loop) # noqa: RUF006
+ asyncio.ensure_future(_slow_function(0.3), loop=event_loop) # noqa: RUF006
+ asyncio.ensure_future(_slow_function(0.4), loop=event_loop) # noqa: RUF006
incidents_pg = None # aiopg_utils.monitor_pg_responsiveness.enable()
- asyncio.ensure_future(fails_to_reach_pg_db(), loop=event_loop) # noqa: RUF006
+ asyncio.ensure_future(_raising_function(), loop=event_loop) # noqa: RUF006
return {"slow_callback": incidents, "posgres_responsive": incidents_pg}
@@ -46,6 +48,10 @@ def disable_monitoring() -> Iterable[None]:
asyncio.events.Handle._run = original_handler # noqa: SLF001
+@pytest.mark.skip(
+ reason="log_slow_callbacks is not supported out-of-the-box with uvloop."
+ " SEE https://github.com/ITISFoundation/osparc-simcore/issues/8047"
+)
async def test_slow_task_incident(disable_monitoring: None, incidents_manager: dict):
await asyncio.sleep(2)
assert len(incidents_manager["slow_callback"]) == 3
diff --git a/packages/service-library/tests/aiohttp/test_requests_validation.py b/packages/service-library/tests/aiohttp/test_requests_validation.py
index 97c2b317b6ac..a901cc4d8745 100644
--- a/packages/service-library/tests/aiohttp/test_requests_validation.py
+++ b/packages/service-library/tests/aiohttp/test_requests_validation.py
@@ -7,11 +7,13 @@
from uuid import UUID
import pytest
+import pytest_asyncio
from aiohttp import web
from aiohttp.test_utils import TestClient, make_mocked_request
from common_library.json_serialization import json_dumps
from faker import Faker
from models_library.rest_base import RequestParameters, StrictRequestParameters
+from models_library.rest_error import EnvelopedError
from models_library.rest_ordering import (
OrderBy,
OrderDirection,
@@ -98,8 +100,8 @@ def create_fake(cls, faker: Faker):
return cls(x=faker.pyint(), y=faker.pybool(), z=Sub.create_fake(faker))
-@pytest.fixture
-def client(event_loop, aiohttp_client: Callable, faker: Faker) -> TestClient:
+@pytest_asyncio.fixture(loop_scope="function", scope="function")
+async def client(aiohttp_client: Callable, faker: Faker) -> TestClient:
"""
Some app that:
@@ -115,18 +117,10 @@ async def _handler(request: web.Request) -> web.Response:
{**dict(request.app), **dict(request)}
)
- path_params = parse_request_path_parameters_as(
- MyRequestPathParams, request, use_enveloped_error_v1=False
- )
- query_params = parse_request_query_parameters_as(
- MyRequestQueryParams, request, use_enveloped_error_v1=False
- )
- headers_params = parse_request_headers_as(
- MyRequestHeadersParams, request, use_enveloped_error_v1=False
- )
- body = await parse_request_body_as(
- MyBody, request, use_enveloped_error_v1=False
- )
+ path_params = parse_request_path_parameters_as(MyRequestPathParams, request)
+ query_params = parse_request_query_parameters_as(MyRequestQueryParams, request)
+ headers_params = parse_request_headers_as(MyRequestHeadersParams, request)
+ body = await parse_request_body_as(MyBody, request)
# ---------------------------
return web.json_response(
@@ -162,7 +156,7 @@ async def _middleware(request: web.Request, handler):
# adds handler
app.add_routes([web.get("/projects/{project_uuid}", _handler)])
- return event_loop.run_until_complete(aiohttp_client(app))
+ return await aiohttp_client(app)
@pytest.fixture
@@ -229,19 +223,12 @@ async def test_parse_request_with_invalid_path_params(
assert r.status == status.HTTP_422_UNPROCESSABLE_ENTITY, f"{await r.text()}"
response_body = await r.json()
- assert response_body["error"].pop("resource")
- assert response_body == {
- "error": {
- "msg": "Invalid parameter/s 'project_uuid' in request path",
- "details": [
- {
- "loc": "project_uuid",
- "msg": "Input should be a valid UUID, invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `i` at 1",
- "type": "uuid_parsing",
- }
- ],
- }
- }
+
+ error_model = EnvelopedError.model_validate(response_body).error
+ assert error_model.message == "Invalid parameter/s 'project_uuid' in request path"
+ assert error_model.status == status.HTTP_422_UNPROCESSABLE_ENTITY
+ assert error_model.errors[0].field == "project_uuid"
+ assert error_model.errors[0].code == "uuid_parsing"
async def test_parse_request_with_invalid_query_params(
@@ -260,19 +247,11 @@ async def test_parse_request_with_invalid_query_params(
assert r.status == status.HTTP_422_UNPROCESSABLE_ENTITY, f"{await r.text()}"
response_body = await r.json()
- assert response_body["error"].pop("resource")
- assert response_body == {
- "error": {
- "msg": "Invalid parameter/s 'label' in request query",
- "details": [
- {
- "loc": "label",
- "msg": "Field required",
- "type": "missing",
- }
- ],
- }
- }
+ error_model = EnvelopedError.model_validate(response_body).error
+ assert error_model.message == "Invalid parameter/s 'label' in request query"
+ assert error_model.status == status.HTTP_422_UNPROCESSABLE_ENTITY
+ assert error_model.errors[0].field == "label"
+ assert error_model.errors[0].code == "missing"
async def test_parse_request_with_invalid_body(
@@ -292,25 +271,11 @@ async def test_parse_request_with_invalid_body(
response_body = await r.json()
- assert response_body["error"].pop("resource")
-
- assert response_body == {
- "error": {
- "msg": "Invalid field/s 'x, z' in request body",
- "details": [
- {
- "loc": "x",
- "msg": "Field required",
- "type": "missing",
- },
- {
- "loc": "z",
- "msg": "Field required",
- "type": "missing",
- },
- ],
- }
- }
+ error_model = EnvelopedError.model_validate(response_body).error
+ assert error_model.message == "Invalid field/s 'x, z' in request body"
+ assert error_model.status == status.HTTP_422_UNPROCESSABLE_ENTITY
+ assert error_model.errors[0].field == "x"
+ assert error_model.errors[0].code == "missing"
async def test_parse_request_with_invalid_json_body(
@@ -348,19 +313,15 @@ async def test_parse_request_with_invalid_headers_params(
assert r.status == status.HTTP_422_UNPROCESSABLE_ENTITY, f"{await r.text()}"
response_body = await r.json()
- assert response_body["error"].pop("resource")
- assert response_body == {
- "error": {
- "msg": "Invalid parameter/s 'X-Simcore-User-Agent' in request headers",
- "details": [
- {
- "loc": "X-Simcore-User-Agent",
- "msg": "Field required",
- "type": "missing",
- }
- ],
- }
- }
+
+ error_model = EnvelopedError.model_validate(response_body).error
+ assert (
+ error_model.message
+ == "Invalid parameter/s 'X-Simcore-User-Agent' in request headers"
+ )
+ assert error_model.status == status.HTTP_422_UNPROCESSABLE_ENTITY
+ assert error_model.errors[0].field == "X-Simcore-User-Agent"
+ assert error_model.errors[0].code == "missing"
def test_parse_request_query_parameters_as_with_order_by_query_models():
diff --git a/packages/service-library/tests/aiohttp/test_rest_middlewares.py b/packages/service-library/tests/aiohttp/test_rest_middlewares.py
index de5e80b85ae5..26884dbc11cd 100644
--- a/packages/service-library/tests/aiohttp/test_rest_middlewares.py
+++ b/packages/service-library/tests/aiohttp/test_rest_middlewares.py
@@ -14,6 +14,7 @@
from aiohttp import web
from aiohttp.test_utils import TestClient
from common_library.json_serialization import json_dumps
+from pytest_mock import MockerFixture
from servicelib.aiohttp import status
from servicelib.aiohttp.rest_middlewares import (
envelope_middleware_factory,
@@ -127,6 +128,10 @@ async def raise_success_with_text(_request: web.Request):
# NOTE: explicitly NOT enveloped!
raise web.HTTPOk(reason="I'm ok", text=json.dumps({"ok": True}))
+ @staticmethod
+ async def raise_success_with_raw_text(_request: web.Request):
+ raise web.HTTPOk(text="I'm ok") # NOT ALLOWED!
+
@pytest.fixture
async def client(
@@ -157,6 +162,10 @@ async def client(
("/v1/raise_success", Handlers.raise_success),
("/v1/raise_success_with_reason", Handlers.raise_success_with_reason),
("/v1/raise_success_with_text", Handlers.raise_success_with_text),
+ (
+ "/v1/raise_success_with_raw_text",
+ Handlers.raise_success_with_raw_text,
+ ),
]
]
)
@@ -239,7 +248,7 @@ async def test_raised_unhandled_exception(
#
# ERROR servicelib.aiohttp.rest_middlewares:rest_middlewares.py:75 We apologize ... [OEC:128594540599840].
# {
- # "exception_details": "Unexpected error",
+ # "exception_string": "Unexpected error",
# "error_code": "OEC:128594540599840",
# "context": {
# "request.remote": "127.0.0.1",
@@ -261,7 +270,7 @@ async def test_raised_unhandled_exception(
assert response.method in caplog.text
assert response.url.path in caplog.text
- assert "exception_details" in caplog.text
+ assert "exception_string" in caplog.text
assert "request.remote" in caplog.text
assert "context" in caplog.text
assert SomeUnexpectedError.__name__ in caplog.text
@@ -269,3 +278,117 @@ async def test_raised_unhandled_exception(
# log OEC
assert "OEC:" in caplog.text
+
+
+async def test_not_implemented_error_is_501(client: TestClient):
+ """Test that NotImplementedError is correctly mapped to HTTP 501 NOT IMPLEMENTED."""
+ response = await client.get(
+ "/v1/raise_exception", params={"exc": NotImplementedError.__name__}
+ )
+ assert response.status == status.HTTP_501_NOT_IMPLEMENTED
+
+ # Check that the response is properly enveloped
+ payload = await response.json()
+ assert is_enveloped(payload)
+
+ # Verify error details
+ data, error = unwrap_envelope(payload)
+ assert not data
+ assert error
+ assert error.get("status") == status.HTTP_501_NOT_IMPLEMENTED
+
+
+async def test_timeout_error_is_504(client: TestClient):
+ """Test that TimeoutError is correctly mapped to HTTP 504 GATEWAY TIMEOUT."""
+ response = await client.get(
+ "/v1/raise_exception", params={"exc": asyncio.TimeoutError.__name__}
+ )
+ assert response.status == status.HTTP_504_GATEWAY_TIMEOUT
+
+ # Check that the response is properly enveloped
+ payload = await response.json()
+ assert is_enveloped(payload)
+
+ # Verify error details
+ data, error = unwrap_envelope(payload)
+ assert not data
+ assert error
+ assert error.get("status") == status.HTTP_504_GATEWAY_TIMEOUT
+
+
+async def test_exception_in_non_api_route(client: TestClient):
+ """Test how exceptions are handled in routes not under the API path."""
+ response = await client.get("/free/raise_exception")
+
+ # This should be a raw exception, not processed by our middleware
+ assert response.status == status.HTTP_500_INTERNAL_SERVER_ERROR
+
+ # Should not be enveloped since it's outside the API path
+ text = await response.text()
+ try:
+ # If it happens to be JSON, check it's not enveloped
+ payload = json.loads(text)
+ assert not is_enveloped(payload)
+ except json.JSONDecodeError:
+ # If it's not JSON, that's expected too
+ pass
+
+
+async def test_http_ok_with_text_is_enveloped(client: TestClient):
+ """Test that HTTPOk with text is properly enveloped."""
+ response = await client.get("/v1/raise_success_with_text")
+ assert response.status == status.HTTP_200_OK
+ assert response.reason == "I'm ok"
+
+ # Should be enveloped
+ payload = await response.json()
+ assert is_enveloped(payload)
+
+ # Check the content was preserved
+ data, error = unwrap_envelope(payload)
+ assert not error
+ assert data
+ assert data.get("ok") is True
+
+
+async def test_http_ok_with_raw_text_is_not_allowed(client: TestClient):
+ response = await client.get("/v1/raise_success_with_raw_text")
+ assert response.status == status.HTTP_500_INTERNAL_SERVER_ERROR
+
+
+async def test_exception_in_handler_returns_500(
+ client: TestClient, mocker: MockerFixture
+):
+ """Test that exceptions in the handler functions are caught and return 500."""
+
+ # Mock _handle_aiohttp_web_http_successful to raise an exception
+ def mocked_handler(*args, **kwargs):
+ msg = "Simulated error in handler"
+ raise ValueError(msg)
+
+ mocker.patch(
+ "servicelib.aiohttp.rest_middlewares._handle_aiohttp_web_http_successful",
+ side_effect=mocked_handler,
+ )
+
+ # Trigger a successful HTTP response that will be processed by our mocked handler
+ response = await client.get(
+ "/v1/raise_exception", params={"exc": web.HTTPOk.__name__}
+ )
+
+ # Should return 500 since our handler raised an exception
+ assert response.status == status.HTTP_500_INTERNAL_SERVER_ERROR
+
+ # Check that the response is properly enveloped
+ payload = await response.json()
+ assert is_enveloped(payload)
+
+ # Verify error details
+ data, error = unwrap_envelope(payload)
+ assert not data
+ assert error
+ assert error.get("status") == status.HTTP_500_INTERNAL_SERVER_ERROR
+
+ # Make sure there are no detailed error logs in production mode
+ assert not error.get("errors")
+ assert not error.get("logs")
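# A minimal sketch of the exception-to-status mapping that the two tests above
# exercise (NotImplementedError -> 501, TimeoutError -> 504, everything else -> 500).
# The names _EXC_TO_STATUS and _to_http_error are assumptions for illustration only;
# the middleware's real internals are not shown in this diff.
from aiohttp import web

_EXC_TO_STATUS: dict[type[Exception], type[web.HTTPException]] = {
    NotImplementedError: web.HTTPNotImplemented,  # 501 NOT IMPLEMENTED
    TimeoutError: web.HTTPGatewayTimeout,  # 504 GATEWAY TIMEOUT
}


def _to_http_error(exc: Exception) -> web.HTTPException:
    # Anything not explicitly mapped falls back to 500 INTERNAL SERVER ERROR
    http_cls = _EXC_TO_STATUS.get(type(exc), web.HTTPInternalServerError)
    return http_cls(reason="Unexpected error")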
diff --git a/packages/service-library/tests/aiohttp/test_rest_responses.py b/packages/service-library/tests/aiohttp/test_rest_responses.py
index 8c80f86b2cdf..25cb9e3025d1 100644
--- a/packages/service-library/tests/aiohttp/test_rest_responses.py
+++ b/packages/service-library/tests/aiohttp/test_rest_responses.py
@@ -62,7 +62,6 @@ def test_collected_http_errors_map(status_code: int, http_error_cls: type[HTTPEr
assert issubclass(http_error_cls, HTTPError)
-@pytest.mark.parametrize("skip_details", [True, False])
@pytest.mark.parametrize("error_code", [None, create_error_code(Exception("fake"))])
@pytest.mark.parametrize(
"http_error_cls",
@@ -88,7 +87,7 @@ def test_collected_http_errors_map(status_code: int, http_error_cls: type[HTTPEr
],
)
def tests_exception_to_response(
- skip_details: bool, error_code: ErrorCodeStr | None, http_error_cls: type[HTTPError]
+ error_code: ErrorCodeStr | None, http_error_cls: type[HTTPError]
):
expected_status_reason = "SHORT REASON"
expected_error_message = "Something whent wrong !"
@@ -99,8 +98,6 @@ def tests_exception_to_response(
error_message=expected_error_message,
status_reason=expected_status_reason,
http_error_cls=http_error_cls,
- skip_internal_error_details=skip_details
- and (http_error_cls == web.HTTPInternalServerError),
error_code=error_code,
)
diff --git a/packages/service-library/tests/aiohttp/test_tracing.py b/packages/service-library/tests/aiohttp/test_tracing.py
index 2621751f344a..c9fb30d7de85 100644
--- a/packages/service-library/tests/aiohttp/test_tracing.py
+++ b/packages/service-library/tests/aiohttp/test_tracing.py
@@ -3,15 +3,19 @@
# pylint: disable=unused-variable
import importlib
-from collections.abc import Callable, Iterator
+from collections.abc import Callable
+from functools import partial
from typing import Any
import pip
import pytest
from aiohttp import web
from aiohttp.test_utils import TestClient
+from opentelemetry import trace
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from pydantic import ValidationError
from servicelib.aiohttp.tracing import get_tracing_lifespan
+from servicelib.tracing import _OSPARC_TRACE_ID_HEADER
from settings_library.tracing import TracingSettings
@@ -51,16 +55,16 @@ def set_and_clean_settings_env_vars(
indirect=True,
)
async def test_valid_tracing_settings(
+ mock_otel_collector: InMemorySpanExporter,
aiohttp_client: Callable,
set_and_clean_settings_env_vars: Callable,
tracing_settings_in,
- uninstrument_opentelemetry: Iterator[None],
) -> TestClient:
app = web.Application()
service_name = "simcore_service_webserver"
tracing_settings = TracingSettings()
async for _ in get_tracing_lifespan(
- app, service_name=service_name, tracing_settings=tracing_settings
+ app=app, service_name=service_name, tracing_settings=tracing_settings
)(app):
pass
@@ -75,10 +79,10 @@ async def test_valid_tracing_settings(
indirect=True,
)
async def test_invalid_tracing_settings(
+ mock_otel_collector: InMemorySpanExporter,
aiohttp_client: Callable,
set_and_clean_settings_env_vars: Callable,
tracing_settings_in,
- uninstrument_opentelemetry: Iterator[None],
) -> TestClient:
with pytest.raises(ValidationError):
TracingSettings()
@@ -124,11 +128,11 @@ def manage_package(request):
indirect=True,
)
async def test_tracing_setup_package_detection(
+ mock_otel_collector: InMemorySpanExporter,
aiohttp_client: Callable,
set_and_clean_settings_env_vars: Callable[[], None],
tracing_settings_in: Callable[[], dict[str, Any]],
manage_package,
- uninstrument_opentelemetry: Iterator[None],
):
package_name = manage_package
importlib.import_module(package_name)
@@ -137,14 +141,63 @@ async def test_tracing_setup_package_detection(
service_name = "simcore_service_webserver"
tracing_settings = TracingSettings()
async for _ in get_tracing_lifespan(
- app,
+ app=app,
service_name=service_name,
tracing_settings=tracing_settings,
)(app):
# idempotency
async for _ in get_tracing_lifespan(
- app,
+ app=app,
service_name=service_name,
tracing_settings=tracing_settings,
)(app):
pass
+
+
+@pytest.mark.parametrize(
+ "tracing_settings_in",
+ [
+ ("http://opentelemetry-collector", 4318),
+ ],
+ indirect=True,
+)
+@pytest.mark.parametrize(
+ "server_response", [web.Response(text="Hello, world!"), web.HTTPNotFound()]
+)
+async def test_trace_id_in_response_header(
+ mock_otel_collector: InMemorySpanExporter,
+ aiohttp_client: Callable,
+ set_and_clean_settings_env_vars: Callable,
+ tracing_settings_in,
+ server_response: web.Response | web.HTTPException,
+) -> None:
+ app = web.Application()
+ service_name = "simcore_service_webserver"
+ tracing_settings = TracingSettings()
+
+ async def handler(handler_data: dict, request: web.Request) -> web.Response:
+ current_span = trace.get_current_span()
+ handler_data[_OSPARC_TRACE_ID_HEADER] = format(
+ current_span.get_span_context().trace_id, "032x"
+ )
+ if isinstance(server_response, web.HTTPException):
+ raise server_response
+ return server_response
+
+ handler_data = dict()
+ app.router.add_get("/", partial(handler, handler_data))
+
+ async for _ in get_tracing_lifespan(
+ app=app,
+ service_name=service_name,
+ tracing_settings=tracing_settings,
+ add_response_trace_id_header=True,
+ )(app):
+ client = await aiohttp_client(app)
+ response = await client.get("/")
+ assert _OSPARC_TRACE_ID_HEADER in response.headers
+ trace_id = response.headers[_OSPARC_TRACE_ID_HEADER]
+ assert len(trace_id) == 32 # Ensure trace ID is a 32-character hex string
+ assert (
+ trace_id == handler_data[_OSPARC_TRACE_ID_HEADER]
+ ) # Ensure trace IDs match
diff --git a/packages/service-library/tests/aiohttp/with_postgres/test_aiopg_utils.py b/packages/service-library/tests/aiohttp/with_postgres/test_aiopg_utils.py
index 1a2d453b4e6b..6a141f50a29b 100644
--- a/packages/service-library/tests/aiohttp/with_postgres/test_aiopg_utils.py
+++ b/packages/service-library/tests/aiohttp/with_postgres/test_aiopg_utils.py
@@ -78,7 +78,7 @@ async def test_create_pg_engine(postgres_service_with_fake_data: DataSourceName)
dsn = postgres_service_with_fake_data
# using raw call and dsn.asdict to fill create_engine arguments!
- engine1 = await aiopg.sa.create_engine(minsize=1, maxsize=5, **asdict(dsn))
+ engine1 = await aiopg.sa.create_engine(minsize=2, maxsize=5, **asdict(dsn))
# just creating engine
engine2 = await create_pg_engine(dsn)
@@ -114,7 +114,7 @@ async def test_engine_when_idle_for_some_time():
database="db",
application_name="test-app",
)
- engine = await create_pg_engine(dsn, minsize=1, maxsize=1)
+ engine = await create_pg_engine(dsn, minsize=2, maxsize=2)
init_pg_tables(dsn, metadata)
assert not engine.closed # does not mean anything!!!
# pylint: disable=no-value-for-parameter
diff --git a/packages/service-library/tests/conftest.py b/packages/service-library/tests/conftest.py
index 979a3731071d..f06739423b20 100644
--- a/packages/service-library/tests/conftest.py
+++ b/packages/service-library/tests/conftest.py
@@ -1,10 +1,12 @@
+# pylint: disable=contextmanager-generator-missing-cleanup
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-import
+import asyncio
import sys
from collections.abc import AsyncIterable, AsyncIterator, Callable
-from contextlib import AbstractAsyncContextManager, asynccontextmanager
+from contextlib import AbstractAsyncContextManager, asynccontextmanager, suppress
from copy import deepcopy
from pathlib import Path
from typing import Any
@@ -12,17 +14,19 @@
import pytest
import servicelib
from faker import Faker
-from pytest_mock import MockerFixture
from servicelib.redis import RedisClientSDK, RedisClientsManager, RedisManagerDBConfig
from settings_library.redis import RedisDatabase, RedisSettings
pytest_plugins = [
+ "pytest_simcore.asyncio_event_loops",
"pytest_simcore.docker_compose",
"pytest_simcore.docker_registry",
"pytest_simcore.docker_swarm",
"pytest_simcore.docker",
"pytest_simcore.environment_configs",
"pytest_simcore.file_extra",
+ "pytest_simcore.logging",
+ "pytest_simcore.long_running_tasks",
"pytest_simcore.pytest_global_environs",
"pytest_simcore.rabbit_service",
"pytest_simcore.redis_service",
@@ -67,12 +71,10 @@ def fake_data_dict(faker: Faker) -> dict[str, Any]:
return data
-@pytest.fixture
-async def get_redis_client_sdk(
- mock_redis_socket_timeout: None,
- mocker: MockerFixture,
- redis_service: RedisSettings,
-) -> AsyncIterable[
+@asynccontextmanager
+async def _get_redis_client_sdk(
+ redis_settings: RedisSettings,
+) -> AsyncIterator[
Callable[[RedisDatabase], AbstractAsyncContextManager[RedisClientSDK]]
]:
@asynccontextmanager
@@ -80,17 +82,19 @@ async def _(
database: RedisDatabase,
decode_response: bool = True, # noqa: FBT002
) -> AsyncIterator[RedisClientSDK]:
- redis_resources_dns = redis_service.build_redis_dsn(database)
+ redis_resources_dns = redis_settings.build_redis_dsn(database)
client = RedisClientSDK(
redis_resources_dns, decode_responses=decode_response, client_name="pytest"
)
+ await client.setup()
assert client
assert client.redis_dsn == redis_resources_dns
assert client.client_name == "pytest"
yield client
- await client.shutdown()
+ with suppress(TimeoutError):
+ await asyncio.wait_for(client.shutdown(), timeout=5.0)
async def _cleanup_redis_data(clients_manager: RedisClientsManager) -> None:
for db in RedisDatabase:
@@ -98,7 +102,7 @@ async def _cleanup_redis_data(clients_manager: RedisClientsManager) -> None:
async with RedisClientsManager(
{RedisManagerDBConfig(database=db) for db in RedisDatabase},
- redis_service,
+ redis_settings,
client_name="pytest",
) as clients_manager:
await _cleanup_redis_data(clients_manager)
@@ -106,58 +110,21 @@ async def _cleanup_redis_data(clients_manager: RedisClientsManager) -> None:
await _cleanup_redis_data(clients_manager)
-@pytest.fixture()
-def uninstrument_opentelemetry():
- yield
- try:
- from opentelemetry.instrumentation.redis import RedisInstrumentor
-
- RedisInstrumentor().uninstrument()
- except ImportError:
- pass
- try:
- from opentelemetry.instrumentation.botocore import BotocoreInstrumentor
-
- BotocoreInstrumentor().uninstrument()
- except ImportError:
- pass
- try:
- from opentelemetry.instrumentation.requests import RequestsInstrumentor
-
- RequestsInstrumentor().uninstrument()
- except ImportError:
- pass
- try:
- from opentelemetry.instrumentation.aiopg import AiopgInstrumentor
-
- AiopgInstrumentor().uninstrument()
- except ImportError:
- pass
- try:
- from opentelemetry.instrumentation.asyncpg import AsyncPGInstrumentor
-
- AsyncPGInstrumentor().uninstrument()
- except ImportError:
- pass
- try:
- from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
-
- FastAPIInstrumentor().uninstrument()
- except ImportError:
- pass
- try:
- from opentelemetry.instrumentation.aiohttp_client import (
- AioHttpClientInstrumentor,
- )
+@pytest.fixture
+async def get_redis_client_sdk(
+ use_in_memory_redis: RedisSettings,
+) -> AsyncIterable[
+ Callable[[RedisDatabase], AbstractAsyncContextManager[RedisClientSDK]]
+]:
+ async with _get_redis_client_sdk(use_in_memory_redis) as client:
+ yield client
- AioHttpClientInstrumentor().uninstrument()
- except ImportError:
- pass
- try:
- from opentelemetry.instrumentation.aiohttp_server import (
- AioHttpServerInstrumentor,
- )
- AioHttpServerInstrumentor().uninstrument()
- except ImportError:
- pass
+@pytest.fixture
+async def get_in_process_redis_client_sdk(
+ redis_service: RedisSettings,
+) -> AsyncIterable[
+ Callable[[RedisDatabase], AbstractAsyncContextManager[RedisClientSDK]]
+]:
+ async with _get_redis_client_sdk(redis_service) as client:
+ yield client
diff --git a/packages/service-library/tests/deferred_tasks/conftest.py b/packages/service-library/tests/deferred_tasks/conftest.py
index 00881e614715..e5d8849e7e17 100644
--- a/packages/service-library/tests/deferred_tasks/conftest.py
+++ b/packages/service-library/tests/deferred_tasks/conftest.py
@@ -8,11 +8,11 @@
@pytest.fixture
async def redis_client_sdk_deferred_tasks(
- get_redis_client_sdk: Callable[
+ get_in_process_redis_client_sdk: Callable[
[RedisDatabase, bool], AbstractAsyncContextManager[RedisClientSDK]
- ]
+ ],
) -> AsyncIterator[RedisClientSDK]:
- async with get_redis_client_sdk(
+ async with get_in_process_redis_client_sdk(
RedisDatabase.DEFERRED_TASKS, decode_response=False
) as client:
yield client
diff --git a/packages/service-library/tests/deferred_tasks/example_app.py b/packages/service-library/tests/deferred_tasks/example_app.py
index 9adb654e8964..991aa2efe8e2 100644
--- a/packages/service-library/tests/deferred_tasks/example_app.py
+++ b/packages/service-library/tests/deferred_tasks/example_app.py
@@ -95,6 +95,7 @@ def __init__(
)
async def setup(self) -> None:
+ await self._redis_client.setup()
await self._manager.setup()
diff --git a/packages/service-library/tests/deferred_tasks/test__base_deferred_handler.py b/packages/service-library/tests/deferred_tasks/test__base_deferred_handler.py
index cc19133b6b29..d25c7824752c 100644
--- a/packages/service-library/tests/deferred_tasks/test__base_deferred_handler.py
+++ b/packages/service-library/tests/deferred_tasks/test__base_deferred_handler.py
@@ -34,7 +34,6 @@
pytest_simcore_core_services_selection = [
"rabbit",
- "redis",
]
@@ -43,20 +42,23 @@ class MockKeys(StrAutoEnum):
GET_TIMEOUT = auto()
START_DEFERRED = auto()
ON_DEFERRED_CREATED = auto()
- RUN_DEFERRED = auto()
+ RUN_DEFERRED_BEFORE_HANDLER = auto()
+ RUN_DEFERRED_AFTER_HANDLER = auto()
ON_DEFERRED_RESULT = auto()
ON_FINISHED_WITH_ERROR = auto()
+ ON_CANCELLED = auto()
@pytest.fixture
async def redis_client_sdk(
- redis_service: RedisSettings,
+ use_in_memory_redis: RedisSettings,
) -> AsyncIterable[RedisClientSDK]:
sdk = RedisClientSDK(
- redis_service.build_redis_dsn(RedisDatabase.DEFERRED_TASKS),
+ use_in_memory_redis.build_redis_dsn(RedisDatabase.DEFERRED_TASKS),
decode_responses=False,
client_name="pytest",
)
+ await sdk.setup()
yield sdk
await sdk.shutdown()
@@ -122,14 +124,19 @@ async def on_created(
@classmethod
async def run(cls, context: DeferredContext) -> Any:
+ mocks[MockKeys.RUN_DEFERRED_BEFORE_HANDLER](context)
result = await run(context)
- mocks[MockKeys.RUN_DEFERRED](context)
+ mocks[MockKeys.RUN_DEFERRED_AFTER_HANDLER](context)
return result
@classmethod
async def on_result(cls, result: Any, context: DeferredContext) -> None:
mocks[MockKeys.ON_DEFERRED_RESULT](result, context)
+ @classmethod
+ async def on_cancelled(cls, context: DeferredContext) -> None:
+ mocks[MockKeys.ON_CANCELLED](context)
+
@classmethod
async def on_finished_with_error(
cls, error: TaskResultError, context: DeferredContext
@@ -229,8 +236,8 @@ async def _run_ok(_: DeferredContext) -> Any:
await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_CREATED, count=1)
assert TaskUID(mocks[MockKeys.ON_DEFERRED_CREATED].call_args_list[0].args[0])
- await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED, count=1)
- mocks[MockKeys.RUN_DEFERRED].assert_called_once_with(context)
+ await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED_AFTER_HANDLER, count=1)
+ mocks[MockKeys.RUN_DEFERRED_AFTER_HANDLER].assert_called_once_with(context)
await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=1)
mocks[MockKeys.ON_DEFERRED_RESULT].assert_called_once_with(run_return, context)
@@ -282,7 +289,7 @@ async def _run_raises(_: DeferredContext) -> None:
count=retry_count,
)
- await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED, count=0)
+ await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED_AFTER_HANDLER, count=0)
await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=0)
await _assert_log_message(
@@ -319,8 +326,10 @@ async def _run_to_cancel(_: DeferredContext) -> None:
await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_CREATED, count=1)
task_uid = TaskUID(mocks[MockKeys.ON_DEFERRED_CREATED].call_args_list[0].args[0])
+ await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED_BEFORE_HANDLER, count=1)
await mocked_deferred_handler.cancel(task_uid)
+ await _assert_mock_call(mocks, key=MockKeys.ON_CANCELLED, count=1)
await _assert_mock_call(mocks, key=MockKeys.ON_FINISHED_WITH_ERROR, count=0)
assert (
@@ -330,7 +339,7 @@ async def _run_to_cancel(_: DeferredContext) -> None:
== 0
)
- await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED, count=0)
+ await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED_AFTER_HANDLER, count=0)
await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=0)
await _assert_log_message(
@@ -450,7 +459,7 @@ async def _run_that_times_out(_: DeferredContext) -> None:
for entry in mocks[MockKeys.ON_FINISHED_WITH_ERROR].call_args_list:
assert "builtins.TimeoutError" in entry.args[0].error
- await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED, count=0)
+ await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED_AFTER_HANDLER, count=0)
await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=0)
diff --git a/packages/service-library/tests/deferred_tasks/test__redis_task_tracker.py b/packages/service-library/tests/deferred_tasks/test__redis_task_tracker.py
index 366759e22d3b..515ed901e98d 100644
--- a/packages/service-library/tests/deferred_tasks/test__redis_task_tracker.py
+++ b/packages/service-library/tests/deferred_tasks/test__redis_task_tracker.py
@@ -22,7 +22,8 @@ def task_schedule() -> TaskScheduleModel:
return TypeAdapter(TaskScheduleModel).validate_python(
{
"timeout": timedelta(seconds=1),
- "execution_attempts": 1,
+ "total_attempts": 1,
+ "execution_attempts": 2,
"class_unique_reference": "mock",
"start_context": {},
"state": TaskState.SCHEDULED,
diff --git a/packages/service-library/tests/deferred_tasks/test__utils.py b/packages/service-library/tests/deferred_tasks/test__utils.py
index db3f32554b32..8ac30f83a720 100644
--- a/packages/service-library/tests/deferred_tasks/test__utils.py
+++ b/packages/service-library/tests/deferred_tasks/test__utils.py
@@ -57,7 +57,7 @@ async def _() -> AsyncIterator[RabbitBroker]:
@pytest.fixture
def rabbit_exchange() -> RabbitExchange:
- return RabbitExchange("test_exchange")
+ return RabbitExchange("test_exchange", durable=True, auto_delete=True)
async def _assert_call_count(
@@ -256,7 +256,12 @@ async def test_fan_out_exchange_message_delivery(
handler_1_call_count = Mock()
handler_2_call_count = Mock()
- fan_out_exchange = RabbitExchange("test_fan_out_exchange", type=ExchangeType.FANOUT)
+ fan_out_exchange = RabbitExchange(
+ "test_fan_out_exchange",
+ type=ExchangeType.FANOUT,
+ durable=True,
+ auto_delete=True,
+ )
@rabbit_broker.subscriber(queue="handler_1", exchange=fan_out_exchange, retry=True)
async def handler_1(sleep_duration: float) -> None:
diff --git a/packages/service-library/tests/deferred_tasks/test_deferred_tasks.py b/packages/service-library/tests/deferred_tasks/test_deferred_tasks.py
index 7d11d2571539..0bb6254542e9 100644
--- a/packages/service-library/tests/deferred_tasks/test_deferred_tasks.py
+++ b/packages/service-library/tests/deferred_tasks/test_deferred_tasks.py
@@ -3,7 +3,6 @@
import asyncio
import contextlib
-import datetime
import itertools
import json
import random
@@ -19,7 +18,6 @@
from common_library.json_serialization import json_dumps
from common_library.serialization import model_dump_with_secrets
from pydantic import NonNegativeFloat, NonNegativeInt
-from pytest_mock import MockerFixture
from servicelib.rabbitmq import RabbitMQClient
from servicelib.redis import RedisClientSDK
from servicelib.sequences_utils import partition_gen
@@ -333,8 +331,7 @@ async def rabbit_client(
class ClientWithPingProtocol(Protocol):
- async def ping(self) -> bool:
- ...
+ async def ping(self) -> bool: ...
class ServiceManager:
@@ -386,19 +383,10 @@ async def pause_redis(self) -> AsyncIterator[None]:
yield
-@pytest.fixture
-def mock_default_socket_timeout(mocker: MockerFixture) -> None:
- mocker.patch(
- "servicelib.redis._client.DEFAULT_SOCKET_TIMEOUT",
- datetime.timedelta(seconds=0.25),
- )
-
-
@pytest.mark.parametrize("max_workers", [10])
@pytest.mark.parametrize("deferred_tasks_to_start", [100])
@pytest.mark.parametrize("service", ["rabbit", "redis"])
async def test_workflow_with_third_party_services_outages(
- mock_default_socket_timeout: None,
paused_container: Callable[[str], AbstractAsyncContextManager[None]],
redis_client_sdk_deferred_tasks: RedisClientSDK,
rabbit_client: RabbitMQClient,
diff --git a/packages/service-library/tests/fastapi/conftest.py b/packages/service-library/tests/fastapi/conftest.py
index f8811ca04f5a..9074321fa9b3 100644
--- a/packages/service-library/tests/fastapi/conftest.py
+++ b/packages/service-library/tests/fastapi/conftest.py
@@ -11,7 +11,10 @@
from fastapi import APIRouter, FastAPI
from fastapi.params import Query
from httpx import ASGITransport, AsyncClient
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from pydantic.types import PositiveFloat
+from pytest_mock import MockerFixture
@pytest.fixture
@@ -55,3 +58,13 @@ def go() -> int:
return cast(int, s.getsockname()[1])
return go
+
+
+@pytest.fixture
+def mock_otel_collector(mocker: MockerFixture) -> InMemorySpanExporter:
+ memory_exporter = InMemorySpanExporter()
+ span_processor = SimpleSpanProcessor(memory_exporter)
+ mocker.patch(
+ "servicelib.fastapi.tracing._create_span_processor", return_value=span_processor
+ )
+ return memory_exporter
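# A minimal usage sketch for the mock_otel_collector fixture above: because the span
# processor is backed by an InMemorySpanExporter, a test can read the captured spans
# via get_finished_spans() without a real collector. The test name and assertion
# below are illustrative assumptions, not code from this PR.
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter


async def test_spans_are_recorded(mock_otel_collector: InMemorySpanExporter) -> None:
    # ... exercise the instrumented app here ...
    finished_spans = mock_otel_collector.get_finished_spans()
    assert all(span.name for span in finished_spans)  # every captured span is named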
diff --git a/packages/service-library/tests/fastapi/long_running_tasks/conftest.py b/packages/service-library/tests/fastapi/long_running_tasks/conftest.py
index d43a7e445c10..f10a27c322ac 100644
--- a/packages/service-library/tests/fastapi/long_running_tasks/conftest.py
+++ b/packages/service-library/tests/fastapi/long_running_tasks/conftest.py
@@ -9,13 +9,24 @@
from fastapi import FastAPI
from httpx import ASGITransport, AsyncClient
from servicelib.fastapi import long_running_tasks
+from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
@pytest.fixture
-async def bg_task_app(router_prefix: str) -> FastAPI:
+async def bg_task_app(
+ router_prefix: str, redis_service: RedisSettings, rabbit_service: RabbitSettings
+) -> FastAPI:
app = FastAPI()
- long_running_tasks.server.setup(app, router_prefix=router_prefix)
+ long_running_tasks.server.setup(
+ app,
+ redis_settings=redis_service,
+ rabbit_settings=rabbit_service,
+ lrt_namespace="test",
+ router_prefix=router_prefix,
+ )
return app
@@ -27,3 +38,14 @@ async def async_client(bg_task_app: FastAPI) -> AsyncIterable[AsyncClient]:
headers={"Content-Type": "application/json"},
) as client:
yield client
+
+
+@pytest.fixture
+async def rabbitmq_rpc_client(
+ rabbit_service: RabbitSettings,
+) -> AsyncIterable[RabbitMQRPCClient]:
+ client = await RabbitMQRPCClient.create(
+ client_name="test-lrt-rpc-client", settings=rabbit_service
+ )
+ yield client
+ await client.close()
diff --git a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_client.py b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_client.py
index 02d392126cbf..42f76a58f724 100644
--- a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_client.py
+++ b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_client.py
@@ -8,7 +8,7 @@
@pytest.mark.parametrize(
"error_class, error_args",
[
- (HTTPError, dict(message="")),
+ (HTTPError, {"message": ""}),
],
)
async def test_retry_on_errors(
diff --git a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks.py b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks.py
index 84146c6b0dc6..1b72713dbd5c 100644
--- a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks.py
+++ b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks.py
@@ -12,27 +12,48 @@
import asyncio
import json
from collections.abc import AsyncIterator, Awaitable, Callable
-from typing import Final
+from typing import Annotated, Final
import pytest
from asgi_lifespan import LifespanManager
from fastapi import APIRouter, Depends, FastAPI, status
from httpx import AsyncClient
from pydantic import TypeAdapter
-from servicelib.fastapi import long_running_tasks
-from servicelib.long_running_tasks._models import TaskGet, TaskId
-from servicelib.long_running_tasks._task import TaskContext
+from servicelib.fastapi.long_running_tasks._manager import FastAPILongRunningManager
+from servicelib.fastapi.long_running_tasks.client import setup as setup_client
+from servicelib.fastapi.long_running_tasks.server import (
+ get_long_running_manager,
+)
+from servicelib.fastapi.long_running_tasks.server import setup as setup_server
+from servicelib.long_running_tasks import lrt_api
+from servicelib.long_running_tasks.models import (
+ TaskGet,
+ TaskId,
+ TaskProgress,
+ TaskStatus,
+)
+from servicelib.long_running_tasks.task import TaskContext, TaskRegistry
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
from tenacity.asyncio import AsyncRetrying
from tenacity.retry import retry_if_exception_type
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed
from yarl import URL
+pytest_simcore_core_services_selection = [
+ "rabbit",
+]
+
ITEM_PUBLISH_SLEEP: Final[float] = 0.1
+class _TestingError(Exception):
+ pass
+
+
async def _string_list_task(
- task_progress: long_running_tasks.server.TaskProgress,
+ progress: TaskProgress,
num_strings: int,
sleep_time: float,
fail: bool,
@@ -41,13 +62,17 @@ async def _string_list_task(
for index in range(num_strings):
generated_strings.append(f"{index}")
await asyncio.sleep(sleep_time)
- task_progress.update(message="generated item", percent=index / num_strings)
+ await progress.update(message="generated item", percent=index / num_strings)
if fail:
- raise RuntimeError("We were asked to fail!!")
+ msg = "We were asked to fail!!"
+ raise _TestingError(msg)
return generated_strings
+TaskRegistry.register(_string_list_task, allowed_errors=(_TestingError,))
+
+
@pytest.fixture
def server_routes() -> APIRouter:
routes = APIRouter()
@@ -58,31 +83,41 @@ def server_routes() -> APIRouter:
async def create_string_list_task(
num_strings: int,
sleep_time: float,
+ long_running_manager: Annotated[
+ FastAPILongRunningManager, Depends(get_long_running_manager)
+ ],
+ *,
fail: bool = False,
- task_manager: long_running_tasks.server.TasksManager = Depends(
- long_running_tasks.server.get_tasks_manager
- ),
- ) -> long_running_tasks.server.TaskId:
- task_id = long_running_tasks.server.start_task(
- task_manager,
- _string_list_task,
+ ) -> TaskId:
+ return await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ _string_list_task.__name__,
num_strings=num_strings,
sleep_time=sleep_time,
fail=fail,
)
- return task_id
return routes
@pytest.fixture
-async def app(server_routes: APIRouter) -> AsyncIterator[FastAPI]:
+async def app(
+ server_routes: APIRouter,
+ use_in_memory_redis: RedisSettings,
+ rabbit_service: RabbitSettings,
+) -> AsyncIterator[FastAPI]:
# overrides fastapi/conftest.py:app
app = FastAPI(title="test app")
app.include_router(server_routes)
- long_running_tasks.server.setup(app)
- long_running_tasks.client.setup(app)
- async with LifespanManager(app):
+ setup_server(
+ app,
+ redis_settings=use_in_memory_redis,
+ rabbit_settings=rabbit_service,
+ lrt_namespace="test",
+ )
+ setup_client(app)
+ async with LifespanManager(app, startup_timeout=30, shutdown_timeout=30):
yield app
@@ -94,10 +129,7 @@ async def _caller(app: FastAPI, client: AsyncClient, **query_kwargs) -> TaskId:
)
resp = await client.post(f"{url}")
assert resp.status_code == status.HTTP_202_ACCEPTED
- task_id = TypeAdapter(long_running_tasks.server.TaskId).validate_python(
- resp.json()
- )
- return task_id
+ return TypeAdapter(TaskId).validate_python(resp.json())
return _caller
@@ -124,9 +156,7 @@ async def _waiter(
with attempt:
result = await client.get(f"{status_url}")
assert result.status_code == status.HTTP_200_OK
- task_status = long_running_tasks.server.TaskStatus.model_validate(
- result.json()
- )
+ task_status = TaskStatus.model_validate(result.json())
assert task_status
assert task_status.done
@@ -151,9 +181,7 @@ async def test_workflow(
with attempt:
result = await client.get(f"{status_url}")
assert result.status_code == status.HTTP_200_OK
- task_status = long_running_tasks.server.TaskStatus.model_validate(
- result.json()
- )
+ task_status = TaskStatus.model_validate(result.json())
assert task_status
progress_updates.append(
(task_status.task_progress.message, task_status.task_progress.percent)
@@ -194,7 +222,7 @@ async def test_workflow(
[
("GET", "get_task_status"),
("GET", "get_task_result"),
- ("DELETE", "cancel_and_delete_task"),
+ ("DELETE", "remove_task"),
],
)
async def test_get_task_wrong_task_id_raises_not_found(
@@ -218,7 +246,8 @@ async def test_failing_task_returns_error(
await wait_for_task(app, client, task_id, {})
# get the result
result_url = app.url_path_for("get_task_result", task_id=task_id)
- with pytest.raises(RuntimeError) as exec_info:
+
+ with pytest.raises(_TestingError) as exec_info:
await client.get(f"{result_url}")
assert f"{exec_info.value}" == "We were asked to fail!!"
@@ -243,7 +272,7 @@ async def test_cancel_task(
task_id = await start_long_running_task(app, client)
# cancel the task
- delete_url = app.url_path_for("cancel_and_delete_task", task_id=task_id)
+ delete_url = app.url_path_for("remove_task", task_id=task_id)
result = await client.delete(f"{delete_url}")
assert result.status_code == status.HTTP_204_NO_CONTENT
diff --git a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks_context_manager.py b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks_context_manager.py
index b0db697a6ad9..aeab8876dd9c 100644
--- a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks_context_manager.py
+++ b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks_context_manager.py
@@ -3,7 +3,7 @@
import asyncio
from collections.abc import AsyncIterable
-from typing import Final
+from typing import Annotated, Final
import pytest
from asgi_lifespan import LifespanManager
@@ -11,26 +11,33 @@
from httpx import AsyncClient
from pydantic import AnyHttpUrl, PositiveFloat, TypeAdapter
from servicelib.fastapi.long_running_tasks._context_manager import _ProgressManager
+from servicelib.fastapi.long_running_tasks._manager import FastAPILongRunningManager
from servicelib.fastapi.long_running_tasks.client import (
- Client,
- ProgressMessage,
- ProgressPercent,
+ HttpClient,
periodic_task_result,
)
from servicelib.fastapi.long_running_tasks.client import setup as setup_client
-from servicelib.fastapi.long_running_tasks.server import (
- TaskId,
- TaskProgress,
- TasksManager,
- get_tasks_manager,
-)
+from servicelib.fastapi.long_running_tasks.server import get_long_running_manager
from servicelib.fastapi.long_running_tasks.server import setup as setup_server
-from servicelib.fastapi.long_running_tasks.server import (
- start_task,
-)
-from servicelib.long_running_tasks._errors import (
+from servicelib.long_running_tasks import lrt_api
+from servicelib.long_running_tasks.errors import (
+ GenericClientError,
TaskClientTimeoutError,
+ TaskExceptionError,
)
+from servicelib.long_running_tasks.models import (
+ ProgressMessage,
+ ProgressPercent,
+ TaskId,
+ TaskProgress,
+)
+from servicelib.long_running_tasks.task import TaskRegistry
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
+
+pytest_simcore_core_services_selection = [
+ "rabbit",
+]
TASK_SLEEP_INTERVAL: Final[PositiveFloat] = 0.1
@@ -38,21 +45,33 @@
async def _assert_task_removed(
- async_client: AsyncClient, task_id: TaskId, router_prefix: str
+ http_client: HttpClient, task_id: TaskId, router_prefix: str
) -> None:
- result = await async_client.get(f"{router_prefix}/tasks/{task_id}")
- assert result.status_code == status.HTTP_404_NOT_FOUND
+ with pytest.raises(GenericClientError, match=f"No task with {task_id} found"):
+ await http_client.get_task_status(task_id)
-async def a_test_task(task_progress: TaskProgress) -> int:
+async def a_test_task(progress: TaskProgress) -> int:
+ _ = progress
await asyncio.sleep(TASK_SLEEP_INTERVAL)
return 42
-async def a_failing_test_task(task_progress: TaskProgress) -> None:
+TaskRegistry.register(a_test_task)
+
+
+class _TestingError(Exception):
+ pass
+
+
+async def a_failing_test_task(progress: TaskProgress) -> None:
+ _ = progress
await asyncio.sleep(TASK_SLEEP_INTERVAL)
msg = "I am failing as requested"
- raise RuntimeError(msg)
+ raise _TestingError(msg)
+
+
+TaskRegistry.register(a_failing_test_task)
@pytest.fixture
@@ -61,33 +80,52 @@ def user_routes() -> APIRouter:
@router.get("/api/success", status_code=status.HTTP_200_OK)
async def create_task_user_defined_route(
- tasks_manager: TasksManager = Depends(get_tasks_manager),
+ long_running_manager: Annotated[
+ FastAPILongRunningManager, Depends(get_long_running_manager)
+ ],
) -> TaskId:
- task_id = start_task(tasks_manager, task=a_test_task)
- return task_id
+ return await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_test_task.__name__,
+ )
@router.get("/api/failing", status_code=status.HTTP_200_OK)
async def create_task_which_fails(
- task_manager: TasksManager = Depends(get_tasks_manager),
+ long_running_manager: Annotated[
+ FastAPILongRunningManager, Depends(get_long_running_manager)
+ ],
) -> TaskId:
- task_id = start_task(task_manager, task=a_failing_test_task)
- return task_id
+ return await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_failing_test_task.__name__,
+ )
return router
@pytest.fixture
async def bg_task_app(
- user_routes: APIRouter, router_prefix: str
+ user_routes: APIRouter,
+ router_prefix: str,
+ use_in_memory_redis: RedisSettings,
+ rabbit_service: RabbitSettings,
) -> AsyncIterable[FastAPI]:
app = FastAPI()
app.include_router(user_routes)
- setup_server(app, router_prefix=router_prefix)
+ setup_server(
+ app,
+ router_prefix=router_prefix,
+ redis_settings=use_in_memory_redis,
+ rabbit_settings=rabbit_service,
+ lrt_namespace="test",
+ )
setup_client(app, router_prefix=router_prefix)
- async with LifespanManager(app):
+ async with LifespanManager(app, startup_timeout=30, shutdown_timeout=30):
yield app
@@ -96,39 +134,48 @@ def mock_task_id() -> TaskId:
return TypeAdapter(TaskId).validate_python("fake_task_id")
+@pytest.fixture()
+def http_client(bg_task_app: FastAPI, async_client: AsyncClient) -> HttpClient:
+ url = TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io/")
+ return HttpClient(app=bg_task_app, async_client=async_client, base_url=f"{url}")
+
+
+async def _create_and_get_taskid(async_client: AsyncClient, *, endpoint: str) -> TaskId:
+ result = await async_client.get(f"/api/{endpoint}")
+ assert result.status_code == status.HTTP_200_OK, result.text
+ task_id: TaskId = result.json()
+ return task_id
+
+
async def test_task_result(
- bg_task_app: FastAPI, async_client: AsyncClient, router_prefix: str
+ async_client: AsyncClient,
+ http_client: HttpClient,
+ router_prefix: str,
) -> None:
- result = await async_client.get("/api/success")
- assert result.status_code == status.HTTP_200_OK, result.text
- task_id = result.json()
+ task_id = await _create_and_get_taskid(async_client, endpoint="success")
- url = TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io/")
- client = Client(app=bg_task_app, async_client=async_client, base_url=url)
async with periodic_task_result(
- client,
+ http_client,
task_id,
task_timeout=10,
status_poll_interval=TASK_SLEEP_INTERVAL / 3,
) as result:
assert result == 42
- await _assert_task_removed(async_client, task_id, router_prefix)
+ await _assert_task_removed(http_client, task_id, router_prefix)
async def test_task_result_times_out(
- bg_task_app: FastAPI, async_client: AsyncClient, router_prefix: str
+ async_client: AsyncClient,
+ http_client: HttpClient,
+ router_prefix: str,
) -> None:
- result = await async_client.get("/api/success")
- assert result.status_code == status.HTTP_200_OK, result.text
- task_id = result.json()
+ task_id = await _create_and_get_taskid(async_client, endpoint="success")
- url = TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io/")
- client = Client(app=bg_task_app, async_client=async_client, base_url=url)
timeout = TASK_SLEEP_INTERVAL / 10
with pytest.raises(TaskClientTimeoutError) as exec_info:
async with periodic_task_result(
- client,
+ http_client,
task_id,
task_timeout=timeout,
status_poll_interval=TASK_SLEEP_INTERVAL / 3,
@@ -139,27 +186,26 @@ async def test_task_result_times_out(
== f"Timed out after {timeout} seconds while awaiting '{task_id}' to complete"
)
- await _assert_task_removed(async_client, task_id, router_prefix)
+ await _assert_task_removed(http_client, task_id, router_prefix)
async def test_task_result_task_result_is_an_error(
- bg_task_app: FastAPI, async_client: AsyncClient, router_prefix: str
+ bg_task_app: FastAPI,
+ async_client: AsyncClient,
+ http_client: HttpClient,
+ router_prefix: str,
) -> None:
- result = await async_client.get("/api/failing")
- assert result.status_code == status.HTTP_200_OK, result.text
- task_id = result.json()
+ task_id = await _create_and_get_taskid(async_client, endpoint="failing")
- url = TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io/")
- client = Client(app=bg_task_app, async_client=async_client, base_url=url)
- with pytest.raises(RuntimeError, match="I am failing as requested"):
+ with pytest.raises(TaskExceptionError, match="I am failing as requested"):
async with periodic_task_result(
- client,
+ http_client,
task_id,
task_timeout=10,
status_poll_interval=TASK_SLEEP_INTERVAL / 3,
):
pass
- await _assert_task_removed(async_client, task_id, router_prefix)
+ await _assert_task_removed(http_client, task_id, router_prefix)
@pytest.mark.parametrize("repeat", [1, 2, 10])
diff --git a/packages/service-library/tests/fastapi/test_cancellation_middleware.py b/packages/service-library/tests/fastapi/test_cancellation_middleware.py
index add93851f540..1eac0cc1ae63 100644
--- a/packages/service-library/tests/fastapi/test_cancellation_middleware.py
+++ b/packages/service-library/tests/fastapi/test_cancellation_middleware.py
@@ -2,6 +2,7 @@
import asyncio
import logging
+import threading
from collections.abc import Iterator
from threading import Thread
from unittest.mock import AsyncMock
@@ -9,6 +10,7 @@
import httpx
import pytest
import uvicorn
+import uvloop
from fastapi import APIRouter, BackgroundTasks, FastAPI
from pytest_simcore.helpers.logging_tools import log_context
from servicelib.fastapi.cancellation_middleware import RequestCancellationMiddleware
@@ -18,8 +20,11 @@
@pytest.fixture
-def server_done_event() -> asyncio.Event:
- return asyncio.Event()
+def server_done_event() -> threading.Event:
+    # This allows communicating an event between the thread where the server runs
+    # and the test thread. It is used to signal that the server has completed its task.
+    # WARNING: do not use asyncio.Event here as it is not thread-safe!
+ return threading.Event()
@pytest.fixture
@@ -29,7 +34,7 @@ def server_cancelled_mock() -> AsyncMock:
@pytest.fixture
def fastapi_router(
- server_done_event: asyncio.Event, server_cancelled_mock: AsyncMock
+ server_done_event: threading.Event, server_cancelled_mock: AsyncMock
) -> APIRouter:
router = APIRouter()
@@ -77,22 +82,29 @@ async def sleep_with_background_task(
def fastapi_app(fastapi_router: APIRouter) -> FastAPI:
app = FastAPI()
app.include_router(fastapi_router)
- app.add_middleware(RequestCancellationMiddleware)
+
+ app.add_middleware(RequestCancellationMiddleware) # Middleware under test
return app
@pytest.fixture
def uvicorn_server(fastapi_app: FastAPI) -> Iterator[URL]:
- random_port = unused_port()
+
+ server_host = "127.0.0.1"
+ server_port = unused_port()
+ server_url = f"http://{server_host}:{server_port}"
+
with log_context(
logging.INFO,
- msg=f"with uvicorn server on 127.0.0.1:{random_port}",
+ msg=f"with uvicorn server on {server_url}",
) as ctx:
+
config = uvicorn.Config(
fastapi_app,
- host="127.0.0.1",
- port=random_port,
+ host=server_host,
+ port=server_port,
log_level="error",
+ loop="uvloop",
)
server = uvicorn.Server(config)
@@ -102,20 +114,16 @@ def uvicorn_server(fastapi_app: FastAPI) -> Iterator[URL]:
@retry(wait=wait_fixed(0.1), stop=stop_after_delay(10), reraise=True)
def wait_for_server_ready() -> None:
- with httpx.Client() as client:
- response = client.get(f"http://127.0.1:{random_port}/")
- assert (
- response.is_success
- ), f"Server did not start successfully: {response.status_code} {response.text}"
+ response = httpx.get(f"{server_url}/")
+ assert (
+ response.is_success
+ ), f"Server did not start successfully: {response.status_code} {response.text}"
wait_for_server_ready()
- ctx.logger.info(
- "server ready at: %s",
- f"http://127.0.0.1:{random_port}",
- )
+ ctx.logger.info("server ready at: %s", server_url)
- yield URL(f"http://127.0.0.1:{random_port}")
+ yield URL(server_url)
server.should_exit = True
thread.join(timeout=10)
@@ -123,43 +131,50 @@ def wait_for_server_ready() -> None:
async def test_server_cancels_when_client_disconnects(
uvicorn_server: URL,
- server_done_event: asyncio.Event,
+ server_done_event: threading.Event,
server_cancelled_mock: AsyncMock,
):
+ # Implementation of RequestCancellationMiddleware is under test here
+ assert isinstance(asyncio.get_running_loop(), uvloop.Loop)
+
async with httpx.AsyncClient(base_url=f"{uvicorn_server}") as client:
- # check standard call still complete as expected
+        # 1. check a standard call still completes as expected
with log_context(logging.INFO, msg="client calling endpoint"):
response = await client.get("/sleep", params={"sleep_time": 0.1})
+
assert response.status_code == 200
assert response.json() == {"message": "Slept for 0.1 seconds"}
- async with asyncio.timeout(10):
- await server_done_event.wait()
+
+ server_done_event.wait(10)
server_done_event.clear()
- # check slow call get cancelled
+        # 2. check a slow call gets cancelled
with log_context(
logging.INFO, msg="client calling endpoint for cancellation"
) as ctx:
with pytest.raises(httpx.ReadTimeout):
- response = await client.get(
- "/sleep", params={"sleep_time": 10}, timeout=0.1
+ await client.get(
+ "/sleep",
+ params={"sleep_time": 10},
+                    timeout=0.1,  # <--- this forces the client to disconnect from the server!
)
ctx.logger.info("client disconnected from server")
- async with asyncio.timeout(5):
- await server_done_event.wait()
+            # request should have been cancelled after the ReadTimeout!
+ server_done_event.wait(5)
server_cancelled_mock.assert_called_once()
server_cancelled_mock.reset_mock()
server_done_event.clear()
+        # 3. check background tasks get cancelled as well, sadly
# NOTE: shows that FastAPI BackgroundTasks get cancelled too!
- # check background tasks get cancelled as well sadly
with log_context(logging.INFO, msg="client calling endpoint for cancellation"):
response = await client.get(
"/sleep-with-background-task",
params={"sleep_time": 2},
)
assert response.status_code == 200
- async with asyncio.timeout(5):
- await server_done_event.wait()
+
+        # request should have been cancelled after the ReadTimeout!
+ server_done_event.wait(5)
server_cancelled_mock.assert_called_once()
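The behaviour verified above rests on plain ASGI semantics: when the client goes away, the server emits an "http.disconnect" message on the request's receive channel, and a pure-ASGI middleware can watch for that message and cancel the downstream handler. The sketch below only illustrates that pattern; the class and helper names are invented and this is not the RequestCancellationMiddleware implementation. In the test above, the httpx ReadTimeout plays the role of the disconnecting client.

import asyncio

from starlette.types import ASGIApp, Receive, Scope, Send


class CancelOnDisconnectMiddleware:
    """Illustrative sketch: cancel the wrapped app's handler when the client disconnects."""

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        if scope["type"] != "http":
            await self.app(scope, receive, send)
            return

        queue: asyncio.Queue = asyncio.Queue()

        async def watch_receive() -> None:
            # forward every message to the app, but return once the client disconnects
            while True:
                message = await receive()
                await queue.put(message)
                if message["type"] == "http.disconnect":
                    return

        async def patched_receive():
            return await queue.get()

        watcher = asyncio.create_task(watch_receive())
        handler = asyncio.create_task(self.app(scope, patched_receive, send))
        try:
            done, _ = await asyncio.wait(
                {watcher, handler}, return_when=asyncio.FIRST_COMPLETED
            )
            if handler in done:
                handler.result()  # propagate the handler's outcome (re-raises errors)
            else:
                handler.cancel()  # client went away first: cancel the request handler
        finally:
            watcher.cancel()
            await asyncio.gather(watcher, handler, return_exceptions=True)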
diff --git a/packages/service-library/tests/fastapi/test_postgres_lifespan.py b/packages/service-library/tests/fastapi/test_postgres_lifespan.py
index 0c656c371876..07cc3077df1d 100644
--- a/packages/service-library/tests/fastapi/test_postgres_lifespan.py
+++ b/packages/service-library/tests/fastapi/test_postgres_lifespan.py
@@ -83,7 +83,6 @@ async def test_lifespan_postgres_database_in_an_app(
mock_create_async_engine_and_database_ready: MockType,
app_lifespan: LifespanManager,
):
-
app = FastAPI(lifespan=app_lifespan)
async with ASGILifespanManager(
@@ -93,7 +92,7 @@ async def test_lifespan_postgres_database_in_an_app(
) as asgi_manager:
# Verify that the async engine was created
mock_create_async_engine_and_database_ready.assert_called_once_with(
- app.state.settings.CATALOG_POSTGRES
+ app.state.settings.CATALOG_POSTGRES, app.title
)
# Verify that the async engine is in the lifespan manager state
diff --git a/packages/service-library/tests/fastapi/test_tracing.py b/packages/service-library/tests/fastapi/test_tracing.py
index 8e58dfd75dd9..148f4e43776d 100644
--- a/packages/service-library/tests/fastapi/test_tracing.py
+++ b/packages/service-library/tests/fastapi/test_tracing.py
@@ -4,15 +4,27 @@
import importlib
import random
import string
-from collections.abc import Callable, Iterator
+from collections.abc import Callable
+from functools import partial
from typing import Any
import pip
import pytest
from fastapi import FastAPI
+from fastapi.exceptions import HTTPException
+from fastapi.responses import PlainTextResponse
+from fastapi.testclient import TestClient
+from opentelemetry import trace
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from pydantic import ValidationError
from servicelib.fastapi.tracing import (
get_tracing_instrumentation_lifespan,
+ initialize_fastapi_app_tracing,
+)
+from servicelib.tracing import (
+ _OSPARC_TRACE_ID_HEADER,
+ _PROFILE_ATTRIBUTE_NAME,
+ with_profiled_span,
)
from settings_library.tracing import TracingSettings
@@ -60,9 +72,9 @@ def set_and_clean_settings_env_vars(
)
async def test_valid_tracing_settings(
mocked_app: FastAPI,
+ mock_otel_collector: InMemorySpanExporter,
set_and_clean_settings_env_vars: Callable[[], None],
tracing_settings_in: Callable[[], dict[str, Any]],
- uninstrument_opentelemetry: Iterator[None],
):
tracing_settings = TracingSettings()
async for _ in get_tracing_instrumentation_lifespan(
@@ -94,9 +106,9 @@ async def test_valid_tracing_settings(
)
async def test_invalid_tracing_settings(
mocked_app: FastAPI,
+ mock_otel_collector: InMemorySpanExporter,
set_and_clean_settings_env_vars: Callable[[], None],
tracing_settings_in: Callable[[], dict[str, Any]],
- uninstrument_opentelemetry: Iterator[None],
):
app = mocked_app
with pytest.raises((BaseException, ValidationError, TypeError)): # noqa: PT012
@@ -149,9 +161,9 @@ def manage_package(request):
)
async def test_tracing_setup_package_detection(
mocked_app: FastAPI,
+ mock_otel_collector: InMemorySpanExporter,
set_and_clean_settings_env_vars: Callable[[], None],
tracing_settings_in: Callable[[], dict[str, Any]],
- uninstrument_opentelemetry: Iterator[None],
manage_package,
):
package_name = manage_package
@@ -167,3 +179,108 @@ async def test_tracing_setup_package_detection(
service_name="Mock-Openetlemetry-Pytest",
)(app=mocked_app):
pass
+
+
+@pytest.mark.parametrize(
+ "tracing_settings_in",
+ [
+ ("http://opentelemetry-collector", 4318),
+ ],
+ indirect=True,
+)
+@pytest.mark.parametrize(
+ "server_response",
+ [
+ PlainTextResponse("ok"),
+ HTTPException(status_code=400, detail="error"),
+ ],
+)
+async def test_trace_id_in_response_header(
+ mock_otel_collector: InMemorySpanExporter,
+ mocked_app: FastAPI,
+ set_and_clean_settings_env_vars: Callable,
+ tracing_settings_in: Callable,
+ server_response: PlainTextResponse | HTTPException,
+) -> None:
+ tracing_settings = TracingSettings()
+
+ handler_data = dict()
+
+ async def handler(handler_data: dict):
+ current_span = trace.get_current_span()
+ handler_data[_OSPARC_TRACE_ID_HEADER] = format(
+ current_span.get_span_context().trace_id, "032x"
+ )
+ if isinstance(server_response, HTTPException):
+ raise server_response
+ return server_response
+
+ mocked_app.get("/")(partial(handler, handler_data))
+
+ async for _ in get_tracing_instrumentation_lifespan(
+ tracing_settings=tracing_settings,
+ service_name="Mock-OpenTelemetry-Pytest",
+ )(app=mocked_app):
+ initialize_fastapi_app_tracing(mocked_app, add_response_trace_id_header=True)
+ client = TestClient(mocked_app)
+ response = client.get("/")
+ assert _OSPARC_TRACE_ID_HEADER in response.headers
+ trace_id = response.headers[_OSPARC_TRACE_ID_HEADER]
+ assert len(trace_id) == 32 # Ensure trace ID is a 32-character hex string
+ assert trace_id == handler_data[_OSPARC_TRACE_ID_HEADER]
+
+
+@pytest.mark.parametrize(
+ "tracing_settings_in",
+ [
+ ("http://opentelemetry-collector", 4318),
+ ],
+ indirect=True,
+)
+@pytest.mark.parametrize(
+ "server_response",
+ [
+ PlainTextResponse("ok"),
+ HTTPException(status_code=400, detail="error"),
+ ],
+)
+async def test_with_profile_span(
+ mock_otel_collector: InMemorySpanExporter,
+ mocked_app: FastAPI,
+ set_and_clean_settings_env_vars: Callable[[], None],
+ tracing_settings_in: Callable,
+ server_response: PlainTextResponse | HTTPException,
+):
+ tracing_settings = TracingSettings()
+
+ handler_data = dict()
+
+ @with_profiled_span
+ async def handler(handler_data: dict):
+ current_span = trace.get_current_span()
+ handler_data[_OSPARC_TRACE_ID_HEADER] = format(
+ current_span.get_span_context().trace_id, "032x"
+ )
+ if isinstance(server_response, HTTPException):
+ raise server_response
+ return server_response
+
+ mocked_app.get("/")(partial(handler, handler_data))
+
+ async for _ in get_tracing_instrumentation_lifespan(
+ tracing_settings=tracing_settings,
+ service_name="Mock-OpenTelemetry-Pytest",
+ )(app=mocked_app):
+ initialize_fastapi_app_tracing(mocked_app, add_response_trace_id_header=True)
+ client = TestClient(mocked_app)
+ _ = client.get("/")
+ trace_id = handler_data.get(_OSPARC_TRACE_ID_HEADER)
+ assert trace_id is not None
+
+ spans = mock_otel_collector.get_finished_spans()
+ assert any(
+ span.context.trace_id == int(trace_id, 16)
+ and _PROFILE_ATTRIBUTE_NAME in span.attributes.keys()
+ for span in spans
+ if span.context is not None and span.attributes is not None
+ )
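For context, the two tests above only assume that the span active while a request is handled exposes a 128-bit trace id, conventionally rendered as a 32-character lowercase hex string. A minimal Starlette middleware surfacing it in a response header could look like the sketch below; the header name and class are placeholders rather than the servicelib helper, and it assumes the OpenTelemetry ASGI instrumentation wraps the app so a span is already active when dispatch runs.

from opentelemetry import trace
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response

_TRACE_ID_HEADER = "x-trace-id"  # placeholder header name


class TraceIdHeaderMiddleware(BaseHTTPMiddleware):
    """Illustrative sketch: expose the current OpenTelemetry trace id to clients."""

    async def dispatch(self, request: Request, call_next) -> Response:
        response = await call_next(request)
        span_context = trace.get_current_span().get_span_context()
        if span_context.is_valid:
            # "032x" yields the zero-padded 32-character hex form asserted in the tests
            response.headers[_TRACE_ID_HEADER] = format(span_context.trace_id, "032x")
        return response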
diff --git a/packages/service-library/tests/long_running_tasks/conftest.py b/packages/service-library/tests/long_running_tasks/conftest.py
new file mode 100644
index 000000000000..df4bb2fd9bc2
--- /dev/null
+++ b/packages/service-library/tests/long_running_tasks/conftest.py
@@ -0,0 +1,85 @@
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+
+import asyncio
+import logging
+from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable
+from datetime import timedelta
+
+import pytest
+from faker import Faker
+from pytest_mock import MockerFixture
+from servicelib.logging_utils import log_catch
+from servicelib.long_running_tasks.manager import (
+ LongRunningManager,
+)
+from servicelib.long_running_tasks.models import LRTNamespace, TaskContext
+from servicelib.long_running_tasks.task import TasksManager
+from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
+from utils import TEST_CHECK_STALE_INTERVAL_S
+
+_logger = logging.getLogger(__name__)
+
+
+class _TestingLongRunningManager(LongRunningManager):
+ @staticmethod
+ def get_task_context(request) -> TaskContext:
+ _ = request
+ return {}
+
+
+@pytest.fixture
+async def get_long_running_manager(
+ fast_long_running_tasks_cancellation: None, faker: Faker
+) -> AsyncIterator[
+ Callable[
+ [RedisSettings, RabbitSettings, LRTNamespace | None],
+ Awaitable[LongRunningManager],
+ ]
+]:
+ managers: list[LongRunningManager] = []
+
+ async def _(
+ redis_settings: RedisSettings,
+ rabbit_settings: RabbitSettings,
+ lrt_namespace: LRTNamespace | None,
+ ) -> LongRunningManager:
+ manager = _TestingLongRunningManager(
+ stale_task_check_interval=timedelta(seconds=TEST_CHECK_STALE_INTERVAL_S),
+ stale_task_detect_timeout=timedelta(seconds=TEST_CHECK_STALE_INTERVAL_S),
+ redis_settings=redis_settings,
+ rabbit_settings=rabbit_settings,
+ lrt_namespace=lrt_namespace or f"test{faker.uuid4()}",
+ )
+ await manager.setup()
+ managers.append(manager)
+ return manager
+
+ yield _
+
+ for manager in managers:
+ with log_catch(_logger, reraise=False):
+ await asyncio.wait_for(manager.teardown(), timeout=5)
+
+
+@pytest.fixture
+async def rabbitmq_rpc_client(
+ rabbit_service: RabbitSettings,
+) -> AsyncIterable[RabbitMQRPCClient]:
+ client = await RabbitMQRPCClient.create(
+ client_name="test-lrt-rpc-client", settings=rabbit_service
+ )
+ yield client
+ await client.close()
+
+
+@pytest.fixture
+def disable_stale_tasks_monitor(mocker: MockerFixture) -> None:
+ # no need to autoremove stale tasks in these tests
+ async def _to_replace(self: TasksManager) -> None:
+ self._started_event_task_stale_tasks_monitor.set()
+
+ mocker.patch.object(TasksManager, "_stale_tasks_monitor", _to_replace)
diff --git a/packages/service-library/tests/long_running_tasks/test_long_running_tasks__redis_store.py b/packages/service-library/tests/long_running_tasks/test_long_running_tasks__redis_store.py
new file mode 100644
index 000000000000..fc08de586864
--- /dev/null
+++ b/packages/service-library/tests/long_running_tasks/test_long_running_tasks__redis_store.py
@@ -0,0 +1,117 @@
+# pylint:disable=redefined-outer-name
+
+from collections.abc import AsyncIterable, Callable
+from contextlib import AbstractAsyncContextManager
+from copy import deepcopy
+
+import pytest
+from pydantic import TypeAdapter
+from servicelib.long_running_tasks._redis_store import (
+ _MARKED_FOR_REMOVAL_FIELD,
+ RedisStore,
+)
+from servicelib.long_running_tasks.models import TaskData
+from servicelib.redis._client import RedisClientSDK
+from settings_library.redis import RedisDatabase, RedisSettings
+
+
+def test_ensure_task_data_field_name_and_type():
+    # NOTE: ensure these do not change; if you change them, remember that data already stored in the DB becomes invalid
+ assert _MARKED_FOR_REMOVAL_FIELD == "marked_for_removal"
+ field = TaskData.model_fields[_MARKED_FOR_REMOVAL_FIELD]
+ assert field.annotation is bool
+
+
+@pytest.fixture
+def task_data() -> TaskData:
+ return TypeAdapter(TaskData).validate_python(
+ TaskData.model_json_schema()["examples"][0]
+ )
+
+
+@pytest.fixture
+async def store(
+ use_in_memory_redis: RedisSettings,
+ get_redis_client_sdk: Callable[
+ [RedisDatabase], AbstractAsyncContextManager[RedisClientSDK]
+ ],
+) -> AsyncIterable[RedisStore]:
+ store = RedisStore(redis_settings=use_in_memory_redis, namespace="test")
+
+ await store.setup()
+ yield store
+ await store.shutdown()
+
+ # triggers cleanup of all redis data
+ async with get_redis_client_sdk(RedisDatabase.LONG_RUNNING_TASKS):
+ pass
+
+
+async def test_workflow(store: RedisStore, task_data: TaskData) -> None:
+ # task data
+ assert await store.list_tasks_data() == []
+ assert await store.get_task_data("missing") is None
+
+ await store.add_task_data(task_data.task_id, task_data)
+
+ assert await store.list_tasks_data() == [task_data]
+
+ await store.delete_task_data(task_data.task_id)
+
+ assert await store.list_tasks_data() == []
+
+ # cancelled tasks
+ await store.add_task_data(task_data.task_id, task_data)
+
+ assert await store.is_marked_for_removal(task_data.task_id) is False
+
+ await store.mark_for_removal(task_data.task_id)
+
+ assert await store.is_marked_for_removal(task_data.task_id) is True
+
+
+@pytest.fixture
+async def redis_stores(
+ use_in_memory_redis: RedisSettings,
+ get_redis_client_sdk: Callable[
+ [RedisDatabase], AbstractAsyncContextManager[RedisClientSDK]
+ ],
+) -> AsyncIterable[list[RedisStore]]:
+ stores: list[RedisStore] = [
+ RedisStore(redis_settings=use_in_memory_redis, namespace=f"test-{i}")
+ for i in range(5)
+ ]
+ for store in stores:
+ await store.setup()
+
+ yield stores
+
+ for store in stores:
+ await store.shutdown()
+
+ # triggers cleanup of all redis data
+ async with get_redis_client_sdk(RedisDatabase.LONG_RUNNING_TASKS):
+ pass
+
+
+async def test_workflow_multiple_redis_stores_with_different_namespaces(
+ redis_stores: list[RedisStore], task_data: TaskData
+):
+
+ for store in redis_stores:
+ assert await store.list_tasks_data() == []
+
+ for store in redis_stores:
+ await store.add_task_data(task_data.task_id, task_data)
+ await store.mark_for_removal(task_data.task_id)
+
+ marked_as_removed_task_data = deepcopy(task_data)
+ marked_as_removed_task_data.marked_for_removal = True
+ for store in redis_stores:
+ assert await store.list_tasks_data() == [marked_as_removed_task_data]
+
+ for store in redis_stores:
+ await store.delete_task_data(task_data.task_id)
+
+ for store in redis_stores:
+ assert await store.list_tasks_data() == []
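These tests exercise only the store's public surface (add_task_data, list_tasks_data, mark_for_removal, delete_task_data) and the guarantee that stores with different namespaces never see each other's entries. Purely as an assumption about how such a layout could be arranged on Redis, and not servicelib's actual RedisStore, one hash per task under a namespace-prefixed key is enough to reproduce that behaviour:

import json

from redis.asyncio import Redis


class NamespacedTaskStore:
    """Illustrative sketch only: a namespace-isolated task store on top of Redis."""

    def __init__(self, redis: Redis, namespace: str) -> None:
        self._redis = redis
        self._prefix = f"lrt:{namespace}:tasks:"  # hypothetical key layout

    def _key(self, task_id: str) -> str:
        return f"{self._prefix}{task_id}"

    async def add_task_data(self, task_id: str, data: dict) -> None:
        await self._redis.hset(
            self._key(task_id),
            mapping={"data": json.dumps(data), "marked_for_removal": "0"},
        )

    async def mark_for_removal(self, task_id: str) -> None:
        await self._redis.hset(self._key(task_id), "marked_for_removal", "1")

    async def is_marked_for_removal(self, task_id: str) -> bool:
        return await self._redis.hget(self._key(task_id), "marked_for_removal") == b"1"

    async def delete_task_data(self, task_id: str) -> None:
        await self._redis.delete(self._key(task_id))

    async def list_task_ids(self) -> list[str]:
        # a namespace-bound SCAN pattern keeps stores with different namespaces isolated
        return [
            key.decode().removeprefix(self._prefix)
            async for key in self._redis.scan_iter(match=f"{self._prefix}*")
        ]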
diff --git a/packages/service-library/tests/long_running_tasks/test_long_running_tasks__serialization.py b/packages/service-library/tests/long_running_tasks/test_long_running_tasks__serialization.py
new file mode 100644
index 000000000000..3b7562e55503
--- /dev/null
+++ b/packages/service-library/tests/long_running_tasks/test_long_running_tasks__serialization.py
@@ -0,0 +1,50 @@
+from typing import Any
+
+import pytest
+from aiohttp.web import HTTPException, HTTPInternalServerError
+from servicelib.aiohttp.long_running_tasks._server import AiohttpHTTPExceptionSerializer
+from servicelib.long_running_tasks._serialization import (
+ dumps,
+ loads,
+ register_custom_serialization,
+)
+
+register_custom_serialization(HTTPException, AiohttpHTTPExceptionSerializer)
+
+
+class PositionalArguments:
+ def __init__(self, arg1, arg2, *args):
+ self.arg1 = arg1
+ self.arg2 = arg2
+ self.args = args
+
+
+class MixedArguments:
+ def __init__(self, arg1, arg2, kwarg1=None, kwarg2=None):
+ self.arg1 = arg1
+ self.arg2 = arg2
+ self.kwarg1 = kwarg1
+ self.kwarg2 = kwarg2
+
+
+@pytest.mark.parametrize(
+ "obj",
+ [
+ HTTPInternalServerError(reason="Uh-oh!", text="Failure!"),
+ PositionalArguments("arg1", "arg2", "arg3", "arg4"),
+ MixedArguments("arg1", "arg2", kwarg1="kwarg1", kwarg2="kwarg2"),
+ "a_string",
+ 1,
+ ],
+)
+def test_serialization(obj: Any):
+ str_data = dumps(obj)
+
+ try:
+ reconstructed_obj = loads(str_data)
+ except Exception as exc: # pylint:disable=broad-exception-caught
+ reconstructed_obj = exc
+
+ assert type(reconstructed_obj) is type(obj)
+ if hasattr(obj, "__dict__"):
+ assert reconstructed_obj.__dict__ == obj.__dict__
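The round-trip above depends on registering a custom serializer for aiohttp's HTTPException, whose constructor cannot be rebuilt from Exception.args alone. The same issue can be illustrated with the standard library only: copyreg.pickle tells pickle which constructor arguments to replay when loading such an object. The class and reducer below are invented for illustration and are not the servicelib serializer.

import copyreg
import pickle


class HttpError(Exception):
    """Stand-in for an exception whose constructor needs specific arguments."""

    def __init__(self, status: int, reason: str) -> None:
        super().__init__(reason)
        self.status = status
        self.reason = reason


def _reduce_http_error(exc: HttpError):
    # replayed on loads(): HttpError(exc.status, exc.reason)
    return HttpError, (exc.status, exc.reason)


# without this, unpickling would call HttpError(*exc.args) == HttpError("Uh-oh!") and fail
copyreg.pickle(HttpError, _reduce_http_error)

restored = pickle.loads(pickle.dumps(HttpError(500, "Uh-oh!")))
assert isinstance(restored, HttpError)
assert (restored.status, restored.reason) == (500, "Uh-oh!")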
diff --git a/packages/service-library/tests/long_running_tasks/test_long_running_tasks_client_long_running_manager.py b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_client_long_running_manager.py
new file mode 100644
index 000000000000..27369fe08d64
--- /dev/null
+++ b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_client_long_running_manager.py
@@ -0,0 +1,82 @@
+# pylint:disable=redefined-outer-name
+
+from collections.abc import AsyncIterable, Callable
+from contextlib import AbstractAsyncContextManager
+from copy import deepcopy
+
+import pytest
+from pydantic import TypeAdapter
+from servicelib.long_running_tasks._redis_store import RedisStore
+from servicelib.long_running_tasks.long_running_client_helper import (
+ LongRunningClientHelper,
+)
+from servicelib.long_running_tasks.models import LRTNamespace, TaskData
+from servicelib.redis._client import RedisClientSDK
+from settings_library.redis import RedisDatabase, RedisSettings
+
+
+@pytest.fixture
+def task_data() -> TaskData:
+ return TypeAdapter(TaskData).validate_python(
+ TaskData.model_json_schema()["examples"][0]
+ )
+
+
+@pytest.fixture
+def lrt_namespace() -> LRTNamespace:
+ return "TEST-NAMESPACE"
+
+
+@pytest.fixture
+async def store(
+ use_in_memory_redis: RedisSettings,
+ get_redis_client_sdk: Callable[
+ [RedisDatabase], AbstractAsyncContextManager[RedisClientSDK]
+ ],
+ lrt_namespace: LRTNamespace,
+) -> AsyncIterable[RedisStore]:
+ store = RedisStore(redis_settings=use_in_memory_redis, namespace=lrt_namespace)
+
+ await store.setup()
+ yield store
+ await store.shutdown()
+
+ # triggers cleanup of all redis data
+ async with get_redis_client_sdk(RedisDatabase.LONG_RUNNING_TASKS):
+ pass
+
+
+@pytest.fixture
+async def long_running_client_helper(
+ use_in_memory_redis: RedisSettings,
+) -> AsyncIterable[LongRunningClientHelper]:
+ helper = LongRunningClientHelper(redis_settings=use_in_memory_redis)
+
+ await helper.setup()
+ yield helper
+ await helper.shutdown()
+
+
+async def test_cleanup_namespace(
+ store: RedisStore,
+ task_data: TaskData,
+ long_running_client_helper: LongRunningClientHelper,
+ lrt_namespace: LRTNamespace,
+) -> None:
+    # create an entry and mark it for removal
+ await store.add_task_data(task_data.task_id, task_data)
+ await store.mark_for_removal(task_data.task_id)
+
+    # entries exist
+ marked_for_removal = deepcopy(task_data)
+ marked_for_removal.marked_for_removal = True
+ assert await store.list_tasks_data() == [marked_for_removal]
+
+ # removes
+ await long_running_client_helper.cleanup(lrt_namespace)
+
+    # entries were removed
+    assert await store.list_tasks_data() == []
+
+    # ensure it does not raise errors if there is nothing to remove
+ await long_running_client_helper.cleanup(lrt_namespace)
diff --git a/packages/service-library/tests/long_running_tasks/test_long_running_tasks_lrt_api.py b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_lrt_api.py
new file mode 100644
index 000000000000..88e464ee5b01
--- /dev/null
+++ b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_lrt_api.py
@@ -0,0 +1,316 @@
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+
+import asyncio
+import secrets
+from collections.abc import Awaitable, Callable
+from typing import Any, Final
+
+import pytest
+from models_library.api_schemas_long_running_tasks.base import TaskProgress
+from pydantic import NonNegativeInt
+from pytest_simcore.helpers.long_running_tasks import assert_task_is_no_longer_present
+from servicelib.long_running_tasks import lrt_api
+from servicelib.long_running_tasks.manager import LongRunningManager
+from servicelib.long_running_tasks.models import LRTNamespace, TaskContext
+from servicelib.long_running_tasks.task import TaskId, TaskRegistry
+from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
+from tenacity import (
+ AsyncRetrying,
+ TryAgain,
+ retry_if_exception_type,
+ stop_after_delay,
+ wait_fixed,
+)
+
+pytest_simcore_core_services_selection = [
+ "rabbit",
+]
+
+_RETRY_PARAMS: dict[str, Any] = {
+ "reraise": True,
+ "wait": wait_fixed(0.1),
+ "stop": stop_after_delay(60),
+ "retry": retry_if_exception_type((AssertionError, TryAgain)),
+}
+
+
+async def _task_echo_input(progress: TaskProgress, to_return: Any) -> Any:
+ return to_return
+
+
+class _TestingError(Exception):
+ pass
+
+
+async def _task_always_raise(progress: TaskProgress) -> None:
+ msg = "This task always raises an error"
+ raise _TestingError(msg)
+
+
+async def _task_takes_too_long(progress: TaskProgress) -> None:
+ # Simulate a long-running task that is taking too much time
+ await asyncio.sleep(1e9)
+
+
+TaskRegistry.register(_task_echo_input)
+TaskRegistry.register(_task_always_raise, allowed_errors=(_TestingError,))
+TaskRegistry.register(_task_takes_too_long)
+
+
+@pytest.fixture
+def managers_count() -> NonNegativeInt:
+ return 5
+
+
+@pytest.fixture
+async def long_running_managers(
+ disable_stale_tasks_monitor: None,
+ managers_count: NonNegativeInt,
+ use_in_memory_redis: RedisSettings,
+ rabbit_service: RabbitSettings,
+ get_long_running_manager: Callable[
+ [RedisSettings, RabbitSettings, LRTNamespace | None],
+ Awaitable[LongRunningManager],
+ ],
+) -> list[LongRunningManager]:
+    managers: list[LongRunningManager] = []
+    for _ in range(managers_count):
+        long_running_manager = await get_long_running_manager(
+            use_in_memory_redis, rabbit_service, "some-service"
+        )
+        managers.append(long_running_manager)
+
+    return managers
+
+
+def _get_long_running_manager(
+ long_running_managers: list[LongRunningManager],
+) -> LongRunningManager:
+ return secrets.choice(long_running_managers)
+
+
+async def _assert_task_status(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ long_running_manager: LongRunningManager,
+ task_id: TaskId,
+ *,
+ is_done: bool
+) -> None:
+ result = await lrt_api.get_task_status(
+ rabbitmq_rpc_client, long_running_manager.lrt_namespace, TaskContext(), task_id
+ )
+ assert result.done is is_done
+
+
+async def _assert_task_status_on_random_manager(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ long_running_managers: list[LongRunningManager],
+ task_ids: list[TaskId],
+ *,
+ is_done: bool = True
+) -> None:
+ for task_id in task_ids:
+ result = await lrt_api.get_task_status(
+ rabbitmq_rpc_client,
+ _get_long_running_manager(long_running_managers).lrt_namespace,
+ TaskContext(),
+ task_id,
+ )
+ assert result.done is is_done
+
+
+async def _assert_task_status_done_on_all_managers(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ long_running_managers: list[LongRunningManager],
+ task_id: TaskId,
+ *,
+ is_done: bool = True
+) -> None:
+ async for attempt in AsyncRetrying(**_RETRY_PARAMS):
+ with attempt:
+ await _assert_task_status(
+ rabbitmq_rpc_client,
+ _get_long_running_manager(long_running_managers),
+ task_id,
+ is_done=is_done,
+ )
+
+    # check this can be done from any task manager
+ for manager in long_running_managers:
+ await _assert_task_status(
+ rabbitmq_rpc_client, manager, task_id, is_done=is_done
+ )
+
+
+async def _assert_list_tasks_from_all_managers(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ long_running_managers: list[LongRunningManager],
+ task_context: TaskContext,
+ task_count: int,
+) -> None:
+ for manager in long_running_managers:
+ tasks = await lrt_api.list_tasks(
+ rabbitmq_rpc_client, manager.lrt_namespace, task_context
+ )
+ assert len(tasks) == task_count
+
+
+_TASK_CONTEXT: Final[list[TaskContext | None]] = [{"a": "context"}, None]
+_IS_UNIQUE: Final[list[bool]] = [False, True]
+_TASK_COUNT: Final[list[int]] = [5]
+
+
+@pytest.mark.parametrize("task_count", _TASK_COUNT)
+@pytest.mark.parametrize("task_context", _TASK_CONTEXT)
+@pytest.mark.parametrize("is_unique", _IS_UNIQUE)
+@pytest.mark.parametrize("to_return", [{"key": "value"}])
+async def test_workflow_with_result(
+ disable_stale_tasks_monitor: None,
+ fast_long_running_tasks_cancellation: None,
+ long_running_managers: list[LongRunningManager],
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ task_count: int,
+ is_unique: bool,
+ task_context: TaskContext | None,
+ to_return: Any,
+):
+ saved_context = task_context or {}
+ task_count = 1 if is_unique else task_count
+
+ task_ids: list[TaskId] = []
+ for _ in range(task_count):
+ task_id = await lrt_api.start_task(
+ _get_long_running_manager(long_running_managers).rpc_client,
+ _get_long_running_manager(long_running_managers).lrt_namespace,
+ _task_echo_input.__name__,
+ unique=is_unique,
+ task_name=None,
+ task_context=task_context,
+ fire_and_forget=False,
+ to_return=to_return,
+ )
+ task_ids.append(task_id)
+
+ for task_id in task_ids:
+ await _assert_task_status_done_on_all_managers(
+ rabbitmq_rpc_client, long_running_managers, task_id
+ )
+
+ await _assert_list_tasks_from_all_managers(
+ rabbitmq_rpc_client, long_running_managers, saved_context, task_count=task_count
+ )
+
+ # avoids tasks getting garbage collected
+ await _assert_task_status_on_random_manager(
+ rabbitmq_rpc_client, long_running_managers, task_ids, is_done=True
+ )
+
+ for task_id in task_ids:
+ result = await lrt_api.get_task_result(
+ rabbitmq_rpc_client,
+ _get_long_running_manager(long_running_managers).lrt_namespace,
+ saved_context,
+ task_id,
+ )
+ assert result == to_return
+
+ await assert_task_is_no_longer_present(
+ _get_long_running_manager(long_running_managers), task_id, saved_context
+ )
+
+
+@pytest.mark.parametrize("task_count", _TASK_COUNT)
+@pytest.mark.parametrize("task_context", _TASK_CONTEXT)
+@pytest.mark.parametrize("is_unique", _IS_UNIQUE)
+async def test_workflow_raises_error(
+ disable_stale_tasks_monitor: None,
+ fast_long_running_tasks_cancellation: None,
+ long_running_managers: list[LongRunningManager],
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ task_count: int,
+ is_unique: bool,
+ task_context: TaskContext | None,
+):
+ saved_context = task_context or {}
+ task_count = 1 if is_unique else task_count
+
+ task_ids: list[TaskId] = []
+ for _ in range(task_count):
+ task_id = await lrt_api.start_task(
+ _get_long_running_manager(long_running_managers).rpc_client,
+ _get_long_running_manager(long_running_managers).lrt_namespace,
+ _task_always_raise.__name__,
+ unique=is_unique,
+ task_name=None,
+ task_context=task_context,
+ fire_and_forget=False,
+ )
+ task_ids.append(task_id)
+
+ for task_id in task_ids:
+ await _assert_task_status_done_on_all_managers(
+ rabbitmq_rpc_client, long_running_managers, task_id
+ )
+
+ await _assert_list_tasks_from_all_managers(
+ rabbitmq_rpc_client, long_running_managers, saved_context, task_count=task_count
+ )
+
+ # avoids tasks getting garbage collected
+ await _assert_task_status_on_random_manager(
+ rabbitmq_rpc_client, long_running_managers, task_ids, is_done=True
+ )
+
+ for task_id in task_ids:
+ with pytest.raises(_TestingError, match="This task always raises an error"):
+ await lrt_api.get_task_result(
+ rabbitmq_rpc_client,
+ _get_long_running_manager(long_running_managers).lrt_namespace,
+ saved_context,
+ task_id,
+ )
+
+ await assert_task_is_no_longer_present(
+ _get_long_running_manager(long_running_managers), task_id, saved_context
+ )
+
+
+@pytest.mark.parametrize("task_context", _TASK_CONTEXT)
+@pytest.mark.parametrize("is_unique", _IS_UNIQUE)
+async def test_remove_task(
+ disable_stale_tasks_monitor: None,
+ fast_long_running_tasks_cancellation: None,
+ long_running_managers: list[LongRunningManager],
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ is_unique: bool,
+ task_context: TaskContext | None,
+):
+ task_id = await lrt_api.start_task(
+ _get_long_running_manager(long_running_managers).rpc_client,
+ _get_long_running_manager(long_running_managers).lrt_namespace,
+ _task_takes_too_long.__name__,
+ unique=is_unique,
+ task_name=None,
+ task_context=task_context,
+ fire_and_forget=False,
+ )
+ saved_context = task_context or {}
+
+ await _assert_task_status_done_on_all_managers(
+ rabbitmq_rpc_client, long_running_managers, task_id, is_done=False
+ )
+
+ await lrt_api.remove_task(
+ rabbitmq_rpc_client,
+ _get_long_running_manager(long_running_managers).lrt_namespace,
+ saved_context,
+ task_id,
+ )
+
+ await assert_task_is_no_longer_present(
+ _get_long_running_manager(long_running_managers), task_id, saved_context
+ )
diff --git a/packages/service-library/tests/long_running_tasks/test_long_running_tasks_models.py b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_models.py
index f21417da788e..a92765eb010c 100644
--- a/packages/service-library/tests/long_running_tasks/test_long_running_tasks_models.py
+++ b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_models.py
@@ -1,4 +1,4 @@
-from servicelib.long_running_tasks._models import TaskProgress
+from servicelib.long_running_tasks.models import TaskProgress
def test_progress_has_no_more_than_3_digits():
diff --git a/packages/service-library/tests/long_running_tasks/test_long_running_tasks_task.py b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_task.py
index 6d3b9c837f26..0808878818a9 100644
--- a/packages/service-library/tests/long_running_tasks/test_long_running_tasks_task.py
+++ b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_task.py
@@ -6,29 +6,48 @@
import asyncio
import urllib.parse
-from collections.abc import AsyncIterator
+from collections.abc import Awaitable, Callable
from datetime import datetime
-from typing import Any, Final
+from typing import Any
import pytest
from faker import Faker
-from servicelib.long_running_tasks._errors import (
+from models_library.api_schemas_long_running_tasks.base import ProgressMessage
+from servicelib.long_running_tasks import lrt_api
+from servicelib.long_running_tasks._serialization import (
+ loads,
+)
+from servicelib.long_running_tasks.errors import (
TaskAlreadyRunningError,
- TaskCancelledError,
TaskNotCompletedError,
TaskNotFoundError,
+ TaskNotRegisteredError,
+ TaskRaisedUnserializableError,
+)
+from servicelib.long_running_tasks.manager import (
+ LongRunningManager,
)
-from servicelib.long_running_tasks._models import (
- ProgressPercent,
+from servicelib.long_running_tasks.models import (
+ LRTNamespace,
+ ResultField,
+ TaskContext,
TaskProgress,
TaskStatus,
)
-from servicelib.long_running_tasks._task import TasksManager, start_task
-from tenacity import TryAgain
+from servicelib.long_running_tasks.task import TaskRegistry
+from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient
+from settings_library.rabbit import RabbitSettings
+from settings_library.redis import RedisSettings
+from tenacity import TryAgain, retry, stop_after_attempt
from tenacity.asyncio import AsyncRetrying
from tenacity.retry import retry_if_exception_type
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed
+from utils import TEST_CHECK_STALE_INTERVAL_S
+
+pytest_simcore_core_services_selection = [
+ "rabbit",
+]
_RETRY_PARAMS: dict[str, Any] = {
"reraise": True,
@@ -38,329 +57,602 @@
}
+class _TetingError(Exception):
+ pass
+
+
async def a_background_task(
- task_progress: TaskProgress,
+ progress: TaskProgress,
raise_when_finished: bool,
total_sleep: int,
) -> int:
"""sleeps and raises an error or returns 42"""
for i in range(total_sleep):
await asyncio.sleep(1)
- task_progress.update(percent=ProgressPercent((i + 1) / total_sleep))
+ await progress.update(percent=(i + 1) / total_sleep)
if raise_when_finished:
msg = "raised this error as instructed"
- raise RuntimeError(msg)
+ raise _TetingError(msg)
return 42
-async def fast_background_task(task_progress: TaskProgress) -> int:
- """this task does nothing and returns a constant"""
+async def fast_background_task(progress: TaskProgress) -> int:
return 42
-async def failing_background_task(task_progress: TaskProgress):
- """this task does nothing and returns a constant"""
+async def failing_background_task(progress: TaskProgress):
msg = "failing asap"
- raise RuntimeError(msg)
+ raise _TetingError(msg)
+
+
+async def failing_unpicklable_background_task(progress: TaskProgress):
+ @retry(
+ stop=stop_after_attempt(2),
+ reraise=False,
+ )
+    async def _inner_fail() -> None:
+        msg = "always fails with retry"
+        raise _TetingError(msg)
+    await _inner_fail()
-TEST_CHECK_STALE_INTERVAL_S: Final[float] = 1
+
+TaskRegistry.register(a_background_task)
+TaskRegistry.register(fast_background_task)
+TaskRegistry.register(failing_background_task)
+TaskRegistry.register(failing_unpicklable_background_task)
+
+
+@pytest.fixture
+def empty_context() -> TaskContext:
+ return {}
@pytest.fixture
-async def tasks_manager() -> AsyncIterator[TasksManager]:
- tasks_manager = TasksManager(
- stale_task_check_interval_s=TEST_CHECK_STALE_INTERVAL_S,
- stale_task_detect_timeout_s=TEST_CHECK_STALE_INTERVAL_S,
+async def long_running_manager(
+ use_in_memory_redis: RedisSettings,
+ rabbit_service: RabbitSettings,
+ get_long_running_manager: Callable[
+ [RedisSettings, RabbitSettings, LRTNamespace | None],
+ Awaitable[LongRunningManager],
+ ],
+) -> LongRunningManager:
+ return await get_long_running_manager(
+ use_in_memory_redis, rabbit_service, "rabbit-namespace"
)
- yield tasks_manager
- await tasks_manager.close()
@pytest.mark.parametrize("check_task_presence_before", [True, False])
async def test_task_is_auto_removed(
- tasks_manager: TasksManager, check_task_presence_before: bool
+ long_running_manager: LongRunningManager,
+ check_task_presence_before: bool,
+ empty_context: TaskContext,
):
- task_id = start_task(
- tasks_manager,
- a_background_task,
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10 * TEST_CHECK_STALE_INTERVAL_S,
+ task_context=empty_context,
)
if check_task_presence_before:
# immediately after starting the task is still there
- task_status = tasks_manager.get_task_status(task_id, with_task_context=None)
+ task_status = await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
assert task_status
# wait for task to be automatically removed
# meaning no calls via the manager methods are received
async for attempt in AsyncRetrying(**_RETRY_PARAMS):
with attempt:
- for tasks in tasks_manager._tasks_groups.values(): # noqa: SLF001
- if task_id in tasks:
- msg = "wait till no element is found any longer"
- raise TryAgain(msg)
+ if (
+ await long_running_manager.tasks_manager._tasks_data.get_task_data( # noqa: SLF001
+ task_id
+ )
+ is not None
+ ):
+ msg = "wait till no element is found any longer"
+ raise TryAgain(msg)
with pytest.raises(TaskNotFoundError):
- tasks_manager.get_task_status(task_id, with_task_context=None)
+ await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
with pytest.raises(TaskNotFoundError):
- tasks_manager.get_task_result(task_id, with_task_context=None)
+ await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=empty_context
+ )
-async def test_checked_task_is_not_auto_removed(tasks_manager: TasksManager):
- task_id = start_task(
- tasks_manager,
- a_background_task,
+@pytest.mark.parametrize("wait_multiplier", [1, 2, 3, 4, 5, 6])
+async def test_checked_task_is_not_auto_removed(
+ long_running_manager: LongRunningManager,
+ empty_context: TaskContext,
+ wait_multiplier: int,
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
- total_sleep=5 * TEST_CHECK_STALE_INTERVAL_S,
+ total_sleep=wait_multiplier * TEST_CHECK_STALE_INTERVAL_S,
+ task_context=empty_context,
)
async for attempt in AsyncRetrying(**_RETRY_PARAMS):
with attempt:
- status = tasks_manager.get_task_status(task_id, with_task_context=None)
+ status = await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
assert status.done, f"task {task_id} not complete"
- result = tasks_manager.get_task_result(task_id, with_task_context=None)
+ result = await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=empty_context
+ )
assert result
-async def test_fire_and_forget_task_is_not_auto_removed(tasks_manager: TasksManager):
- task_id = start_task(
- tasks_manager,
- a_background_task,
+def _get_resutlt(result_field: ResultField) -> Any:
+ assert result_field.str_result
+ return loads(result_field.str_result)
+
+
+async def test_fire_and_forget_task_is_not_auto_removed(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=5 * TEST_CHECK_STALE_INTERVAL_S,
fire_and_forget=True,
+ task_context=empty_context,
)
await asyncio.sleep(3 * TEST_CHECK_STALE_INTERVAL_S)
# the task shall still be present even if we did not check the status before
- status = tasks_manager.get_task_status(task_id, with_task_context=None)
+ status = await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
assert not status.done, "task was removed although it is fire and forget"
# the task shall finish
- await asyncio.sleep(3 * TEST_CHECK_STALE_INTERVAL_S)
+ await asyncio.sleep(4 * TEST_CHECK_STALE_INTERVAL_S)
# get the result
- task_result = tasks_manager.get_task_result(task_id, with_task_context=None)
- assert task_result == 42
+ task_result = await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=empty_context
+ )
+ assert _get_resutlt(task_result) == 42
-async def test_get_result_of_unfinished_task_raises(tasks_manager: TasksManager):
- task_id = start_task(
- tasks_manager,
- a_background_task,
+async def test_get_result_of_unfinished_task_raises(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=5 * TEST_CHECK_STALE_INTERVAL_S,
+ task_context=empty_context,
)
with pytest.raises(TaskNotCompletedError):
- tasks_manager.get_task_result(task_id, with_task_context=None)
+ await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=empty_context
+ )
-async def test_unique_task_already_running(tasks_manager: TasksManager):
- async def unique_task(task_progress: TaskProgress):
+async def test_unique_task_already_running(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ async def unique_task(progress: TaskProgress):
+ _ = progress
await asyncio.sleep(1)
- start_task(tasks_manager=tasks_manager, task=unique_task, unique=True)
+ TaskRegistry.register(unique_task)
+
+ await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ unique_task.__name__,
+ unique=True,
+ task_context=empty_context,
+ )
# ensure unique running task regardless of how many times it gets started
with pytest.raises(TaskAlreadyRunningError) as exec_info:
- start_task(tasks_manager=tasks_manager, task=unique_task, unique=True)
+ await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ unique_task.__name__,
+ unique=True,
+ task_context=empty_context,
+ )
assert "must be unique, found: " in f"{exec_info.value}"
+ TaskRegistry.unregister(unique_task)
-async def test_start_multiple_not_unique_tasks(tasks_manager: TasksManager):
- async def not_unique_task(task_progress: TaskProgress):
+
+async def test_start_multiple_not_unique_tasks(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ async def not_unique_task(progress: TaskProgress):
await asyncio.sleep(1)
+ TaskRegistry.register(not_unique_task)
+
for _ in range(5):
- start_task(tasks_manager=tasks_manager, task=not_unique_task)
+ await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ not_unique_task.__name__,
+ task_context=empty_context,
+ )
+ TaskRegistry.unregister(not_unique_task)
-def test_get_task_id(faker):
- obj1 = TasksManager.create_task_id(faker.word()) # noqa: SLF001
- obj2 = TasksManager.create_task_id(faker.word()) # noqa: SLF001
+
+@pytest.mark.parametrize("is_unique", [True, False])
+async def test_get_task_id(
+ long_running_manager: LongRunningManager, faker: Faker, is_unique: bool
+):
+ obj1 = long_running_manager.tasks_manager._get_task_id( # noqa: SLF001
+ faker.word(), is_unique=is_unique
+ )
+ obj2 = long_running_manager.tasks_manager._get_task_id( # noqa: SLF001
+ faker.word(), is_unique=is_unique
+ )
assert obj1 != obj2
-async def test_get_status(tasks_manager: TasksManager):
- task_id = start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+async def test_get_status(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
+ task_context=empty_context,
+ )
+ task_status = await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
)
- task_status = tasks_manager.get_task_status(task_id, with_task_context=None)
assert isinstance(task_status, TaskStatus)
- assert task_status.task_progress.message == ""
+ assert isinstance(task_status.task_progress.message, ProgressMessage)
assert task_status.task_progress.percent == 0.0
- assert task_status.done == False
+ assert task_status.done is False
assert isinstance(task_status.started, datetime)
-async def test_get_status_missing(tasks_manager: TasksManager):
+async def test_get_status_missing(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
with pytest.raises(TaskNotFoundError) as exec_info:
- tasks_manager.get_task_status("missing_task_id", with_task_context=None)
+ await long_running_manager.tasks_manager.get_task_status(
+ "missing_task_id", with_task_context=empty_context
+ )
assert f"{exec_info.value}" == "No task with missing_task_id found"
-async def test_get_result(tasks_manager: TasksManager):
- task_id = start_task(tasks_manager=tasks_manager, task=fast_background_task)
- await asyncio.sleep(0.1)
- result = tasks_manager.get_task_result(task_id, with_task_context=None)
- assert result == 42
+async def test_get_result(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ fast_background_task.__name__,
+ task_context=empty_context,
+ )
+
+ async for attempt in AsyncRetrying(**_RETRY_PARAMS):
+ with attempt:
+ status = await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
+ assert status.done is True
+
+ result = await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=empty_context
+ )
+ assert _get_resutlt(result) == 42
-async def test_get_result_missing(tasks_manager: TasksManager):
+async def test_get_result_missing(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
with pytest.raises(TaskNotFoundError) as exec_info:
- tasks_manager.get_task_result("missing_task_id", with_task_context=None)
+ await long_running_manager.tasks_manager.get_task_result(
+ "missing_task_id", with_task_context=empty_context
+ )
assert f"{exec_info.value}" == "No task with missing_task_id found"
-async def test_get_result_finished_with_error(tasks_manager: TasksManager):
- task_id = start_task(tasks_manager=tasks_manager, task=failing_background_task)
+async def test_get_result_finished_with_error(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ failing_background_task.__name__,
+ task_context=empty_context,
+ )
# wait for result
async for attempt in AsyncRetrying(**_RETRY_PARAMS):
with attempt:
- assert tasks_manager.get_task_status(task_id, with_task_context=None).done
-
- with pytest.raises(RuntimeError, match="failing asap"):
- tasks_manager.get_task_result(task_id, with_task_context=None)
+ assert (
+ await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
+ ).done
+
+ result = await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=empty_context
+ )
+ assert result.str_error is not None # nosec
+ with pytest.raises(_TetingError, match="failing asap"):
+ loads(result.str_error)
-async def test_get_result_task_was_cancelled_multiple_times(
- tasks_manager: TasksManager,
+async def test_get_result_finished_with_unpicklable_error(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ failing_unpicklable_background_task.__name__,
+ task_context=empty_context,
+ )
+ # wait for result
+ async for attempt in AsyncRetrying(**_RETRY_PARAMS):
+ with attempt:
+ assert (
+ await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
+ ).done
+
+ result = await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=empty_context
+ )
+ assert result.str_error is not None # nosec
+ with pytest.raises(TaskRaisedUnserializableError, match="cannot pickle"):
+ loads(result.str_error)
+
+
+async def test_cancel_task_from_different_manager(
+ rabbit_service: RabbitSettings,
+ use_in_memory_redis: RedisSettings,
+ get_long_running_manager: Callable[
+ [RedisSettings, RabbitSettings, LRTNamespace | None],
+ Awaitable[LongRunningManager],
+ ],
+ empty_context: TaskContext,
):
- task_id = start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+ manager_1 = await get_long_running_manager(
+ use_in_memory_redis, rabbit_service, "test-namespace"
+ )
+ manager_2 = await get_long_running_manager(
+ use_in_memory_redis, rabbit_service, "test-namespace"
+ )
+ manager_3 = await get_long_running_manager(
+ use_in_memory_redis, rabbit_service, "test-namespace"
+ )
+
+ task_id = await lrt_api.start_task(
+ manager_1.rpc_client,
+ manager_1.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
- total_sleep=10,
+ total_sleep=1,
+ task_context=empty_context,
)
- for _ in range(5):
- await tasks_manager.cancel_task(task_id, with_task_context=None)
- with pytest.raises(
- TaskCancelledError, match=f"Task {task_id} was cancelled before completing"
- ):
- tasks_manager.get_task_result(task_id, with_task_context=None)
+    # wait for the task to complete
+ for manager in (manager_1, manager_2, manager_3):
+ status = await manager.tasks_manager.get_task_status(task_id, empty_context)
+ assert status.done is False
+
+ async for attempt in AsyncRetrying(**_RETRY_PARAMS):
+ with attempt:
+ for manager in (manager_1, manager_2, manager_3):
+ status = await manager.tasks_manager.get_task_status(
+ task_id, empty_context
+ )
+ assert status.done is True
+
+ # check all provide the same result
+ for manager in (manager_1, manager_2, manager_3):
+ task_result = await manager.tasks_manager.get_task_result(
+ task_id, empty_context
+ )
+ assert _get_resutlt(task_result) == 42
-async def test_remove_task(tasks_manager: TasksManager):
- task_id = start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+async def test_remove_task(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
+ task_context=empty_context,
+ )
+ await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
+ await long_running_manager.tasks_manager.remove_task(
+ task_id, with_task_context=empty_context, wait_for_removal=True
)
- tasks_manager.get_task_status(task_id, with_task_context=None)
- await tasks_manager.remove_task(task_id, with_task_context=None)
with pytest.raises(TaskNotFoundError):
- tasks_manager.get_task_status(task_id, with_task_context=None)
+ await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
with pytest.raises(TaskNotFoundError):
- tasks_manager.get_task_result(task_id, with_task_context=None)
+ await long_running_manager.tasks_manager.get_task_result(
+ task_id, with_task_context=empty_context
+ )
-async def test_remove_task_with_task_context(tasks_manager: TasksManager):
- TASK_CONTEXT = {"some_context": "some_value"}
- task_id = start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+async def test_remove_task_with_task_context(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
- task_context=TASK_CONTEXT,
+ task_context=empty_context,
)
# getting status fails if wrong task context given
with pytest.raises(TaskNotFoundError):
- tasks_manager.get_task_status(
+ await long_running_manager.tasks_manager.get_task_status(
task_id, with_task_context={"wrong_task_context": 12}
)
- tasks_manager.get_task_status(task_id, with_task_context=TASK_CONTEXT)
+ await long_running_manager.tasks_manager.get_task_status(
+ task_id, with_task_context=empty_context
+ )
# removing task fails if wrong task context given
with pytest.raises(TaskNotFoundError):
- await tasks_manager.remove_task(
- task_id, with_task_context={"wrong_task_context": 12}
+ await long_running_manager.tasks_manager.remove_task(
+ task_id, with_task_context={"wrong_task_context": 12}, wait_for_removal=True
)
- await tasks_manager.remove_task(task_id, with_task_context=TASK_CONTEXT)
+ await long_running_manager.tasks_manager.remove_task(
+ task_id, with_task_context=empty_context, wait_for_removal=True
+ )
-async def test_remove_unknown_task(tasks_manager: TasksManager):
+async def test_remove_unknown_task(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
with pytest.raises(TaskNotFoundError):
- await tasks_manager.remove_task("invalid_id", with_task_context=None)
-
- await tasks_manager.remove_task(
- "invalid_id", with_task_context=None, reraise_errors=False
- )
+ await long_running_manager.tasks_manager.remove_task(
+ "invalid_id", with_task_context=empty_context, wait_for_removal=True
+ )
-async def test_cancel_task_with_task_context(tasks_manager: TasksManager):
- TASK_CONTEXT = {"some_context": "some_value"}
- task_id = start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+async def test__cancelled_tasks_worker_equivalent_of_cancellation_from_a_different_process(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
- task_context=TASK_CONTEXT,
+ task_context=empty_context,
+ )
+ await long_running_manager.tasks_manager._tasks_data.mark_for_removal( # noqa: SLF001
+ task_id
)
- # getting status fails if wrong task context given
- with pytest.raises(TaskNotFoundError):
- tasks_manager.get_task_status(
- task_id, with_task_context={"wrong_task_context": 12}
- )
- # getting status fails if wrong task context given
- with pytest.raises(TaskNotFoundError):
- await tasks_manager.cancel_task(
- task_id, with_task_context={"wrong_task_context": 12}
- )
- await tasks_manager.cancel_task(task_id, with_task_context=TASK_CONTEXT)
-
-async def test_list_tasks(tasks_manager: TasksManager):
- assert tasks_manager.list_tasks(with_task_context=None) == []
+ async for attempt in AsyncRetrying(**_RETRY_PARAMS):
+ with attempt: # noqa: SIM117
+ with pytest.raises(TaskNotFoundError):
+ assert (
+ await long_running_manager.tasks_manager.get_task_status(
+ task_id, empty_context
+ )
+ is None
+ )
+
+
+async def test_list_tasks(
+ disable_stale_tasks_monitor: None,
+ long_running_manager: LongRunningManager,
+ empty_context: TaskContext,
+):
+ assert (
+ await long_running_manager.tasks_manager.list_tasks(
+ with_task_context=empty_context
+ )
+ == []
+ )
# start a bunch of tasks
NUM_TASKS = 10
task_ids = []
for _ in range(NUM_TASKS):
task_ids.append( # noqa: PERF401
- start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+ await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
+ task_context=empty_context,
+ )
+ )
+ assert (
+ len(
+ await long_running_manager.tasks_manager.list_tasks(
+ with_task_context=empty_context
)
)
- assert len(tasks_manager.list_tasks(with_task_context=None)) == NUM_TASKS
+ == NUM_TASKS
+ )
for task_index, task_id in enumerate(task_ids):
- await tasks_manager.remove_task(task_id, with_task_context=None)
- assert len(tasks_manager.list_tasks(with_task_context=None)) == NUM_TASKS - (
- task_index + 1
+ await long_running_manager.tasks_manager.remove_task(
+ task_id, with_task_context=empty_context, wait_for_removal=True
)
+ assert len(
+ await long_running_manager.tasks_manager.list_tasks(
+ with_task_context=empty_context
+ )
+ ) == NUM_TASKS - (task_index + 1)
-async def test_list_tasks_filtering(tasks_manager: TasksManager):
- start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+async def test_list_tasks_filtering(
+ long_running_manager: LongRunningManager, empty_context: TaskContext
+):
+ await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
+ task_context=empty_context,
)
- start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+ await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
task_context={"user_id": 213},
)
- start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+ await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
task_context={"user_id": 213, "product": "osparc"},
)
- assert len(tasks_manager.list_tasks(with_task_context=None)) == 3
- assert len(tasks_manager.list_tasks(with_task_context={"user_id": 213})) == 1
assert (
len(
- tasks_manager.list_tasks(
+ await long_running_manager.tasks_manager.list_tasks(
+ with_task_context=empty_context
+ )
+ )
+ == 3
+ )
+ assert (
+ len(
+ await long_running_manager.tasks_manager.list_tasks(
+ with_task_context={"user_id": 213}
+ )
+ )
+ == 1
+ )
+ assert (
+ len(
+ await long_running_manager.tasks_manager.list_tasks(
with_task_context={"user_id": 213, "product": "osparc"}
)
)
@@ -368,7 +660,7 @@ async def test_list_tasks_filtering(tasks_manager: TasksManager):
)
assert (
len(
- tasks_manager.list_tasks(
+ await long_running_manager.tasks_manager.list_tasks(
with_task_context={"user_id": 120, "product": "osparc"}
)
)
@@ -376,13 +668,26 @@ async def test_list_tasks_filtering(tasks_manager: TasksManager):
)
-async def test_define_task_name(tasks_manager: TasksManager, faker: Faker):
+async def test_define_task_name(long_running_manager: LongRunningManager, faker: Faker):
task_name = faker.name()
- task_id = start_task(
- tasks_manager=tasks_manager,
- task=a_background_task,
+ task_id = await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ a_background_task.__name__,
raise_when_finished=False,
total_sleep=10,
task_name=task_name,
)
- assert task_id.startswith(urllib.parse.quote(task_name, safe=""))
+ assert urllib.parse.quote(task_name, safe="") in task_id
+
+
+async def test_start_not_registered_task(
+ rabbitmq_rpc_client: RabbitMQRPCClient,
+ long_running_manager: LongRunningManager,
+):
+ with pytest.raises(TaskNotRegisteredError):
+ await lrt_api.start_task(
+ long_running_manager.rpc_client,
+ long_running_manager.lrt_namespace,
+ "not_registered_task",
+ )
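
For orientation, the new flow exercised by the rewritten tests above can be condensed into one sketch. It reuses only names visible in this diff (the long_running_manager and empty_context fixtures, lrt_api.start_task, the tasks_manager get_task_status/remove_task calls and TaskNotFoundError); the test body itself is illustrative and not part of the PR.

async def test_sketch_start_and_remove(
    long_running_manager: LongRunningManager, empty_context: TaskContext
):
    # start a registered task through the new RPC-based entrypoint
    task_id = await lrt_api.start_task(
        long_running_manager.rpc_client,
        long_running_manager.lrt_namespace,
        a_background_task.__name__,
        raise_when_finished=False,
        total_sleep=1,
        task_context=empty_context,
    )
    # status lookups go through the manager and must pass the matching task context
    await long_running_manager.tasks_manager.get_task_status(
        task_id, with_task_context=empty_context
    )
    # removal is awaited; wait_for_removal=True blocks until cleanup completes
    await long_running_manager.tasks_manager.remove_task(
        task_id, with_task_context=empty_context, wait_for_removal=True
    )
    # after removal any further lookup raises TaskNotFoundError
    with pytest.raises(TaskNotFoundError):
        await long_running_manager.tasks_manager.get_task_status(
            task_id, with_task_context=empty_context
        )
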
diff --git a/packages/service-library/tests/long_running_tasks/utils.py b/packages/service-library/tests/long_running_tasks/utils.py
new file mode 100644
index 000000000000..e473dd7e1daf
--- /dev/null
+++ b/packages/service-library/tests/long_running_tasks/utils.py
@@ -0,0 +1,3 @@
+from typing import Final
+
+TEST_CHECK_STALE_INTERVAL_S: Final[float] = 1
diff --git a/packages/service-library/tests/rabbitmq/test_rabbitmq.py b/packages/service-library/tests/rabbitmq/test_rabbitmq.py
index d4c6c4b8ebb1..979d11d26777 100644
--- a/packages/service-library/tests/rabbitmq/test_rabbitmq.py
+++ b/packages/service-library/tests/rabbitmq/test_rabbitmq.py
@@ -314,7 +314,7 @@ async def _always_returning_fail(_: Any) -> bool:
@pytest.mark.parametrize("topics", _TOPICS)
-@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors()
+@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors
async def test_publish_with_no_registered_subscriber(
on_message_spy: mock.Mock,
create_rabbitmq_client: Callable[[str], RabbitMQClient],
@@ -476,7 +476,7 @@ def _raise_once_then_true(*args, **kwargs):
@pytest.fixture
async def ensure_queue_deletion(
- create_rabbitmq_client: Callable[[str], RabbitMQClient]
+ create_rabbitmq_client: Callable[[str], RabbitMQClient],
) -> AsyncIterator[Callable[[QueueName], None]]:
created_queues = set()
@@ -723,7 +723,7 @@ async def test_rabbit_adding_topics_to_a_fanout_exchange(
await _assert_message_received(mocked_message_parser, 0)
-@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors()
+@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors
async def test_rabbit_not_using_the_same_exchange_type_raises(
create_rabbitmq_client: Callable[[str], RabbitMQClient],
random_exchange_name: Callable[[], str],
@@ -738,7 +738,7 @@ async def test_rabbit_not_using_the_same_exchange_type_raises(
await client.subscribe(exchange_name, mocked_message_parser, topics=[])
-@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors()
+@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors
async def test_unsubscribe_consumer(
create_rabbitmq_client: Callable[[str], RabbitMQClient],
random_exchange_name: Callable[[], str],
diff --git a/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_interfaces_async_jobs.py b/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_interfaces_async_jobs.py
index 72ecc9a8aa68..51874400b907 100644
--- a/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_interfaces_async_jobs.py
+++ b/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_interfaces_async_jobs.py
@@ -2,21 +2,24 @@
import datetime
from collections.abc import AsyncIterator
from dataclasses import dataclass, field
+from typing import Final
import pytest
+from common_library.async_tools import cancel_wait_task
from faker import Faker
from models_library.api_schemas_rpc_async_jobs.async_jobs import (
AsyncJobGet,
AsyncJobId,
- AsyncJobNameData,
AsyncJobResult,
AsyncJobStatus,
)
from models_library.api_schemas_rpc_async_jobs.exceptions import JobMissingError
+from models_library.products import ProductName
from models_library.progress_bar import ProgressReport
from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
+from models_library.users import UserID
from pydantic import TypeAdapter
-from servicelib.async_utils import cancel_wait_task
+from servicelib.celery.models import OwnerMetadata
from servicelib.rabbitmq import RabbitMQRPCClient, RemoteMethodNotRegisteredError
from servicelib.rabbitmq.rpc_interfaces.async_jobs.async_jobs import (
list_jobs,
@@ -28,6 +31,14 @@
"rabbit",
]
+_ASYNC_JOB_CLIENT_NAME: Final[str] = "pytest_client_name"
+
+
+class _TestOwnerMetadata(OwnerMetadata):
+ user_id: UserID
+ product_name: ProductName
+ owner: str = _ASYNC_JOB_CLIENT_NAME
+
@pytest.fixture
def method_name(faker: Faker) -> RPCMethodName:
@@ -35,10 +46,11 @@ def method_name(faker: Faker) -> RPCMethodName:
@pytest.fixture
-def job_id_data(faker: Faker) -> AsyncJobNameData:
- return AsyncJobNameData(
+def owner_metadata(faker: Faker) -> OwnerMetadata:
+ return _TestOwnerMetadata(
user_id=faker.pyint(min_value=1),
product_name=faker.word(),
+ owner=_ASYNC_JOB_CLIENT_NAME,
)
@@ -68,9 +80,9 @@ def _get_task(self, job_id: AsyncJobId) -> asyncio.Task:
raise JobMissingError(job_id=f"{job_id}")
async def status(
- self, job_id: AsyncJobId, job_id_data: AsyncJobNameData
+ self, job_id: AsyncJobId, owner_metadata: OwnerMetadata
) -> AsyncJobStatus:
- assert job_id_data
+ assert owner_metadata
task = self._get_task(job_id)
return AsyncJobStatus(
job_id=job_id,
@@ -79,32 +91,29 @@ async def status(
)
async def cancel(
- self, job_id: AsyncJobId, job_id_data: AsyncJobNameData
+ self, job_id: AsyncJobId, owner_metadata: OwnerMetadata
) -> None:
assert job_id
- assert job_id_data
+ assert owner_metadata
task = self._get_task(job_id)
task.cancel()
async def result(
- self, job_id: AsyncJobId, job_id_data: AsyncJobNameData
+ self, job_id: AsyncJobId, owner_metadata: OwnerMetadata
) -> AsyncJobResult:
- assert job_id_data
+ assert owner_metadata
task = self._get_task(job_id)
assert task.done()
return AsyncJobResult(
result={
"data": task.result(),
"job_id": job_id,
- "job_id_data": job_id_data,
+ "owner_metadata": owner_metadata,
}
)
- async def list_jobs(
- self, filter_: str, job_id_data: AsyncJobNameData
- ) -> list[AsyncJobGet]:
- assert job_id_data
- assert filter_ is not None
+ async def list_jobs(self, owner_metadata: OwnerMetadata) -> list[AsyncJobGet]:
+ assert owner_metadata
return [
AsyncJobGet(
@@ -114,8 +123,8 @@ async def list_jobs(
for t in self.tasks
]
- async def submit(self, job_id_data: AsyncJobNameData) -> AsyncJobGet:
- assert job_id_data
+ async def submit(self, owner_metadata: OwnerMetadata) -> AsyncJobGet:
+ assert owner_metadata
job_id = faker.uuid4(cast_to=None)
self.tasks.append(asyncio.create_task(_slow_task(), name=f"{job_id}"))
return AsyncJobGet(job_id=job_id, job_name="fake_job_name")
@@ -145,7 +154,7 @@ async def test_async_jobs_methods(
async_job_rpc_server: RabbitMQRPCClient,
rpc_client: RabbitMQRPCClient,
namespace: RPCNamespace,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
job_id: AsyncJobId,
method: str,
):
@@ -157,7 +166,7 @@ async def test_async_jobs_methods(
rpc_client,
rpc_namespace=namespace,
job_id=job_id,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
)
@@ -166,13 +175,12 @@ async def test_list_jobs(
rpc_client: RabbitMQRPCClient,
namespace: RPCNamespace,
method_name: RPCMethodName,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
):
await list_jobs(
rpc_client,
rpc_namespace=namespace,
- filter_="",
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
)
@@ -181,13 +189,13 @@ async def test_submit(
rpc_client: RabbitMQRPCClient,
namespace: RPCNamespace,
method_name: RPCMethodName,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
):
await submit(
rpc_client,
rpc_namespace=namespace,
method_name=method_name,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
)
@@ -195,14 +203,14 @@ async def test_submit_with_invalid_method_name(
async_job_rpc_server: RabbitMQRPCClient,
rpc_client: RabbitMQRPCClient,
namespace: RPCNamespace,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
):
with pytest.raises(RemoteMethodNotRegisteredError):
await submit(
rpc_client,
rpc_namespace=namespace,
method_name=RPCMethodName("invalid_method_name"),
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
)
@@ -211,14 +219,14 @@ async def test_submit_and_wait_properly_timesout(
rpc_client: RabbitMQRPCClient,
namespace: RPCNamespace,
method_name: RPCMethodName,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
):
with pytest.raises(TimeoutError): # noqa: PT012
async for _job_composed_result in submit_and_wait(
rpc_client,
rpc_namespace=namespace,
method_name=method_name,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
client_timeout=datetime.timedelta(seconds=0.1),
):
pass
@@ -229,13 +237,13 @@ async def test_submit_and_wait(
rpc_client: RabbitMQRPCClient,
namespace: RPCNamespace,
method_name: RPCMethodName,
- job_id_data: AsyncJobNameData,
+ owner_metadata: OwnerMetadata,
):
async for job_composed_result in submit_and_wait(
rpc_client,
rpc_namespace=namespace,
method_name=method_name,
- job_id_data=job_id_data,
+ owner_metadata=owner_metadata,
client_timeout=datetime.timedelta(seconds=10),
):
if not job_composed_result.done:
@@ -243,10 +251,11 @@ async def test_submit_and_wait(
await job_composed_result.result()
assert job_composed_result.done
assert job_composed_result.status.progress.actual_value == 1
- assert await job_composed_result.result() == AsyncJobResult(
+ result = await job_composed_result.result()
+ assert result == AsyncJobResult(
result={
"data": None,
"job_id": job_composed_result.status.job_id,
- "job_id_data": job_id_data,
+ "owner_metadata": owner_metadata,
}
)
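
The hunks above replace AsyncJobNameData with an OwnerMetadata-derived model on every async-jobs RPC. Below is a minimal caller-side sketch, assuming submit is exported by the same servicelib.rabbitmq.rpc_interfaces.async_jobs.async_jobs module the test imports list_jobs from; the subclass and its field values are hypothetical.

from models_library.products import ProductName
from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
from models_library.users import UserID
from servicelib.celery.models import OwnerMetadata
from servicelib.rabbitmq import RabbitMQRPCClient
from servicelib.rabbitmq.rpc_interfaces.async_jobs.async_jobs import submit  # assumed export


class ExampleOwnerMetadata(OwnerMetadata):  # hypothetical client-side subclass
    user_id: UserID
    product_name: ProductName
    owner: str = "example-client"  # hypothetical client name


async def submit_job(
    rpc_client: RabbitMQRPCClient, namespace: RPCNamespace, method_name: RPCMethodName
):
    # the caller's identity now travels with every RPC instead of a bare AsyncJobNameData
    return await submit(
        rpc_client,
        rpc_namespace=namespace,
        method_name=method_name,
        owner_metadata=ExampleOwnerMetadata(user_id=1, product_name="osparc"),
    )
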
diff --git a/packages/service-library/tests/redis/conftest.py b/packages/service-library/tests/redis/conftest.py
index ae6d04c20856..f29c76bdfb22 100644
--- a/packages/service-library/tests/redis/conftest.py
+++ b/packages/service-library/tests/redis/conftest.py
@@ -12,11 +12,11 @@
@pytest.fixture
async def redis_client_sdk(
- get_redis_client_sdk: Callable[
+ get_in_process_redis_client_sdk: Callable[
[RedisDatabase], AbstractAsyncContextManager[RedisClientSDK]
],
) -> AsyncIterator[RedisClientSDK]:
- async with get_redis_client_sdk(RedisDatabase.RESOURCES) as client:
+ async with get_in_process_redis_client_sdk(RedisDatabase.RESOURCES) as client:
yield client
@@ -30,3 +30,18 @@ def with_short_default_redis_lock_ttl(mocker: MockerFixture) -> datetime.timedel
short_ttl = datetime.timedelta(seconds=0.25)
mocker.patch.object(redis_constants, "DEFAULT_LOCK_TTL", short_ttl)
return short_ttl
+
+
+@pytest.fixture
+def semaphore_name(faker: Faker) -> str:
+ return faker.pystr()
+
+
+@pytest.fixture
+def semaphore_capacity() -> int:
+ return 3
+
+
+@pytest.fixture
+def short_ttl() -> datetime.timedelta:
+ return datetime.timedelta(seconds=1)
diff --git a/packages/service-library/tests/redis/test_client.py b/packages/service-library/tests/redis/test_client.py
index 210c857bb9b4..580c47d0facb 100644
--- a/packages/service-library/tests/redis/test_client.py
+++ b/packages/service-library/tests/redis/test_client.py
@@ -104,13 +104,11 @@ async def test_redis_lock_with_ttl(
assert not await ttl_lock.locked()
-async def test_redis_client_sdk_setup_shutdown(
- mock_redis_socket_timeout: None, redis_service: RedisSettings
-):
+async def test_redis_client_sdk_setup_shutdown(redis_service: RedisSettings):
# setup
redis_resources_dns = redis_service.build_redis_dsn(RedisDatabase.RESOURCES)
client = RedisClientSDK(redis_resources_dns, client_name="pytest")
- assert client
+ await client.setup()
assert client.redis_dsn == redis_resources_dns
# ensure health check task sets the health to True
@@ -130,7 +128,6 @@ async def test_redis_client_sdk_setup_shutdown(
async def test_regression_fails_on_redis_service_outage(
- mock_redis_socket_timeout: None,
paused_container: Callable[[str], AbstractAsyncContextManager[None]],
redis_client_sdk: RedisClientSDK,
):
diff --git a/packages/service-library/tests/redis/test_clients_manager.py b/packages/service-library/tests/redis/test_clients_manager.py
index eeb110557e33..4bf5bc454f46 100644
--- a/packages/service-library/tests/redis/test_clients_manager.py
+++ b/packages/service-library/tests/redis/test_clients_manager.py
@@ -16,7 +16,6 @@
async def test_redis_client_sdks_manager(
- mock_redis_socket_timeout: None,
redis_service: RedisSettings,
):
all_redis_configs: set[RedisManagerDBConfig] = {
diff --git a/packages/service-library/tests/redis/test_decorators.py b/packages/service-library/tests/redis/test_decorators.py
index e4ca9d51463c..019b6595ac7c 100644
--- a/packages/service-library/tests/redis/test_decorators.py
+++ b/packages/service-library/tests/redis/test_decorators.py
@@ -267,7 +267,7 @@ async def race_condition_increase(self, by: int) -> None:
self.value = current_value
counter = RaceConditionCounter()
- # ensures it does nto time out before acquiring the lock
+ # ensures it does not time out before acquiring the lock
time_for_all_inc_counter_calls_to_finish = (
with_short_default_redis_lock_ttl * INCREASE_OPERATIONS * 10
)
diff --git a/packages/service-library/tests/redis/test_project_document_version.py b/packages/service-library/tests/redis/test_project_document_version.py
new file mode 100644
index 000000000000..47b9ca4da307
--- /dev/null
+++ b/packages/service-library/tests/redis/test_project_document_version.py
@@ -0,0 +1,50 @@
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+
+from typing import cast
+from uuid import UUID
+
+import pytest
+from faker import Faker
+from models_library.projects import ProjectID
+from servicelib.redis import RedisClientSDK
+from servicelib.redis._project_document_version import (
+ increment_and_return_project_document_version,
+)
+
+pytest_simcore_core_services_selection = [
+ "redis",
+]
+pytest_simcore_ops_services_selection = [
+ "redis-commander",
+]
+
+
+@pytest.fixture()
+def project_uuid(faker: Faker) -> ProjectID:
+ return cast(UUID, faker.uuid4(cast_to=None))
+
+
+async def test_project_document_version_workflow(
+ redis_client_sdk: RedisClientSDK, project_uuid: ProjectID
+):
+ """Test the complete workflow of getting and incrementing project document versions."""
+
+ # First increment should return 1
+ new_version = await increment_and_return_project_document_version(
+ redis_client_sdk, project_uuid
+ )
+ assert new_version == 1
+
+ # Second increment should return 2
+ new_version = await increment_and_return_project_document_version(
+ redis_client_sdk, project_uuid
+ )
+ assert new_version == 2
+
+ # Multiple increments should work correctly
+ for expected_version in range(3, 6):
+ new_version = await increment_and_return_project_document_version(
+ redis_client_sdk, project_uuid
+ )
+ assert new_version == expected_version
diff --git a/packages/service-library/tests/redis/test_project_lock.py b/packages/service-library/tests/redis/test_project_lock.py
index aa9d7fd1c740..03fe4f0e4627 100644
--- a/packages/service-library/tests/redis/test_project_lock.py
+++ b/packages/service-library/tests/redis/test_project_lock.py
@@ -10,11 +10,11 @@
from uuid import UUID
import pytest
+from common_library.async_tools import cancel_wait_task
from faker import Faker
from models_library.projects import ProjectID
from models_library.projects_access import Owner
from models_library.projects_state import ProjectLocked, ProjectStatus
-from servicelib.async_utils import cancel_wait_task
from servicelib.redis import (
ProjectLockError,
RedisClientSDK,
diff --git a/packages/service-library/tests/redis/test_semaphore.py b/packages/service-library/tests/redis/test_semaphore.py
new file mode 100644
index 000000000000..755ce716bfe2
--- /dev/null
+++ b/packages/service-library/tests/redis/test_semaphore.py
@@ -0,0 +1,617 @@
+# ruff: noqa: SLF001, EM101, TRY003, PT011, PLR0917
+# pylint: disable=no-value-for-parameter
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+import asyncio
+import datetime
+import logging
+
+import pytest
+from faker import Faker
+from pytest_mock import MockerFixture
+from servicelib.redis import RedisClientSDK
+from servicelib.redis._constants import (
+ DEFAULT_SEMAPHORE_TTL,
+ SEMAPHORE_KEY_PREFIX,
+)
+from servicelib.redis._errors import SemaphoreLostError
+from servicelib.redis._semaphore import (
+ DistributedSemaphore,
+ SemaphoreAcquisitionError,
+ SemaphoreNotAcquiredError,
+ distributed_semaphore,
+)
+from servicelib.redis._utils import handle_redis_returns_union_types
+
+pytest_simcore_core_services_selection = [
+ "redis",
+]
+pytest_simcore_ops_services_selection = [
+ "redis-commander",
+]
+
+
+@pytest.fixture
+def with_short_default_semaphore_ttl(
+ mocker: MockerFixture,
+) -> datetime.timedelta:
+ short_ttl = datetime.timedelta(seconds=5)
+ mocker.patch(
+ "servicelib.redis._semaphore.DEFAULT_SEMAPHORE_TTL",
+ short_ttl,
+ )
+ return short_ttl
+
+
+async def test_semaphore_initialization(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+):
+ semaphore = DistributedSemaphore(
+ redis_client=redis_client_sdk, key=semaphore_name, capacity=semaphore_capacity
+ )
+
+ assert semaphore.key == semaphore_name
+ assert semaphore.capacity == semaphore_capacity
+ assert semaphore.ttl == DEFAULT_SEMAPHORE_TTL
+ assert semaphore.blocking is True
+ assert semaphore.instance_id is not None
+ assert (
+ semaphore.semaphore_key
+ == f"{SEMAPHORE_KEY_PREFIX}{semaphore_name}_cap{semaphore_capacity}"
+ )
+ assert semaphore.tokens_key.startswith(f"{semaphore.semaphore_key}:")
+ assert semaphore.holders_set.startswith(f"{semaphore.semaphore_key}:")
+ assert semaphore.holder_key.startswith(f"{semaphore.semaphore_key}:")
+
+
+async def test_invalid_semaphore_initialization(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ with pytest.raises(ValueError, match="Input should be greater than 0"):
+ DistributedSemaphore(
+ redis_client=redis_client_sdk, key=semaphore_name, capacity=0
+ )
+
+ with pytest.raises(ValueError, match="Input should be greater than 0"):
+ DistributedSemaphore(
+ redis_client=redis_client_sdk, key=semaphore_name, capacity=-1
+ )
+
+ with pytest.raises(ValueError, match="TTL must be positive"):
+ DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ ttl=datetime.timedelta(seconds=0),
+ )
+ with pytest.raises(ValueError, match="TTL must be positive"):
+ DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ ttl=datetime.timedelta(seconds=0.5),
+ )
+ with pytest.raises(ValueError, match="Timeout must be positive"):
+ DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ ttl=datetime.timedelta(seconds=10),
+ blocking=True,
+ blocking_timeout=datetime.timedelta(seconds=0),
+ )
+
+
+async def _assert_semaphore_redis_state(
+ redis_client_sdk: RedisClientSDK,
+ semaphore: DistributedSemaphore,
+ *,
+ expected_count: int,
+ expected_free_tokens: int,
+ expected_expired: bool = False,
+):
+ """Helper to assert the internal Redis state of the semaphore"""
+ holders = await handle_redis_returns_union_types(
+ redis_client_sdk.redis.smembers(semaphore.holders_set)
+ )
+ assert len(holders) == expected_count
+ if expected_count > 0:
+ assert semaphore.instance_id in holders
+ holder_key_exists = await redis_client_sdk.redis.exists(semaphore.holder_key)
+ if expected_expired:
+ assert holder_key_exists == 0
+ else:
+ assert holder_key_exists == 1
+ tokens = await handle_redis_returns_union_types(
+ redis_client_sdk.redis.lrange(semaphore.tokens_key, 0, -1)
+ )
+ assert len(tokens) == expected_free_tokens
+
+
+async def test_semaphore_acquire_release_basic(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+ with_short_default_semaphore_ttl: datetime.timedelta,
+):
+ semaphore = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=with_short_default_semaphore_ttl,
+ )
+
+ # Initially not acquired
+ assert await semaphore.current_count() == 0
+ assert await semaphore.available_tokens() == semaphore_capacity
+ assert await semaphore.is_acquired() is False
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=0,
+ expected_free_tokens=semaphore_capacity,
+ )
+
+ # Acquire
+ result = await semaphore.acquire()
+ assert result is True
+ assert await semaphore.current_count() == 1
+ assert await semaphore.available_tokens() == semaphore_capacity - 1
+ assert await semaphore.is_acquired() is True
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=semaphore_capacity - 1,
+ )
+
+ # Acquire again on same instance should return True immediately and keep the same count (reentrant)
+ result = await semaphore.acquire()
+ assert result is True
+ assert await semaphore.current_count() == 1
+ assert await semaphore.available_tokens() == semaphore_capacity - 1
+ assert await semaphore.is_acquired() is True
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=semaphore_capacity - 1,
+ )
+
+ # reacquire should just work
+ await semaphore.reacquire()
+ assert await semaphore.current_count() == 1
+ assert await semaphore.available_tokens() == semaphore_capacity - 1
+ assert await semaphore.is_acquired() is True
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=semaphore_capacity - 1,
+ )
+
+ # Release
+ await semaphore.release()
+ assert await semaphore.current_count() == 0
+ assert await semaphore.available_tokens() == semaphore_capacity
+ assert await semaphore.is_acquired() is False
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=0,
+ expected_free_tokens=semaphore_capacity,
+ )
+
+ # reacquire after release should fail
+ with pytest.raises(SemaphoreNotAcquiredError):
+ await semaphore.reacquire()
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=0,
+ expected_free_tokens=semaphore_capacity,
+ )
+
+ # so does release again
+ with pytest.raises(SemaphoreNotAcquiredError):
+ await semaphore.release()
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=0,
+ expected_free_tokens=semaphore_capacity,
+ )
+
+
+async def test_semaphore_acquire_release_with_ttl_expiry(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+ with_short_default_semaphore_ttl: datetime.timedelta,
+):
+ semaphore = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=with_short_default_semaphore_ttl,
+ )
+ await semaphore.acquire()
+ assert await semaphore.current_count() == 1
+ assert await semaphore.available_tokens() == semaphore_capacity - 1
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=semaphore_capacity - 1,
+ )
+
+ # wait for TTL to expire
+ await asyncio.sleep(with_short_default_semaphore_ttl.total_seconds() + 0.1)
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=semaphore_capacity - 1,
+ expected_expired=True,
+ )
+
+ # TTL expired, reacquire should fail
+ with pytest.raises(SemaphoreLostError):
+ await semaphore.reacquire()
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=semaphore_capacity - 1,
+ expected_expired=True,
+ )
+ # and release should also fail
+ with pytest.raises(SemaphoreLostError):
+ await semaphore.release()
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=0,
+ expected_free_tokens=semaphore_capacity,
+ )
+
+    # and releasing again should also fail, with a different error
+ with pytest.raises(SemaphoreNotAcquiredError):
+ await semaphore.release()
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=0,
+ expected_free_tokens=semaphore_capacity,
+ )
+
+
+async def test_semaphore_multiple_instances_capacity_limit(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ capacity = 2
+ semaphores = [
+ DistributedSemaphore(
+ redis_client=redis_client_sdk, key=semaphore_name, capacity=capacity
+ )
+ for _ in range(4)
+ ]
+
+ # Acquire first two should succeed
+ assert await semaphores[0].acquire() is True
+ assert await semaphores[0].is_acquired() is True
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphores[0],
+ expected_count=1,
+ expected_free_tokens=capacity - 1,
+ )
+ assert await semaphores[1].is_acquired() is False
+ for sem in semaphores[:4]:
+ assert await sem.current_count() == 1
+ assert await sem.available_tokens() == capacity - 1
+
+ # acquire second
+ assert await semaphores[1].acquire() is True
+ for sem in semaphores[:2]:
+ assert await sem.is_acquired() is True
+ assert await sem.current_count() == 2
+ assert await sem.available_tokens() == capacity - 2
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ sem,
+ expected_count=2,
+ expected_free_tokens=capacity - 2,
+ )
+
+ # Third and fourth should fail in non-blocking mode
+ for sem in semaphores[2:]:
+ sem.blocking = False
+ assert await sem.acquire() is False
+ assert await sem.is_acquired() is False
+ assert await sem.current_count() == 2
+ assert await sem.available_tokens() == capacity - 2
+
+ # Release one
+ await semaphores[0].release()
+ assert await semaphores[0].is_acquired() is False
+ for sem in semaphores[:4]:
+ assert await sem.current_count() == 1
+ assert await sem.available_tokens() == capacity - 1
+
+ # Now third can acquire
+ assert await semaphores[2].acquire() is True
+ for sem in semaphores[:4]:
+ assert await sem.current_count() == 2
+ assert await sem.available_tokens() == capacity - 2
+
+ # Clean up
+ await semaphores[1].release()
+ await semaphores[2].release()
+
+
+async def test_semaphore_with_timeout(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ timeout = datetime.timedelta(seconds=1)
+ semaphore1 = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ blocking_timeout=timeout,
+ )
+ assert await semaphore1.acquire() is True
+ assert await semaphore1.is_acquired() is True
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore1,
+ expected_count=1,
+ expected_free_tokens=0,
+ )
+ semaphore2 = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ blocking_timeout=timeout,
+ )
+ # Second should timeout
+ with pytest.raises(SemaphoreAcquisitionError):
+ await semaphore2.acquire()
+ assert await semaphore2.is_acquired() is False
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore1,
+ expected_count=1,
+ expected_free_tokens=0,
+ )
+
+
+async def test_semaphore_context_manager(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ async with distributed_semaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ ) as semaphore1:
+ assert await semaphore1.is_acquired() is True
+ assert await semaphore1.current_count() == 1
+ assert await semaphore1.available_tokens() == 0
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore1,
+ expected_count=1,
+ expected_free_tokens=0,
+ )
+ assert await semaphore1.is_acquired() is False
+ assert await semaphore1.current_count() == 0
+ assert await semaphore1.available_tokens() == 1
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore1,
+ expected_count=0,
+ expected_free_tokens=1,
+ )
+
+
+async def test_semaphore_context_manager_with_timeout(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ capacity = 1
+ timeout = datetime.timedelta(seconds=0.1)
+
+ # First semaphore acquires
+ async with distributed_semaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=capacity,
+ ) as semaphore1:
+ assert await semaphore1.is_acquired() is True
+ assert await semaphore1.current_count() == 1
+ assert await semaphore1.available_tokens() == 0
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore1,
+ expected_count=1,
+ expected_free_tokens=0,
+ )
+ # Second semaphore should raise on timeout
+ with pytest.raises(SemaphoreAcquisitionError):
+ async with distributed_semaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=capacity,
+ blocking=True,
+ blocking_timeout=timeout,
+ ):
+ ...
+
+    # non-blocking should also raise when used with the context manager
+ with pytest.raises(SemaphoreAcquisitionError):
+ async with distributed_semaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=capacity,
+ blocking=False,
+ ):
+ ...
+    # using the semaphore directly in non-blocking mode should return False
+ semaphore2 = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=capacity,
+ blocking=False,
+ )
+ assert await semaphore2.acquire() is False
+
+ # now try infinite timeout
+ semaphore3 = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=capacity,
+ blocking_timeout=None, # wait forever
+ )
+ acquire_task = asyncio.create_task(semaphore3.acquire())
+ await asyncio.sleep(5) # give some time to start acquiring
+ assert not acquire_task.done()
+
+
+@pytest.mark.parametrize(
+ "exception",
+ [RuntimeError, asyncio.CancelledError],
+ ids=str,
+)
+async def test_semaphore_context_manager_with_exception(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+ exception: type[Exception | asyncio.CancelledError],
+):
+ async def _raising_context():
+ async with distributed_semaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ):
+ raise exception("Test")
+
+ with pytest.raises(exception, match="Test"):
+ await _raising_context()
+
+
+async def test_semaphore_context_manager_lost_renewal(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ with_short_default_semaphore_ttl: datetime.timedelta,
+):
+ with pytest.raises(SemaphoreLostError): # noqa: PT012
+ async with distributed_semaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ ttl=with_short_default_semaphore_ttl,
+ ) as semaphore:
+ assert await semaphore.is_acquired() is True
+ assert await semaphore.current_count() == 1
+ assert await semaphore.available_tokens() == 0
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=0,
+ )
+
+ # now simulate lost renewal by deleting the holder key
+ await redis_client_sdk.redis.delete(semaphore.holder_key)
+ # wait a bit to let the auto-renewal task detect the lost lock
+ # the sleep will be interrupted by the exception and the context manager will exit
+ with pytest.raises(asyncio.CancelledError):
+ await asyncio.sleep(
+ with_short_default_semaphore_ttl.total_seconds() + 0.5
+ )
+ raise asyncio.CancelledError
+
+
+async def test_semaphore_context_manager_auto_renewal(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ with_short_default_semaphore_ttl: datetime.timedelta,
+):
+ async with distributed_semaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ ttl=with_short_default_semaphore_ttl,
+ ) as semaphore:
+ assert await semaphore.is_acquired() is True
+ assert await semaphore.current_count() == 1
+ assert await semaphore.available_tokens() == 0
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=0,
+ )
+
+ # wait for a few TTLs to ensure auto-renewal is working
+ total_wait = with_short_default_semaphore_ttl.total_seconds() * 3
+ await asyncio.sleep(total_wait)
+
+ # should still be acquired
+ assert await semaphore.is_acquired() is True
+ assert await semaphore.current_count() == 1
+ assert await semaphore.available_tokens() == 0
+ await _assert_semaphore_redis_state(
+ redis_client_sdk,
+ semaphore,
+ expected_count=1,
+ expected_free_tokens=0,
+ )
+
+
+async def test_semaphore_context_manager_logs_warning_when_hold_too_long(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ caplog: pytest.LogCaptureFixture,
+):
+ """Test that a warning is logged when holding the semaphore for too long"""
+ with caplog.at_level(logging.WARNING):
+ async with distributed_semaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ expected_lock_overall_time=datetime.timedelta(milliseconds=200),
+ ):
+ await asyncio.sleep(0.3)
+ assert caplog.records
+ assert "longer than expected" in caplog.messages[-1]
+
+
+async def test_multiple_semaphores_different_keys(
+ redis_client_sdk: RedisClientSDK,
+ faker: Faker,
+):
+ """Test that semaphores with different keys don't interfere"""
+ key1 = faker.pystr()
+ key2 = faker.pystr()
+ capacity = 1
+
+ async with (
+ distributed_semaphore(
+ redis_client=redis_client_sdk, key=key1, capacity=capacity
+ ),
+ distributed_semaphore(
+ redis_client=redis_client_sdk, key=key2, capacity=capacity
+ ),
+ ):
+ ...
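
A compact usage sketch of the semaphore introduced above; the constructor, the setup() call and the distributed_semaphore keywords mirror the tests in this diff, while the DSN and key are placeholders and client teardown is omitted.

import datetime

from servicelib.redis import RedisClientSDK
from servicelib.redis._semaphore import distributed_semaphore


async def guarded_section() -> None:
    client = RedisClientSDK("redis://redis:6379/0", client_name="example")  # placeholder DSN
    await client.setup()
    # at most 2 concurrent holders cluster-wide; the holder key is auto-renewed
    # until the block exits, and SemaphoreAcquisitionError is raised on timeout
    async with distributed_semaphore(
        redis_client=client,
        key="my-shared-resource",  # placeholder key
        capacity=2,
        blocking=True,
        blocking_timeout=datetime.timedelta(seconds=30),
    ):
        ...  # critical section
    # client shutdown/teardown omitted in this sketch
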
diff --git a/packages/service-library/tests/redis/test_semaphore_decorator.py b/packages/service-library/tests/redis/test_semaphore_decorator.py
new file mode 100644
index 000000000000..fa004da96fe3
--- /dev/null
+++ b/packages/service-library/tests/redis/test_semaphore_decorator.py
@@ -0,0 +1,720 @@
+# ruff: noqa: SLF001, EM101, TRY003, PT011, PLR0917
+# pylint: disable=no-value-for-parameter
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+import asyncio
+import datetime
+import logging
+from contextlib import asynccontextmanager
+from typing import Literal
+
+import pytest
+from pytest_mock import MockerFixture
+from pytest_simcore.helpers.logging_tools import log_context
+from servicelib.redis import RedisClientSDK
+from servicelib.redis._constants import SEMAPHORE_KEY_PREFIX
+from servicelib.redis._errors import SemaphoreLostError
+from servicelib.redis._semaphore import (
+ DistributedSemaphore,
+ SemaphoreAcquisitionError,
+)
+from servicelib.redis._semaphore_decorator import (
+ with_limited_concurrency,
+ with_limited_concurrency_cm,
+)
+
+pytest_simcore_core_services_selection = [
+ "redis",
+]
+pytest_simcore_ops_services_selection = [
+ "redis-commander",
+]
+
+
+async def test_basic_functionality(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ call_count = 0
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ )
+ async def limited_function():
+ nonlocal call_count
+ call_count += 1
+ await asyncio.sleep(0.1)
+ return call_count
+
+ # Multiple concurrent calls
+ tasks = [asyncio.create_task(limited_function()) for _ in range(3)]
+ results = await asyncio.gather(*tasks)
+
+ # All should complete successfully
+ assert len(results) == 3
+ assert all(isinstance(r, int) for r in results)
+
+
+async def test_auto_renewal(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+ short_ttl: datetime.timedelta,
+):
+ work_started = asyncio.Event()
+ work_completed = asyncio.Event()
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=short_ttl,
+ )
+ async def long_running_work() -> Literal["success"]:
+ work_started.set()
+ # Wait longer than TTL to ensure renewal works
+ await asyncio.sleep(short_ttl.total_seconds() * 2)
+ work_completed.set()
+ return "success"
+
+ task = asyncio.create_task(long_running_work())
+ await work_started.wait()
+
+ # Check that semaphore is being held
+ temp_semaphore = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=short_ttl,
+ )
+ assert await temp_semaphore.current_count() == 1
+ assert await temp_semaphore.available_tokens() == semaphore_capacity - 1
+
+ # Wait for work to complete
+ result = await task
+ assert result == "success"
+ assert work_completed.is_set()
+
+ # After completion, semaphore should be released
+ assert await temp_semaphore.current_count() == 0
+ assert await temp_semaphore.available_tokens() == semaphore_capacity
+
+
+async def test_auto_renewal_lose_semaphore_raises(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+ short_ttl: datetime.timedelta,
+):
+ work_started = asyncio.Event()
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=short_ttl,
+ )
+ async def coro_that_should_fail() -> Literal["should not reach here"]:
+ work_started.set()
+ # Wait long enough for renewal to be attempted multiple times
+ await asyncio.sleep(short_ttl.total_seconds() * 100)
+ return "should not reach here"
+
+ task = asyncio.create_task(coro_that_should_fail())
+ await work_started.wait()
+
+ # Wait for the first renewal interval to pass
+ renewal_interval = short_ttl / 3
+ await asyncio.sleep(renewal_interval.total_seconds() * 1.5)
+
+ # Find and delete all holder keys for this semaphore
+ holder_keys = await redis_client_sdk.redis.keys(
+ f"{SEMAPHORE_KEY_PREFIX}{semaphore_name}_cap{semaphore_capacity}:holders:*"
+ )
+ assert holder_keys, "Holder keys should exist before deletion"
+ await redis_client_sdk.redis.delete(*holder_keys)
+
+ # wait another renewal interval to ensure the renewal fails
+ await asyncio.sleep(renewal_interval.total_seconds() * 1.5)
+
+    # the task should have raised by now, so do not wait too long
+ async with asyncio.timeout(renewal_interval.total_seconds()):
+ with pytest.raises(SemaphoreLostError):
+ await task
+
+
+async def test_decorator_with_callable_parameters(
+ redis_client_sdk: RedisClientSDK,
+):
+ executed_keys = []
+
+ def get_redis_client(*args, **kwargs) -> RedisClientSDK:
+ return redis_client_sdk
+
+ def get_key(user_id: str, resource: str) -> str:
+ return f"{user_id}-{resource}"
+
+ def get_capacity(user_id: str, resource: str) -> int:
+ return 2
+
+ @with_limited_concurrency(
+ get_redis_client,
+ key=get_key,
+ capacity=get_capacity,
+ )
+ async def process_user_resource(user_id: str, resource: str):
+ executed_keys.append(f"{user_id}-{resource}")
+ await asyncio.sleep(0.05)
+
+ # Test with different parameters
+ await asyncio.gather(
+ process_user_resource("user1", "wallet1"),
+ process_user_resource("user1", "wallet2"),
+ process_user_resource("user2", "wallet1"),
+ )
+
+ assert len(executed_keys) == 3
+ assert "user1-wallet1" in executed_keys
+ assert "user1-wallet2" in executed_keys
+ assert "user2-wallet1" in executed_keys
+
+
+async def test_decorator_capacity_enforcement(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ concurrent_count = 0
+ max_concurrent = 0
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=2,
+ )
+ async def limited_function() -> None:
+ nonlocal concurrent_count, max_concurrent
+ concurrent_count += 1
+ max_concurrent = max(max_concurrent, concurrent_count)
+ await asyncio.sleep(0.1)
+ concurrent_count -= 1
+
+ # Start 5 concurrent tasks
+ tasks = [asyncio.create_task(limited_function()) for _ in range(5)]
+ await asyncio.gather(*tasks)
+
+ # Should never exceed capacity of 2
+ assert max_concurrent <= 2
+
+
+async def test_exception_handling(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ )
+ async def failing_function():
+ raise RuntimeError("Test exception")
+
+ with pytest.raises(RuntimeError, match="Test exception"):
+ await failing_function()
+
+ # Semaphore should be released even after exception
+ # Test by trying to acquire again
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ )
+ async def success_function():
+ return "success"
+
+ result = await success_function()
+ assert result == "success"
+
+
+async def test_non_blocking_behavior(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ # Test the blocking timeout behavior
+ started_event = asyncio.Event()
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ blocking=True,
+ blocking_timeout=datetime.timedelta(seconds=0.1),
+ )
+ async def limited_function() -> None:
+ started_event.set()
+ await asyncio.sleep(2)
+
+ # Start first task that will hold the semaphore
+ task1 = asyncio.create_task(limited_function())
+ await started_event.wait() # Wait until semaphore is actually acquired
+
+ # Second task should timeout and raise an exception
+ with pytest.raises(SemaphoreAcquisitionError):
+ await limited_function()
+
+ await task1
+
+ # now doing the same with non-blocking should raise
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ blocking=False,
+ blocking_timeout=None,
+ )
+ async def limited_function_non_blocking() -> None:
+ await asyncio.sleep(2)
+
+ tasks = [asyncio.create_task(limited_function_non_blocking()) for _ in range(3)]
+ results = await asyncio.gather(*tasks, return_exceptions=True)
+ assert len(results) == 3
+ assert any(isinstance(r, SemaphoreAcquisitionError) for r in results)
+
+
+async def test_user_exceptions_properly_reraised(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+ short_ttl: datetime.timedelta,
+ mocker: MockerFixture,
+):
+ class UserFunctionError(Exception):
+ """Custom exception to ensure we're catching the right exception"""
+
+ work_started = asyncio.Event()
+
+ # Track that auto-renewal is actually happening
+ from servicelib.redis._semaphore import DistributedSemaphore
+
+ spied_renew_fct = mocker.spy(DistributedSemaphore, "reacquire")
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=short_ttl, # Short TTL to ensure renewal happens
+ )
+ async def failing_function():
+ work_started.set()
+ # Wait long enough for at least one renewal to happen
+ await asyncio.sleep(short_ttl.total_seconds() * 0.8)
+ # Then raise our custom exception
+ raise UserFunctionError("User function failed intentionally")
+
+ # Verify the exception is properly re-raised
+ with pytest.raises(UserFunctionError, match="User function failed intentionally"):
+ await failing_function()
+
+ # Ensure work actually started
+ assert work_started.is_set()
+
+ # Verify auto-renewal was working (at least one renewal should have happened)
+ assert (
+ spied_renew_fct.call_count >= 1
+ ), "Auto-renewal should have been called at least once"
+
+ # Verify semaphore was properly released by trying to acquire it again
+ test_semaphore = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=short_ttl,
+ )
+ assert (
+ await test_semaphore.current_count() == 0
+ ), "Semaphore should be released after exception"
+
+
+async def test_cancelled_error_preserved(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+):
+ """Test that CancelledError is properly preserved through the decorator"""
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ )
+ async def function_raising_cancelled_error():
+ raise asyncio.CancelledError
+
+ # Verify CancelledError is preserved
+ with pytest.raises(asyncio.CancelledError):
+ await function_raising_cancelled_error()
+
+
+@pytest.mark.heavy_load
+async def test_with_large_capacity(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ large_capacity = 100
+ concurrent_count = 0
+ max_concurrent = 0
+ sleep_time_s = 10
+ num_tasks = 500
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=large_capacity,
+ blocking=True,
+ blocking_timeout=None,
+ )
+ async def limited_function(task_id: int) -> None:
+ nonlocal concurrent_count, max_concurrent
+ concurrent_count += 1
+ max_concurrent = max(max_concurrent, concurrent_count)
+ with log_context(logging.INFO, f"{task_id=}") as ctx:
+ ctx.logger.info("started %s with %s", task_id, concurrent_count)
+ await asyncio.sleep(sleep_time_s)
+ ctx.logger.info("done %s with %s", task_id, concurrent_count)
+ concurrent_count -= 1
+
+    # Start many more tasks than the large capacity
+ tasks = [asyncio.create_task(limited_function(i)) for i in range(num_tasks)]
+ done, pending = await asyncio.wait(
+ tasks,
+ timeout=float(num_tasks) / float(large_capacity) * 10.0 * float(sleep_time_s),
+ )
+ assert not pending, f"Some tasks did not complete: {len(pending)} pending"
+ assert len(done) == num_tasks
+
+ # Should never exceed the large capacity
+ assert max_concurrent <= large_capacity
+
+
+async def test_long_locking_logs_warning(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ caplog: pytest.LogCaptureFixture,
+):
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ blocking=True,
+ blocking_timeout=None,
+ expected_lock_overall_time=datetime.timedelta(milliseconds=200),
+ )
+ async def limited_function() -> None:
+ with log_context(logging.INFO, "task"):
+ await asyncio.sleep(0.4)
+
+ with caplog.at_level(logging.WARNING):
+ await limited_function()
+ assert caplog.records
+ assert "longer than expected" in caplog.messages[-1]
+
+
+async def test_semaphore_fair_queuing(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ entered_order: list[int] = []
+
+ @with_limited_concurrency(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ )
+ async def limited_function(call_id: int):
+ entered_order.append(call_id)
+ await asyncio.sleep(0.2)
+ return call_id
+
+ # Launch tasks in a specific order
+ num_tasks = 10
+ tasks = []
+ for i in range(num_tasks):
+ tasks.append(asyncio.create_task(limited_function(i)))
+ await asyncio.sleep(0.1) # Small delay to help preserve order
+ results = await asyncio.gather(*tasks)
+
+ # All should complete successfully and in order
+ assert results == list(range(num_tasks))
+ # The order in which they entered the critical section should match the order of submission
+ assert entered_order == list(
+ range(num_tasks)
+ ), f"Expected fair queuing, got {entered_order}"
+
+
+async def test_context_manager_basic_functionality(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ concurrent_count = 0
+ max_concurrent = 0
+
+ @with_limited_concurrency_cm(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=2,
+ blocking_timeout=None,
+ )
+ @asynccontextmanager
+ async def limited_context_manager():
+ nonlocal concurrent_count, max_concurrent
+ concurrent_count += 1
+ max_concurrent = max(max_concurrent, concurrent_count)
+ try:
+ yield
+ await asyncio.sleep(0.1)
+ finally:
+ concurrent_count -= 1
+
+ async def use_context_manager() -> int:
+ async with limited_context_manager():
+ await asyncio.sleep(0.1)
+ return 1
+
+ # Start concurrent context managers
+ tasks = [asyncio.create_task(use_context_manager()) for _ in range(20)]
+ results = await asyncio.gather(*tasks)
+ # All should complete successfully
+ assert len(results) == 20
+ assert all(isinstance(r, int) for r in results)
+
+ # Should never exceed capacity of 2
+ assert max_concurrent <= 2
+
+
+async def test_context_manager_exception_handling(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ @with_limited_concurrency_cm(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ )
+ @asynccontextmanager
+ async def failing_context_manager():
+ yield
+ raise RuntimeError("Test exception")
+
+ with pytest.raises(RuntimeError, match="Test exception"):
+ async with failing_context_manager():
+ pass
+
+ # Semaphore should be released even after exception
+
+ @with_limited_concurrency_cm(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ )
+ @asynccontextmanager
+ async def success_context_manager():
+ yield "success"
+
+ async with success_context_manager() as result:
+ assert result == "success"
+
+
+async def test_context_manager_auto_renewal(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+ short_ttl: datetime.timedelta,
+):
+ work_started = asyncio.Event()
+ work_completed = asyncio.Event()
+
+ @with_limited_concurrency_cm(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=short_ttl,
+ )
+ @asynccontextmanager
+ async def long_running_context_manager():
+ work_started.set()
+ yield "data"
+ # Wait longer than TTL to ensure renewal works
+ await asyncio.sleep(short_ttl.total_seconds() * 2)
+ work_completed.set()
+
+ async def use_long_running_cm():
+ async with long_running_context_manager() as data:
+ assert data == "data"
+ # Keep context manager active for longer than TTL
+ await asyncio.sleep(short_ttl.total_seconds() * 1.5)
+
+ task = asyncio.create_task(use_long_running_cm())
+ await work_started.wait()
+
+ # Check that semaphore is being held
+ temp_semaphore = DistributedSemaphore(
+ redis_client=redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=short_ttl,
+ )
+ assert await temp_semaphore.current_count() == 1
+ assert await temp_semaphore.available_tokens() == semaphore_capacity - 1
+
+ # Wait for work to complete
+ await task
+ assert work_completed.is_set()
+
+ # After completion, semaphore should be released
+ assert await temp_semaphore.current_count() == 0
+ assert await temp_semaphore.available_tokens() == semaphore_capacity
+
+
+async def test_context_manager_with_callable_parameters(
+ redis_client_sdk: RedisClientSDK,
+):
+ executed_keys = []
+
+ def get_redis_client(*args, **kwargs):
+ return redis_client_sdk
+
+ def get_key(user_id: str, resource: str) -> str:
+ return f"{user_id}-{resource}"
+
+ def get_capacity(user_id: str, resource: str) -> int:
+ return 2
+
+ @with_limited_concurrency_cm(
+ get_redis_client,
+ key=get_key,
+ capacity=get_capacity,
+ )
+ @asynccontextmanager
+ async def process_user_resource_cm(user_id: str, resource: str):
+ executed_keys.append(f"{user_id}-{resource}")
+ yield f"processed-{user_id}-{resource}"
+ await asyncio.sleep(0.05)
+
+ async def use_cm(user_id: str, resource: str):
+ async with process_user_resource_cm(user_id, resource) as result:
+ return result
+
+ # Test with different parameters
+ results = await asyncio.gather(
+ use_cm("user1", "wallet1"),
+ use_cm("user1", "wallet2"),
+ use_cm("user2", "wallet1"),
+ )
+
+ assert len(executed_keys) == 3
+ assert "user1-wallet1" in executed_keys
+ assert "user1-wallet2" in executed_keys
+ assert "user2-wallet1" in executed_keys
+
+ assert len(results) == 3
+ assert "processed-user1-wallet1" in results
+ assert "processed-user1-wallet2" in results
+ assert "processed-user2-wallet1" in results
+
+
+async def test_context_manager_non_blocking_behavior(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+):
+ started_event = asyncio.Event()
+
+ @with_limited_concurrency_cm(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ blocking=True,
+ blocking_timeout=datetime.timedelta(seconds=0.1),
+ )
+ @asynccontextmanager
+ async def limited_context_manager():
+ started_event.set()
+ yield
+ await asyncio.sleep(2)
+
+ # Start first context manager that will hold the semaphore
+ async def long_running_cm():
+ async with limited_context_manager():
+ await asyncio.sleep(2)
+
+ task1 = asyncio.create_task(long_running_cm())
+ await started_event.wait() # Wait until semaphore is actually acquired
+
+ # Second context manager should timeout and raise an exception
+
+ @with_limited_concurrency_cm(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=1,
+ blocking=True,
+ blocking_timeout=datetime.timedelta(seconds=0.1),
+ )
+ @asynccontextmanager
+ async def timeout_context_manager():
+ yield
+
+ with pytest.raises(SemaphoreAcquisitionError):
+ async with timeout_context_manager():
+ pass
+
+ await task1
+
+
+async def test_context_manager_lose_semaphore_raises(
+ redis_client_sdk: RedisClientSDK,
+ semaphore_name: str,
+ semaphore_capacity: int,
+ short_ttl: datetime.timedelta,
+):
+ work_started = asyncio.Event()
+
+ @with_limited_concurrency_cm(
+ redis_client_sdk,
+ key=semaphore_name,
+ capacity=semaphore_capacity,
+ ttl=short_ttl,
+ )
+ @asynccontextmanager
+ async def context_manager_that_should_fail():
+ yield "data"
+
+ async def use_failing_cm() -> None:
+ async with context_manager_that_should_fail() as data:
+ assert data == "data"
+ work_started.set()
+ # Wait long enough for renewal to be attempted multiple times
+ await asyncio.sleep(short_ttl.total_seconds() * 100)
+
+ task = asyncio.create_task(use_failing_cm())
+ await work_started.wait()
+
+ # Wait for the first renewal interval to pass
+ renewal_interval = short_ttl / 3
+ await asyncio.sleep(renewal_interval.total_seconds() + 1.5)
+
+ # Find and delete all holder keys for this semaphore
+ holder_keys = await redis_client_sdk.redis.keys(
+ f"{SEMAPHORE_KEY_PREFIX}{semaphore_name}_cap{semaphore_capacity}:holders:*"
+ )
+ assert holder_keys, "Holder keys should exist before deletion"
+ await redis_client_sdk.redis.delete(*holder_keys)
+
+ # wait another renewal interval to ensure the renewal fails
+ await asyncio.sleep(renewal_interval.total_seconds() * 1.5)
+
+ async with asyncio.timeout(renewal_interval.total_seconds()):
+ with pytest.raises(SemaphoreLostError):
+ await task
diff --git a/packages/service-library/tests/test_async_utils.py b/packages/service-library/tests/test_async_utils.py
index 9bb1b4fff45a..e7164417fc6f 100644
--- a/packages/service-library/tests/test_async_utils.py
+++ b/packages/service-library/tests/test_async_utils.py
@@ -7,7 +7,6 @@
import random
from collections import deque
from dataclasses import dataclass
-from datetime import timedelta
from time import time
from typing import Any
@@ -15,7 +14,6 @@
from faker import Faker
from servicelib.async_utils import (
_sequential_jobs_contexts,
- delayed_start,
run_sequentially_in_context,
)
@@ -225,20 +223,3 @@ async def test_multiple_context_calls(context_param: int) -> int:
assert i == await test_multiple_context_calls(i)
assert len(_sequential_jobs_contexts) == RETRIES
-
-
-async def test_with_delay():
- @delayed_start(timedelta(seconds=0.2))
- async def decorated_awaitable() -> int:
- return 42
-
- assert await decorated_awaitable() == 42
-
- async def another_awaitable() -> int:
- return 42
-
- decorated_another_awaitable = delayed_start(timedelta(seconds=0.2))(
- another_awaitable
- )
-
- assert await decorated_another_awaitable() == 42
diff --git a/packages/service-library/tests/test_background_task.py b/packages/service-library/tests/test_background_task.py
index 8c508bf8979c..9a33ed6a62c1 100644
--- a/packages/service-library/tests/test_background_task.py
+++ b/packages/service-library/tests/test_background_task.py
@@ -13,9 +13,9 @@
from unittest.mock import AsyncMock
import pytest
+from common_library.async_tools import cancel_wait_task
from faker import Faker
from pytest_mock.plugin import MockerFixture
-from servicelib.async_utils import cancel_wait_task
from servicelib.background_task import create_periodic_task, periodic, periodic_task
pytest_simcore_core_services_selection = [
@@ -26,8 +26,8 @@
]
-_FAST_POLL_INTERVAL: Final[int] = 1
-_VERY_SLOW_POLL_INTERVAL: Final[int] = 100
+_FAST_POLL_INTERVAL: Final[float] = 0.01
+_VERY_SLOW_POLL_INTERVAL: Final[float] = 1
@pytest.fixture
@@ -207,12 +207,16 @@ async def _func() -> None:
assert mock_func.call_count > 1
+class CustomError(Exception):
+ pass
+
+
async def test_periodic_task_logs_error(
mock_background_task: mock.AsyncMock,
task_interval: datetime.timedelta,
caplog: pytest.LogCaptureFixture,
):
- mock_background_task.side_effect = RuntimeError("Test error")
+ mock_background_task.side_effect = CustomError("Test error")
with caplog.at_level(logging.ERROR):
async with periodic_task(
diff --git a/packages/service-library/tests/test_background_task_utils.py b/packages/service-library/tests/test_background_task_utils.py
index 9a03a6c35410..7307a8b7b89a 100644
--- a/packages/service-library/tests/test_background_task_utils.py
+++ b/packages/service-library/tests/test_background_task_utils.py
@@ -13,7 +13,7 @@
import arrow
import pytest
-from servicelib.async_utils import cancel_wait_task
+from common_library.async_tools import cancel_wait_task
from servicelib.background_task_utils import exclusive_periodic
from servicelib.redis import RedisClientSDK
from settings_library.redis import RedisDatabase
@@ -24,13 +24,6 @@
wait_fixed,
)
-pytest_simcore_core_services_selection = [
- "redis",
-]
-pytest_simcore_ops_services_selection = [
- "redis-commander",
-]
-
@pytest.fixture
async def redis_client_sdk(
diff --git a/packages/service-library/tests/test_celery.py b/packages/service-library/tests/test_celery.py
new file mode 100644
index 000000000000..670805d1a2ef
--- /dev/null
+++ b/packages/service-library/tests/test_celery.py
@@ -0,0 +1,150 @@
+from types import NoneType
+from typing import Annotated
+
+# pylint: disable=redefined-outer-name
+# pylint: disable=protected-access
+import pydantic
+import pytest
+from common_library.json_serialization import json_dumps
+from faker import Faker
+from pydantic import StringConstraints
+from servicelib.celery.models import (
+ OwnerMetadata,
+ TaskUUID,
+ Wildcard,
+)
+
+_faker = Faker()
+
+
+class _TestOwnerMetadata(OwnerMetadata):
+ string_: str
+ int_: int
+ bool_: bool
+ none_: None
+ uuid_: str
+
+
+@pytest.fixture
+def test_owner_metadata() -> dict[str, str | int | bool | None | list[str]]:
+ data = {
+ "string_": _faker.word(),
+ "int_": _faker.random_int(),
+ "bool_": _faker.boolean(),
+ "none_": None,
+ "uuid_": _faker.uuid4(),
+ "owner": _faker.word().lower(),
+ }
+ _TestOwnerMetadata.model_validate(data) # ensure it's valid
+ return data
+
+
+async def test_task_filter_serialization(
+ test_owner_metadata: dict[str, str | int | bool | None | list[str]],
+):
+ task_filter = _TestOwnerMetadata.model_validate(test_owner_metadata)
+ assert task_filter.model_dump() == test_owner_metadata
+
+
+async def test_task_filter_sorting_key_not_serialized():
+
+ class _OwnerMetadata(OwnerMetadata):
+ a: int | Wildcard
+ b: str | Wildcard
+
+ owner_metadata = _OwnerMetadata.model_validate(
+ {"a": _faker.random_int(), "b": _faker.word(), "owner": _faker.word().lower()}
+ )
+ task_uuid = TaskUUID(_faker.uuid4())
+ copy_owner_metadata = owner_metadata.model_dump()
+ copy_owner_metadata.update({"task_uuid": f"{task_uuid}"})
+
+ expected_key = ":".join(
+ [f"{k}={json_dumps(v)}" for k, v in sorted(copy_owner_metadata.items())]
+ )
+ assert owner_metadata.model_dump_task_id(task_uuid=task_uuid) == expected_key
+
+
+async def test_task_filter_task_uuid(
+ test_owner_metadata: dict[str, str | int | bool | None | list[str]],
+):
+ task_filter = _TestOwnerMetadata.model_validate(test_owner_metadata)
+ task_uuid = TaskUUID(_faker.uuid4())
+ task_id = task_filter.model_dump_task_id(task_uuid)
+ assert OwnerMetadata.get_task_uuid(task_id=task_id) == task_uuid
+
+
+async def test_owner_metadata_task_id_dump_and_validate():
+
+ class MyModel(OwnerMetadata):
+ int_: int
+ bool_: bool
+ str_: str
+ float_: float
+ none_: NoneType
+ list_s: list[str]
+ list_i: list[int]
+ list_f: list[float]
+ list_b: list[bool]
+
+ mymodel = MyModel(
+ int_=1,
+ none_=None,
+ bool_=True,
+ str_="test",
+ float_=1.0,
+ owner="myowner",
+ list_b=[True, False],
+ list_f=[1.0, 2.0],
+ list_i=[1, 2],
+ list_s=["a", "b"],
+ )
+ task_uuid = TaskUUID(_faker.uuid4())
+ task_id = mymodel.model_dump_task_id(task_uuid)
+ mymodel_recreated = MyModel.model_validate_task_id(task_id=task_id)
+ assert mymodel_recreated == mymodel
+
+
+@pytest.mark.parametrize(
+ "bad_data",
+ [
+ {"foo": "bar:baz"},
+ {"foo": "bar=baz"},
+ {"foo:bad": "bar"},
+ {"foo=bad": "bar"},
+ {"foo": ":baz"},
+ {"foo": "=baz"},
+ ],
+)
+def test_task_filter_validator_raises_on_forbidden_chars(bad_data):
+ with pytest.raises(pydantic.ValidationError):
+ OwnerMetadata.model_validate(bad_data)
+
+
+async def test_task_owner():
+ class MyOwnerMetadata(OwnerMetadata):
+ extra_field: str
+
+ with pytest.raises(pydantic.ValidationError):
+ MyOwnerMetadata(owner="", extra_field="value")
+
+ with pytest.raises(pydantic.ValidationError):
+ MyOwnerMetadata(owner="UPPER_CASE", extra_field="value")
+
+ class MyNextFilter(OwnerMetadata):
+ owner: Annotated[
+ str, StringConstraints(strip_whitespace=True, pattern=r"^the_task_owner$")
+ ]
+
+ with pytest.raises(pydantic.ValidationError):
+ MyNextFilter(owner="wrong_owner")
+
+
+def test_owner_metadata_serialize_deserialize(test_owner_metadata):
+ test_owner_metadata = _TestOwnerMetadata.model_validate(test_owner_metadata)
+ data = test_owner_metadata.model_dump()
+ deserialized_data = OwnerMetadata.model_validate(data)
+ assert len(_TestOwnerMetadata.model_fields) > len(
+ OwnerMetadata.model_fields
+ ) # ensure extra data is available in _TestOwnerMetadata -> needed for RPC
+ assert deserialized_data.model_dump() == data
diff --git a/services/dynamic-sidecar/tests/unit/test_modules_container_utils.py b/packages/service-library/tests/test_container_utils.py
similarity index 87%
rename from services/dynamic-sidecar/tests/unit/test_modules_container_utils.py
rename to packages/service-library/tests/test_container_utils.py
index a8b84f7235c1..075645e5009d 100644
--- a/services/dynamic-sidecar/tests/unit/test_modules_container_utils.py
+++ b/packages/service-library/tests/test_container_utils.py
@@ -1,11 +1,10 @@
# pylint: disable=redefined-outer-name
-import contextlib
from collections.abc import AsyncIterable
import aiodocker
import pytest
-from simcore_service_dynamic_sidecar.modules.container_utils import (
+from servicelib.container_utils import (
ContainerExecCommandFailedError,
ContainerExecContainerNotFoundError,
ContainerExecTimeoutError,
@@ -26,9 +25,7 @@ async def running_container_name() -> AsyncIterable[str]:
yield container_inspect["Name"][1:]
- with contextlib.suppress(aiodocker.DockerError):
- await container.kill()
- await container.delete()
+ await container.delete(force=True)
async def test_run_command_in_container_container_not_found():
diff --git a/packages/service-library/tests/test_exception_utils.py b/packages/service-library/tests/test_exception_utils.py
index a884d3dafb19..040022b64eb6 100644
--- a/packages/service-library/tests/test_exception_utils.py
+++ b/packages/service-library/tests/test_exception_utils.py
@@ -4,7 +4,7 @@
import pytest
from pydantic import PositiveFloat, PositiveInt
-from servicelib.exception_utils import DelayedExceptionHandler, silence_exceptions
+from servicelib.exception_utils import DelayedExceptionHandler, suppress_exceptions
TOLERANCE: Final[PositiveFloat] = 0.1
SLEEP_FOR: Final[PositiveFloat] = TOLERANCE * 0.1
@@ -53,14 +53,17 @@ def test_workflow_raises() -> None:
# Define some custom exceptions for testing
class CustomError(Exception):
- pass
+ def __init__(self, code: int = 0, message: str = ""):
+ self.code = code
+ self.message = message
+ super().__init__(message)
class AnotherCustomError(Exception):
pass
-@silence_exceptions((CustomError,))
+@suppress_exceptions((CustomError,), reason="CustomError is silenced")
def sync_function(*, raise_error: bool, raise_another_error: bool) -> str:
if raise_error:
raise CustomError
@@ -69,7 +72,7 @@ def sync_function(*, raise_error: bool, raise_another_error: bool) -> str:
return "Success"
-@silence_exceptions((CustomError,))
+@suppress_exceptions((CustomError,), reason="CustomError is silenced")
async def async_function(*, raise_error: bool, raise_another_error: bool) -> str:
if raise_error:
raise CustomError
@@ -78,6 +81,29 @@ async def async_function(*, raise_error: bool, raise_another_error: bool) -> str
return "Success"
+# Test functions with predicate
+@suppress_exceptions(
+ (CustomError,),
+ reason="Only suppress CustomError with code >= 100",
+ predicate=lambda e: hasattr(e, "code") and e.code >= 100,
+)
+def sync_function_with_predicate(error_code: int = 0) -> str:
+ if error_code > 0:
+ raise CustomError(code=error_code, message=f"Error {error_code}")
+ return "Success"
+
+
+@suppress_exceptions(
+ (CustomError,),
+ reason="Only suppress CustomError with code >= 100",
+ predicate=lambda e: hasattr(e, "code") and e.code >= 100,
+)
+async def async_function_with_predicate(error_code: int = 0) -> str:
+ if error_code > 0:
+ raise CustomError(code=error_code, message=f"Error {error_code}")
+ return "Success"
+
+
def test_sync_function_no_exception():
result = sync_function(raise_error=False, raise_another_error=False)
assert result == "Success"
@@ -106,3 +132,144 @@ def test_sync_function_with_different_exception():
async def test_async_function_with_different_exception():
with pytest.raises(AnotherCustomError):
await async_function(raise_error=False, raise_another_error=True)
+
+
+def test_sync_function_predicate_suppresses_matching_exception():
+ result = sync_function_with_predicate(
+ error_code=150
+ ) # code >= 100, should be suppressed
+ assert result is None
+
+
+def test_sync_function_predicate_raises_non_matching_exception():
+ with pytest.raises(CustomError):
+ sync_function_with_predicate(error_code=50) # code < 100, should be raised
+
+
+def test_sync_function_predicate_no_exception():
+ result = sync_function_with_predicate(error_code=0)
+ assert result == "Success"
+
+
+async def test_async_function_predicate_suppresses_matching_exception():
+ result = await async_function_with_predicate(
+ error_code=200
+ ) # code >= 100, should be suppressed
+ assert result is None
+
+
+async def test_async_function_predicate_raises_non_matching_exception():
+ with pytest.raises(CustomError):
+ await async_function_with_predicate(
+ error_code=25
+ ) # code < 100, should be raised
+
+
+async def test_async_function_predicate_no_exception():
+ result = await async_function_with_predicate(error_code=0)
+ assert result == "Success"
+
+
+@suppress_exceptions(
+ (ValueError, TypeError),
+ reason="Complex predicate test",
+ predicate=lambda e: "suppress" in str(e).lower(),
+)
+def function_with_complex_predicate(message: str) -> str:
+ if "value" in message:
+ raise ValueError(message)
+ if "type" in message:
+ raise TypeError(message)
+ return "Success"
+
+
+def test_complex_predicate_suppresses_matching():
+ result = function_with_complex_predicate("please suppress this value error")
+ assert result is None
+
+
+def test_complex_predicate_raises_non_matching():
+ with pytest.raises(ValueError, match="value error without keyword"):
+ function_with_complex_predicate("value error without keyword")
+
+
+def test_complex_predicate_different_exception_type():
+ result = function_with_complex_predicate("type error with suppress keyword")
+ assert result is None
+
+
+# Test predicate exception handling
+@suppress_exceptions(
+ (ValueError,),
+ reason="Predicate that raises exception",
+ predicate=lambda _: bool(1 / 0), # This will raise ZeroDivisionError
+)
+def function_with_failing_predicate() -> str:
+ msg = "Original error"
+ raise ValueError(msg)
+
+
+@suppress_exceptions(
+ (ValueError,),
+ reason="Predicate that raises exception",
+ predicate=lambda _: bool(1 / 0), # This will raise ZeroDivisionError
+)
+async def async_function_with_failing_predicate() -> str:
+ msg = "Original error"
+ raise ValueError(msg)
+
+
+def test_sync_function_predicate_exception_reraised(caplog):
+ with pytest.raises(ValueError, match="Original error"):
+ function_with_failing_predicate()
+
+ # Check that warning was logged
+ assert "Predicate function raised exception" in caplog.text
+ assert "ZeroDivisionError" in caplog.text
+
+
+async def test_async_function_predicate_exception_reraised(caplog):
+ with pytest.raises(ValueError, match="Original error"):
+ await async_function_with_failing_predicate()
+
+ # Check that warning was logged
+ assert "Predicate function raised exception" in caplog.text
+ assert "ZeroDivisionError" in caplog.text
+
+
+@suppress_exceptions(
+ (ValueError,),
+ reason="Predicate that accesses invalid attribute",
+ predicate=lambda e: e.nonexistent_attribute == "test",
+)
+def function_with_attribute_error_predicate() -> str:
+ msg = "Original error"
+ raise ValueError(msg)
+
+
+def test_predicate_attribute_error_reraised(caplog):
+ with pytest.raises(ValueError, match="Original error"):
+ function_with_attribute_error_predicate()
+
+ # Check that warning was logged about predicate failure
+ assert "Predicate function raised exception" in caplog.text
+ assert "AttributeError" in caplog.text
+
+
+@suppress_exceptions(
+ (ValueError,),
+ reason="Predicate that sometimes works",
+ predicate=lambda e: len(str(e)) > 5, # Safe predicate
+)
+def function_with_working_predicate(message: str) -> str:
+ raise ValueError(message)
+
+
+def test_predicate_works_normally():
+ # Short message - predicate returns False, exception re-raised
+ with pytest.raises(ValueError):
+ function_with_working_predicate("Hi")
+
+ # Long message - predicate returns True, exception suppressed
+ result = function_with_working_predicate("This is a long error message")
+ assert result is None
diff --git a/packages/service-library/tests/test_logging_utils.py b/packages/service-library/tests/test_logging_utils.py
index d56e07962f23..63f0d69c7883 100644
--- a/packages/service-library/tests/test_logging_utils.py
+++ b/packages/service-library/tests/test_logging_utils.py
@@ -1,26 +1,52 @@
# pylint:disable=redefined-outer-name
# pylint:disable=unused-argument
+import io
import logging
+import re
from collections.abc import Iterable
from contextlib import suppress
from pathlib import Path
from typing import Any
import pytest
+from common_library.logging.logging_base import get_log_record_extra
from faker import Faker
from servicelib.logging_utils import (
+ _DEFAULT_FORMATTING,
+ CustomFormatter,
LogExtra,
LogLevelInt,
LogMessageStr,
+ async_loggers,
guess_message_log_level,
log_context,
log_decorator,
log_exceptions,
set_parent_module_log_level,
)
+from tenacity import (
+ retry,
+ retry_if_exception_type,
+ stop_after_delay,
+ wait_fixed,
+)
_logger = logging.getLogger(__name__)
+
+
+@retry(
+ wait=wait_fixed(0.01),
+ stop=stop_after_delay(2.0),
+ reraise=True,
+ retry=retry_if_exception_type(AssertionError),
+)
+def _assert_check_log_message(
+ caplog: pytest.LogCaptureFixture, expected_message: str
+) -> None:
+ assert expected_message in caplog.text
+
+
_ALL_LOGGING_LEVELS = [
logging.CRITICAL,
logging.ERROR,
@@ -325,8 +351,9 @@ def test_log_exceptions_and_suppress_without_exc_info(
caplog.set_level(level)
exc_msg = "logs exceptions and suppresses"
- with suppress(ValueError), log_exceptions(
- _logger, level, "CONTEXT", exc_info=False
+ with (
+ suppress(ValueError),
+ log_exceptions(_logger, level, "CONTEXT", exc_info=False),
):
raise ValueError(exc_msg)
@@ -410,3 +437,315 @@ def test_set_parent_module_log_level_(caplog: pytest.LogCaptureFixture):
assert "parent warning" in caplog.text
assert "child warning" in caplog.text
+
+
+@pytest.mark.parametrize("log_format_local_dev_enabled", [True, False])
+def test_setup_async_loggers_basic(
+ caplog: pytest.LogCaptureFixture,
+ log_format_local_dev_enabled: bool,
+):
+ """Test basic async logging setup without filters."""
+ caplog.clear()
+ caplog.set_level(logging.INFO)
+
+ with async_loggers(
+ log_format_local_dev_enabled=log_format_local_dev_enabled,
+ logger_filter_mapping={}, # No filters for this test
+ tracing_settings=None, # No tracing for this test
+ log_base_level=logging.INFO, # Set base log level
+ noisy_loggers=(), # No noisy loggers for this test
+ ):
+ test_logger = logging.getLogger("test_async_logger")
+ test_logger.info("Test async log message")
+
+ _assert_check_log_message(caplog, "Test async log message")
+
+
+def test_setup_async_loggers_with_filters(
+ caplog: pytest.LogCaptureFixture,
+):
+ caplog.clear()
+ caplog.set_level(logging.INFO)
+
+ # Define filter mapping
+ filter_mapping = {
+ "test_filtered_logger": ["filtered_message"],
+ }
+
+ with async_loggers(
+ log_format_local_dev_enabled=True,
+ logger_filter_mapping=filter_mapping,
+ tracing_settings=None, # No tracing for this test
+ log_base_level=logging.INFO, # Set base log level
+ noisy_loggers=(), # No noisy loggers for this test
+ ):
+ test_logger = logging.getLogger("test_filtered_logger")
+ unfiltered_logger = logging.getLogger("test_unfiltered_logger")
+
+ # This should be filtered out
+ test_logger.info("This is a filtered_message")
+
+ # This should pass through
+ test_logger.info("This is an unfiltered message")
+ unfiltered_logger.info("This is from unfiltered logger")
+
+ _assert_check_log_message(caplog, "This is an unfiltered message")
+ _assert_check_log_message(caplog, "This is from unfiltered logger")
+
+ # Check that filtered message was not captured
+ assert "This is a filtered_message" not in caplog.text
+
+ # Check that unfiltered messages were captured
+ assert "This is an unfiltered message" in caplog.text
+ assert "This is from unfiltered logger" in caplog.text
+
+
+def test_setup_async_loggers_with_tracing_settings(
+ caplog: pytest.LogCaptureFixture,
+):
+ """Test async logging setup with tracing settings."""
+ caplog.clear()
+ caplog.set_level(logging.INFO)
+
+ # Note: We can't easily test actual tracing without setting up OpenTelemetry
+ # But we can test that the function accepts the parameter
+ with async_loggers(
+ log_format_local_dev_enabled=False,
+ logger_filter_mapping={}, # No filters for this test
+ tracing_settings=None,
+ log_base_level=logging.INFO, # Set base log level
+ noisy_loggers=(), # No noisy loggers for this test
+ ):
+ test_logger = logging.getLogger("test_tracing_logger")
+ test_logger.info("Test message with tracing settings")
+
+ _assert_check_log_message(caplog, "Test message with tracing settings")
+
+
+def test_setup_async_loggers_context_manager_cleanup(
+ caplog: pytest.LogCaptureFixture,
+):
+ """Test that async logging context manager properly cleans up."""
+ caplog.clear()
+ caplog.set_level(logging.DEBUG)
+
+ test_logger = logging.getLogger("test_cleanup_logger")
+
+ with async_loggers(
+ log_format_local_dev_enabled=True,
+ logger_filter_mapping={},
+ tracing_settings=None,
+ log_base_level=logging.INFO, # Set base log level
+ noisy_loggers=(), # No noisy loggers for this test
+ ):
+ # During the context, handlers should be replaced
+ test_logger.info("Message during context")
+
+ _assert_check_log_message(caplog, "Message during context")
+
+
+def test_setup_async_loggers_exception_handling(
+ caplog: pytest.LogCaptureFixture,
+):
+ """Test that async logging handles exceptions gracefully."""
+ caplog.clear()
+ caplog.set_level(logging.DEBUG) # Set to DEBUG to capture cleanup messages
+
+ def _raise_test_exception():
+ """Helper function to raise exception for testing."""
+ exc_msg = "Test exception"
+ raise ValueError(exc_msg)
+
+ try:
+ with async_loggers(
+ log_format_local_dev_enabled=True,
+ logger_filter_mapping={},
+ tracing_settings=None,
+ log_base_level=logging.INFO, # Set base log level
+ noisy_loggers=(), # No noisy loggers for this test
+ ):
+ test_logger = logging.getLogger("test_exception_logger")
+ test_logger.info("Message before exception")
+
+ _assert_check_log_message(caplog, "Message before exception")
+
+ # Raise an exception to test cleanup
+ _raise_test_exception()
+
+ except ValueError:
+ # Expected exception
+ pass
+
+ # Check that the message was logged and cleanup happened
+ assert "Message before exception" in caplog.text
+
+
+def _create_grok_regex_pattern() -> str:
+ """Convert Grok pattern to regex for testing."""
+ # The Grok pattern from the comment:
+ # log_level=%{WORD:log_level} \| log_timestamp=%{TIMESTAMP_ISO8601:log_timestamp} \| log_source=%{NOTSPACE:log_source} \| log_uid=%{NOTSPACE:log_uid} \| log_oec=%{NOTSPACE:log_oec} \| log_trace_id=%{NOTSPACE:log_trace_id} \| log_span_id=%{NOTSPACE:log_span_id} \| log_msg=%{GREEDYDATA:log_msg}
+
+ grok_to_regex = {
+ r"%{WORD:log_level}": r"(?P\w+)",
+ r"%{TIMESTAMP_ISO8601:log_timestamp}": r"(?P\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})",
+ r"%{NOTSPACE:log_source}": r"(?P\S+)",
+ r"%{NOTSPACE:log_uid}": r"(?P\S+)",
+ r"%{NOTSPACE:log_oec}": r"(?P\S+)",
+ r"%{NOTSPACE:log_trace_id}": r"(?P\S+)",
+ r"%{NOTSPACE:log_span_id}": r"(?P\S+)",
+ r"%{GREEDYDATA:log_msg}": r"(?P.*)",
+ }
+
+ grok_pattern = r"log_level=%{WORD:log_level} \| log_timestamp=%{TIMESTAMP_ISO8601:log_timestamp} \| log_source=%{NOTSPACE:log_source} \| log_uid=%{NOTSPACE:log_uid} \| log_oec=%{NOTSPACE:log_oec} \| log_trace_id=%{NOTSPACE:log_trace_id} \| log_span_id=%{NOTSPACE:log_span_id} \| log_msg=%{GREEDYDATA:log_msg}"
+
+ # Convert to regex
+ regex_pattern = grok_pattern
+ for grok, regex in grok_to_regex.items():
+ regex_pattern = regex_pattern.replace(grok, regex)
+
+ return regex_pattern
+
+
+def _create_test_log_record(
+ name: str,
+ level: int,
+ func_name: str,
+ lineno: int,
+ message: str,
+ *,
+ user_id: int | str | None = None,
+ error_code: str | None = None,
+ trace_id: str | None = None,
+ span_id: str | None = None,
+) -> logging.LogRecord:
+ """Create a test LogRecord with optional extra fields."""
+
+ record = logging.LogRecord(
+ name=name,
+ level=level,
+ pathname="/path/to/file.py",
+ lineno=lineno,
+ msg=message,
+ args=(),
+ exc_info=None,
+ func=func_name,
+ )
+
+ # Add extra fields if provided
+ extra = get_log_record_extra(user_id=user_id, error_code=error_code)
+ if extra:
+ for key, value in extra.items():
+ setattr(record, key, value)
+
+ # Add OpenTelemetry trace ID
+ record.otelTraceID = trace_id # type: ignore[attr-defined]
+ record.otelSpanID = span_id # type: ignore[attr-defined]
+
+ return record
+
+
+def test_grok_pattern_parsing(caplog: pytest.LogCaptureFixture) -> None:
+ """
+ Test that the Graylog Grok pattern correctly parses logs formatted with _DEFAULT_FORMATTING.
+
+ This test validates that the Grok pattern defined in the comment can correctly
+ parse logs formatted with _DEFAULT_FORMATTING.
+
+ WARNING: If log formatting changes, the Grok pattern in Graylog must be updated accordingly.
+ """
+
+ # Create a custom handler with the default formatter
+ log_stream = io.StringIO()
+ handler = logging.StreamHandler(log_stream)
+ formatter = CustomFormatter(_DEFAULT_FORMATTING, log_format_local_dev_enabled=False)
+ handler.setFormatter(formatter)
+
+ # Create test log record with all fields populated
+ test_message = (
+ "This is a test log message with special chars: []{} and new line\nembedded"
+ )
+ record = _create_test_log_record(
+ name="test.module.submodule",
+ level=logging.INFO,
+ func_name="test_function",
+ lineno=42,
+ message=test_message,
+ user_id=12345,
+ error_code="OEC001",
+ trace_id="1234567890abcdef1234567890abcdef",
+ span_id="987654321",
+ )
+
+ # Format the record
+ formatted_log = formatter.format(record)
+
+ # Test that the formatted log matches the Grok pattern
+ regex_pattern = _create_grok_regex_pattern()
+ match = re.match(regex_pattern, formatted_log)
+
+ assert (
+ match is not None
+ ), f"Grok pattern did not match formatted log. Log: {formatted_log!r}"
+
+ # Verify extracted fields match expected values
+ groups = match.groupdict()
+
+ assert groups["log_level"] == "INFO"
+ assert groups["log_source"] == "test.module.submodule:test_function(42)"
+ assert groups["log_uid"] == "12345"
+ assert groups["log_oec"] == "OEC001"
+ assert groups["log_trace_id"] == "1234567890abcdef1234567890abcdef"
+ assert groups["log_span_id"] == "987654321"
+
+ # Verify the message is correctly escaped (newlines become \\n)
+ expected_message = test_message.replace("\n", "\\n")
+ assert groups["log_msg"] == expected_message
+
+ # Verify timestamp format is ISO8601-like (as expected by Python logging)
+ timestamp_pattern = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}"
+ assert re.match(timestamp_pattern, groups["log_timestamp"])
+
+
+def test_grok_pattern_parsing_with_none_values(
+ caplog: pytest.LogCaptureFixture,
+) -> None:
+ """
+ Test Grok pattern parsing when optional fields are None.
+
+ WARNING: If log formatting changes, the Grok pattern in Graylog must be updated accordingly.
+ """
+
+ # Create a custom handler with the default formatter
+ handler = logging.StreamHandler(io.StringIO())
+ formatter = CustomFormatter(_DEFAULT_FORMATTING, log_format_local_dev_enabled=False)
+ handler.setFormatter(formatter)
+
+ # Create test log record with None values for optional fields
+ record = _create_test_log_record(
+ name="test.module",
+ level=logging.ERROR,
+ func_name="error_function",
+ lineno=100,
+ message="Error message",
+ user_id=None,
+ error_code=None,
+ trace_id=None,
+ span_id=None,
+ )
+
+ formatted_log = formatter.format(record)
+ regex_pattern = _create_grok_regex_pattern()
+ match = re.match(regex_pattern, formatted_log)
+
+ assert (
+ match is not None
+ ), f"Grok pattern did not match log with None values. Log: {formatted_log!r}"
+
+ groups = match.groupdict()
+ assert groups["log_level"] == "ERROR"
+ assert groups["log_source"] == "test.module:error_function(100)"
+ assert groups["log_uid"] == "None"
+ assert groups["log_oec"] == "None"
+ assert groups["log_trace_id"] == "None"
+ assert groups["log_span_id"] == "None"
+ assert groups["log_msg"] == "Error message"
diff --git a/packages/service-library/tests/test_utils_meta.py b/packages/service-library/tests/test_utils_meta.py
index a6da532bc776..2468943268e1 100644
--- a/packages/service-library/tests/test_utils_meta.py
+++ b/packages/service-library/tests/test_utils_meta.py
@@ -1,7 +1,9 @@
from typing import Final
+import pytest
from models_library.basic_types import VersionStr
from packaging.version import Version
+from pytest_mock import MockerFixture
from servicelib.utils_meta import PackageInfo
@@ -32,3 +34,33 @@ def test_meta_module_implementation():
assert __version__ in APP_FINISHED_BANNER_MSG
assert PROJECT_NAME in APP_FINISHED_BANNER_MSG
+
+
+@pytest.mark.parametrize(
+ "package_name, app_name, is_valid_app_name, is_correct_app_name",
+ [
+ ("simcore-service-library", "simcore-service-library", True, True),
+ ("simcore-service-lib", "simcore-service-library", True, False),
+ ("simcore_service_library", "simcore_service_library", False, True),
+ ],
+)
+def test_app_name(
+ mocker: MockerFixture,
+ package_name: str,
+ app_name: str,
+ is_valid_app_name: bool,
+ is_correct_app_name: bool,
+):
+
+ def mock_distribution(name):
+ return mocker.Mock(metadata={"Name": name, "Version": "1.0.0"})
+
+ mocker.patch("servicelib.utils_meta.distribution", side_effect=mock_distribution)
+ if is_valid_app_name:
+ info = PackageInfo(package_name=package_name)
+ if is_correct_app_name:
+ assert info.app_name == app_name
+ assert info.prometheus_friendly_app_name == app_name.replace("-", "_")
+ else:
+ with pytest.raises(ValueError):
+ _ = PackageInfo(package_name=package_name)
diff --git a/packages/settings-library/requirements/_base.txt b/packages/settings-library/requirements/_base.txt
index bc7e83313349..959a20139d50 100644
--- a/packages/settings-library/requirements/_base.txt
+++ b/packages/settings-library/requirements/_base.txt
@@ -1,6 +1,6 @@
annotated-types==0.7.0
# via pydantic
-click==8.1.8
+click==8.2.1
# via typer
markdown-it-py==3.0.0
# via rich
@@ -11,7 +11,7 @@ orjson==3.10.15
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/common-library/requirements/_base.in
-pydantic==2.10.6
+pydantic==2.11.7
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
@@ -19,9 +19,9 @@ pydantic==2.10.6
# -r requirements/_base.in
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
-pydantic-extra-types==2.10.2
+pydantic-extra-types==2.10.5
# via -r requirements/../../../packages/common-library/requirements/_base.in
pydantic-settings==2.7.0
# via
@@ -32,17 +32,20 @@ pygments==2.19.1
# via rich
python-dotenv==1.0.1
# via pydantic-settings
-rich==13.9.4
+rich==14.1.0
# via
# -r requirements/_base.in
# typer
shellingham==1.5.4
# via typer
-typer==0.15.2
+typer==0.16.1
# via -r requirements/_base.in
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# pydantic
# pydantic-core
# pydantic-extra-types
# typer
+ # typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
diff --git a/packages/settings-library/requirements/_test.txt b/packages/settings-library/requirements/_test.txt
index fb8381375d51..6a2a4716993a 100644
--- a/packages/settings-library/requirements/_test.txt
+++ b/packages/settings-library/requirements/_test.txt
@@ -11,19 +11,25 @@ packaging==24.2
# pytest
# pytest-sugar
pluggy==1.5.0
- # via pytest
-pytest==8.3.5
+ # via
+ # pytest
+ # pytest-cov
+pygments==2.19.1
+ # via
+ # -c requirements/_base.txt
+ # pytest
+pytest==8.4.1
# via
# -r requirements/_test.in
# pytest-cov
# pytest-instafail
# pytest-mock
# pytest-sugar
-pytest-cov==6.0.0
+pytest-cov==6.2.1
# via -r requirements/_test.in
pytest-instafail==0.5.0
# via -r requirements/_test.in
-pytest-mock==3.14.0
+pytest-mock==3.14.1
# via -r requirements/_test.in
pytest-runner==6.0.1
# via -r requirements/_test.in
diff --git a/packages/settings-library/requirements/_tools.txt b/packages/settings-library/requirements/_tools.txt
index 13e0ee77ce64..7fa8b1865ba8 100644
--- a/packages/settings-library/requirements/_tools.txt
+++ b/packages/settings-library/requirements/_tools.txt
@@ -8,7 +8,7 @@ bump2version==1.0.1
# via -r requirements/../../../requirements/devenv.txt
cfgv==3.4.0
# via pre-commit
-click==8.1.8
+click==8.2.1
# via
# -c requirements/_base.txt
# black
@@ -27,9 +27,9 @@ isort==6.0.1
# pylint
mccabe==0.7.0
# via pylint
-mypy==1.15.0
+mypy==1.16.1
# via -r requirements/../../../requirements/devenv.txt
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via
# black
# mypy
@@ -41,7 +41,9 @@ packaging==24.2
# black
# build
pathspec==0.12.1
- # via black
+ # via
+ # black
+ # mypy
pip==25.0.1
# via pip-tools
pip-tools==7.4.1
@@ -65,11 +67,11 @@ pyyaml==6.0.2
# pre-commit
ruff==0.9.9
# via -r requirements/../../../requirements/devenv.txt
-setuptools==75.8.2
+setuptools==80.9.0
# via pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# mypy
diff --git a/packages/settings-library/src/settings_library/base.py b/packages/settings-library/src/settings_library/base.py
index 9ab3119dfc79..8c5a06488ca9 100644
--- a/packages/settings-library/src/settings_library/base.py
+++ b/packages/settings-library/src/settings_library/base.py
@@ -1,6 +1,6 @@
import logging
from functools import cached_property
-from typing import Any, Final, get_origin
+from typing import Any, Final
from common_library.pydantic_fields_extension import get_type, is_literal, is_nullable
from pydantic import ValidationInfo, field_validator
@@ -15,9 +15,9 @@
_logger = logging.getLogger(__name__)
-_AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING: Final[
- str
-] = "{field_name} auto_default_from_env unresolved, defaulting to None"
+_AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING: Final[str] = (
+ "{field_name} auto_default_from_env unresolved, defaulting to None"
+)
class DefaultFromEnvFactoryError(ValueError):
@@ -119,11 +119,14 @@ def _parse_none(cls, v, info: ValidationInfo):
model_config = SettingsConfigDict(
case_sensitive=True, # All must be capitalized
- extra="forbid",
+ env_parse_none_str="null",
+ extra="ignore", # NOTE: if "strict" then fields with multiple aliases defined in the envs will fail to validate!
frozen=True,
- validate_default=True,
ignored_types=(cached_property,),
- env_parse_none_str="null",
+ populate_by_name=True, # NOTE: populate_by_name deprecated in pydantic v2.11+
+ validate_by_alias=True,
+ validate_by_name=True,
+ validate_default=True,
)
@classmethod
@@ -133,28 +136,15 @@ def __pydantic_init_subclass__(cls, **kwargs: Any):
for name, field in cls.model_fields.items():
auto_default_from_env = _is_auto_default_from_env_enabled(field)
field_type = get_type(field)
-
- # Avoids issubclass raising TypeError. SEE test_issubclass_type_error_with_pydantic_models
- is_not_composed = (
- get_origin(field_type) is None
- ) # is not composed as dict[str, Any] or Generic[Base]
is_not_literal = not is_literal(field)
- if (
- is_not_literal
- and is_not_composed
- and issubclass(field_type, BaseCustomSettings)
- ):
+ if is_not_literal and issubclass(field_type, BaseCustomSettings):
if auto_default_from_env:
# Builds a default factory `Field(default_factory=create_settings_from_env(field))`
field.default_factory = _create_settings_from_env(name, field)
field.default = None
- elif (
- is_not_literal
- and is_not_composed
- and issubclass(field_type, BaseSettings)
- ):
+ elif is_not_literal and issubclass(field_type, BaseSettings):
msg = f"{cls}.{name} of type {field_type} must inherit from BaseCustomSettings"
raise ValueError(msg)
diff --git a/packages/settings-library/src/settings_library/docker_api_proxy.py b/packages/settings-library/src/settings_library/docker_api_proxy.py
index 14f66f0934e5..cc002c5a3187 100644
--- a/packages/settings-library/src/settings_library/docker_api_proxy.py
+++ b/packages/settings-library/src/settings_library/docker_api_proxy.py
@@ -1,4 +1,5 @@
from functools import cached_property
+from typing import Annotated
from pydantic import Field, SecretStr
@@ -7,12 +8,12 @@
class DockerApiProxysettings(BaseCustomSettings):
- DOCKER_API_PROXY_HOST: str = Field(
- description="hostname of the docker-api-proxy service"
- )
- DOCKER_API_PROXY_PORT: PortInt = Field(
- 8888, description="port of the docker-api-proxy service"
- )
+ DOCKER_API_PROXY_HOST: Annotated[
+ str, Field(description="hostname of the docker-api-proxy service")
+ ]
+ DOCKER_API_PROXY_PORT: Annotated[
+ PortInt, Field(description="port of the docker-api-proxy service")
+ ] = 8888
DOCKER_API_PROXY_SECURE: bool = False
DOCKER_API_PROXY_USER: str
diff --git a/packages/settings-library/src/settings_library/postgres.py b/packages/settings-library/src/settings_library/postgres.py
index 83aa960c92cc..64276b7fdce4 100644
--- a/packages/settings-library/src/settings_library/postgres.py
+++ b/packages/settings-library/src/settings_library/postgres.py
@@ -1,15 +1,16 @@
from functools import cached_property
-from typing import Annotated
+from typing import Annotated, Self
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from pydantic import (
AliasChoices,
Field,
+ NonNegativeInt,
PostgresDsn,
SecretStr,
- ValidationInfo,
- field_validator,
+ model_validator,
)
+from pydantic.config import JsonDict
from pydantic_settings import SettingsConfigDict
from .base import BaseCustomSettings
@@ -30,32 +31,51 @@ class PostgresSettings(BaseCustomSettings):
# pool connection limits
POSTGRES_MINSIZE: Annotated[
- int, Field(description="Minimum number of connections in the pool", ge=1)
+ int,
+ Field(
+ description="Minimum number of connections in the pool that are always created and kept",
+ ge=1,
+ ),
] = 1
POSTGRES_MAXSIZE: Annotated[
- int, Field(description="Maximum number of connections in the pool", ge=1)
+ int,
+ Field(
+ description="Maximum number of connections in the pool that are kept",
+ ge=1,
+ ),
] = 50
+ POSTGRES_MAX_POOLSIZE: Annotated[
+ int,
+ Field(
+ description="Maximal number of connection in asyncpg pool (without overflow), lazily created on demand"
+ ),
+ ] = 10
+ POSTGRES_MAX_OVERFLOW: Annotated[
+ NonNegativeInt, Field(description="Maximal overflow connections")
+ ] = 20
POSTGRES_CLIENT_NAME: Annotated[
str | None,
Field(
description="Name of the application connecting the postgres database, will default to use the host hostname (hostname on linux)",
validation_alias=AliasChoices(
- "POSTGRES_CLIENT_NAME",
# This is useful when running inside a docker container, then the hostname is set each client gets a different name
+ "POSTGRES_CLIENT_NAME",
"HOST",
"HOSTNAME",
),
),
] = None
- @field_validator("POSTGRES_MAXSIZE")
- @classmethod
- def _check_size(cls, v, info: ValidationInfo):
- if info.data["POSTGRES_MINSIZE"] > v:
- msg = f"assert POSTGRES_MINSIZE={info.data['POSTGRES_MINSIZE']} <= POSTGRES_MAXSIZE={v}"
+ @model_validator(mode="after")
+ def validate_postgres_sizes(self) -> Self:
+ if self.POSTGRES_MINSIZE > self.POSTGRES_MAXSIZE:
+ msg = (
+ f"assert POSTGRES_MINSIZE={self.POSTGRES_MINSIZE} <= "
+ f"POSTGRES_MAXSIZE={self.POSTGRES_MAXSIZE}"
+ )
raise ValueError(msg)
- return v
+ return self
@cached_property
def dsn(self) -> str:
@@ -81,19 +101,19 @@ def dsn_with_async_sqlalchemy(self) -> str:
)
return f"{url}"
- @cached_property
- def dsn_with_query(self) -> str:
+ def dsn_with_query(self, application_name: str, *, suffix: str | None) -> str:
"""Some clients do not support queries in the dsn"""
dsn = self.dsn
- return self._update_query(dsn)
+ return self._update_query(dsn, application_name, suffix=suffix)
+
+ def client_name(self, application_name: str, *, suffix: str | None) -> str:
+ return f"{application_name}{'-' if self.POSTGRES_CLIENT_NAME else ''}{self.POSTGRES_CLIENT_NAME or ''}{'-' + suffix if suffix else ''}"
- def _update_query(self, uri: str) -> str:
+ def _update_query(self, uri: str, application_name: str, suffix: str | None) -> str:
# SEE https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS
- new_params: dict[str, str] = {}
- if self.POSTGRES_CLIENT_NAME:
- new_params = {
- "application_name": self.POSTGRES_CLIENT_NAME,
- }
+ new_params: dict[str, str] = {
+ "application_name": self.client_name(application_name, suffix=suffix),
+ }
if new_params:
parsed_uri = urlparse(uri)
@@ -103,17 +123,36 @@ def _update_query(self, uri: str) -> str:
return urlunparse(parsed_uri._replace(query=updated_query))
return uri
- model_config = SettingsConfigDict(
- json_schema_extra={
- "examples": [
- # minimal required
- {
- "POSTGRES_HOST": "localhost",
- "POSTGRES_PORT": "5432",
- "POSTGRES_USER": "usr",
- "POSTGRES_PASSWORD": "secret",
- "POSTGRES_DB": "db",
- }
- ],
- }
- )
+ @staticmethod
+ def _update_json_schema_extra(schema: JsonDict) -> None:
+ schema.update(
+ {
+ "examples": [
+ # minimal required
+ {
+ "POSTGRES_HOST": "localhost",
+ "POSTGRES_PORT": "5432",
+ "POSTGRES_USER": "usr",
+ "POSTGRES_PASSWORD": "secret",
+ "POSTGRES_DB": "db",
+ },
+ # full example
+ {
+ "POSTGRES_HOST": "localhost",
+ "POSTGRES_PORT": "5432",
+ "POSTGRES_USER": "usr",
+ "POSTGRES_PASSWORD": "secret",
+ "POSTGRES_DB": "db",
+ "POSTGRES_MINSIZE": 1,
+ "POSTGRES_MAXSIZE": 50,
+ "POSTGRES_MAX_POOLSIZE": 10,
+ "POSTGRES_MAX_OVERFLOW": 20,
+ "POSTGRES_CLIENT_NAME": "my_app", # first-choice
+ "HOST": "should be ignored",
+ "HOST_NAME": "should be ignored",
+ },
+ ],
+ }
+ )
+
+ model_config = SettingsConfigDict(json_schema_extra=_update_json_schema_extra)
diff --git a/packages/settings-library/src/settings_library/redis.py b/packages/settings-library/src/settings_library/redis.py
index 40dd88aabf98..28d6b6c66bdb 100644
--- a/packages/settings-library/src/settings_library/redis.py
+++ b/packages/settings-library/src/settings_library/redis.py
@@ -15,10 +15,11 @@ class RedisDatabase(IntEnum):
SCHEDULED_MAINTENANCE = 3
USER_NOTIFICATIONS = 4
ANNOUNCEMENTS = 5
- DISTRIBUTED_IDENTIFIERS = 6
+ LONG_RUNNING_TASKS = 6
DEFERRED_TASKS = 7
DYNAMIC_SERVICES = 8
CELERY_TASKS = 9
+ DOCUMENTS = 10
class RedisSettings(BaseCustomSettings):
diff --git a/packages/settings-library/src/settings_library/utils_cli.py b/packages/settings-library/src/settings_library/utils_cli.py
index 106b1d6fb746..3d180c8be2d3 100644
--- a/packages/settings-library/src/settings_library/utils_cli.py
+++ b/packages/settings-library/src/settings_library/utils_cli.py
@@ -26,8 +26,9 @@ def print_as_envfile(
**pydantic_export_options,
):
exclude_unset = pydantic_export_options.get("exclude_unset", False)
+ settings_cls = settings_obj.__class__
- for name, field in settings_obj.model_fields.items():
+ for name, field in settings_cls.model_fields.items():
auto_default_from_env = (
field.json_schema_extra is not None
and field.json_schema_extra.get("auto_default_from_env", False)
@@ -66,6 +67,9 @@ def print_as_envfile(
typer.echo(f"# {field.description}")
if isinstance(value, Enum):
value = value.value
+ elif isinstance(value, dict | list):
+ # Serialize complex objects as JSON to ensure they can be parsed correctly
+ value = json_dumps(value)
typer.echo(f"{name}={value}")
diff --git a/packages/settings-library/tests/conftest.py b/packages/settings-library/tests/conftest.py
index 0431a6c67487..c2a02e3a9b46 100644
--- a/packages/settings-library/tests/conftest.py
+++ b/packages/settings-library/tests/conftest.py
@@ -4,6 +4,7 @@
import sys
from pathlib import Path
+from typing import Annotated
import pytest
import settings_library
@@ -96,13 +97,15 @@ class _ApplicationSettings(BaseCustomSettings):
# NOTE: by convention, an addon is disabled when APP_ADDON=None, so we make this
# entry nullable as well
- APP_OPTIONAL_ADDON: _ModuleSettings | None = Field(
- json_schema_extra={"auto_default_from_env": True}
- )
+ APP_OPTIONAL_ADDON: Annotated[
+ _ModuleSettings | None,
+ Field(json_schema_extra={"auto_default_from_env": True}),
+ ]
# NOTE: example of a group that cannot be disabled (not nullable)
- APP_REQUIRED_PLUGIN: PostgresSettings | None = Field(
- json_schema_extra={"auto_default_from_env": True}
- )
+ APP_REQUIRED_PLUGIN: Annotated[
+ PostgresSettings | None,
+ Field(json_schema_extra={"auto_default_from_env": True}),
+ ]
return _ApplicationSettings
diff --git a/packages/settings-library/tests/data/.env-compact b/packages/settings-library/tests/data/.env-compact
index a0292aedbec6..b11273684dc2 100644
--- a/packages/settings-library/tests/data/.env-compact
+++ b/packages/settings-library/tests/data/.env-compact
@@ -3,4 +3,4 @@
APP_HOST=localhost
APP_PORT=80
APP_OPTIONAL_ADDON='{"MODULE_VALUE": 10, "MODULE_VALUE_DEFAULT": 42}'
-APP_REQUIRED_PLUGIN='{"POSTGRES_HOST": "localhost", "POSTGRES_PORT": 5432, "POSTGRES_USER": "foo", "POSTGRES_PASSWORD": "**********", "POSTGRES_DB": "foodb", "POSTGRES_MINSIZE": 1, "POSTGRES_MAXSIZE": 50, "POSTGRES_CLIENT_NAME": "None"}'
+APP_REQUIRED_PLUGIN='{"POSTGRES_HOST": "localhost", "POSTGRES_PORT": 5432, "POSTGRES_USER": "foo", "POSTGRES_PASSWORD": "**********", "POSTGRES_DB": "foodb", "POSTGRES_MINSIZE": 1, "POSTGRES_MAX_POOLSIZE": 10, "POSTGRES_MAX_OVERFLOW": 20, "POSTGRES_MAXSIZE": 50, "POSTGRES_CLIENT_NAME": "None"}'
diff --git a/packages/settings-library/tests/data/.env-granular b/packages/settings-library/tests/data/.env-granular
index f1c1f9c703c7..c27099b7b98b 100644
--- a/packages/settings-library/tests/data/.env-granular
+++ b/packages/settings-library/tests/data/.env-granular
@@ -12,9 +12,9 @@ POSTGRES_USER=foo
POSTGRES_PASSWORD=**********
# Database name
POSTGRES_DB=foodb
-# Minimum number of connections in the pool
POSTGRES_MINSIZE=1
-# Maximum number of connections in the pool
+POSTGRES_MAX_POOLSIZE=10
+POSTGRES_MAX_OVERFLOW=20
POSTGRES_MAXSIZE=50
# Name of the application connecting the postgres database, will default to use the host hostname (hostname on linux)
POSTGRES_CLIENT_NAME=None
diff --git a/packages/settings-library/tests/data/.env-mixed b/packages/settings-library/tests/data/.env-mixed
index 5333630c3ca0..4aed86d3b9b7 100644
--- a/packages/settings-library/tests/data/.env-mixed
+++ b/packages/settings-library/tests/data/.env-mixed
@@ -12,9 +12,9 @@ POSTGRES_USER=foo
POSTGRES_PASSWORD=**********
# Database name
POSTGRES_DB=foodb
-# Minimum number of connections in the pool
POSTGRES_MINSIZE=1
-# Maximum number of connections in the pool
+POSTGRES_MAX_POOLSIZE=10
+POSTGRES_MAX_OVERFLOW=20
POSTGRES_MAXSIZE=50
# Name of the application connecting the postgres database, will default to use the host hostname (hostname on linux)
POSTGRES_CLIENT_NAME=None
diff --git a/packages/settings-library/tests/data/.env-sample b/packages/settings-library/tests/data/.env-sample
index cfc2ed996850..d6f115f76c23 100644
--- a/packages/settings-library/tests/data/.env-sample
+++ b/packages/settings-library/tests/data/.env-sample
@@ -8,9 +8,9 @@ POSTGRES_USER=foo
POSTGRES_PASSWORD=secret
# Database name
POSTGRES_DB=foodb
-# Maximum number of connections in the pool
POSTGRES_MINSIZE=1
-# Minimum number of connections in the pool
+POSTGRES_MAX_POOLSIZE=10
+POSTGRES_MAX_OVERFLOW=20
POSTGRES_MAXSIZE=50
# --- APP_MODULE_FIELD ---
diff --git a/packages/settings-library/tests/test__models_examples.py b/packages/settings-library/tests/test__models_examples.py
index c93ed934cf13..beaaefc88802 100644
--- a/packages/settings-library/tests/test__models_examples.py
+++ b/packages/settings-library/tests/test__models_examples.py
@@ -2,7 +2,7 @@
import pytest
import settings_library
-from pydantic import BaseModel
+from pydantic_settings import BaseSettings
from pytest_simcore.pydantic_models import (
assert_validation_model,
walk_model_examples_in_package,
@@ -14,9 +14,19 @@
walk_model_examples_in_package(settings_library),
)
def test_all_settings_library_models_config_examples(
- model_cls: type[BaseModel], example_name: str, example_data: Any
+ model_cls: type[BaseSettings], example_name: str, example_data: Any
):
+ assert (
+ model_cls.model_config.get("populate_by_name") is True
+ ), f"populate_by_name must be enabled in {model_cls}. It will be deprecated in the future but for now it is required to use aliases in the settings"
+ assert (
+ model_cls.model_config.get("validate_by_alias") is True
+ ), f"validate_by_alias must be enabled in {model_cls}"
+ assert (
+ model_cls.model_config.get("validate_by_name") is True
+ ), f"validate_by_name must be enabled in {model_cls}"
+
assert_validation_model(
model_cls, example_name=example_name, example_data=example_data
)
diff --git a/packages/settings-library/tests/test__pydantic_settings.py b/packages/settings-library/tests/test__pydantic_settings.py
index eb2989852cbf..a2d06c8a3ef7 100644
--- a/packages/settings-library/tests/test__pydantic_settings.py
+++ b/packages/settings-library/tests/test__pydantic_settings.py
@@ -2,7 +2,7 @@
# pylint: disable=unused-argument
# pylint: disable=unused-variable
-""" Tests subtle details about pydantic models
+"""Tests subtle details about pydantic models
This test suite intends to "freeze" some concepts/invariants from pydantic upon which we are going
to build this libraries.
@@ -17,7 +17,12 @@
import pytest
from common_library.basic_types import LogLevel
from common_library.pydantic_fields_extension import is_nullable
-from pydantic import AliasChoices, Field, ValidationInfo, field_validator
+from pydantic import (
+ AliasChoices,
+ Field,
+ ValidationInfo,
+ field_validator,
+)
from pydantic_core import PydanticUndefined
from pydantic_settings import BaseSettings
from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
diff --git a/packages/settings-library/tests/test_base.py b/packages/settings-library/tests/test_base.py
index d4ebd9877606..026abdfc5d60 100644
--- a/packages/settings-library/tests/test_base.py
+++ b/packages/settings-library/tests/test_base.py
@@ -6,7 +6,7 @@
import json
from collections.abc import Callable
-from typing import Any
+from typing import Annotated, Any
import pydantic
import pytest
@@ -74,12 +74,18 @@ class M1(BaseCustomSettings):
VALUE_NULLABLE_DEFAULT_VALUE: S | None = S(S_VALUE=42)
VALUE_NULLABLE_DEFAULT_NULL: S | None = None
- VALUE_NULLABLE_DEFAULT_ENV: S | None = Field(
- json_schema_extra={"auto_default_from_env": True}
- )
- VALUE_DEFAULT_ENV: S = Field(
- json_schema_extra={"auto_default_from_env": True}
- )
+ VALUE_NULLABLE_DEFAULT_ENV: Annotated[
+ S | None,
+ Field(
+ json_schema_extra={"auto_default_from_env": True},
+ ),
+ ]
+ VALUE_DEFAULT_ENV: Annotated[
+ S,
+ Field(
+ json_schema_extra={"auto_default_from_env": True},
+ ),
+ ]
class M2(BaseCustomSettings):
#
@@ -91,14 +97,20 @@ class M2(BaseCustomSettings):
VALUE_NULLABLE_DEFAULT_NULL: S | None = None
# defaults enabled but if not exists, it disables
- VALUE_NULLABLE_DEFAULT_ENV: S | None = Field(
- json_schema_extra={"auto_default_from_env": True}
- )
+ VALUE_NULLABLE_DEFAULT_ENV: Annotated[
+ S | None,
+ Field(
+ json_schema_extra={"auto_default_from_env": True},
+ ),
+ ]
# cannot be disabled
- VALUE_DEFAULT_ENV: S = Field(
- json_schema_extra={"auto_default_from_env": True}
- )
+ VALUE_DEFAULT_ENV: Annotated[
+ S,
+ Field(
+ json_schema_extra={"auto_default_from_env": True},
+ ),
+ ]
# Changed in version 3.7: Dictionary order is guaranteed to be insertion order
_classes = {"M1": M1, "M2": M2, "S": S}
@@ -108,7 +120,7 @@ class M2(BaseCustomSettings):
def test_create_settings_class(
- create_settings_class: Callable[[str], type[BaseCustomSettings]]
+ create_settings_class: Callable[[str], type[BaseCustomSettings]],
):
M = create_settings_class("M1")
@@ -216,9 +228,12 @@ def test_auto_default_to_none_logs_a_warning(
class SettingsClass(BaseCustomSettings):
VALUE_NULLABLE_DEFAULT_NULL: S | None = None
- VALUE_NULLABLE_DEFAULT_ENV: S | None = Field(
- json_schema_extra={"auto_default_from_env": True},
- )
+ VALUE_NULLABLE_DEFAULT_ENV: Annotated[
+ S | None,
+ Field(
+ json_schema_extra={"auto_default_from_env": True},
+ ),
+ ] = None
instance = SettingsClass.create_from_envs()
assert instance.VALUE_NULLABLE_DEFAULT_NULL is None
@@ -245,9 +260,12 @@ def test_auto_default_to_not_none(
class SettingsClass(BaseCustomSettings):
VALUE_NULLABLE_DEFAULT_NULL: S | None = None
- VALUE_NULLABLE_DEFAULT_ENV: S | None = Field(
- json_schema_extra={"auto_default_from_env": True},
- )
+ VALUE_NULLABLE_DEFAULT_ENV: Annotated[
+ S | None,
+ Field(
+ json_schema_extra={"auto_default_from_env": True},
+ ),
+ ] = None
instance = SettingsClass.create_from_envs()
assert instance.VALUE_NULLABLE_DEFAULT_NULL is None
@@ -317,32 +335,38 @@ class SettingsClassExt(SettingsClass):
}
-def test_issubclass_type_error_with_pydantic_models():
- # There is a problem
- #
- # TypeError: issubclass() arg 1 must be a class
- #
- # SEE https://github.com/pydantic/pydantic/issues/545
- #
- # >> issubclass(dict, BaseSettings)
- # False
- # >> issubclass(dict[str, str], BaseSettings)
- # Traceback (most recent call last):
- # File "", line 1, in
- # File "/home/crespo/.pyenv/versions/3.10.13/lib/python3.10/abc.py", line 123, in __subclasscheck__
- # return _abc_subclasscheck(cls, subclass)
- # TypeError: issubclass() arg 1 must be a class
- #
+def test_fixed_issubclass_type_error_with_pydantic_models():
assert not issubclass(dict, BaseSettings)
-
- # NOTE: this should be fixed by pydantic at some point. When this happens, this test will fail
- with pytest.raises(TypeError):
- issubclass(dict[str, str], BaseSettings)
+ assert not issubclass(
+ # FIXED with
+ #
+ # pydantic 2.11.7
+ # pydantic_core 2.33.2
+ # pydantic-extra-types 2.10.5
+ # pydantic-settings 2.7.0
+ #
+ #
+ # TypeError: issubclass() arg 1 must be a class
+ #
+ # SEE https://github.com/pydantic/pydantic/issues/545
+ #
+ # >> issubclass(dict, BaseSettings)
+ # False
+ # >> issubclass(dict[str, str], BaseSettings)
+ # Traceback (most recent call last):
+ # File "", line 1, in
+ # File "/home/crespo/.pyenv/versions/3.10.13/lib/python3.10/abc.py", line 123, in __subclasscheck__
+ # return _abc_subclasscheck(cls, subclass)
+ # TypeError: issubclass() arg 1 must be a class
+ #
+ dict[str, str],
+ BaseSettings,
+ )
# here reproduces the problem with our settings that ANE and PC had
class SettingsClassThatFailed(BaseCustomSettings):
- FOO: dict[str, str] | None = Field(default=None)
+ FOO: dict[str, str] | None = None
SettingsClassThatFailed(FOO={})
assert SettingsClassThatFailed(FOO=None) == SettingsClassThatFailed()
@@ -352,9 +376,7 @@ def test_upgrade_failure_to_pydantic_settings_2_6(
mock_env_devel_environment: EnvVarsDict,
):
class ProblematicSettings(BaseCustomSettings):
- WEBSERVER_EMAIL: SMTPSettings | None = Field(
- json_schema_extra={"auto_default_from_env": True}
- )
+ WEBSERVER_EMAIL: SMTPSettings | None = None
model_config = SettingsConfigDict(nested_model_default_partial_update=True)
diff --git a/packages/settings-library/tests/test_base_w_postgres.py b/packages/settings-library/tests/test_base_w_postgres.py
index 37329a4e9bb9..641d1df62a3d 100644
--- a/packages/settings-library/tests/test_base_w_postgres.py
+++ b/packages/settings-library/tests/test_base_w_postgres.py
@@ -5,6 +5,7 @@
import os
from collections.abc import Callable
+from typing import Annotated
import pytest
from pydantic import AliasChoices, Field, ValidationError, __version__
@@ -53,17 +54,22 @@ class _FakePostgresSettings(BaseCustomSettings):
POSTGRES_USER: str
POSTGRES_PASSWORD: str
- POSTGRES_DB: str = Field(...)
-
- POSTGRES_MINSIZE: int = Field(1, ge=1)
- POSTGRES_MAXSIZE: int = Field(50, ge=1)
-
- POSTGRES_CLIENT_NAME: str | None = Field(
- None,
- validation_alias=AliasChoices(
- "HOST", "HOSTNAME", "POSTGRES_CLIENT_NAME"
+ POSTGRES_DB: str
+ POSTGRES_MINSIZE: Annotated[int, Field(ge=1)] = 1
+ POSTGRES_MAXSIZE: Annotated[int, Field(ge=1)] = 50
+ POSTGRES_MAX_POOLSIZE: int = 10
+ POSTGRES_MAX_OVERFLOW: Annotated[int, Field(ge=0)] = 20
+
+ POSTGRES_CLIENT_NAME: Annotated[
+ str | None,
+ Field(
+ validation_alias=AliasChoices(
+ "POSTGRES_CLIENT_NAME",
+ "HOST",
+ "HOSTNAME",
+ ),
),
- )
+ ] = None
#
# Different constraints on WEBSERVER_POSTGRES subsettings
@@ -77,15 +83,17 @@ class S2(BaseCustomSettings):
class S3(BaseCustomSettings):
# cannot be disabled!!
- WEBSERVER_POSTGRES_DEFAULT_ENV: _FakePostgresSettings = Field(
- json_schema_extra={"auto_default_from_env": True}
- )
+ WEBSERVER_POSTGRES_DEFAULT_ENV: Annotated[
+ _FakePostgresSettings,
+ Field(json_schema_extra={"auto_default_from_env": True}),
+ ]
class S4(BaseCustomSettings):
# defaults enabled but if cannot be resolved, it disables
- WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV: _FakePostgresSettings | None = (
- Field(json_schema_extra={"auto_default_from_env": True})
- )
+ WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV: Annotated[
+ _FakePostgresSettings | None,
+ Field(json_schema_extra={"auto_default_from_env": True}),
+ ]
class S5(BaseCustomSettings):
    # defaults disabled; enabled only when set explicitly
@@ -120,7 +128,6 @@ class S5(BaseCustomSettings):
def test_parse_from_empty_envs(
postgres_envvars_unset: None, model_classes_factory: Callable
):
-
S1, S2, S3, S4, S5 = model_classes_factory()
with pytest.raises(ValidationError, match="WEBSERVER_POSTGRES") as exc_info:
@@ -154,7 +161,6 @@ def test_parse_from_individual_envs(
monkeypatch: pytest.MonkeyPatch,
model_classes_factory: Callable,
):
-
S1, S2, S3, S4, S5 = model_classes_factory()
# environment
@@ -195,6 +201,8 @@ def test_parse_from_individual_envs(
"POSTGRES_DB": "db",
"POSTGRES_MAXSIZE": 50,
"POSTGRES_MINSIZE": 1,
+ "POSTGRES_MAX_POOLSIZE": 10,
+ "POSTGRES_MAX_OVERFLOW": 20,
"POSTGRES_CLIENT_NAME": None,
}
}
@@ -210,6 +218,8 @@ def test_parse_from_individual_envs(
"POSTGRES_DB": "db",
"POSTGRES_MAXSIZE": 50,
"POSTGRES_MINSIZE": 1,
+ "POSTGRES_MAX_POOLSIZE": 10,
+ "POSTGRES_MAX_OVERFLOW": 20,
"POSTGRES_CLIENT_NAME": None,
}
}
@@ -222,7 +232,6 @@ def test_parse_from_individual_envs(
def test_parse_compact_env(
postgres_envvars_unset: None, monkeypatch, model_classes_factory
):
-
S1, S2, S3, S4, S5 = model_classes_factory()
# environment
@@ -257,6 +266,8 @@ def test_parse_compact_env(
"POSTGRES_DB": "db2",
"POSTGRES_MAXSIZE": 50,
"POSTGRES_MINSIZE": 1,
+ "POSTGRES_MAX_POOLSIZE": 10,
+ "POSTGRES_MAX_OVERFLOW": 20,
"POSTGRES_CLIENT_NAME": None,
}
}
@@ -336,7 +347,6 @@ def test_parse_compact_env(
def test_parse_from_mixed_envs(
postgres_envvars_unset: None, monkeypatch, model_classes_factory
):
-
S1, S2, S3, S4, S5 = model_classes_factory()
# environment
@@ -367,6 +377,8 @@ def test_parse_from_mixed_envs(
"POSTGRES_DB": "db2",
"POSTGRES_MAXSIZE": 50,
"POSTGRES_MINSIZE": 1,
+ "POSTGRES_MAX_POOLSIZE": 10,
+ "POSTGRES_MAX_OVERFLOW": 20,
"POSTGRES_CLIENT_NAME": None,
}
}
@@ -466,7 +478,6 @@ def test_parse_from_mixed_envs(
def test_toggle_plugin_1(
postgres_envvars_unset: None, monkeypatch, model_classes_factory
):
-
*_, S4, S5 = model_classes_factory()
# empty environ
@@ -529,7 +540,6 @@ def test_toggle_plugin_3(
def test_toggle_plugin_4(
postgres_envvars_unset: None, monkeypatch, model_classes_factory
):
-
*_, S4, S5 = model_classes_factory()
JSON_VALUE = '{"POSTGRES_HOST":"pg2", "POSTGRES_USER":"test2", "POSTGRES_PASSWORD":"shh2", "POSTGRES_DB":"db2"}'
@@ -559,7 +569,6 @@ def test_toggle_plugin_4(
)
with monkeypatch.context() as patch:
-
# Enables both but remove individuals
setenvs_from_envfile(
patch,
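
The two new fields, POSTGRES_MAX_POOLSIZE and POSTGRES_MAX_OVERFLOW, surface pool-sizing knobs through the fake settings used by these tests. The mapping below onto SQLAlchemy's async engine is an assumption (the actual wiring is not part of this diff); it only shows what defaults of 10 and 20 would typically control:

    from sqlalchemy.ext.asyncio import create_async_engine

    POSTGRES_MAX_POOLSIZE = 10  # new default
    POSTGRES_MAX_OVERFLOW = 20  # new default

    # hypothetical wiring: pool_size caps persistent connections,
    # max_overflow allows short-lived extras beyond that cap
    engine = create_async_engine(
        "postgresql+asyncpg://foo:secret@localhost:5432/foodb",
        pool_size=POSTGRES_MAX_POOLSIZE,
        max_overflow=POSTGRES_MAX_OVERFLOW,
    )
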
diff --git a/packages/settings-library/tests/test_postgres.py b/packages/settings-library/tests/test_postgres.py
index 6c9067c2d6b1..bdc33901f6eb 100644
--- a/packages/settings-library/tests/test_postgres.py
+++ b/packages/settings-library/tests/test_postgres.py
@@ -6,6 +6,7 @@
from urllib.parse import urlparse
import pytest
+from faker import Faker
from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
from pytest_simcore.helpers.typing_env import EnvVarsDict
from settings_library.postgres import PostgresSettings
@@ -24,7 +25,6 @@ def mock_environment(mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPa
def test_cached_property_dsn(mock_environment: EnvVarsDict):
-
settings = PostgresSettings.create_from_envs()
# all are upper-case
@@ -36,14 +36,17 @@ def test_cached_property_dsn(mock_environment: EnvVarsDict):
assert "dsn" not in settings.model_dump()
-def test_dsn_with_query(mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch):
+def test_dsn_with_query(
+ mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, faker: Faker
+):
settings = PostgresSettings()
assert settings.POSTGRES_CLIENT_NAME
assert settings.dsn == "postgresql://foo:secret@localhost:5432/foodb"
+ app_name = faker.pystr()
assert (
- settings.dsn_with_query
- == "postgresql://foo:secret@localhost:5432/foodb?application_name=Some+%2643+funky+name"
+ settings.dsn_with_query(app_name, suffix="my-suffix")
+ == f"postgresql://foo:secret@localhost:5432/foodb?application_name={app_name}-Some+%2643+funky+name-my-suffix"
)
with monkeypatch.context() as patch:
@@ -51,7 +54,9 @@ def test_dsn_with_query(mock_environment: EnvVarsDict, monkeypatch: pytest.Monke
settings = PostgresSettings()
assert not settings.POSTGRES_CLIENT_NAME
- assert settings.dsn == settings.dsn_with_query
+ assert f"{settings.dsn}?application_name=blah" == settings.dsn_with_query(
+ "blah", suffix=None
+ )
def test_dsn_with_async_sqlalchemy_has_query(
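
dsn_with_query changes from a property to a method: the caller now supplies the application name and an optional suffix, which are combined with POSTGRES_CLIENT_NAME (when set) into the application_name query parameter, as the expected URLs in the test above show. A small usage sketch with illustrative values:

    from settings_library.postgres import PostgresSettings

    settings = PostgresSettings(
        POSTGRES_HOST="localhost",
        POSTGRES_USER="foo",
        POSTGRES_PASSWORD="secret",
        POSTGRES_DB="foodb",
    )

    # application_name (and the optional suffix) end up in the DSN query string
    dsn = settings.dsn_with_query("my-service", suffix="worker-1")
    print(dsn)  # postgresql://foo:...@localhost:5432/foodb?application_name=my-service-...-worker-1
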
diff --git a/packages/settings-library/tests/test_utils_cli.py b/packages/settings-library/tests/test_utils_cli.py
index 49c684ea6265..a7194e1b4db0 100644
--- a/packages/settings-library/tests/test_utils_cli.py
+++ b/packages/settings-library/tests/test_utils_cli.py
@@ -6,7 +6,7 @@
import logging
from collections.abc import Callable
from io import StringIO
-from typing import Any
+from typing import Annotated, Any
import pytest
import typer
@@ -76,6 +76,8 @@ def fake_granular_env_file_content() -> str:
POSTGRES_DB=foodb
POSTGRES_MINSIZE=1
POSTGRES_MAXSIZE=50
+ POSTGRES_MAX_POOLSIZE=10
+ POSTGRES_MAX_OVERFLOW=20
POSTGRES_CLIENT_NAME=None
MODULE_VALUE=10
"""
@@ -190,6 +192,8 @@ def test_cli_default_settings_envs(
"POSTGRES_DB": "foodb",
"POSTGRES_MINSIZE": 1,
"POSTGRES_MAXSIZE": 50,
+ "POSTGRES_MAX_POOLSIZE": 10,
+ "POSTGRES_MAX_OVERFLOW": 20,
"POSTGRES_CLIENT_NAME": None,
},
}
@@ -221,6 +225,8 @@ def test_cli_compact_settings_envs(
"POSTGRES_DB": "foodb",
"POSTGRES_MINSIZE": 1,
"POSTGRES_MAXSIZE": 50,
+ "POSTGRES_MAX_POOLSIZE": 10,
+ "POSTGRES_MAX_OVERFLOW": 20,
"POSTGRES_CLIENT_NAME": None,
},
}
@@ -244,7 +250,7 @@ def test_cli_compact_settings_envs(
"APP_HOST": "localhost",
"APP_PORT": "80",
"APP_OPTIONAL_ADDON": '{"MODULE_VALUE":10,"MODULE_VALUE_DEFAULT":42}',
- "APP_REQUIRED_PLUGIN": '{"POSTGRES_HOST":"localhost","POSTGRES_PORT":5432,"POSTGRES_USER":"foo","POSTGRES_PASSWORD":"secret","POSTGRES_DB":"foodb","POSTGRES_MINSIZE":1,"POSTGRES_MAXSIZE":50,"POSTGRES_CLIENT_NAME":null}',
+ "APP_REQUIRED_PLUGIN": '{"POSTGRES_HOST":"localhost","POSTGRES_PORT":5432,"POSTGRES_USER":"foo","POSTGRES_PASSWORD":"secret","POSTGRES_DB":"foodb","POSTGRES_MINSIZE":1,"POSTGRES_MAXSIZE":50,"POSTGRES_MAX_POOLSIZE":10,"POSTGRES_MAX_OVERFLOW":20,"POSTGRES_CLIENT_NAME":null}',
}
settings_2 = fake_settings_class()
@@ -261,7 +267,7 @@ def test_compact_format(
APP_HOST=localhost
APP_PORT=80
APP_OPTIONAL_ADDON='{"MODULE_VALUE": 10, "MODULE_VALUE_DEFAULT": 42}'
- APP_REQUIRED_PLUGIN='{"POSTGRES_HOST": "localhost", "POSTGRES_PORT": 5432, "POSTGRES_USER": "foo", "POSTGRES_PASSWORD": "secret", "POSTGRES_DB": "foodb", "POSTGRES_MINSIZE": 1, "POSTGRES_MAXSIZE": 50, "POSTGRES_CLIENT_NAME": "None"}'
+ APP_REQUIRED_PLUGIN='{"POSTGRES_HOST": "localhost", "POSTGRES_PORT": 5432, "POSTGRES_USER": "foo", "POSTGRES_PASSWORD": "secret", "POSTGRES_DB": "foodb", "POSTGRES_MINSIZE": 1, "POSTGRES_MAXSIZE": 50, "POSTGRES_MAX_POOLSIZE": 10, "POSTGRES_MAX_OVERFLOW": 20, "POSTGRES_CLIENT_NAME": "None"}'
""",
)
@@ -292,10 +298,10 @@ def test_granular_format(
POSTGRES_PASSWORD=secret
# Database name
POSTGRES_DB=foodb
- # Minimum number of connections in the pool
POSTGRES_MINSIZE=1
- # Maximum number of connections in the pool
POSTGRES_MAXSIZE=50
+ POSTGRES_MAX_POOLSIZE=10
+ POSTGRES_MAX_OVERFLOW=20
# Name of the application connecting the postgres database, will default to use the host hostname (hostname on linux)
POSTGRES_CLIENT_NAME=None
""",
@@ -315,6 +321,8 @@ def test_granular_format(
"POSTGRES_DB": "foodb",
"POSTGRES_MINSIZE": 1,
"POSTGRES_MAXSIZE": 50,
+ "POSTGRES_MAX_POOLSIZE": 10,
+ "POSTGRES_MAX_OVERFLOW": 20,
"POSTGRES_CLIENT_NAME": None,
},
)
@@ -414,7 +422,7 @@ def test_cli_settings_exclude_unset_as_json(
def test_print_as(capsys: pytest.CaptureFixture):
class FakeSettings(BaseCustomSettings):
- INTEGER: int = Field(..., description="Some info")
+ INTEGER: Annotated[int, Field(description="Some info")]
SECRET: SecretStr
URL: AnyHttpUrl
diff --git a/packages/settings-library/tests/test_utils_logging.py b/packages/settings-library/tests/test_utils_logging.py
index d63a8ae8538c..f847a716e5f9 100644
--- a/packages/settings-library/tests/test_utils_logging.py
+++ b/packages/settings-library/tests/test_utils_logging.py
@@ -1,4 +1,5 @@
import logging
+from typing import Annotated
from pydantic import AliasChoices, Field, field_validator
from settings_library.base import BaseCustomSettings
@@ -17,17 +18,19 @@ class Settings(BaseCustomSettings, MixinLoggingSettings):
SC_BOOT_MODE: BootModeEnum | None = None
# LOGGING
- LOG_LEVEL: str = Field(
- "WARNING",
- validation_alias=AliasChoices(
- "APPNAME_LOG_LEVEL",
- "LOG_LEVEL",
+ LOG_LEVEL: Annotated[
+ str,
+ Field(
+ validation_alias=AliasChoices(
+ "APPNAME_LOG_LEVEL",
+ "LOG_LEVEL",
+ ),
),
- )
+ ] = "WARNING"
- APPNAME_DEBUG: bool = Field(
- default=False, description="Starts app in debug mode"
- )
+ APPNAME_DEBUG: Annotated[
+ bool, Field(description="Starts app in debug mode")
+ ] = False
@field_validator("LOG_LEVEL", mode="before")
@classmethod
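
The same Annotated migration applies here, with the alias behaviour preserved: the field still resolves from either APPNAME_LOG_LEVEL or LOG_LEVEL. A self-contained sketch of that behaviour, with plain pydantic-settings standing in for BaseCustomSettings:

    import os
    from typing import Annotated

    from pydantic import AliasChoices, Field
    from pydantic_settings import BaseSettings


    class LogSettings(BaseSettings):
        LOG_LEVEL: Annotated[
            str,
            Field(validation_alias=AliasChoices("APPNAME_LOG_LEVEL", "LOG_LEVEL")),
        ] = "WARNING"


    # either env var populates the same field
    os.environ["APPNAME_LOG_LEVEL"] = "DEBUG"
    assert LogSettings().LOG_LEVEL == "DEBUG"
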
diff --git a/packages/simcore-sdk/requirements/_base.in b/packages/simcore-sdk/requirements/_base.in
index 9be327aed363..4ce6caec6571 100644
--- a/packages/simcore-sdk/requirements/_base.in
+++ b/packages/simcore-sdk/requirements/_base.in
@@ -13,9 +13,10 @@
aiocache
aiofiles
aiohttp
+httpx
packaging
pint
-sqlalchemy[asyncio]
pydantic[email]
+sqlalchemy[asyncio]
tenacity
tqdm
diff --git a/packages/simcore-sdk/requirements/_base.txt b/packages/simcore-sdk/requirements/_base.txt
index 97a80119b7f0..c614e77032c5 100644
--- a/packages/simcore-sdk/requirements/_base.txt
+++ b/packages/simcore-sdk/requirements/_base.txt
@@ -14,7 +14,7 @@ aiofiles==24.1.0
# -r requirements/_base.in
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.11.18
+aiohttp==3.12.12
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -44,6 +44,7 @@ anyio==4.8.0
# via
# fast-depends
# faststream
+ # httpx
arrow==1.3.0
# via
# -r requirements/../../../packages/models-library/requirements/_base.in
@@ -72,17 +73,13 @@ certifi==2025.1.31
# -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
+ # httpcore
+ # httpx
# requests
charset-normalizer==3.4.1
# via requests
-click==8.1.8
+click==8.2.1
# via typer
-deprecated==1.2.18
- # via
- # opentelemetry-api
- # opentelemetry-exporter-otlp-proto-grpc
- # opentelemetry-exporter-otlp-proto-http
- # opentelemetry-semantic-conventions
dnspython==2.7.0
# via email-validator
email-validator==2.2.0
@@ -101,7 +98,7 @@ frozenlist==1.5.0
# via
# aiohttp
# aiosignal
-googleapis-common-protos==1.68.0
+googleapis-common-protos==1.70.0
# via
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
@@ -109,21 +106,47 @@ greenlet==3.1.1
# via sqlalchemy
grpcio==1.70.0
# via opentelemetry-exporter-otlp-proto-grpc
+h11==0.16.0
+ # via httpcore
+httpcore==1.0.9
+ # via httpx
+httpx==0.28.1
+ # via
+ # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt
+ # -c requirements/../../../requirements/constraints.txt
+ # -r requirements/_base.in
idna==3.10
# via
# anyio
# email-validator
+ # httpx
# requests
# yarl
importlib-metadata==8.5.0
# via opentelemetry-api
+jsonref==1.1.0
+ # via
+ # -r requirements/../../../packages/models-library/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in
jsonschema==4.23.0
# via
# -r requirements/../../../packages/models-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in
jsonschema-specifications==2024.10.1
# via jsonschema
-mako==1.3.9
+mako==1.3.10
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -150,7 +173,7 @@ multidict==6.1.0
# via
# aiohttp
# yarl
-opentelemetry-api==1.30.0
+opentelemetry-api==1.34.1
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# opentelemetry-exporter-otlp-proto-grpc
@@ -163,51 +186,51 @@ opentelemetry-api==1.30.0
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
-opentelemetry-exporter-otlp==1.30.0
+opentelemetry-exporter-otlp==1.34.1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-exporter-otlp-proto-common==1.30.0
+opentelemetry-exporter-otlp-proto-common==1.34.1
# via
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-exporter-otlp-proto-grpc==1.30.0
+opentelemetry-exporter-otlp-proto-grpc==1.34.1
# via opentelemetry-exporter-otlp
-opentelemetry-exporter-otlp-proto-http==1.30.0
+opentelemetry-exporter-otlp-proto-http==1.34.1
# via opentelemetry-exporter-otlp
-opentelemetry-instrumentation==0.51b0
+opentelemetry-instrumentation==0.55b1
# via
# opentelemetry-instrumentation-aio-pika
# opentelemetry-instrumentation-asyncpg
# opentelemetry-instrumentation-logging
# opentelemetry-instrumentation-redis
# opentelemetry-instrumentation-requests
-opentelemetry-instrumentation-aio-pika==0.51b0
+opentelemetry-instrumentation-aio-pika==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-instrumentation-asyncpg==0.51b0
- # via -r requirements/../../../packages/postgres-database/requirements/_base.in
-opentelemetry-instrumentation-logging==0.51b0
+opentelemetry-instrumentation-asyncpg==0.55b1
+ # via -r requirements/../../../packages/service-library/requirements/_base.in
+opentelemetry-instrumentation-logging==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-instrumentation-redis==0.51b0
+opentelemetry-instrumentation-redis==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-instrumentation-requests==0.51b0
+opentelemetry-instrumentation-requests==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-proto==1.30.0
+opentelemetry-proto==1.34.1
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-sdk==1.30.0
+opentelemetry-sdk==1.34.1
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-semantic-conventions==0.51b0
+opentelemetry-semantic-conventions==0.55b1
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-asyncpg
# opentelemetry-instrumentation-redis
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
-opentelemetry-util-http==0.51b0
+opentelemetry-util-http==0.55b1
# via opentelemetry-instrumentation-requests
orjson==3.10.15
# via
@@ -248,7 +271,7 @@ propcache==0.3.0
# via
# aiohttp
# yarl
-protobuf==5.29.3
+protobuf==5.29.5
# via
# googleapis-common-protos
# opentelemetry-proto
@@ -258,7 +281,7 @@ psycopg2-binary==2.9.10
# via sqlalchemy
pycryptodome==3.21.0
# via stream-zip
-pydantic==2.10.6
+pydantic==2.11.7
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -291,9 +314,9 @@ pydantic==2.10.6
# fast-depends
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
-pydantic-extra-types==2.10.2
+pydantic-extra-types==2.10.5
# via
# -r requirements/../../../packages/common-library/requirements/_base.in
# -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in
@@ -384,9 +407,9 @@ referencing==0.35.1
# -c requirements/../../../requirements/constraints.txt
# jsonschema
# jsonschema-specifications
-requests==2.32.3
+requests==2.32.4
# via opentelemetry-exporter-otlp-proto-http
-rich==13.9.4
+rich==14.1.0
# via
# -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in
# -r requirements/../../../packages/settings-library/requirements/_base.in
@@ -432,13 +455,13 @@ tqdm==4.67.1
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# -r requirements/_base.in
-typer==0.15.2
+typer==0.16.1
# via
# -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in
# -r requirements/../../../packages/settings-library/requirements/_base.in
types-python-dateutil==2.9.0.20241206
# via arrow
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# aiodebug
# alembic
@@ -446,13 +469,20 @@ typing-extensions==4.12.2
# faststream
# flexcache
# flexparser
+ # opentelemetry-api
+ # opentelemetry-exporter-otlp-proto-grpc
+ # opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
+ # opentelemetry-semantic-conventions
# pint
# pydantic
# pydantic-core
# pydantic-extra-types
# typer
-urllib3==2.3.0
+ # typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
+urllib3==2.5.0
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -471,7 +501,6 @@ urllib3==2.3.0
# requests
wrapt==1.17.2
# via
- # deprecated
# opentelemetry-instrumentation
# opentelemetry-instrumentation-aio-pika
# opentelemetry-instrumentation-redis
diff --git a/packages/simcore-sdk/requirements/_test.txt b/packages/simcore-sdk/requirements/_test.txt
index 9601c8200551..82fe7b4fdfd7 100644
--- a/packages/simcore-sdk/requirements/_test.txt
+++ b/packages/simcore-sdk/requirements/_test.txt
@@ -10,7 +10,7 @@ aiohappyeyeballs==2.6.1
# via
# -c requirements/_base.txt
# aiohttp
-aiohttp==3.11.18
+aiohttp==3.12.12
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
@@ -73,7 +73,7 @@ charset-normalizer==3.4.1
# via
# -c requirements/_base.txt
# requests
-click==8.1.8
+click==8.2.1
# via
# -c requirements/_base.txt
# -r requirements/_test.in
@@ -101,7 +101,7 @@ flask==3.1.0
# via
# flask-cors
# moto
-flask-cors==5.0.1
+flask-cors==6.0.1
# via moto
frozenlist==1.5.0
# via
@@ -125,7 +125,7 @@ iniconfig==2.0.0
# via pytest
itsdangerous==2.2.0
# via flask
-jinja2==3.1.5
+jinja2==3.1.6
# via
# -c requirements/../../../requirements/constraints.txt
# flask
@@ -158,7 +158,7 @@ jsonschema-specifications==2024.10.1
# openapi-schema-validator
lazy-object-proxy==1.10.0
# via openapi-spec-validator
-mako==1.3.9
+mako==1.3.10
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
@@ -179,9 +179,9 @@ multidict==6.1.0
# aiobotocore
# aiohttp
# yarl
-mypy==1.15.0
+mypy==1.16.1
# via sqlalchemy
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via mypy
networkx==3.4.2
# via cfn-lint
@@ -197,8 +197,12 @@ packaging==24.2
# pytest-sugar
pathable==0.4.4
# via jsonschema-path
+pathspec==0.12.1
+ # via mypy
pluggy==1.5.0
- # via pytest
+ # via
+ # pytest
+ # pytest-cov
ply==3.11
# via jsonpath-ng
pprintpp==0.4.0
@@ -212,18 +216,22 @@ py-partiql-parser==0.6.1
# via moto
pycparser==2.22
# via cffi
-pydantic==2.10.6
+pydantic==2.11.7
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via
# -c requirements/_base.txt
# pydantic
+pygments==2.19.1
+ # via
+ # -c requirements/_base.txt
+ # pytest
pyparsing==3.2.1
# via moto
-pytest==8.3.5
+pytest==8.4.1
# via
# -r requirements/_test.in
# pytest-asyncio
@@ -233,21 +241,21 @@ pytest==8.3.5
# pytest-mock
# pytest-sugar
# pytest-xdist
-pytest-asyncio==0.26.0
+pytest-asyncio==1.0.0
# via -r requirements/_test.in
-pytest-cov==6.0.0
+pytest-cov==6.2.1
# via -r requirements/_test.in
pytest-icdiff==0.9
# via -r requirements/_test.in
pytest-instafail==0.5.0
# via -r requirements/_test.in
-pytest-mock==3.14.0
+pytest-mock==3.14.1
# via -r requirements/_test.in
pytest-runner==6.0.1
# via -r requirements/_test.in
pytest-sugar==1.0.0
# via -r requirements/_test.in
-pytest-xdist==3.6.1
+pytest-xdist==3.8.0
# via -r requirements/_test.in
python-dateutil==2.9.0.post0
# via
@@ -276,7 +284,7 @@ referencing==0.35.1
# jsonschema-specifications
regex==2024.11.6
# via cfn-lint
-requests==2.32.3
+requests==2.32.4
# via
# -c requirements/_base.txt
# -r requirements/_test.in
@@ -295,7 +303,7 @@ rpds-py==0.23.1
# referencing
s3transfer==0.11.3
# via boto3
-setuptools==75.8.2
+setuptools==80.9.0
# via moto
six==1.17.0
# via
@@ -326,7 +334,7 @@ types-requests==2.32.0.20250301
# via types-tqdm
types-tqdm==4.67.0.20250301
# via -r requirements/_test.in
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# alembic
@@ -338,9 +346,14 @@ typing-extensions==4.12.2
# sqlalchemy2-stubs
# types-aiobotocore
# types-aiobotocore-s3
+ # typing-inspection
+typing-inspection==0.4.1
+ # via
+ # -c requirements/_base.txt
+ # pydantic
tzdata==2025.1
# via faker
-urllib3==2.3.0
+urllib3==2.5.0
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
diff --git a/packages/simcore-sdk/requirements/_tools.txt b/packages/simcore-sdk/requirements/_tools.txt
index 57c8ea032462..606e31a1bf7e 100644
--- a/packages/simcore-sdk/requirements/_tools.txt
+++ b/packages/simcore-sdk/requirements/_tools.txt
@@ -8,7 +8,7 @@ bump2version==1.0.1
# via -r requirements/../../../requirements/devenv.txt
cfgv==3.4.0
# via pre-commit
-click==8.1.8
+click==8.2.1
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
@@ -28,11 +28,11 @@ isort==6.0.1
# pylint
mccabe==0.7.0
# via pylint
-mypy==1.15.0
+mypy==1.16.1
# via
# -c requirements/_test.txt
# -r requirements/../../../requirements/devenv.txt
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via
# -c requirements/_test.txt
# black
@@ -46,7 +46,10 @@ packaging==24.2
# black
# build
pathspec==0.12.1
- # via black
+ # via
+ # -c requirements/_test.txt
+ # black
+ # mypy
pip==25.0.1
# via pip-tools
pip-tools==7.4.1
@@ -73,13 +76,13 @@ pyyaml==6.0.2
# pre-commit
ruff==0.9.9
# via -r requirements/../../../requirements/devenv.txt
-setuptools==75.8.2
+setuptools==80.9.0
# via
# -c requirements/_test.txt
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/packages/simcore-sdk/src/simcore_sdk/node_data/data_manager.py b/packages/simcore-sdk/src/simcore_sdk/node_data/data_manager.py
index db552f193b72..679c9645aea2 100644
--- a/packages/simcore-sdk/src/simcore_sdk/node_data/data_manager.py
+++ b/packages/simcore-sdk/src/simcore_sdk/node_data/data_manager.py
@@ -169,7 +169,7 @@ async def _state_metadata_entry_exists(
async def _delete_legacy_archive(
- project_id: ProjectID, node_uuid: NodeID, path: Path
+ project_id: ProjectID, node_uuid: NodeID, path: Path, *, application_name: str
) -> None:
"""removes the .zip state archive from storage"""
s3_object = __create_s3_object_key(
@@ -180,13 +180,15 @@ async def _delete_legacy_archive(
    # NOTE: if the service is opened by a person the user shared it with,
    # they will not have permission to delete the node.
    # Removing it via its owner guarantees access to the delete operation.
- owner_id = await DBManager().get_project_owner_user_id(project_id)
+ owner_id = await DBManager(
+ application_name=application_name
+ ).get_project_owner_user_id(project_id)
await filemanager.delete_file(
user_id=owner_id, store_id=SIMCORE_LOCATION, s3_object=s3_object
)
-async def push(
+async def push( # pylint: disable=too-many-arguments
user_id: UserID,
project_id: ProjectID,
node_uuid: NodeID,
@@ -198,6 +200,7 @@ async def push(
progress_bar: ProgressBarData,
aws_s3_cli_settings: AwsS3CliSettings | None,
legacy_state: LegacyState | None,
+ application_name: str,
) -> None:
"""pushes and removes the legacy archive if present"""
@@ -226,6 +229,7 @@ async def push(
project_id=project_id,
node_uuid=node_uuid,
path=source_path,
+ application_name=application_name,
)
if legacy_state:
@@ -244,6 +248,7 @@ async def push(
project_id=project_id,
node_uuid=node_uuid,
path=legacy_state.old_state_path,
+ application_name=application_name,
)
diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_filemanager_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_filemanager_utils.py
index 484380fded76..043b763764ad 100644
--- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_filemanager_utils.py
+++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_filemanager_utils.py
@@ -82,7 +82,9 @@ async def complete_upload(
state_url = _get_https_link_if_storage_secure(
f"{file_upload_complete_response.data.links.state}"
)
- _logger.info("completed upload of %s", f"{len(parts)} parts, received {state_url}")
+ _logger.info(
+        "requested upload completion of %s", f"{len(parts)} parts, received {state_url}"
+ )
async for attempt in AsyncRetrying(
reraise=True,
@@ -101,14 +103,14 @@ async def complete_upload(
).validate_python(await resp.json())
assert future_enveloped.data # nosec
if future_enveloped.data.state == FileUploadCompleteState.NOK:
- msg = "upload not ready yet"
+ msg = "upload not ready yet (FileUploadCompleteState.NOK)"
raise ValueError(msg)
if is_directory:
assert future_enveloped.data.e_tag is None # nosec
return None
assert future_enveloped.data.e_tag # nosec
- _logger.debug(
+ _logger.info(
"multipart upload completed in %s, received %s",
attempt.retry_state.retry_object.statistics,
f"{future_enveloped.data.e_tag=}",
diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/dbmanager.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/dbmanager.py
index 76d6d8222d31..21c0f0173b91 100644
--- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/dbmanager.py
+++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/dbmanager.py
@@ -82,22 +82,25 @@ async def _update_comp_run_snapshot_tasks_if_computational(
class DBContextManager:
- def __init__(self, db_engine: AsyncEngine | None = None) -> None:
+ def __init__(
+ self, db_engine: AsyncEngine | None = None, *, application_name: str
+ ) -> None:
self._db_engine: AsyncEngine | None = db_engine
self._db_engine_created: bool = False
+ self._application_name: str = application_name
@staticmethod
- async def _create_db_engine() -> AsyncEngine:
+ async def _create_db_engine(application_name: str) -> AsyncEngine:
settings = NodePortsSettings.create_from_envs()
engine = await create_async_engine_and_database_ready(
- settings.POSTGRES_SETTINGS
+ settings.POSTGRES_SETTINGS, f"{application_name}-simcore-sdk"
)
assert isinstance(engine, AsyncEngine) # nosec
return engine
async def __aenter__(self) -> AsyncEngine:
if not self._db_engine:
- self._db_engine = await self._create_db_engine()
+ self._db_engine = await self._create_db_engine(self._application_name)
self._db_engine_created = True
return self._db_engine
@@ -107,8 +110,9 @@ async def __aexit__(self, exc_type, exc, tb) -> None:
class DBManager:
- def __init__(self, db_engine: AsyncEngine | None = None):
+ def __init__(self, db_engine: AsyncEngine | None = None, *, application_name: str):
self._db_engine = db_engine
+ self._application_name = application_name
async def write_ports_configuration(
self,
@@ -124,7 +128,9 @@ async def write_ports_configuration(
node_configuration = json_loads(json_configuration)
async with (
- DBContextManager(self._db_engine) as engine,
+ DBContextManager(
+ self._db_engine, application_name=self._application_name
+ ) as engine,
engine.begin() as connection,
):
# 1. Update comp_tasks table
@@ -154,7 +160,9 @@ async def get_ports_configuration_from_node_uuid(
"Getting ports configuration of node %s from comp_tasks table", node_uuid
)
async with (
- DBContextManager(self._db_engine) as engine,
+ DBContextManager(
+ self._db_engine, application_name=self._application_name
+ ) as engine,
engine.connect() as connection,
):
node = await _get_node_from_db(project_id, node_uuid, connection)
@@ -171,7 +179,9 @@ async def get_ports_configuration_from_node_uuid(
async def get_project_owner_user_id(self, project_id: ProjectID) -> UserID:
async with (
- DBContextManager(self._db_engine) as engine,
+ DBContextManager(
+ self._db_engine, application_name=self._application_name
+ ) as engine,
engine.connect() as connection,
):
prj_owner = await connection.scalar(
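
DBManager and DBContextManager now require a keyword-only application_name, which is appended as "<application_name>-simcore-sdk" when the engine is created, so each consumer of the simcore-sdk is identifiable on the Postgres side (e.g. in pg_stat_activity). A usage sketch with an illustrative service name:

    from simcore_sdk.node_ports_common.dbmanager import DBManager


    async def read_ports_config(project_id, node_uuid) -> str:
        # "dy-sidecar" is only an example of a calling service's name
        db_manager = DBManager(application_name="dy-sidecar")
        return await db_manager.get_ports_configuration_from_node_uuid(
            project_id, node_uuid
        )
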
diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/file_io_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/file_io_utils.py
index 51aa3bae3c1c..be5cde27a24a 100644
--- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/file_io_utils.py
+++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/file_io_utils.py
@@ -7,10 +7,10 @@
from typing import IO, Any, Final, Protocol, runtime_checkable
import aiofiles
+import httpx
from aiohttp import (
ClientConnectionError,
ClientError,
- ClientPayloadError,
ClientResponse,
ClientResponseError,
ClientSession,
@@ -39,6 +39,7 @@
from tqdm.contrib.logging import tqdm_logging_redirect
from yarl import URL
+from ..config.http_clients import client_request_settings
from . import exceptions
from .constants import CHUNK_SIZE
@@ -148,13 +149,13 @@ async def __call__(self, log: str) -> None: ...
async def _file_chunk_writer(
file: Path,
- response: ClientResponse,
+ response: httpx.Response,
pbar: tqdm,
io_log_redirect_cb: LogRedirectCB | None,
progress_bar: ProgressBarData,
):
async with aiofiles.open(file, "wb") as file_pointer:
- while chunk := await response.content.read(CHUNK_SIZE):
+ async for chunk in response.aiter_bytes(CHUNK_SIZE):
await file_pointer.write(chunk)
if io_log_redirect_cb and pbar.update(len(chunk)):
with log_catch(_logger, reraise=False):
@@ -172,7 +173,6 @@ async def _file_chunk_writer(
async def download_link_to_file(
- session: ClientSession,
url: URL,
file_path: Path,
*,
@@ -185,16 +185,25 @@ async def download_link_to_file(
reraise=True,
wait=wait_exponential(min=1, max=10),
stop=stop_after_attempt(num_retries),
- retry=retry_if_exception_type(ClientConnectionError),
+ retry=retry_if_exception_type(httpx.TransportError),
before_sleep=before_sleep_log(_logger, logging.WARNING, exc_info=True),
after=after_log(_logger, log_level=logging.ERROR),
):
with attempt:
async with AsyncExitStack() as stack:
- response = await stack.enter_async_context(session.get(url))
- if response.status == status.HTTP_404_NOT_FOUND:
+ client = await stack.enter_async_context(
+ httpx.AsyncClient(
+ timeout=httpx.Timeout(
+ client_request_settings.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT
+ )
+ )
+ )
+ response = await stack.enter_async_context(
+ client.stream("GET", f"{url}")
+ )
+ if response.status_code == status.HTTP_404_NOT_FOUND:
raise exceptions.InvalidDownloadLinkError(url)
- if response.status > _VALID_HTTP_STATUS_CODES:
+ if response.status_code > _VALID_HTTP_STATUS_CODES:
raise exceptions.TransferError(url)
file_path.parent.mkdir(parents=True, exist_ok=True)
# SEE https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Length
@@ -231,7 +240,7 @@ async def download_link_to_file(
sub_progress,
)
_logger.debug("Download complete")
- except ClientPayloadError as exc:
+ except httpx.HTTPError as exc:
raise exceptions.TransferError(url) from exc
diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/filemanager.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/filemanager.py
index 5fdd631474d1..0849e8a0732a 100644
--- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/filemanager.py
+++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/filemanager.py
@@ -217,7 +217,6 @@ async def download_path_from_s3(
return await download_file_from_link(
download_link,
local_path,
- client_session=session,
io_log_redirect_cb=io_log_redirect_cb,
progress_bar=progress_bar,
)
@@ -229,7 +228,6 @@ async def download_file_from_link(
*,
io_log_redirect_cb: LogRedirectCB | None,
file_name: str | None = None,
- client_session: ClientSession | None = None,
progress_bar: ProgressBarData,
) -> Path:
# a download link looks something like:
@@ -242,15 +240,14 @@ async def download_file_from_link(
if io_log_redirect_cb:
await io_log_redirect_cb(f"downloading {local_file_path}, please wait...")
- async with ClientSessionContextManager(client_session) as session:
- await download_link_to_file(
- session,
- download_link,
- local_file_path,
- num_retries=NodePortsSettings.create_from_envs().NODE_PORTS_IO_NUM_RETRY_ATTEMPTS,
- io_log_redirect_cb=io_log_redirect_cb,
- progress_bar=progress_bar,
- )
+
+ await download_link_to_file(
+ download_link,
+ local_file_path,
+ num_retries=NodePortsSettings.create_from_envs().NODE_PORTS_IO_NUM_RETRY_ATTEMPTS,
+ io_log_redirect_cb=io_log_redirect_cb,
+ progress_bar=progress_bar,
+ )
if io_log_redirect_cb:
await io_log_redirect_cb(f"download of {local_file_path} complete.")
return local_file_path
diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/__init__.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/__init__.py
index 8874f98efe74..83c7435b4eeb 100644
--- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/__init__.py
+++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/__init__.py
@@ -22,17 +22,11 @@ async def ports(
project_id: ProjectIDStr,
node_uuid: NodeIDStr,
*,
- db_manager: DBManager | None = None,
+ db_manager: DBManager,
r_clone_settings: RCloneSettings | None = None,
io_log_redirect_cb: LogRedirectCB | None = None,
aws_s3_cli_settings: AwsS3CliSettings | None = None
) -> Nodeports:
- log.debug("creating node_ports_v2 object using provided dbmanager: %s", db_manager)
- # NOTE: warning every dbmanager create a new db engine!
- if db_manager is None: # NOTE: keeps backwards compatibility
- log.debug("no db manager provided, creating one...")
- db_manager = DBManager()
-
return await load(
db_manager=db_manager,
user_id=user_id,
@@ -47,9 +41,9 @@ async def ports(
__all__ = (
"DBManager",
- "exceptions",
"FileLinkType",
"Nodeports",
"Port",
+ "exceptions",
"ports",
)
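
With the implicit fallback removed, db_manager becomes a required argument of ports(): callers build one DBManager (carrying the application name) and reuse it, instead of every call silently creating a new engine. This mirrors the default_db_manager fixture added to the integration tests; the service name below is illustrative:

    from simcore_sdk import node_ports_v2


    async def load_ports(user_id: int, project_id: str, node_uuid: str):
        db_manager = node_ports_v2.DBManager(application_name="my-service")
        return await node_ports_v2.ports(
            user_id=user_id,
            project_id=project_id,
            node_uuid=node_uuid,
            db_manager=db_manager,
        )
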
diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port.py
index 014aff565298..a7f9ec22fd04 100644
--- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port.py
+++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port.py
@@ -114,10 +114,9 @@ def check_value(cls, v: DataItemValue, info: ValidationInfo) -> DataItemValue:
and not isinstance(v, PortLink)
):
if port_utils.is_file_type(property_type):
- if not isinstance(v, (FileLink, DownloadLink)):
- raise ValueError(
- f"{property_type!r} value does not validate against any of FileLink, DownloadLink or PortLink schemas"
- )
+ if not isinstance(v, FileLink | DownloadLink):
+ msg = f"{property_type!r} value does not validate against any of FileLink, DownloadLink or PortLink schemas"
+ raise ValueError(msg)
elif property_type == "ref_contentSchema":
v, _ = validate_port_content(
port_key=info.data.get("key"),
@@ -125,10 +124,11 @@ def check_value(cls, v: DataItemValue, info: ValidationInfo) -> DataItemValue:
unit=None,
content_schema=info.data.get("content_schema", {}),
)
- elif isinstance(v, (list, dict)):
- raise TypeError(
+ elif isinstance(v, list | dict):
+ msg = (
f"Containers as {v} currently only supported within content_schema."
)
+ raise TypeError(msg)
return v
@field_validator("value_item", "value_concrete", mode="before")
@@ -194,28 +194,29 @@ async def get_value(
)
async def _evaluate() -> ItemValue | None:
+ # NOTE: review types returned by this function !!!
if isinstance(self.value, PortLink):
# this is a link to another node's port
- other_port_itemvalue: None | (
- ItemValue
- ) = await port_utils.get_value_link_from_port_link(
- self.value,
- # pylint: disable=protected-access
- self._node_ports._node_ports_creator_cb,
- file_link_type=file_link_type,
+ other_port_itemvalue: ItemValue | None = (
+ await port_utils.get_value_link_from_port_link(
+ self.value,
+ # pylint: disable=protected-access
+ self._node_ports._node_ports_creator_cb,
+ file_link_type=file_link_type,
+ )
)
return other_port_itemvalue
if isinstance(self.value, FileLink):
# let's get the download/upload link from storage
- url_itemvalue: None | (
- AnyUrl
- ) = await port_utils.get_download_link_from_storage(
- # pylint: disable=protected-access
- user_id=self._node_ports.user_id,
- value=self.value,
- link_type=file_link_type,
+ url_itemvalue: AnyUrl | None = (
+ await port_utils.get_download_link_from_storage(
+ # pylint: disable=protected-access
+ user_id=self._node_ports.user_id,
+ value=self.value,
+ link_type=file_link_type,
+ )
)
return url_itemvalue
@@ -256,15 +257,15 @@ async def _evaluate() -> ItemConcreteValue | None:
if isinstance(self.value, PortLink):
# this is a link to another node
- other_port_concretevalue: None | (
- ItemConcreteValue
- ) = await port_utils.get_value_from_link(
- # pylint: disable=protected-access
- key=self.key,
- value=self.value,
- file_to_key_map=self.file_to_key_map,
- node_port_creator=self._node_ports._node_ports_creator_cb, # noqa: SLF001
- progress_bar=progress_bar,
+ other_port_concretevalue: None | ItemConcreteValue = (
+ await port_utils.get_value_from_link(
+ # pylint: disable=protected-access
+ key=self.key,
+ value=self.value,
+ file_to_key_map=self.file_to_key_map,
+ node_port_creator=self._node_ports._node_ports_creator_cb, # noqa: SLF001
+ progress_bar=progress_bar,
+ )
)
value = other_port_concretevalue
diff --git a/packages/simcore-sdk/tests/conftest.py b/packages/simcore-sdk/tests/conftest.py
index 39bd7070bae4..f8086084d748 100644
--- a/packages/simcore-sdk/tests/conftest.py
+++ b/packages/simcore-sdk/tests/conftest.py
@@ -10,6 +10,7 @@
import pytest
import simcore_sdk
+from faker import Faker
from helpers.utils_port_v2 import CONSTANT_UUID
from pytest_mock.plugin import MockerFixture
from pytest_simcore.helpers.postgres_tools import PostgresTestConfig
@@ -23,10 +24,12 @@
pytest_plugins = [
"pytest_simcore.aws_s3_service",
"pytest_simcore.aws_server",
+ "pytest_simcore.db_entries_mocks",
"pytest_simcore.disk_usage_monitoring",
"pytest_simcore.docker_compose",
"pytest_simcore.docker_swarm",
"pytest_simcore.file_extra",
+ "pytest_simcore.logging",
"pytest_simcore.minio_service",
"pytest_simcore.postgres_service",
"pytest_simcore.pytest_global_environs",
@@ -66,8 +69,7 @@ def empty_configuration_file() -> Path:
@pytest.fixture
def node_ports_config(
postgres_host_config: PostgresTestConfig, minio_s3_settings_envs: EnvVarsDict
-) -> None:
- ...
+) -> None: ...
@pytest.fixture
@@ -84,3 +86,8 @@ def constant_uuid4(mocker: MockerFixture) -> None:
"simcore_sdk.node_ports_common.data_items_utils.uuid4",
return_value=CONSTANT_UUID,
)
+
+
+@pytest.fixture
+def mock_app_name(faker: Faker) -> str:
+ return faker.pystr()
diff --git a/packages/simcore-sdk/tests/integration/conftest.py b/packages/simcore-sdk/tests/integration/conftest.py
index b32fc4aa1dfa..c7c755c24d5e 100644
--- a/packages/simcore-sdk/tests/integration/conftest.py
+++ b/packages/simcore-sdk/tests/integration/conftest.py
@@ -19,10 +19,10 @@
from models_library.users import UserID
from pydantic import TypeAdapter
from pytest_simcore.helpers.faker_factories import random_project, random_user
+from pytest_simcore.helpers.postgres_tools import sync_insert_and_get_row_lifespan
from settings_library.aws_s3_cli import AwsS3CliSettings
from settings_library.r_clone import RCloneSettings, S3Provider
from settings_library.s3 import S3Settings
-from simcore_postgres_database.models.comp_pipeline import comp_pipeline
from simcore_postgres_database.models.comp_tasks import comp_tasks
from simcore_postgres_database.models.file_meta_data import file_meta_data
from simcore_postgres_database.models.projects import projects
@@ -41,18 +41,16 @@ def user_id(postgres_db: sa.engine.Engine) -> Iterable[UserID]:
    # which would make this test too complex.
# pylint: disable=no-value-for-parameter
- with postgres_db.connect() as conn:
- result = conn.execute(
- users.insert().values(**random_user(name="test")).returning(users.c.id)
- )
- row = result.first()
- assert row
- usr_id = row[users.c.id]
-
- yield usr_id
+ with sync_insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup
+ postgres_db,
+ table=users,
+ values=random_user(
+ name="test",
+ ),
+ pk_col=users.c.id,
+ ) as user_row:
- with postgres_db.connect() as conn:
- conn.execute(users.delete().where(users.c.id == usr_id))
+ yield user_row["id"]
@pytest.fixture
@@ -100,9 +98,9 @@ def _create(key: str, file_path: Path) -> SimcoreS3FileID:
@pytest.fixture()
-def default_configuration(
+async def default_configuration(
node_ports_config: None,
- create_pipeline: Callable[[str], str],
+ create_pipeline: Callable[..., Awaitable[dict[str, Any]]],
create_task: Callable[..., str],
default_configuration_file: Path,
project_id: str,
@@ -110,7 +108,7 @@ def default_configuration(
) -> dict[str, Any]:
# prepare database with default configuration
json_configuration = default_configuration_file.read_text()
- create_pipeline(project_id)
+ await create_pipeline(project_id=project_id)
return _set_configuration(create_task, project_id, node_uuid, json_configuration)
@@ -167,15 +165,15 @@ async def _create(file_path: Path) -> dict[str, Any]:
@pytest.fixture()
-def create_special_configuration(
+async def create_special_configuration(
node_ports_config: None,
- create_pipeline: Callable[[str], str],
+ create_pipeline: Callable[..., Awaitable[dict[str, Any]]],
create_task: Callable[..., str],
empty_configuration_file: Path,
project_id: str,
node_uuid: str,
-) -> Callable:
- def _create(
+) -> Callable[..., Awaitable[tuple[dict, str, str]]]:
+ async def _create(
inputs: list[tuple[str, str, Any]] | None = None,
outputs: list[tuple[str, str, Any]] | None = None,
project_id: str = project_id,
@@ -184,7 +182,7 @@ def _create(
config_dict = json.loads(empty_configuration_file.read_text())
_assign_config(config_dict, "inputs", inputs if inputs else [])
_assign_config(config_dict, "outputs", outputs if outputs else [])
- project_id = create_pipeline(project_id)
+ await create_pipeline(project_id=project_id)
config_dict = _set_configuration(
create_task, project_id, node_id, json.dumps(config_dict)
)
@@ -194,13 +192,13 @@ def _create(
@pytest.fixture()
-def create_2nodes_configuration(
+async def create_2nodes_configuration(
node_ports_config: None,
- create_pipeline: Callable[[str], str],
+ create_pipeline: Callable[..., Awaitable[dict[str, Any]]],
create_task: Callable[..., str],
empty_configuration_file: Path,
-) -> Callable:
- def _create(
+) -> Callable[..., Awaitable[tuple[dict, str, str]]]:
+ async def _create(
prev_node_inputs: list[tuple[str, str, Any]],
prev_node_outputs: list[tuple[str, str, Any]],
inputs: list[tuple[str, str, Any]],
@@ -209,7 +207,7 @@ def _create(
previous_node_id: str,
node_id: str,
) -> tuple[dict, str, str]:
- create_pipeline(project_id)
+ await create_pipeline(project_id=project_id)
# create previous node
previous_config_dict = json.loads(empty_configuration_file.read_text())
@@ -241,34 +239,6 @@ def _create(
return _create
-@pytest.fixture
-def create_pipeline(postgres_db: sa.engine.Engine) -> Iterator[Callable[[str], str]]:
- created_pipeline_ids: list[str] = []
-
- def _create(project_id: str) -> str:
- with postgres_db.connect() as conn:
- result = conn.execute(
- comp_pipeline.insert() # pylint: disable=no-value-for-parameter
- .values(project_id=project_id)
- .returning(comp_pipeline.c.project_id)
- )
- row = result.first()
- assert row
- new_pipeline_id = row[comp_pipeline.c.project_id]
- created_pipeline_ids.append(f"{new_pipeline_id}")
- return new_pipeline_id
-
- yield _create
-
- # cleanup
- with postgres_db.connect() as conn:
- conn.execute(
- comp_pipeline.delete().where( # pylint: disable=no-value-for-parameter
- comp_pipeline.c.project_id.in_(created_pipeline_ids)
- )
- )
-
-
@pytest.fixture
def create_task(postgres_db: sa.engine.Engine) -> Iterator[Callable[..., str]]:
created_task_ids: list[int] = []
diff --git a/packages/simcore-sdk/tests/integration/test_node_data_data_manager.py b/packages/simcore-sdk/tests/integration/test_node_data_data_manager.py
index a25e95aa715f..f0ba46092397 100644
--- a/packages/simcore-sdk/tests/integration/test_node_data_data_manager.py
+++ b/packages/simcore-sdk/tests/integration/test_node_data_data_manager.py
@@ -292,6 +292,7 @@ async def test_delete_legacy_archive(
project_id=project_id,
node_uuid=node_uuid,
path=content_path,
+ application_name=faker.pystr(),
)
assert (
diff --git a/packages/simcore-sdk/tests/integration/test_node_ports_common_dbmanager.py b/packages/simcore-sdk/tests/integration/test_node_ports_common_dbmanager.py
index db8e51d7dbd4..cfd97db1c982 100644
--- a/packages/simcore-sdk/tests/integration/test_node_ports_common_dbmanager.py
+++ b/packages/simcore-sdk/tests/integration/test_node_ports_common_dbmanager.py
@@ -3,7 +3,7 @@
# pylint:disable=redefined-outer-name
import json
-from collections.abc import Callable
+from collections.abc import Awaitable, Callable
from pathlib import Path
from simcore_sdk.node_ports_common.dbmanager import DBManager
@@ -21,8 +21,9 @@ async def test_db_manager_read_config(
node_uuid: str,
node_ports_config: None,
default_configuration: dict,
+ mock_app_name: str,
):
- db_manager = DBManager()
+ db_manager = DBManager(application_name=mock_app_name)
ports_configuration_str = await db_manager.get_ports_configuration_from_node_uuid(
project_id, node_uuid
)
@@ -35,15 +36,16 @@ async def test_db_manager_write_config(
project_id: str,
node_uuid: str,
node_ports_config: None,
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
default_configuration_file: Path,
+ mock_app_name: str,
):
# create an empty config
- create_special_configuration()
+ await create_special_configuration()
# read the default config
json_configuration = default_configuration_file.read_text()
# write the default config to the database
- db_manager = DBManager()
+ db_manager = DBManager(application_name=mock_app_name)
await db_manager.write_ports_configuration(
json_configuration, project_id, node_uuid
)
diff --git a/packages/simcore-sdk/tests/integration/test_node_ports_v2_nodeports2.py b/packages/simcore-sdk/tests/integration/test_node_ports_v2_nodeports2.py
index 88d16e383d25..9832f758a2ef 100644
--- a/packages/simcore-sdk/tests/integration/test_node_ports_v2_nodeports2.py
+++ b/packages/simcore-sdk/tests/integration/test_node_ports_v2_nodeports2.py
@@ -34,6 +34,7 @@
from servicelib.progress_bar import ProgressBarData
from settings_library.r_clone import RCloneSettings
from simcore_sdk import node_ports_v2
+from simcore_sdk.node_ports_common.dbmanager import DBManager
from simcore_sdk.node_ports_common.exceptions import UnboundPortError
from simcore_sdk.node_ports_v2 import exceptions
from simcore_sdk.node_ports_v2.links import ItemConcreteValue, PortLink
@@ -162,12 +163,18 @@ async def option_r_clone_settings(
return None
+@pytest.fixture
+def default_db_manager(faker: Faker) -> DBManager:
+ return node_ports_v2.DBManager(application_name=f"pytest_{faker.pystr()}")
+
+
async def test_default_configuration(
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
default_configuration: dict[str, Any],
option_r_clone_settings: RCloneSettings | None,
+ default_db_manager: DBManager,
):
config_dict = default_configuration
await check_config_valid(
@@ -176,6 +183,7 @@ async def test_default_configuration(
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
),
config_dict,
)
@@ -185,15 +193,17 @@ async def test_invalid_ports(
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
option_r_clone_settings: RCloneSettings | None,
+ default_db_manager: DBManager,
):
- config_dict, _, _ = create_special_configuration()
+ config_dict, _, _ = await create_special_configuration()
PORTS = await node_ports_v2.ports(
user_id=user_id,
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
@@ -223,14 +233,15 @@ async def test_port_value_accessors(
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
item_type: str,
item_value: ItemConcreteValue,
item_pytype: type,
option_r_clone_settings: RCloneSettings | None,
+ default_db_manager: DBManager,
): # pylint: disable=W0613, W0621
item_key = TypeAdapter(ServicePortKey).validate_python("some_key")
- config_dict, _, _ = create_special_configuration(
+ config_dict, _, _ = await create_special_configuration(
inputs=[(item_key, item_type, item_value)],
outputs=[(item_key, item_type, None)],
)
@@ -240,6 +251,7 @@ async def test_port_value_accessors(
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
@@ -266,7 +278,7 @@ async def test_port_value_accessors(
],
)
async def test_port_file_accessors(
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
s3_simcore_location: LocationID,
item_type: str,
item_value: str,
@@ -279,6 +291,7 @@ async def test_port_file_accessors(
option_r_clone_settings: RCloneSettings | None,
request: pytest.FixtureRequest,
constant_uuid4: None,
+ default_db_manager: DBManager,
):
if item_value == "symlink_path":
item_value = request.getfixturevalue("symlink_path")
@@ -287,7 +300,7 @@ async def test_port_file_accessors(
config_value["path"] = f"{project_id}/{node_uuid}/{Path(config_value['path']).name}"
- config_dict, _project_id, _node_uuid = create_special_configuration(
+ config_dict, _project_id, _node_uuid = await create_special_configuration(
inputs=[("in_1", item_type, config_value)],
outputs=[("out_34", item_type, None)],
)
@@ -300,6 +313,7 @@ async def test_port_file_accessors(
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
assert (
@@ -367,16 +381,18 @@ async def test_adding_new_ports(
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
postgres_db: sa.engine.Engine,
option_r_clone_settings: RCloneSettings | None,
+ default_db_manager: DBManager,
):
- config_dict, project_id, node_uuid = create_special_configuration()
+ config_dict, project_id, node_uuid = await create_special_configuration()
PORTS = await node_ports_v2.ports(
user_id=user_id,
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
@@ -418,11 +434,12 @@ async def test_removing_ports(
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
postgres_db: sa.engine.Engine,
option_r_clone_settings: RCloneSettings | None,
+ default_db_manager: DBManager,
):
- config_dict, project_id, node_uuid = create_special_configuration(
+ config_dict, project_id, node_uuid = await create_special_configuration(
inputs=[("in_14", "integer", 15), ("in_17", "boolean", False)],
outputs=[("out_123", "string", "blahblah"), ("out_2", "number", -12.3)],
) # pylint: disable=W0612
@@ -431,6 +448,7 @@ async def test_removing_ports(
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
# let's remove the first input
@@ -469,14 +487,15 @@ async def test_get_value_from_previous_node(
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
- create_2nodes_configuration: Callable,
+ create_2nodes_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
create_node_link: Callable,
item_type: str,
item_value: ItemConcreteValue,
item_pytype: type,
option_r_clone_settings: RCloneSettings | None,
+ default_db_manager: DBManager,
):
- config_dict, _, _ = create_2nodes_configuration(
+ config_dict, _, _ = await create_2nodes_configuration(
prev_node_inputs=None,
prev_node_outputs=[("output_int", item_type, item_value)],
inputs=[("in_15", item_type, create_node_link("output_int"))],
@@ -491,6 +510,7 @@ async def test_get_value_from_previous_node(
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
@@ -515,7 +535,7 @@ async def test_get_value_from_previous_node(
],
)
async def test_get_file_from_previous_node(
- create_2nodes_configuration: Callable,
+ create_2nodes_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
@@ -526,8 +546,9 @@ async def test_get_file_from_previous_node(
item_pytype: type,
option_r_clone_settings: RCloneSettings | None,
constant_uuid4: None,
+ default_db_manager: DBManager,
):
- config_dict, _, _ = create_2nodes_configuration(
+ config_dict, _, _ = await create_2nodes_configuration(
prev_node_inputs=None,
prev_node_outputs=[
("output_int", item_type, await create_store_link(item_value))
@@ -543,6 +564,7 @@ async def test_get_file_from_previous_node(
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
file_path = await (await PORTS.inputs)[
@@ -572,7 +594,7 @@ async def test_get_file_from_previous_node(
],
)
async def test_get_file_from_previous_node_with_mapping_of_same_key_name(
- create_2nodes_configuration: Callable,
+ create_2nodes_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
@@ -585,8 +607,9 @@ async def test_get_file_from_previous_node_with_mapping_of_same_key_name(
item_pytype: type,
option_r_clone_settings: RCloneSettings | None,
constant_uuid4: None,
+ default_db_manager: DBManager,
):
- config_dict, _, this_node_uuid = create_2nodes_configuration(
+ config_dict, _, this_node_uuid = await create_2nodes_configuration(
prev_node_inputs=None,
prev_node_outputs=[("in_15", item_type, await create_store_link(item_value))],
inputs=[("in_15", item_type, create_node_link("in_15"))],
@@ -600,6 +623,7 @@ async def test_get_file_from_previous_node_with_mapping_of_same_key_name(
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
# add a filetokeymap
@@ -635,7 +659,7 @@ async def test_get_file_from_previous_node_with_mapping_of_same_key_name(
],
)
async def test_file_mapping(
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
@@ -649,8 +673,9 @@ async def test_file_mapping(
option_r_clone_settings: RCloneSettings | None,
create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID],
constant_uuid4: None,
+ default_db_manager: DBManager,
):
- config_dict, project_id, node_uuid = create_special_configuration(
+ config_dict, project_id, node_uuid = await create_special_configuration(
inputs=[("in_1", item_type, await create_store_link(item_value))],
outputs=[("out_1", item_type, None)],
project_id=project_id,
@@ -661,6 +686,7 @@ async def test_file_mapping(
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
# add a filetokeymap
@@ -735,11 +761,12 @@ async def test_regression_concurrent_port_update_fails(
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
int_item_value: int,
parallel_int_item_value: int,
port_count: int,
option_r_clone_settings: RCloneSettings | None,
+ default_db_manager: DBManager,
) -> None:
"""
when using `await PORTS.outputs` test will fail
@@ -747,13 +774,14 @@ async def test_regression_concurrent_port_update_fails(
"""
outputs = [(f"value_{i}", "integer", None) for i in range(port_count)]
- config_dict, _, _ = create_special_configuration(inputs=[], outputs=outputs)
+ config_dict, _, _ = await create_special_configuration(inputs=[], outputs=outputs)
PORTS = await node_ports_v2.ports(
user_id=user_id,
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
@@ -824,25 +852,29 @@ async def test_batch_update_inputs_outputs(
user_id: int,
project_id: str,
node_uuid: NodeIDStr,
- create_special_configuration: Callable,
+ create_special_configuration: Callable[..., Awaitable[tuple[dict, str, str]]],
port_count: int,
option_r_clone_settings: RCloneSettings | None,
faker: Faker,
output_callbacks: _Callbacks,
spy_outputs_callbaks: dict[str, AsyncMock],
use_output_callbacks: bool,
+ default_db_manager: DBManager,
) -> None:
callbacks = output_callbacks if use_output_callbacks else None
outputs = [(f"value_out_{i}", "integer", None) for i in range(port_count)]
inputs = [(f"value_in_{i}", "integer", None) for i in range(port_count)]
- config_dict, _, _ = create_special_configuration(inputs=inputs, outputs=outputs)
+ config_dict, _, _ = await create_special_configuration(
+ inputs=inputs, outputs=outputs
+ )
PORTS = await node_ports_v2.ports(
user_id=user_id,
project_id=project_id,
node_uuid=node_uuid,
r_clone_settings=option_r_clone_settings,
+ db_manager=default_db_manager,
)
await check_config_valid(PORTS, config_dict)
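The recurring edit in the hunks above is that every `node_ports_v2.ports(...)` call now receives an explicit `db_manager` and the `create_*_configuration` fixtures are awaited. A minimal sketch of the resulting call pattern, assuming `DBManager` is importable from `simcore_sdk.node_ports_common.dbmanager` and with an illustrative application name:

```python
from simcore_sdk import node_ports_v2
from simcore_sdk.node_ports_common.dbmanager import DBManager  # import path assumed


async def load_ports(user_id: int, project_id: str, node_uuid: str):
    # one explicitly constructed DBManager instead of an implicitly created default
    db_manager = DBManager(application_name="simcore-sdk-tests")  # name is illustrative
    return await node_ports_v2.ports(
        user_id=user_id,
        project_id=project_id,
        node_uuid=node_uuid,
        db_manager=db_manager,  # new keyword argument exercised by these tests
    )
```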
diff --git a/packages/simcore-sdk/tests/unit/conftest.py b/packages/simcore-sdk/tests/unit/conftest.py
index 34cd932081cf..527e02d10038 100644
--- a/packages/simcore-sdk/tests/unit/conftest.py
+++ b/packages/simcore-sdk/tests/unit/conftest.py
@@ -3,7 +3,7 @@
# pylint:disable=redefined-outer-name
import json
-from collections.abc import AsyncIterator, Callable
+from collections.abc import Callable
from random import randint
from typing import Any
from uuid import uuid4
@@ -32,7 +32,8 @@ async def mock_db_manager(
monkeypatch,
project_id: str,
node_uuid: str,
-) -> AsyncIterator[Callable]:
+ mock_app_name: str,
+) -> Callable[[dict[str, Any]], DBManager]:
def _mock_db_manager(port_cfg: dict[str, Any]) -> DBManager:
async def mock_get_ports_configuration_from_node_uuid(*args, **kwargs) -> str:
return json.dumps(port_cfg)
@@ -55,7 +56,6 @@ async def mock_write_ports_configuration(
mock_write_ports_configuration,
)
- db_manager = DBManager()
- return db_manager
+ return DBManager(application_name=mock_app_name)
return _mock_db_manager
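A short usage sketch for the reworked `mock_db_manager` fixture: it is now a plain factory taking the port configuration and returning a `DBManager` built with an explicit `application_name`. The test name and the `default_configuration` fixture are assumptions for illustration:

```python
from typing import Any


def test_uses_injected_db_manager(
    mock_db_manager,                        # the factory fixture defined above
    default_configuration: dict[str, Any],  # assumed fixture providing a port config
):
    db_manager = mock_db_manager(default_configuration)
    assert db_manager is not None
    # the returned DBManager can then be passed explicitly, e.g.
    #   await ports(user_id, project_id, node_uuid, db_manager=db_manager)
```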
diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_v2_nodeports_v2.py b/packages/simcore-sdk/tests/unit/test_node_ports_v2_nodeports_v2.py
index 250f9d2599d4..abd91d7b5910 100644
--- a/packages/simcore-sdk/tests/unit/test_node_ports_v2_nodeports_v2.py
+++ b/packages/simcore-sdk/tests/unit/test_node_ports_v2_nodeports_v2.py
@@ -3,8 +3,9 @@
# pylint:disable=redefined-outer-name
# pylint:disable=protected-access
+from collections.abc import Callable
from pathlib import Path
-from typing import Any, Callable
+from typing import Any
from unittest.mock import AsyncMock
import pytest
@@ -222,8 +223,7 @@ async def test_node_ports_v2_packages(
node_uuid: str,
):
db_manager = mock_db_manager(default_configuration)
- node_ports = await ports(user_id, project_id, node_uuid)
- node_ports = await ports(user_id, project_id, node_uuid, db_manager=db_manager)
+ await ports(user_id, project_id, node_uuid, db_manager=db_manager)
@pytest.fixture
diff --git a/requirements/base.Makefile b/requirements/base.Makefile
index 35823f26d16c..a2ee7be1ddfd 100644
--- a/requirements/base.Makefile
+++ b/requirements/base.Makefile
@@ -15,7 +15,7 @@ UPGRADE_OPTION := $(if $(upgrade),--upgrade-package "$(upgrade)",$(DO_CLEAN_OR_U
objects = $(sort $(wildcard *.in))
outputs := $(objects:.in=.txt)
-reqs: $(outputs) ## pip-compiles all requirements/*.in -> requirements/*.txt; make reqs upgrade=foo will only upgrade package foo
+reqs: $(outputs) ## pip-compiles all requirements/*.in -> requirements/*.txt; make reqs upgrade=foo will only upgrade package foo; make reqs startswith=pytest will upgrade packages starting with pytest
touch:
@$(foreach p,${objects},touch ${p};)
@@ -36,6 +36,12 @@ help: ## this colorful help
@echo ""
@awk --posix 'BEGIN {FS = ":.*?## "} /^[[:alpha:][:space:]_-]+:.*?## / {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@echo ""
+ @echo "Examples:"
+ @echo " make reqs # Upgrade all packages"
+ @echo " make reqs upgrade=pytest # Upgrade only pytest package"
+ @echo " make reqs startswith=pytest # Upgrade all packages starting with 'pytest'"
+ @echo " make reqs clean=1 # Clean and rebuild all requirements"
+ @echo ""
# ------------------------------------------------------------------------------------------
@@ -44,10 +50,28 @@ help: ## this colorful help
# extracting subsets of requiremenst like e.g _dask-distributed.*
#
%.txt: %.in
- cd ..; \
- uv pip compile $(UPGRADE_OPTION) \
- --no-header \
- --output-file requirements/$@ requirements/$<
+ @if [ -n "$(startswith)" ]; then \
+ MATCHING_PACKAGES=$$(grep '^$(startswith)' $@ 2>/dev/null | cut -d= -f1); \
+ if [ -z "$$MATCHING_PACKAGES" ]; then \
+ echo "No packages starting with '$(startswith)' found in $@. Skipping."; \
+ exit 0; \
+ fi; \
+ STARTSWITH_UPGRADE=$$(echo "$$MATCHING_PACKAGES" | xargs -n1 echo --upgrade-package); \
+ cd ..; \
+ uv pip compile $$STARTSWITH_UPGRADE \
+ --no-header \
+ --output-file requirements/$@ requirements/$<; \
+ elif [ -n "$(upgrade)" ]; then \
+ cd ..; \
+ uv pip compile --upgrade-package "$(upgrade)" \
+ --no-header \
+ --output-file requirements/$@ requirements/$<; \
+ else \
+ cd ..; \
+ uv pip compile $(DO_CLEAN_OR_UPGRADE) \
+ --no-header \
+ --output-file requirements/$@ requirements/$<; \
+ fi
_test.txt: _base.txt
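The new `startswith=` branch above uses `grep`/`cut`/`xargs` to turn every pinned package whose name starts with the given prefix into repeated `--upgrade-package` flags for `uv pip compile`. A rough Python equivalent of that selection step, with the file name purely illustrative:

```python
from pathlib import Path


def build_upgrade_flags(compiled_requirements: Path, prefix: str) -> list[str]:
    """Mirror `grep '^<prefix>' <txt> | cut -d= -f1 | xargs -n1 echo --upgrade-package`."""
    flags: list[str] = []
    for line in compiled_requirements.read_text().splitlines():
        if line.startswith(prefix):            # e.g. "pytest-mock==3.14.1"
            package = line.split("=", 1)[0]    # keep only the package name
            flags += ["--upgrade-package", package]
    return flags


# build_upgrade_flags(Path("_test.txt"), "pytest")
#   -> ["--upgrade-package", "pytest", "--upgrade-package", "pytest-asyncio", ...]
```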
diff --git a/requirements/constraints.txt b/requirements/constraints.txt
index 627c93b23ac6..b839fb276dcd 100644
--- a/requirements/constraints.txt
+++ b/requirements/constraints.txt
@@ -10,7 +10,6 @@
# Vulnerabilities -----------------------------------------------------------------------------------------
#
aiohttp>=3.7.4, !=3.11.13 # https://github.com/advisories/GHSA-v6wp-4m6f-gcjg, 3.11.13 was yanked https://github.com/aio-libs/aiohttp/issues/10617
-aiohttp<3.12.0 # attempt to fix an issue with Content Length which could have been introduced in 3.12.x versions
certifi>=2023.7.22 # https://github.com/advisories/GHSA-xqr8-7jwr-rhp7
cryptography>=41.0.6 # https://github.com/advisories/GHSA-v8gr-m533-ghj9
httpx>=0.23.0 # https://github.com/advisories/GHSA-h8pj-cxx2-jfg2 / CVE-2021-41945
@@ -51,6 +50,11 @@ httpx!=0.28.0 # Waiting for fix in respx: https://github.com/lundberg/respx/pul
#
pydantic>=2.10.0 # Avoids inter-version compatibility serialization errors as: _pickle.UnpicklingError: NEWOBJ class argument must be a type, not _AnnotatedAlias
+# See issue https://github.com/ITISFoundation/osparc-simcore/issues/7300
+pydantic-settings<2.7.1
+
+# avoid downgrades of openapi-spec-validator related libraries
+referencing<=0.35.1
#
@@ -67,9 +71,3 @@ pennsieve>=999999999
# User alternative e.g. parametrized fixture or request.getfixturevalue(.)
pytest-lazy-fixture>=999999999
-
-# avoid downgrades of openapi-spec-validator related libraries
-referencing<=0.35.1
-
-# See issue https://github.com/ITISFoundation/osparc-simcore/issues/7300
-pydantic-settings<2.7.1
diff --git a/requirements/how-to-upgrade-python.md b/requirements/how-to-upgrade-python.md
index 51217f5f53b4..0b28fcad29f6 100644
--- a/requirements/how-to-upgrade-python.md
+++ b/requirements/how-to-upgrade-python.md
@@ -17,16 +17,7 @@ Both python and pip version are specified:
ARG PYTHON_VERSION="3.9.12"
FROM python:${PYTHON_VERSION}-slim-bookworm as base
```
-- in the CI ``.github/workflows/ci-testing-deploy.yml``
- ```yaml
- jobs:
- ... :
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- python: ["3.9"]
- ```
- and in ``ci/helpers/ensure_python_pip.bash``
+- in ``.python-version``
diff --git a/requirements/tools/Dockerfile b/requirements/tools/Dockerfile
index e5fc9373dc99..534f47ce757f 100644
--- a/requirements/tools/Dockerfile
+++ b/requirements/tools/Dockerfile
@@ -9,7 +9,7 @@
#
#
ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
# we docker image is built based on debian
FROM python:${PYTHON_VERSION}-slim-bookworm AS base
@@ -31,10 +31,7 @@ COPY --from=uv_build /uv /uvx /bin/
RUN uv venv "${VIRTUAL_ENV}"
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
+
diff --git a/requirements/tools/Makefile b/requirements/tools/Makefile
index ee094b80ef94..87e3afdcd4b6 100644
--- a/requirements/tools/Makefile
+++ b/requirements/tools/Makefile
@@ -24,6 +24,9 @@ SERVICES_DIR := $(abspath $(REPODIR)/services)
IMAGE := local/requirements/tools
UPGRADE_OPTION := $(if $(upgrade),upgrade=$(upgrade),)
+STARTSWITH_OPTION := $(if $(startswith),startswith=$(startswith),)
+UPGRADE_OR_STARTSWITH_OPTION := $(if $(upgrade),upgrade=$(upgrade),$(if $(startswith),startswith=$(startswith),))
+UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION := $(if $(upgrade),upgrade=$(upgrade),$(if $(startswith),startswith=$(startswith),$(if $(clean),clean=$(clean),)))
# tools
MAKE_C := $(MAKE) --directory
@@ -70,20 +73,20 @@ touch: ## touches all package requirement inputs
only-tools: ## upgrades tools repo wide
# Upgrading ONLY _tools.in
- @$(foreach p,${_tools-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);)
+ @$(foreach p,${_tools-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION);)
-reqs: ## updates test & tooling requirements
- # Upgrading $(upgrade) requirements
- @$(foreach p,${_target-inputs},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);)
+reqs: guard-UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION ## updates test & tooling requirements
+ # Upgrading $(upgrade)$(startswith)$(if $(clean), cleaning) requirements
+ @$(foreach p,${_target-inputs},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION);)
-reqs-all: guard-UPGRADE_OPTION ## updates a given package repository-wise IN ALL `requirements/` folders (e.g. make reqs-all upgrade=foo==1.2.3 )
- # Upgrading $(upgrade) ALL requirements
- @$(foreach p,${_all-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);)
+reqs-all: guard-UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION ## updates a given package or pattern repository-wise IN ALL `requirements/` folders (e.g. make reqs-all upgrade=foo==1.2.3 or make reqs-all startswith=pytest or make reqs-all clean=1)
+ # Upgrading $(upgrade)$(startswith)$(if $(clean), cleaning) ALL requirements
+ @$(foreach p,${_all-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION);)
-reqs-services: guard-UPGRADE_OPTION ## updates a given package on all services [and not packages] (e.g. make reqs-services upgrade=foo==1.2.3 )
- # Upgrading $(upgrade) in services
- @$(foreach p,${_services-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);)
+reqs-services: guard-UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION ## updates a given package or pattern on all services [and not packages] (e.g. make reqs-services upgrade=foo==1.2.3 or make reqs-services startswith=pytest or make reqs-services clean=1)
+ # Upgrading $(upgrade)$(startswith)$(if $(clean), cleaning) in services
+ @$(foreach p,${_services-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION);)
reqs-ci: ## upgrades requirements for pylint recipe in CI
@@ -112,7 +115,7 @@ run: build ## Runs upgrade in a container [WARNING! UNDER DEV. USE CAREFULY]
--user=$(shell id -u):$(shell id -g) \
--entrypoint=/bin/bash \
${IMAGE_NAME} \
- -c "cd requirements/tools; make reqs $(if $(upgrade),upgrade=$(upgrade),)"
+ -c "cd requirements/tools; make reqs $(UPGRADE_OR_STARTSWITH_OR_CLEAN_OPTION)"
.PHONY: shell
@@ -134,6 +137,21 @@ help: ## this colorful help
@echo ""
@awk --posix 'BEGIN {FS = ":.*?## "} /^[[:alpha:][:space:]_-]+:.*?## / {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@echo ""
+ @echo "Examples:"
+ @echo " make reqs # Upgrade all test & tooling requirements"
+ @echo " make reqs upgrade=pytest # Upgrade only pytest package in test & tooling"
+ @echo " make reqs startswith=pytest # Upgrade all packages starting with 'pytest' in test & tooling"
+ @echo " make reqs clean=1 # Clean and rebuild all test & tooling requirements"
+ @echo ""
+ @echo " make reqs-all upgrade=fastapi # Upgrade fastapi in ALL requirements files"
+ @echo " make reqs-all startswith=pydantic # Upgrade all packages starting with pydantic repo-wide"
+ @echo " make reqs-all clean=1 # Clean and rebuild ALL requirements"
+ @echo ""
+ @echo " make reqs-services upgrade=uvicorn # Upgrade uvicorn only in services"
+ @echo " make reqs-services startswith=sqlalchemy # Upgrade all packages starting with sqlalchemy in services folder"
+ @echo ""
+ @echo " make only-tools upgrade=black # Upgrade black only in tools requirements"
+ @echo ""
.PHONY: guard-%
diff --git a/requirements/tools/check_changes.py b/requirements/tools/check_changes.py
index e14425a740b3..234cafeed4b3 100644
--- a/requirements/tools/check_changes.py
+++ b/requirements/tools/check_changes.py
@@ -166,9 +166,11 @@ def main_changes_stats() -> None:
f'{",".join(to_versions) if to_versions else "🗑️ removed":10s}',
"|",
# how big the version change is
- f"{tag_upgrade(sorted(set(before[name]))[-1], sorted(set(after[name]))[-1]):10s}"
- if to_versions
- else "",
+ (
+ f"{tag_upgrade(sorted(set(before[name]))[-1], sorted(set(after[name]))[-1]):10s}"
+ if to_versions
+ else ""
+ ),
"|",
counts[name],
"|",
@@ -279,7 +281,7 @@ def main() -> None:
if args.changed_reqs:
main_changes_stats()
else:
- repo_wide_changes(exclude={"*/director/*"})
+ repo_wide_changes(exclude={"*/director/*", "*/.cache/uv/*"})
if __name__ == "__main__":
diff --git a/scripts/common.Makefile b/scripts/common.Makefile
index 0dc78b889dd7..dbaddbfce93e 100644
--- a/scripts/common.Makefile
+++ b/scripts/common.Makefile
@@ -173,6 +173,21 @@ mypy: $(REPO_BASE_DIR)/mypy.ini ## runs mypy python static type-checker on this
$(CURDIR)/src
+.PHONY: mypy-debug
+mypy-debug: $(REPO_BASE_DIR)/mypy.ini ## runs mypy with profiling and reporting enabled
+ @rm -rf $(CURDIR)/.mypy-report
+ @mkdir -p $(CURDIR)/.mypy-report
+ @mypy \
+ --config-file=$(REPO_BASE_DIR)/mypy.ini \
+ --show-error-context \
+ --show-traceback \
+ --verbose \
+ --linecount-report $(CURDIR)/.mypy-report \
+ --any-exprs-report $(CURDIR)/.mypy-report \
+ $(CURDIR)/src \
+ | tee $(CURDIR)/.mypy-report/mypy.logs
+
+
.PHONY: codestyle
codestyle codestyle-ci: ## enforces codestyle (isort & black) finally runs pylint & mypy
@$(SCRIPTS_DIR)/codestyle.bash $(if $(findstring -ci,$@),ci,development) $(shell basename "${SRC_DIR}")
diff --git a/scripts/erd/Dockerfile b/scripts/erd/Dockerfile
index 6991e52f8c0e..522b55e9101b 100644
--- a/scripts/erd/Dockerfile
+++ b/scripts/erd/Dockerfile
@@ -8,7 +8,7 @@
#
ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
# we docker image is built based on debian
FROM python:${PYTHON_VERSION}-slim-bookworm AS base
@@ -28,10 +28,6 @@ RUN apt-get update \
# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv
COPY --from=uv_build /uv /uvx /bin/
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
COPY requirements.txt .
RUN --mount=type=cache,target=/root/.cache/uv \
diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py
index 4eff89026dd6..9f1919b8d33f 100755
--- a/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py
+++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py
@@ -615,7 +615,11 @@ async def _cancel_all_jobs(
the_cluster,
dask_task.job_id,
)
- if comp_task is not None and abort_in_db:
+ if (
+ comp_task is not None
+ and comp_task.state not in ["FAILED", "SUCCESS", "ABORTED"]
+ and abort_in_db
+ ):
await db.abort_job_in_db(state, comp_task.project_id, comp_task.node_id)
rich.print("cancelled all tasks")
diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/db.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/db.py
index 14190934aa19..c266cdd3cd52 100644
--- a/scripts/maintenance/computational-clusters/autoscaled_monitor/db.py
+++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/db.py
@@ -18,8 +18,11 @@ async def db_engine(
state: AppState,
) -> AsyncGenerator[AsyncEngine, Any]:
async with contextlib.AsyncExitStack() as stack:
- assert state.environment["POSTGRES_ENDPOINT"] # nosec
- db_endpoint = state.environment["POSTGRES_ENDPOINT"]
+ assert state.environment["POSTGRES_HOST"] # nosec
+ assert state.environment["POSTGRES_PORT"] # nosec
+ db_endpoint = (
+ f"{state.environment['POSTGRES_HOST']}:{state.environment['POSTGRES_PORT']}"
+ )
if state.main_bastion_host:
assert state.ssh_key_path # nosec
db_host, db_port = db_endpoint.split(":")
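The monitor previously read a single `POSTGRES_ENDPOINT` variable; it now derives the endpoint from `POSTGRES_HOST` and `POSTGRES_PORT`. A minimal sketch of that resolution, with a plain dict standing in for `AppState.environment`:

```python
def resolve_db_endpoint(environment: dict[str, str]) -> str:
    host = environment["POSTGRES_HOST"]
    port = environment["POSTGRES_PORT"]
    assert host and port  # both must be set, mirroring the asserts above
    return f"{host}:{port}"  # e.g. "postgres:5432", split again when a bastion host is used
```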
diff --git a/scripts/maintenance/migrate_project/Dockerfile b/scripts/maintenance/migrate_project/Dockerfile
index 1092f6ca3b1e..31d120405ab3 100644
--- a/scripts/maintenance/migrate_project/Dockerfile
+++ b/scripts/maintenance/migrate_project/Dockerfile
@@ -1,5 +1,5 @@
# syntax=docker/dockerfile:1
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
# we docker image is built based on debian
FROM python:3.11.9-buster
diff --git a/scripts/metrics/Makefile b/scripts/metrics/Makefile
deleted file mode 100644
index 3453cfe3cfb6..000000000000
--- a/scripts/metrics/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-.DEFAULT_GOAL := install
-
-SHELL := /bin/bash
-
-install:
- # creating python virtual environment
- @python3 -m venv .venv
- # installing python dependencies
- @.venv/bin/pip install --upgrade pip setuptools wheel
- @.venv/bin/pip install -r requirements.txt
- # activate the python virtual environment by running: ```source .venv/bin/activate```
diff --git a/scripts/metrics/compute_list_of_images_in_registry.py b/scripts/metrics/compute_list_of_images_in_registry.py
deleted file mode 100755
index 518c7932b5f2..000000000000
--- a/scripts/metrics/compute_list_of_images_in_registry.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#! /usr/bin/env python3
-
-import asyncio
-import json
-from collections import defaultdict, deque
-from datetime import date, datetime
-from pathlib import Path
-from pprint import pformat
-
-import typer
-from httpx import URL, AsyncClient
-
-N = len("2020-10-09T12:28:14.7710")
-
-
-async def get_repos(client):
- r = await client.get(
- "/_catalog",
- )
- r.raise_for_status()
- list_of_repositories = r.json()["repositories"]
- typer.secho(
- f"got the list of {len(list_of_repositories)} repositories from the registry"
- )
- filtered_list_of_repositories = list(
- filter(
- lambda repo: repo.startswith("simcore/services/dynamic/")
- or repo.startswith("simcore/services/comp/"),
- list_of_repositories,
- )
- )
- return filtered_list_of_repositories
-
-
-async def list_images_in_registry(
- endpoint: URL,
- username: str,
- password: str,
- from_date: datetime | None,
- to_date: datetime,
-) -> dict[str, list[tuple[str, str, str, str]]]:
- if not from_date:
- from_date = datetime(year=2000, month=1, day=1)
- typer.secho(
- f"listing images from {from_date} to {to_date} from {endpoint}",
- fg=typer.colors.YELLOW,
- )
-
- list_of_images_in_date_range = defaultdict(list)
-
- async with AsyncClient(
- base_url=endpoint.join("v2"), auth=(username, password), http2=True
- ) as client:
- list_of_repositories = await get_repos(client)
-
- with typer.progressbar(
- list_of_repositories, label="Processing repositories"
- ) as progress:
- for repo in progress:
- r = await client.get(f"/{repo}/tags/list")
- r.raise_for_status()
- list_of_tags = [tag for tag in r.json()["tags"] if tag != "latest"]
-
- # we go in reverse order, so the first that does not go in the date range will stop the loop
- for tag in reversed(list_of_tags):
- r = await client.get(f"/{repo}/manifests/{tag}")
- r.raise_for_status()
- manifest = r.json()
- # manifest[history] contains all the blobs, taking the latest one corresponds to the image creation date
- history = manifest["history"]
- tag_creation_dates = deque()
- for blob in history:
- v1_comp = json.loads(blob["v1Compatibility"])
- tag_creation_dates.append(
- datetime.strptime(
- v1_comp["created"][:N], "%Y-%m-%dT%H:%M:%S.%f"
- )
- )
- tag_last_date = sorted(tag_creation_dates)[-1]
- # check this service is in the time range
- if tag_last_date < from_date or tag_last_date > to_date:
- break
-
- # get the image labels from the last blob (same as director does)
- v1_comp = json.loads(history[0]["v1Compatibility"])
- container_config = v1_comp.get(
- "container_config", v1_comp["config"]
- )
-
- simcore_labels = {}
- for label_key, label_value in container_config["Labels"].items():
- if label_key.startswith("io.simcore"):
- simcore_labels.update(json.loads(label_value))
-
- list_of_images_in_date_range[repo].append(
- (
- tag,
- simcore_labels["name"],
- simcore_labels["description"],
- simcore_labels["type"],
- )
- )
- typer.secho(
- f"Completed. Found {len(list_of_images_in_date_range)} created between {from_date} and {to_date}",
- fg=typer.colors.YELLOW,
- )
- typer.secho(f"{pformat(list_of_images_in_date_range)}")
-
- return list_of_images_in_date_range
-
-
-def main(
- endpoint: str,
- username: str,
- password: str = typer.Option(..., prompt=True, hide_input=True),
- from_date: datetime | None = typer.Option(None, formats=["%Y-%m-%d"]),
- to_date: datetime = typer.Option(f"{date.today()}", formats=["%Y-%m-%d"]),
- markdown: bool = typer.Option(False),
-):
- endpoint_url = URL(endpoint)
- list_of_images: dict[str, list[tuple[str, str, str, str]]] = asyncio.run(
- list_images_in_registry(endpoint_url, username, password, from_date, to_date)
- )
-
- if markdown:
- output_file = Path.cwd() / f"{endpoint_url.host}.md"
- with output_file.open("w") as fp:
- fp.write(
- f"# {endpoint_url.host}: Services added between {from_date} and {to_date}\n\n"
- )
- fp.write("| Service | Version(s) | Name | Description | Type |\n")
- fp.write("| ------- | ---------- | ---- | ----------- | ---- |\n")
- for repo, repo_details in list_of_images.items():
- for index, (version, name, description, service_type) in enumerate(
- repo_details
- ):
- filtered_description = description.strip().replace("\n", "")
- fp.write(
- f"| {repo if index == 0 else ''} | {version} | {name if index == 0 else ''} | {filtered_description if index == 0 else ''} | {('Dynamic service' if service_type == 'dynamic' else 'Computational service') if index == 0 else ''} |\n"
- )
-
-
-if __name__ == "__main__":
- typer.run(main)
diff --git a/scripts/metrics/requirements.txt b/scripts/metrics/requirements.txt
deleted file mode 100644
index c843a6494bf3..000000000000
--- a/scripts/metrics/requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-black
-httpx[http2]
-pydantic[email,dotenv]
-pylint
-typer[all]
diff --git a/scripts/openapi/oas_resolver/Dockerfile b/scripts/openapi/oas_resolver/Dockerfile
index 92c1113ab657..a0fb9f7232a3 100644
--- a/scripts/openapi/oas_resolver/Dockerfile
+++ b/scripts/openapi/oas_resolver/Dockerfile
@@ -2,7 +2,7 @@
# Usage:
# docker build . -t oas_resolver
# docker run -v /path/to/api:/input -v /path/to/compiled/file:/output oas_resolver /input/path/to/openapi.yaml /output/output_file.yaml
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
# we docker image is built based on debian
FROM python:3.6-alpine
@@ -17,11 +17,6 @@ WORKDIR /src
# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv
COPY --from=uv_build /uv /uvx /bin/
-# update pip
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install prance click openapi_spec_validator
diff --git a/scripts/pydeps-docker/Dockerfile b/scripts/pydeps-docker/Dockerfile
index 401a57c38eb1..5ebd38a61cf7 100644
--- a/scripts/pydeps-docker/Dockerfile
+++ b/scripts/pydeps-docker/Dockerfile
@@ -9,7 +9,7 @@
#
#
ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
# we docker image is built based on debian
FROM python:${PYTHON_VERSION}-slim-bookworm AS base
@@ -33,11 +33,6 @@ ARG HOME_DIR
RUN mkdir -p ${HOME_DIR}
COPY .pydeps ${HOME_DIR}/.pydeps
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
-
# devenv
RUN --mount=type=cache,target=/root/.cache/uv \
diff --git a/scripts/pyupgrade.bash b/scripts/pyupgrade.bash
index 4423a8583ee6..3428b60efff6 100755
--- a/scripts/pyupgrade.bash
+++ b/scripts/pyupgrade.bash
@@ -19,12 +19,8 @@ Build() {
--tag "$IMAGE_NAME" \
- <=0.9", "rich", "requests"]
+dependencies = ["arrow", "python-dotenv","pydantic", "pydantic-settings", "typer>=0.9", "rich", "requests"]
requires-python = ">=3.10"
[project.scripts]
diff --git a/services/agent/Dockerfile b/services/agent/Dockerfile
index ff0658cfcc75..f2073066cc17 100644
--- a/services/agent/Dockerfile
+++ b/services/agent/Dockerfile
@@ -2,7 +2,7 @@
# Define arguments in the global scope
ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
@@ -33,6 +33,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=private \
set -eux && \
apt-get update && \
apt-get install -y --no-install-recommends \
+ fd-find \
gosu \
curl \
&& apt-get clean -y \
@@ -96,10 +97,7 @@ RUN uv venv "${VIRTUAL_ENV}"
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
+
WORKDIR /build
@@ -116,6 +114,9 @@ WORKDIR /build
FROM build AS prod-only-deps
ENV SC_BUILD_TARGET=prod-only-deps
+# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
+ENV UV_COMPILE_BYTECODE=1 \
+ UV_LINK_MODE=copy
WORKDIR /build/services/agent
@@ -141,8 +142,6 @@ ENV SC_BUILD_TARGET=production \
SC_BOOT_MODE=production
ENV PYTHONOPTIMIZE=TRUE
-# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
-ENV UV_COMPILE_BYTECODE=1
WORKDIR /home/scu
diff --git a/services/agent/docker/boot.sh b/services/agent/docker/boot.sh
index 5cc8f9f5aad6..3b502cd95747 100755
--- a/services/agent/docker/boot.sh
+++ b/services/agent/docker/boot.sh
@@ -24,7 +24,7 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then
command -v python | sed 's/^/ /'
cd services/agent
- uv pip --quiet sync requirements/dev.txt
+ uv pip --quiet sync --link-mode=copy requirements/dev.txt
cd -
echo "$INFO" "PIP :"
uv pip list
@@ -33,7 +33,7 @@ fi
if [ "${SC_BOOT_MODE}" = "debug" ]; then
# NOTE: production does NOT pre-installs debugpy
if command -v uv >/dev/null 2>&1; then
- uv pip install debugpy
+ uv pip install --link-mode=copy debugpy
else
pip install debugpy
fi
@@ -48,20 +48,22 @@ SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]')
echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL"
if [ "${SC_BOOT_MODE}" = "debug" ]; then
- reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;)
-
+ reload_dir_packages=$(fdfind src /devel/packages --exec echo '--reload-dir {} ' | tr '\n' ' ')
exec sh -c "
cd services/agent/src/simcore_service_agent && \
- python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${AGENT_SERVER_REMOTE_DEBUG_PORT} -m uvicorn main:the_app \
+ python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${AGENT_SERVER_REMOTE_DEBUG_PORT} -m \
+ uvicorn \
+ --factory main:app_factory \
--host 0.0.0.0 \
--port 8000 \
--reload \
- $reload_dir_packages
+ $reload_dir_packages \
--reload-dir . \
--log-level \"${SERVER_LOG_LEVEL}\"
"
else
- exec uvicorn simcore_service_agent.main:the_app \
+ exec uvicorn \
+ --factory simcore_service_agent.main:app_factory \
--host 0.0.0.0 \
--port 8000 \
--log-level "${SERVER_LOG_LEVEL}" \
diff --git a/services/agent/docker/entrypoint.sh b/services/agent/docker/entrypoint.sh
index e89ad5408a31..a319c6824d73 100755
--- a/services/agent/docker/entrypoint.sh
+++ b/services/agent/docker/entrypoint.sh
@@ -19,6 +19,7 @@ echo "$INFO" "Workdir : $(pwd)"
echo "$INFO" "User : $(id scu)"
echo "$INFO" "python : $(command -v python)"
echo "$INFO" "pip : $(command -v pip)"
+echo "$INFO" "UV : $(command -v uv)"
#
# DEVELOPMENT MODE
@@ -56,10 +57,9 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then
usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME"
echo "$INFO" "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME"
- find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \;
- # change user property of files already around
+ fdfind --owner ":$SC_USER_ID" --exclude proc --exec-batch chgrp --no-dereference "$CONT_GROUPNAME" . '/'
echo "$INFO" "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME"
- find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \;
+ fdfind --owner "$SC_USER_ID:" --exclude proc --exec-batch chown --no-dereference "$SC_USER_NAME" . '/'
fi
fi
diff --git a/services/agent/requirements/_base.txt b/services/agent/requirements/_base.txt
index b3fe3dfecfbc..da576d1b4fbe 100644
--- a/services/agent/requirements/_base.txt
+++ b/services/agent/requirements/_base.txt
@@ -12,7 +12,7 @@ aiofiles==24.1.0
# via -r requirements/../../../packages/service-library/requirements/_base.in
aiohappyeyeballs==2.5.0
# via aiohttp
-aiohttp==3.11.18
+aiohttp==3.12.12
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -45,6 +45,8 @@ arrow==1.3.0
# -r requirements/../../../packages/models-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in
# -r requirements/../../../packages/service-library/requirements/_base.in
+asgi-lifespan==2.1.0
+ # via -r requirements/../../../packages/service-library/requirements/_fastapi.in
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==25.1.0
@@ -69,19 +71,14 @@ certifi==2025.1.31
# httpcore
# httpx
# requests
+ # sentry-sdk
charset-normalizer==3.4.1
# via requests
-click==8.1.8
+click==8.2.1
# via
# rich-toolkit
# typer
# uvicorn
-deprecated==1.2.18
- # via
- # opentelemetry-api
- # opentelemetry-exporter-otlp-proto-grpc
- # opentelemetry-exporter-otlp-proto-http
- # opentelemetry-semantic-conventions
dnspython==2.7.0
# via email-validator
email-validator==2.2.0
@@ -92,12 +89,14 @@ exceptiongroup==1.2.2
# via aio-pika
fast-depends==2.4.12
# via faststream
-fastapi==0.115.12
+fastapi==0.116.1
# via
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# fastapi-lifespan-manager
-fastapi-cli==0.0.7
+fastapi-cli==0.0.8
# via fastapi
+fastapi-cloud-cli==0.1.5
+ # via fastapi-cli
fastapi-lifespan-manager==0.1.4
# via -r requirements/../../../packages/service-library/requirements/_fastapi.in
faststream==0.5.35
@@ -106,13 +105,13 @@ frozenlist==1.5.0
# via
# aiohttp
# aiosignal
-googleapis-common-protos==1.69.1
+googleapis-common-protos==1.70.0
# via
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
grpcio==1.70.0
# via opentelemetry-exporter-otlp-proto-grpc
-h11==0.14.0
+h11==0.16.0
# via
# httpcore
# uvicorn
@@ -120,7 +119,7 @@ h2==4.2.0
# via httpx
hpack==4.1.0
# via h2
-httpcore==1.0.7
+httpcore==1.0.9
# via httpx
httptools==0.6.4
# via uvicorn
@@ -140,6 +139,7 @@ httpx==0.28.1
# -c requirements/../../../requirements/constraints.txt
# -r requirements/../../../packages/service-library/requirements/_fastapi.in
# fastapi
+ # fastapi-cloud-cli
hyperframe==6.1.0
# via h2
idna==3.10
@@ -166,6 +166,10 @@ jinja2==3.1.6
# -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# fastapi
+jsonref==1.1.0
+ # via
+ # -r requirements/../../../packages/models-library/requirements/_base.in
+ # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in
jsonschema==4.23.0
# via
# -r requirements/../../../packages/models-library/requirements/_base.in
@@ -182,7 +186,7 @@ multidict==6.1.0
# via
# aiohttp
# yarl
-opentelemetry-api==1.30.0
+opentelemetry-api==1.34.1
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# opentelemetry-exporter-otlp-proto-grpc
@@ -190,6 +194,7 @@ opentelemetry-api==1.30.0
# opentelemetry-instrumentation
# opentelemetry-instrumentation-aio-pika
# opentelemetry-instrumentation-asgi
+ # opentelemetry-instrumentation-asyncpg
# opentelemetry-instrumentation-fastapi
# opentelemetry-instrumentation-httpx
# opentelemetry-instrumentation-logging
@@ -197,59 +202,63 @@ opentelemetry-api==1.30.0
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
-opentelemetry-exporter-otlp==1.30.0
+opentelemetry-exporter-otlp==1.34.1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-exporter-otlp-proto-common==1.30.0
+opentelemetry-exporter-otlp-proto-common==1.34.1
# via
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-exporter-otlp-proto-grpc==1.30.0
+opentelemetry-exporter-otlp-proto-grpc==1.34.1
# via opentelemetry-exporter-otlp
-opentelemetry-exporter-otlp-proto-http==1.30.0
+opentelemetry-exporter-otlp-proto-http==1.34.1
# via opentelemetry-exporter-otlp
-opentelemetry-instrumentation==0.51b0
+opentelemetry-instrumentation==0.55b1
# via
# opentelemetry-instrumentation-aio-pika
# opentelemetry-instrumentation-asgi
+ # opentelemetry-instrumentation-asyncpg
# opentelemetry-instrumentation-fastapi
# opentelemetry-instrumentation-httpx
# opentelemetry-instrumentation-logging
# opentelemetry-instrumentation-redis
# opentelemetry-instrumentation-requests
-opentelemetry-instrumentation-aio-pika==0.51b0
+opentelemetry-instrumentation-aio-pika==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-instrumentation-asgi==0.51b0
+opentelemetry-instrumentation-asgi==0.55b1
# via opentelemetry-instrumentation-fastapi
-opentelemetry-instrumentation-fastapi==0.51b0
+opentelemetry-instrumentation-asyncpg==0.55b1
+ # via -r requirements/../../../packages/service-library/requirements/_base.in
+opentelemetry-instrumentation-fastapi==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_fastapi.in
-opentelemetry-instrumentation-httpx==0.51b0
+opentelemetry-instrumentation-httpx==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_fastapi.in
-opentelemetry-instrumentation-logging==0.51b0
+opentelemetry-instrumentation-logging==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-instrumentation-redis==0.51b0
+opentelemetry-instrumentation-redis==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-instrumentation-requests==0.51b0
+opentelemetry-instrumentation-requests==0.55b1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-opentelemetry-proto==1.30.0
+opentelemetry-proto==1.34.1
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-sdk==1.30.0
+opentelemetry-sdk==1.34.1
# via
# -r requirements/../../../packages/service-library/requirements/_base.in
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
-opentelemetry-semantic-conventions==0.51b0
+opentelemetry-semantic-conventions==0.55b1
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-asgi
+ # opentelemetry-instrumentation-asyncpg
# opentelemetry-instrumentation-fastapi
# opentelemetry-instrumentation-httpx
# opentelemetry-instrumentation-redis
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
-opentelemetry-util-http==0.51b0
+opentelemetry-util-http==0.55b1
# via
# opentelemetry-instrumentation-asgi
# opentelemetry-instrumentation-fastapi
@@ -289,7 +298,7 @@ propcache==0.3.0
# via
# aiohttp
# yarl
-protobuf==5.29.3
+protobuf==5.29.5
# via
# googleapis-common-protos
# opentelemetry-proto
@@ -297,7 +306,7 @@ psutil==7.0.0
# via -r requirements/../../../packages/service-library/requirements/_base.in
pycryptodome==3.21.0
# via stream-zip
-pydantic==2.10.6
+pydantic==2.11.7
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -325,11 +334,12 @@ pydantic==2.10.6
# -r requirements/_base.in
# fast-depends
# fastapi
+ # fastapi-cloud-cli
# pydantic-extra-types
# pydantic-settings
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via pydantic
-pydantic-extra-types==2.10.2
+pydantic-extra-types==2.10.5
# via
# -r requirements/../../../packages/common-library/requirements/_base.in
# -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in
@@ -416,27 +426,35 @@ referencing==0.35.1
# -c requirements/../../../requirements/constraints.txt
# jsonschema
# jsonschema-specifications
-requests==2.32.3
+requests==2.32.4
# via opentelemetry-exporter-otlp-proto-http
-rich==13.9.4
+rich==14.1.0
# via
# -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in
# -r requirements/../../../packages/settings-library/requirements/_base.in
# rich-toolkit
# typer
-rich-toolkit==0.14.7
- # via fastapi-cli
+rich-toolkit==0.15.0
+ # via
+ # fastapi-cli
+ # fastapi-cloud-cli
+rignore==0.6.4
+ # via fastapi-cloud-cli
rpds-py==0.23.1
# via
# jsonschema
# referencing
+sentry-sdk==2.35.0
+ # via fastapi-cloud-cli
shellingham==1.5.4
# via typer
six==1.17.0
# via python-dateutil
sniffio==1.3.1
- # via anyio
-starlette==0.46.0
+ # via
+ # anyio
+ # asgi-lifespan
+starlette==0.47.2
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -459,26 +477,35 @@ toolz==1.0.0
# via -r requirements/../../../packages/service-library/requirements/_base.in
tqdm==4.67.1
# via -r requirements/../../../packages/service-library/requirements/_base.in
-typer==0.15.2
+typer==0.16.1
# via
# -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in
# -r requirements/../../../packages/settings-library/requirements/_base.in
# fastapi-cli
+ # fastapi-cloud-cli
types-python-dateutil==2.9.0.20241206
# via arrow
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# aiodebug
# anyio
# fastapi
# faststream
+ # opentelemetry-api
+ # opentelemetry-exporter-otlp-proto-grpc
+ # opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
+ # opentelemetry-semantic-conventions
# pydantic
# pydantic-core
# pydantic-extra-types
# rich-toolkit
+ # starlette
# typer
-urllib3==2.3.0
+ # typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
+urllib3==2.5.0
# via
# -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt
@@ -493,10 +520,12 @@ urllib3==2.3.0
# -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt
# -c requirements/../../../requirements/constraints.txt
# requests
+ # sentry-sdk
uvicorn==0.34.2
# via
# fastapi
# fastapi-cli
+ # fastapi-cloud-cli
uvloop==0.21.0
# via uvicorn
watchfiles==1.0.5
@@ -505,7 +534,6 @@ websockets==15.0.1
# via uvicorn
wrapt==1.17.2
# via
- # deprecated
# opentelemetry-instrumentation
# opentelemetry-instrumentation-aio-pika
# opentelemetry-instrumentation-httpx
diff --git a/services/agent/requirements/_test.txt b/services/agent/requirements/_test.txt
index a0c0ea3b114c..063dda189f27 100644
--- a/services/agent/requirements/_test.txt
+++ b/services/agent/requirements/_test.txt
@@ -10,7 +10,7 @@ aiohappyeyeballs==2.5.0
# via
# -c requirements/_base.txt
# aiohttp
-aiohttp==3.11.18
+aiohttp==3.12.12
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
@@ -32,7 +32,9 @@ anyio==4.8.0
# -c requirements/_base.txt
# httpx
asgi-lifespan==2.1.0
- # via -r requirements/_test.in
+ # via
+ # -c requirements/_base.txt
+ # -r requirements/_test.in
attrs==25.1.0
# via
# -c requirements/_base.txt
@@ -72,7 +74,7 @@ charset-normalizer==3.4.1
# via
# -c requirements/_base.txt
# requests
-click==8.1.8
+click==8.2.1
# via
# -c requirements/_base.txt
# flask
@@ -93,7 +95,7 @@ flask==3.1.0
# via
# flask-cors
# moto
-flask-cors==5.0.1
+flask-cors==6.0.1
# via moto
frozenlist==1.5.0
# via
@@ -102,11 +104,11 @@ frozenlist==1.5.0
# aiosignal
graphql-core==3.2.6
# via moto
-h11==0.14.0
+h11==0.16.0
# via
# -c requirements/_base.txt
# httpcore
-httpcore==1.0.7
+httpcore==1.0.9
# via
# -c requirements/_base.txt
# httpx
@@ -188,7 +190,9 @@ packaging==24.2
pathable==0.4.4
# via jsonschema-path
pluggy==1.5.0
- # via pytest
+ # via
+ # pytest
+ # pytest-cov
ply==3.11
# via jsonpath-ng
propcache==0.3.0
@@ -200,28 +204,32 @@ py-partiql-parser==0.6.1
# via moto
pycparser==2.22
# via cffi
-pydantic==2.10.6
+pydantic==2.11.7
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
# aws-sam-translator
-pydantic-core==2.27.2
+pydantic-core==2.33.2
# via
# -c requirements/_base.txt
# pydantic
+pygments==2.19.1
+ # via
+ # -c requirements/_base.txt
+ # pytest
pyparsing==3.2.1
# via moto
-pytest==8.3.5
+pytest==8.4.1
# via
# -r requirements/_test.in
# pytest-asyncio
# pytest-cov
# pytest-mock
-pytest-asyncio==0.26.0
+pytest-asyncio==1.0.0
# via -r requirements/_test.in
-pytest-cov==6.0.0
+pytest-cov==6.2.1
# via -r requirements/_test.in
-pytest-mock==3.14.0
+pytest-mock==3.14.1
# via -r requirements/_test.in
pytest-runner==6.0.1
# via -r requirements/_test.in
@@ -252,7 +260,7 @@ referencing==0.35.1
# jsonschema-specifications
regex==2024.11.6
# via cfn-lint
-requests==2.32.3
+requests==2.32.4
# via
# -c requirements/_base.txt
# docker
@@ -270,7 +278,7 @@ rpds-py==0.23.1
# referencing
s3transfer==0.11.3
# via boto3
-setuptools==75.8.2
+setuptools==80.9.0
# via moto
six==1.17.0
# via
@@ -284,7 +292,7 @@ sniffio==1.3.1
# asgi-lifespan
sympy==1.13.3
# via cfn-lint
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# anyio
@@ -292,9 +300,14 @@ typing-extensions==4.12.2
# cfn-lint
# pydantic
# pydantic-core
+ # typing-inspection
+typing-inspection==0.4.1
+ # via
+ # -c requirements/_base.txt
+ # pydantic
tzdata==2025.1
# via faker
-urllib3==2.3.0
+urllib3==2.5.0
# via
# -c requirements/../../../requirements/constraints.txt
# -c requirements/_base.txt
diff --git a/services/agent/requirements/_tools.txt b/services/agent/requirements/_tools.txt
index 70694d84d7ba..3a03174fcf52 100644
--- a/services/agent/requirements/_tools.txt
+++ b/services/agent/requirements/_tools.txt
@@ -8,7 +8,7 @@ bump2version==1.0.1
# via -r requirements/../../../requirements/devenv.txt
cfgv==3.4.0
# via pre-commit
-click==8.1.8
+click==8.2.1
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
@@ -28,9 +28,9 @@ isort==6.0.1
# pylint
mccabe==0.7.0
# via pylint
-mypy==1.15.0
+mypy==1.16.1
# via -r requirements/../../../requirements/devenv.txt
-mypy-extensions==1.0.0
+mypy-extensions==1.1.0
# via
# black
# mypy
@@ -43,7 +43,9 @@ packaging==24.2
# black
# build
pathspec==0.12.1
- # via black
+ # via
+ # black
+ # mypy
pip==25.0.1
# via pip-tools
pip-tools==7.4.1
@@ -69,13 +71,13 @@ pyyaml==6.0.2
# pre-commit
ruff==0.9.9
# via -r requirements/../../../requirements/devenv.txt
-setuptools==75.8.2
+setuptools==80.9.0
# via
# -c requirements/_test.txt
# pip-tools
tomlkit==0.13.2
# via pylint
-typing-extensions==4.12.2
+typing-extensions==4.14.1
# via
# -c requirements/_base.txt
# -c requirements/_test.txt
diff --git a/services/agent/src/simcore_service_agent/core/application.py b/services/agent/src/simcore_service_agent/core/application.py
index 442c4649c626..e7972c1042a1 100644
--- a/services/agent/src/simcore_service_agent/core/application.py
+++ b/services/agent/src/simcore_service_agent/core/application.py
@@ -1,5 +1,6 @@
import logging
+from common_library.json_serialization import json_dumps
from fastapi import FastAPI
from servicelib.fastapi.openapi import (
get_common_oas_options,
@@ -9,7 +10,6 @@
initialize_fastapi_app_tracing,
setup_tracing,
)
-from servicelib.logging_utils import config_all_loggers
from .._meta import (
API_VTAG,
@@ -27,24 +27,16 @@
from ..services.volumes_manager import setup_volume_manager
from .settings import ApplicationSettings
-logger = logging.getLogger(__name__)
+_logger = logging.getLogger(__name__)
-def _setup_logger(settings: ApplicationSettings):
- # SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148
- logging.basicConfig(level=settings.LOG_LEVEL.value) # NOSONAR
- logging.root.setLevel(settings.LOG_LEVEL.value)
- config_all_loggers(
- log_format_local_dev_enabled=settings.AGENT_VOLUMES_LOG_FORMAT_LOCAL_DEV_ENABLED,
- logger_filter_mapping=settings.AGENT_VOLUMES_LOG_FILTER_MAPPING,
- tracing_settings=settings.AGENT_TRACING,
- )
-
-
-def create_app() -> FastAPI:
- settings = ApplicationSettings.create_from_envs()
- _setup_logger(settings)
- logger.debug(settings.model_dump_json(indent=2))
+def create_app(settings: ApplicationSettings | None = None) -> FastAPI:
+ if settings is None:
+ settings = ApplicationSettings.create_from_envs()
+ _logger.info(
+ "Application settings: %s",
+ json_dumps(settings, indent=2, sort_keys=True),
+ )
assert settings.SC_BOOT_MODE # nosec
app = FastAPI(
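With the change above, `create_app()` accepts optional pre-built `ApplicationSettings` and only falls back to the environment when none are given, which lets callers (and tests) inject their own configuration. A sketch of both call styles:

```python
from simcore_service_agent.core.application import create_app
from simcore_service_agent.core.settings import ApplicationSettings

settings = ApplicationSettings.create_from_envs()
app = create_app(settings=settings)  # explicit injection, new with this change
app_from_env = create_app()          # no argument: settings are still read from the environment
```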
diff --git a/services/agent/src/simcore_service_agent/core/settings.py b/services/agent/src/simcore_service_agent/core/settings.py
index d11b286f065f..edec12db2a57 100644
--- a/services/agent/src/simcore_service_agent/core/settings.py
+++ b/services/agent/src/simcore_service_agent/core/settings.py
@@ -2,10 +2,10 @@
from typing import Annotated
from common_library.basic_types import DEFAULT_FACTORY
+from common_library.logging.logging_utils_filtering import LoggerName, MessageSubstring
from models_library.basic_types import BootModeEnum, LogLevel
from models_library.docker import DockerNodeID
from pydantic import AliasChoices, AnyHttpUrl, Field, field_validator
-from servicelib.logging_utils_filtering import LoggerName, MessageSubstring
from settings_library.base import BaseCustomSettings
from settings_library.r_clone import S3Provider
from settings_library.rabbit import RabbitSettings
diff --git a/services/agent/src/simcore_service_agent/main.py b/services/agent/src/simcore_service_agent/main.py
index a16db0c3d527..1af4eb695c24 100644
--- a/services/agent/src/simcore_service_agent/main.py
+++ b/services/agent/src/simcore_service_agent/main.py
@@ -1,3 +1,36 @@
+import logging
+from typing import Final
+
+from common_library.json_serialization import json_dumps
+from fastapi import FastAPI
+from servicelib.fastapi.logging_lifespan import create_logging_shutdown_event
from simcore_service_agent.core.application import create_app
+from simcore_service_agent.core.settings import ApplicationSettings
+
+_logger = logging.getLogger(__name__)
+
+_NOISY_LOGGERS: Final[tuple[str, ...]] = (
+ "aio_pika",
+ "aiormq",
+ "httpcore",
+ "httpx",
+)
+
+
+def app_factory() -> FastAPI:
+ app_settings = ApplicationSettings.create_from_envs()
+ logging_shutdown_event = create_logging_shutdown_event(
+ log_format_local_dev_enabled=app_settings.AGENT_VOLUMES_LOG_FORMAT_LOCAL_DEV_ENABLED,
+ logger_filter_mapping=app_settings.AGENT_VOLUMES_LOG_FILTER_MAPPING,
+ tracing_settings=app_settings.AGENT_TRACING,
+ log_base_level=app_settings.log_level,
+ noisy_loggers=_NOISY_LOGGERS,
+ )
-the_app = create_app()
+ _logger.info(
+ "Application settings: %s",
+ json_dumps(app_settings, indent=2, sort_keys=True),
+ )
+ app = create_app(settings=app_settings)
+ app.add_event_handler("shutdown", logging_shutdown_event)
+ return app
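The module now exposes `app_factory()` instead of a module-level `the_app`, matching the `uvicorn --factory` invocation added in `docker/boot.sh`. A hedged sketch of the programmatic equivalent (host and port values are illustrative):

```python
import uvicorn

# with factory=True uvicorn calls app_factory() itself, so logging is configured via
# create_logging_shutdown_event() before the FastAPI application is built
uvicorn.run(
    "simcore_service_agent.main:app_factory",
    factory=True,
    host="0.0.0.0",
    port=8000,
)
```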
diff --git a/services/agent/src/simcore_service_agent/services/backup.py b/services/agent/src/simcore_service_agent/services/backup.py
index a7e125af0c42..0e1a9b00bcac 100644
--- a/services/agent/src/simcore_service_agent/services/backup.py
+++ b/services/agent/src/simcore_service_agent/services/backup.py
@@ -1,18 +1,25 @@
import asyncio
+import json
import logging
+import socket
import tempfile
from asyncio.streams import StreamReader
+from datetime import timedelta
from pathlib import Path
from textwrap import dedent
from typing import Final
from uuid import uuid4
+import httpx
from fastapi import FastAPI
+from servicelib.container_utils import run_command_in_container
from settings_library.utils_r_clone import resolve_provider
from ..core.settings import ApplicationSettings
from ..models.volumes import DynamicServiceVolumeLabels, VolumeDetails
+_TIMEOUT_PERMISSION_CHANGES: Final[timedelta] = timedelta(minutes=5)
+
_logger = logging.getLogger(__name__)
@@ -107,6 +114,35 @@ def _log_expected_operation(
_logger.log(log_level, formatted_message)
+def _get_self_container_ip() -> str:
+ return socket.gethostbyname(socket.gethostname())
+
+
+async def _get_self_container() -> str:
+ ip = _get_self_container_ip()
+
+ async with httpx.AsyncClient(
+ transport=httpx.AsyncHTTPTransport(uds="/var/run/docker.sock")
+ ) as client:
+ response = await client.get("http://localhost/containers/json")
+ for entry in response.json():
+ if ip in json.dumps(entry):
+ container_id: str = entry["Id"]
+ return container_id
+
+ msg = "Could not determine self container ID"
+ raise RuntimeError(msg)
+
+
+async def _ensure_permissions_on_source_dir(source_dir: Path) -> None:
+ self_container = await _get_self_container()
+ await run_command_in_container(
+ self_container,
+ command=f"chmod -R o+rX '{source_dir}'",
+ timeout=_TIMEOUT_PERMISSION_CHANGES.total_seconds(),
+ )
+
+
async def _store_in_s3(
settings: ApplicationSettings, volume_name: str, volume_details: VolumeDetails
) -> None:
@@ -148,6 +184,8 @@ async def _store_in_s3(
volume_details.labels, s3_path, r_clone_ls_output, volume_name
)
+ await _ensure_permissions_on_source_dir(source_dir)
+
# sync files via rclone
r_clone_sync = [
"rclone",
diff --git a/services/agent/src/simcore_service_agent/services/volumes_manager.py b/services/agent/src/simcore_service_agent/services/volumes_manager.py
index 860ab86d0e21..1ef6ef1d0cbd 100644
--- a/services/agent/src/simcore_service_agent/services/volumes_manager.py
+++ b/services/agent/src/simcore_service_agent/services/volumes_manager.py
@@ -6,10 +6,10 @@
import arrow
from aiodocker.docker import Docker
+from common_library.async_tools import cancel_wait_task
from fastapi import FastAPI
from models_library.projects_nodes_io import NodeID
from pydantic import NonNegativeFloat
-from servicelib.async_utils import cancel_wait_task
from servicelib.background_task import create_periodic_task
from servicelib.fastapi.app_state import SingletonInAppStateMixin
from servicelib.logging_utils import log_context
diff --git a/services/agent/tests/conftest.py b/services/agent/tests/conftest.py
index 97df58d4e5a7..8213e84ad47d 100644
--- a/services/agent/tests/conftest.py
+++ b/services/agent/tests/conftest.py
@@ -12,9 +12,11 @@
from settings_library.r_clone import S3Provider
pytest_plugins = [
+ "pytest_simcore.asyncio_event_loops",
"pytest_simcore.aws_server",
"pytest_simcore.docker_compose",
"pytest_simcore.docker_swarm",
+ "pytest_simcore.logging",
"pytest_simcore.rabbit_service",
"pytest_simcore.repository_paths",
]
diff --git a/services/agent/tests/unit/test_core_settings.py b/services/agent/tests/unit/test_core_settings.py
new file mode 100644
index 000000000000..7e3fb5b5d2d4
--- /dev/null
+++ b/services/agent/tests/unit/test_core_settings.py
@@ -0,0 +1,18 @@
+# pylint: disable=unused-variable
+# pylint: disable=unused-argument
+# pylint: disable=redefined-outer-name
+
+
+from pytest_simcore.helpers.monkeypatch_envs import (
+ EnvVarsDict,
+)
+from simcore_service_agent.core.settings import ApplicationSettings
+
+
+def test_valid_application_settings(mock_environment: EnvVarsDict):
+ assert mock_environment
+
+ settings = ApplicationSettings() # type: ignore
+ assert settings
+
+ assert settings == ApplicationSettings.create_from_envs()
diff --git a/services/agent/tests/unit/test_services_backup.py b/services/agent/tests/unit/test_services_backup.py
index d544a25dfa5e..2d73dd80fb17 100644
--- a/services/agent/tests/unit/test_services_backup.py
+++ b/services/agent/tests/unit/test_services_backup.py
@@ -1,18 +1,22 @@
# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
import asyncio
-from collections.abc import Awaitable, Callable
+from collections.abc import AsyncIterable, Awaitable, Callable
from pathlib import Path
from typing import Final
from uuid import uuid4
import aioboto3
+import aiodocker
import pytest
from fastapi import FastAPI
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.services_types import ServiceRunID
from pydantic import NonNegativeInt
+from pytest_mock import MockerFixture
+from servicelib.container_utils import run_command_in_container
from simcore_service_agent.core.settings import ApplicationSettings
from simcore_service_agent.services.backup import backup_volume
from simcore_service_agent.services.docker_utils import get_volume_details
@@ -37,6 +41,28 @@ def volume_content(tmpdir: Path) -> Path:
return path
+@pytest.fixture
+async def mock_container_with_data(
+ volume_content: Path, monkeypatch: pytest.MonkeyPatch
+) -> AsyncIterable[str]:
+ async with aiodocker.Docker() as client:
+ container = await client.containers.run(
+ config={
+ "Image": "alpine:latest",
+ "Cmd": ["/bin/ash", "-c", "sleep 10000"],
+ "HostConfig": {"Binds": [f"{volume_content}:{volume_content}:rw"]},
+ }
+ )
+ container_inspect = await container.show()
+
+ container_name = container_inspect["Name"][1:]
+ monkeypatch.setenv("HOSTNAME", container_name)
+
+ yield container_inspect["Id"]
+
+ await container.delete(force=True)
+
+
@pytest.fixture
def downlaoded_from_s3(tmpdir: Path) -> Path:
path = Path(tmpdir) / "downloaded_from_s3"
@@ -44,7 +70,24 @@ def downlaoded_from_s3(tmpdir: Path) -> Path:
return path
+@pytest.fixture
+async def mock__get_self_container_ip(
+ mock_container_with_data: str,
+ mocker: MockerFixture,
+) -> None:
+ container_ip = await run_command_in_container(
+ mock_container_with_data, command="hostname -i"
+ )
+
+ mocker.patch(
+ "simcore_service_agent.services.backup._get_self_container_ip",
+ return_value=container_ip.strip(),
+ )
+
+
async def test_backup_volume(
+ mock_container_with_data: str,
+ mock__get_self_container_ip: None,
volume_content: Path,
project_id: ProjectID,
swarm_stack_name: str,
diff --git a/services/api-server/.env-devel b/services/api-server/.env-devel
index 29d4830d47fb..a18401e3a5bd 100644
--- a/services/api-server/.env-devel
+++ b/services/api-server/.env-devel
@@ -28,9 +28,12 @@ POSTGRES_PASSWORD=test
POSTGRES_DB=test
POSTGRES_HOST=127.0.0.1
-# Enables debug
-SC_BOOT_MODE=debug
-
+# rabbit
+RABBIT_HOST=rabbit
+RABBIT_PASSWORD=adminadmin
+RABBIT_PORT=5672
+RABBIT_SECURE=false
+RABBIT_USER=admin
# webserver
WEBSERVER_HOST=webserver
diff --git a/services/api-server/Dockerfile b/services/api-server/Dockerfile
index 99ef5272ec9b..9f9b0da600af 100644
--- a/services/api-server/Dockerfile
+++ b/services/api-server/Dockerfile
@@ -2,7 +2,7 @@
# Define arguments in the global scope
ARG PYTHON_VERSION="3.11.9"
-ARG UV_VERSION="0.6"
+ARG UV_VERSION="0.7"
FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build
FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64
@@ -30,6 +30,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=private \
set -eux && \
apt-get update && \
apt-get install -y --no-install-recommends \
+ fd-find \
gosu \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/* \
@@ -89,10 +90,7 @@ RUN uv venv "${VIRTUAL_ENV}"
-RUN --mount=type=cache,target=/root/.cache/uv \
- uv pip install --upgrade \
- wheel \
- setuptools
+
WORKDIR /build
@@ -109,6 +107,9 @@ WORKDIR /build
FROM build AS prod-only-deps
ENV SC_BUILD_TARGET=prod-only-deps
+# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
+ENV UV_COMPILE_BYTECODE=1 \
+ UV_LINK_MODE=copy
WORKDIR /build/services/api-server
@@ -135,8 +136,6 @@ ENV SC_BUILD_TARGET=production \
SC_BOOT_MODE=production
ENV PYTHONOPTIMIZE=TRUE
-# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
-ENV UV_COMPILE_BYTECODE=1
WORKDIR /home/scu
diff --git a/services/api-server/Makefile b/services/api-server/Makefile
index e923de11db89..555c88f6ec37 100644
--- a/services/api-server/Makefile
+++ b/services/api-server/Makefile
@@ -28,9 +28,11 @@ reqs: ## compiles pip requirements (.in -> .txt)
define _create_and_validate_openapi
# generating openapi specs file under $< (NOTE: Skips DEV FEATURES since this OAS is the 'officially released'!)
- @source .env; \
+ set -o allexport; \
+ source .env; \
+ set +o allexport; \
export API_SERVER_DEV_FEATURES_ENABLED=$1; \
- python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@
+ python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(app_factory().openapi(), indent=2) )" > $@
# validates OAS file: $@
docker run --rm \
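
The Makefile now exports the whole `.env` (via `allexport`) and builds the spec from `app_factory()` instead of a module-level `the_app`. A hedged sketch of that app-factory pattern with a plain FastAPI app; the title and route below are illustrative, not the api-server's:

```python
import json

from fastapi import FastAPI


def app_factory() -> FastAPI:
    app = FastAPI(title="example-api")

    @app.get("/health")
    def health() -> dict[str, str]:
        return {"status": "ok"}

    return app


if __name__ == "__main__":
    # mirrors the Makefile's one-liner: build a fresh app and dump its OpenAPI spec
    print(json.dumps(app_factory().openapi(), indent=2))
```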
diff --git a/services/api-server/VERSION b/services/api-server/VERSION
index ac39a106c485..54d1a4f2a4a7 100644
--- a/services/api-server/VERSION
+++ b/services/api-server/VERSION
@@ -1 +1 @@
-0.9.0
+0.13.0
diff --git a/services/api-server/docker/boot.sh b/services/api-server/docker/boot.sh
index ea12e3446c95..227be9c56b96 100755
--- a/services/api-server/docker/boot.sh
+++ b/services/api-server/docker/boot.sh
@@ -19,7 +19,7 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then
command -v python | sed 's/^/ /'
cd services/api-server
- uv pip --quiet sync requirements/dev.txt
+ uv pip --quiet sync --link-mode=copy requirements/dev.txt
cd -
echo "$INFO" "PIP :"
uv pip list
@@ -28,7 +28,7 @@ fi
if [ "${SC_BOOT_MODE}" = "debug" ]; then
# NOTE: production does NOT pre-install debugpy
if command -v uv >/dev/null 2>&1; then
- uv pip install debugpy
+ uv pip install --link-mode=copy debugpy
else
pip install debugpy
fi
@@ -39,20 +39,51 @@ APP_LOG_LEVEL=${API_SERVER_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}}
SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]')
echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL"
-if [ "${SC_BOOT_MODE}" = "debug" ]; then
- reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;)
+if [ "${API_SERVER_WORKER_MODE}" = "true" ]; then
+ if [ "${SC_BOOT_MODE}" = "debug" ]; then
+ exec watchmedo auto-restart \
+ --directory /devel/packages \
+ --directory services/api-server \
+ --pattern "*.py" \
+ --recursive \
+ -- \
+ celery \
+ --app=boot_celery_worker:app \
+ --workdir=services/api-server/docker \
+ worker --pool=threads \
+ --loglevel="${API_SERVER_LOGLEVEL}" \
+ --concurrency="${CELERY_CONCURRENCY}" \
+ --hostname="${API_SERVER_WORKER_NAME}" \
+ --queues="${CELERY_QUEUES:-default}"
+ else
+ exec celery \
+ --app=boot_celery_worker:app \
+ --workdir=services/api-server/docker \
+ worker --pool=threads \
+ --loglevel="${API_SERVER_LOGLEVEL}" \
+ --concurrency="${CELERY_CONCURRENCY}" \
+ --hostname="${API_SERVER_WORKER_NAME}" \
+ --queues="${CELERY_QUEUES:-default}"
+ fi
+else
+ if [ "${SC_BOOT_MODE}" = "debug" ]; then
+ reload_dir_packages=$(fdfind src /devel/packages --exec echo '--reload-dir {} ' | tr '\n' ' ')
- exec sh -c "
- cd services/api-server/src/simcore_service_api_server && \
- python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${API_SERVER_REMOTE_DEBUG_PORT} -m uvicorn main:the_app \
+ exec sh -c "
+ cd services/api-server/src/simcore_service_api_server && \
+ python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${API_SERVER_REMOTE_DEBUG_PORT} -m \
+ uvicorn \
+ --factory main:app_factory \
+ --host 0.0.0.0 \
+ --reload \
+ $reload_dir_packages \
+ --reload-dir . \
+ --log-level \"${SERVER_LOG_LEVEL}\"
+ "
+ else
+ exec uvicorn \
+ --factory simcore_service_api_server.main:app_factory \
--host 0.0.0.0 \
- --reload \
- $reload_dir_packages
- --reload-dir . \
- --log-level \"${SERVER_LOG_LEVEL}\"
- "
-else
- exec uvicorn simcore_service_api_server.main:the_app \
- --host 0.0.0.0 \
- --log-level "${SERVER_LOG_LEVEL}"
+ --log-level "${SERVER_LOG_LEVEL}"
+ fi
fi
diff --git a/services/api-server/docker/boot_celery_worker.py b/services/api-server/docker/boot_celery_worker.py
new file mode 100644
index 000000000000..e0c7e119ced8
--- /dev/null
+++ b/services/api-server/docker/boot_celery_worker.py
@@ -0,0 +1,13 @@
+from celery.signals import worker_init, worker_shutdown # type: ignore[import-untyped]
+from celery_library.signals import (
+ on_worker_shutdown,
+)
+from simcore_service_api_server.celery_worker.worker_main import (
+ get_app,
+ worker_init_wrapper,
+)
+
+app = get_app()
+
+worker_init.connect(worker_init_wrapper)
+worker_shutdown.connect(on_worker_shutdown)
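
boot_celery_worker.py builds the Celery app and attaches the service's init/shutdown hooks to Celery's worker signals. A generic sketch of that signal wiring with standard Celery APIs; the app name, broker URL, and handlers are placeholders, not the service's `get_app`/`worker_init_wrapper`:

```python
from celery import Celery
from celery.signals import worker_init, worker_shutdown

app = Celery("example", broker="amqp://guest:guest@localhost:5672//")


@worker_init.connect
def _on_worker_init(**_kwargs) -> None:
    # runs once in the worker process before it starts consuming tasks
    print("worker starting: open connections / start event loops here")


@worker_shutdown.connect
def _on_worker_shutdown(**_kwargs) -> None:
    # runs when the worker process is shutting down
    print("worker stopping: close connections here")
```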
diff --git a/services/api-server/docker/entrypoint.sh b/services/api-server/docker/entrypoint.sh
index b579236b5623..0124a12961b3 100755
--- a/services/api-server/docker/entrypoint.sh
+++ b/services/api-server/docker/entrypoint.sh
@@ -20,6 +20,7 @@ echo "$INFO" "Workdir : $(pwd)"
echo "$INFO" "User : $(id scu)"
echo "$INFO" "python : $(command -v python)"
echo "$INFO" "pip : $(command -v pip)"
+echo "$INFO" "UV : $(command -v uv)"
USERNAME=scu
GROUPNAME=scu
@@ -57,10 +58,9 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then
usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME"
echo "$INFO" "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME"
- find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \;
- # change user property of files already around
+ fdfind --owner ":$SC_USER_ID" --exclude proc --exec-batch chgrp --no-dereference "$CONT_GROUPNAME" . '/'
echo "$INFO" "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME"
- find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \;
+ fdfind --owner "$SC_USER_ID:" --exclude proc --exec-batch chown --no-dereference "$SC_USER_NAME" . '/'
fi
fi
diff --git a/services/api-server/docker/healthcheck.py b/services/api-server/docker/healthcheck.py
index 808782f32617..66ba806d0dbb 100755
--- a/services/api-server/docker/healthcheck.py
+++ b/services/api-server/docker/healthcheck.py
@@ -18,18 +18,49 @@
"""
import os
+import subprocess
import sys
from urllib.request import urlopen
+from simcore_service_api_server.core.settings import ApplicationSettings
+
SUCCESS, UNHEALTHY = 0, 1
# Disabled if the service boots with the debugger
ok = os.environ.get("SC_BOOT_MODE", "").lower() == "debug"
+app_settings = ApplicationSettings.create_from_envs()
+
+
+def _is_celery_worker_healthy():
+ assert app_settings.API_SERVER_CELERY
+ broker_url = app_settings.API_SERVER_CELERY.CELERY_RABBIT_BROKER.dsn
+
+ try:
+ result = subprocess.run(
+ [
+ "celery",
+ "--broker",
+ broker_url,
+ "inspect",
+ "ping",
+ "--destination",
+ "celery@" + os.getenv("API_SERVER_WORKER_NAME", "worker"),
+ ],
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ return "pong" in result.stdout
+ except subprocess.CalledProcessError:
+ return False
+
+
# Queries host
# pylint: disable=consider-using-with
ok = (
ok
+ or (app_settings.API_SERVER_WORKER_MODE and _is_celery_worker_healthy())
or urlopen(
"{host}{baseurl}".format(
host=sys.argv[1], baseurl=os.environ.get("SIMCORE_NODE_BASEPATH", "")
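
The healthcheck shells out to `celery inspect ping` against the worker's own node name. An equivalent in-process variant using Celery's control API is sketched below; the broker URL and worker name are placeholders and the timeout is an assumption:

```python
from celery import Celery


def is_worker_healthy(broker_url: str, worker_name: str) -> bool:
    app = Celery(broker=broker_url)
    # replies look like {'celery@<worker_name>': {'ok': 'pong'}},
    # or None when no worker answered within the timeout
    replies = app.control.inspect(
        destination=[f"celery@{worker_name}"], timeout=5.0
    ).ping()
    return bool(replies)


if __name__ == "__main__":
    print(is_worker_healthy("amqp://admin:adminadmin@localhost:5672//", "worker"))
```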
diff --git a/services/api-server/docs/api-server.drawio.svg b/services/api-server/docs/api-server.drawio.svg
index 98f7dcfdfc6e..8024640c9508 100644
--- a/services/api-server/docs/api-server.drawio.svg
+++ b/services/api-server/docs/api-server.drawio.svg
@@ -1,19 +1,43 @@
-